package minify
import (
"bytes"
"encoding/base64"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/strconv"
)
// Byte-slice constants reused by DataURI to avoid repeated allocations.
var (
	textMimeBytes     = []byte("text/plain")
	charsetASCIIBytes = []byte("charset=us-ascii")
	dataBytes         = []byte("data:")
	base64Bytes       = []byte(";base64")
)

// Epsilon is the closest number to zero that is not considered to be zero.
var Epsilon = 0.00001
// Mediatype minifies a given mediatype by removing all whitespace and lowercasing all parts except strings (which may be case sensitive).
// The input is modified in place and the returned slice aliases b.
func Mediatype(b []byte) []byte {
	j := 0            // write head: length of the compacted prefix already in place
	inString := false // inside a double-quoted string; its contents are kept verbatim
	// start: begin of the current run not yet copied forward
	// lastString: write offset just past the last closing quote (lowercasing resumes there)
	start, lastString := 0, 0
	for i, c := range b {
		if !inString && parse.IsWhitespace(c) {
			// whitespace outside strings is dropped: copy the preceding run forward
			if start != 0 {
				j += copy(b[j:], b[start:i])
			} else {
				// nothing removed yet, the prefix is already where it belongs
				j += i
			}
			start = i + 1
		} else if c == '"' {
			inString = !inString
			if inString {
				if i-lastString < 1024 { // ToLower may otherwise slow down minification greatly
					parse.ToLower(b[lastString:i])
				}
			} else {
				// record the write offset right after the closing quote
				lastString = j + (i + 1 - start)
			}
		}
	}
	if start != 0 {
		// flush the trailing run and lowercase everything after the last string
		j += copy(b[j:], b[start:])
		parse.ToLower(b[lastString:j])
		return b[:j]
	}
	// no whitespace was removed; only lowercase the tail
	parse.ToLower(b[lastString:])
	return b
}
// DataURI minifies a data URI and calls a minifier by the specified mediatype. Specifications: https://www.ietf.org/rfc/rfc2397.txt.
// It picks whichever of base64 or percent-encoded ASCII is shorter, and falls
// back to the original URI when minification would make it larger.
func DataURI(m *M, dataURI []byte) []byte {
	// keep a copy: parse.DataURI and the minifier mutate/replace the buffer
	origData := parse.Copy(dataURI)
	mediatype, data, err := parse.DataURI(dataURI)
	if err != nil {
		return dataURI
	}

	// minify the payload using the registered minifier for its mediatype;
	// on error the data is passed through unmodified
	data, _ = m.Bytes(string(mediatype), data)

	// compute the encoded length for base64 versus percent-encoded ASCII
	base64Len := len(";base64") + base64.StdEncoding.EncodedLen(len(data))
	asciiLen := len(data)
	for _, c := range data {
		if parse.DataURIEncodingTable[c] {
			asciiLen += 2 // this byte needs %XX escaping
		}
		if asciiLen > base64Len {
			break // base64 already wins, stop counting
		}
	}
	if len(origData) < base64Len && len(origData) < asciiLen {
		// minification did not pay off in either encoding
		return origData
	}
	if base64Len < asciiLen {
		encoded := make([]byte, base64Len-len(";base64"))
		base64.StdEncoding.Encode(encoded, data)
		data = encoded
		mediatype = append(mediatype, base64Bytes...)
	} else {
		data = parse.EncodeURL(data, parse.DataURIEncodingTable)
	}
	// text/plain is the default mediatype and can be omitted
	if len("text/plain") <= len(mediatype) && parse.EqualFold(mediatype[:len("text/plain")], textMimeBytes) {
		mediatype = mediatype[len("text/plain"):]
	}
	// drop a redundant ;charset=us-ascii parameter anywhere in the mediatype
	for i := 0; i+len(";charset=us-ascii") <= len(mediatype); i++ {
		// must start with semicolon and be followed by end of mediatype or semicolon
		if mediatype[i] == ';' && parse.EqualFold(mediatype[i+1:i+len(";charset=us-ascii")], charsetASCIIBytes) && (i+len(";charset=us-ascii") >= len(mediatype) || mediatype[i+len(";charset=us-ascii")] == ';') {
			mediatype = append(mediatype[:i], mediatype[i+len(";charset=us-ascii"):]...)
			break
		}
	}
	return append(append(append(dataBytes, mediatype...), ','), data...)
}
// MaxInt is the maximum value of int (platform dependent: 32- or 64-bit).
const MaxInt = int(^uint(0) >> 1)

// MinInt is the minimum value of int.
const MinInt = -MaxInt - 1
// Decimal minifies a given byte slice containing a decimal and removes superfluous characters. It differs from Number in that it does not parse exponents.
// It does not parse or output exponents. prec is the number of significant digits. When prec is zero it will keep all digits. Only digits after the dot can be removed to reach the number of significant digits. Very large number may thus have more significant digits.
// The input is modified in place and the returned slice aliases num.
func Decimal(num []byte, prec int) []byte {
	if len(num) <= 1 {
		return num
	}

	// omit first + and register mantissa start and end, whether it's negative and the exponent
	neg := false
	start := 0
	dot := -1
	end := len(num)
	if 0 < end && (num[0] == '+' || num[0] == '-') {
		if num[0] == '-' {
			neg = true
		}
		start++
	}
	for i, c := range num[start:] {
		if c == '.' {
			dot = start + i
			break
		}
	}
	if dot == -1 {
		// no dot present: treat the whole mantissa as the integer part
		dot = end
	}

	// trim leading zeros but leave at least one digit
	for start < end-1 && num[start] == '0' {
		start++
	}
	// trim trailing zeros
	i := end - 1
	for ; dot < i; i-- {
		if num[i] != '0' {
			end = i + 1
			break
		}
	}
	if i == dot {
		// everything after the dot was zeros; drop the dot too
		end = dot
		if start == end {
			num[start] = '0'
			return num[start : start+1]
		}
	} else if start == end-1 && num[start] == '0' {
		// the value is exactly zero
		return num[start:end]
	}

	// apply precision
	if 0 < prec && dot <= start+prec {
		precEnd := start + prec + 1 // include dot
		if dot == start {           // for numbers like .012
			digit := start + 1
			for digit < end && num[digit] == '0' {
				digit++
			}
			precEnd = digit + prec
		}
		if precEnd < end {
			end = precEnd

			// process either an increase from a lesser significant decimal (>= 5)
			// or remove trailing zeros after the dot, or both
			i := end - 1
			inc := '5' <= num[end] // whether the truncated digit forces rounding up
			for ; start < i; i-- {
				if i == dot {
					// no-op
				} else if inc && num[i] != '9' {
					num[i]++
					inc = false
					break
				} else if inc && i < dot { // end inc for integer
					num[i] = '0'
				} else if !inc && (i < dot || num[i] != '0') {
					break
				}
			}
			if i < dot {
				end = dot
			} else {
				end = i + 1
			}

			if inc {
				// the carry propagated all the way to the first digit
				if dot == start && end == start+1 {
					num[start] = '1'
				} else if num[start] == '9' {
					num[start] = '1'
					num[start+1] = '0'
					end++
				} else {
					num[start]++
				}
			}
		}
	}

	if neg {
		// reuse the byte in front of the mantissa for the minus sign
		start--
		num[start] = '-'
	}
	return num[start:end]
}
// Number minifies a given byte slice containing a number and removes superfluous characters.
// It parses an optional sign, mantissa with optional dot, and optional exponent,
// trims zeros, applies prec significant digits (0 keeps all), and then prints the
// shortest of several representations (with or without dot/exponent).
// The input is modified in place and the returned slice aliases num.
func Number(num []byte, prec int) []byte {
	if len(num) <= 1 {
		return num
	}

	// omit first + and register mantissa start and end, whether it's negative and the exponent
	neg := false
	start := 0
	dot := -1
	end := len(num)
	origExp := 0
	if num[0] == '+' || num[0] == '-' {
		if num[0] == '-' {
			neg = true
		}
		start++
	}
	for i, c := range num[start:] {
		if c == '.' {
			dot = start + i
		} else if c == 'e' || c == 'E' {
			// mantissa ends here; parse the exponent that follows
			end = start + i
			i += start + 1
			if i < len(num) && num[i] == '+' {
				i++
			}
			if tmpOrigExp, n := strconv.ParseInt(num[i:]); 0 < n && int64(MinInt) <= tmpOrigExp && tmpOrigExp <= int64(MaxInt) {
				// range checks for when int is 32 bit
				origExp = int(tmpOrigExp)
			} else {
				// malformed or out-of-range exponent: leave input untouched
				return num
			}
			break
		}
	}
	if dot == -1 {
		dot = end
	}

	// trim leading zeros but leave at least one digit
	for start < end-1 && num[start] == '0' {
		start++
	}
	// trim trailing zeros
	i := end - 1
	for ; dot < i; i-- {
		if num[i] != '0' {
			end = i + 1
			break
		}
	}
	if i == dot {
		// all digits after the dot were zeros; drop the dot
		end = dot
		if start == end {
			num[start] = '0'
			return num[start : start+1]
		}
	} else if start == end-1 && num[start] == '0' {
		// the value is exactly zero
		return num[start:end]
	}

	// apply precision
	if 0 < prec { //&& (dot <= start+prec || start+prec+1 < dot || 0 < origExp) { // don't minify 9 to 10, but do 999 to 1e3 and 99e1 to 1e3
		precEnd := start + prec
		if dot == start { // for numbers like .012
			digit := start + 1
			for digit < end && num[digit] == '0' {
				digit++
			}
			precEnd = digit + prec
		} else if dot < precEnd { // for numbers where precision will include the dot
			precEnd++
		}
		if precEnd < end && (dot < end || 1 < dot-precEnd+origExp) { // do not minify 9=>10 or 99=>100 or 9e1=>1e2 (but 90), but 999=>1e3 and 99e1=>1e3
			end = precEnd
			inc := '5' <= num[end] // whether the first truncated digit forces rounding up
			if dot == end {
				// the truncated part starts right at the dot; look one past it
				inc = end+1 < len(num) && '5' <= num[end+1]
			}
			if precEnd < dot {
				// truncating integer digits: compensate via the exponent
				origExp += dot - precEnd
				dot = precEnd
			}

			// process either an increase from a lesser significant decimal (>= 5)
			// and remove trailing zeros
			i := end - 1
			for ; start < i; i-- {
				if i == dot {
					// no-op
				} else if inc && num[i] != '9' {
					num[i]++
					inc = false
					break
				} else if !inc && num[i] != '0' {
					break
				}
			}
			end = i + 1
			if end < dot {
				origExp += dot - end
				dot = end
			}

			if inc { // single digit left
				if dot == start {
					num[start] = '1'
					dot = start + 1
				} else if num[start] == '9' {
					num[start] = '1'
					origExp++
				} else {
					num[start]++
				}
			}
		}
	}

	// n is the number of significant digits
	// normExp would be the exponent if it were normalised (0.1 <= f < 1)
	n := 0
	normExp := 0
	if dot == start {
		// form .0...0ddd: significant digits start after the leading zeros
		for i = dot + 1; i < end; i++ {
			if num[i] != '0' {
				n = end - i
				normExp = dot - i + 1
				break
			}
		}
	} else if dot == end {
		// integer form: trailing zeros are not significant
		normExp = end - start
		for i = end - 1; start <= i; i-- {
			if num[i] != '0' {
				n = i + 1 - start
				end = i + 1
				break
			}
		}
	} else {
		// form d.d: every digit except the dot is significant
		n = end - start - 1
		normExp = dot - start
	}

	if origExp < 0 && (normExp < MinInt-origExp || normExp-n < MinInt-origExp) || 0 < origExp && (MaxInt-origExp < normExp || MaxInt-origExp < normExp-n) {
		return num // exponent overflow
	}
	normExp += origExp

	// intExp would be the exponent if it were an integer
	intExp := normExp - n
	lenIntExp := strconv.LenInt(int64(intExp))
	lenNormExp := strconv.LenInt(int64(normExp))

	// there are three cases to consider when printing the number
	// case 1: without decimals and with a positive exponent (large numbers: 5e4)
	// case 2: with decimals and with a negative exponent (small numbers with many digits: .123456e-4)
	// case 3: with decimals and without an exponent (around zero: 5.6)
	// case 4: without decimals and with a negative exponent (small numbers: 123456e-9)
	if n <= normExp {
		// case 1: print number with positive exponent
		if dot < end {
			// remove dot, either from the front or copy the smallest part
			if dot == start {
				start = end - n
			} else if dot-start < end-dot-1 {
				copy(num[start+1:], num[start:dot])
				start++
			} else {
				copy(num[dot:], num[dot+1:end])
				end--
			}
		}

		if n+3 <= normExp {
			// exponent form is shorter than writing the zeros out
			num[end] = 'e'
			end++
			for i := end + lenIntExp - 1; end <= i; i-- {
				num[i] = byte(intExp%10) + '0'
				intExp /= 10
			}
			end += lenIntExp
		} else if n+2 == normExp {
			num[end] = '0'
			num[end+1] = '0'
			end += 2
		} else if n+1 == normExp {
			num[end] = '0'
			end++
		}
	} else if normExp < -3 && lenNormExp < lenIntExp && dot < end {
		// case 2: print normalized number (0.1 <= f < 1)
		zeroes := -normExp + origExp
		if 0 < zeroes {
			// remove the leading zeros between the dot and the first digit
			copy(num[start+1:], num[start+1+zeroes:end])
			end -= zeroes
		} else if zeroes < 0 {
			// insert the dot in front of the first digit
			copy(num[start+1:], num[start:dot])
			num[start] = '.'
		}

		num[end] = 'e'
		num[end+1] = '-'
		end += 2
		for i := end + lenNormExp - 1; end <= i; i-- {
			num[i] = -byte(normExp%10) + '0'
			normExp /= 10
		}
		end += lenNormExp
	} else if -lenIntExp-1 <= normExp {
		// case 3: print number without exponent
		zeroes := -normExp
		if 0 < zeroes {
			// dot placed at the front and negative exponent, adding zeroes
			newDot := end - n - zeroes - 1
			if newDot != dot {
				d := start - newDot
				if 0 < d {
					if dot < end {
						// copy original digits after the dot towards the end
						copy(num[dot+1+d:], num[dot+1:end])
						if start < dot {
							// copy original digits before the dot towards the end
							copy(num[start+d+1:], num[start:dot])
						}
					} else if start < dot {
						// copy original digits before the dot towards the end
						copy(num[start+d:], num[start:dot])
					}
					newDot = start
					end += d
				} else {
					start += -d
				}
				num[newDot] = '.'
				for i := 0; i < zeroes; i++ {
					num[newDot+1+i] = '0'
				}
			}
		} else {
			// dot placed in the middle of the number
			if dot == start {
				// when there are zeroes after the dot
				dot = end - n - 1
				start = dot
			} else if end <= dot {
				// when input has no dot in it
				dot = end
				end++
			}
			newDot := start + normExp

			// move digits between dot and newDot towards the end
			if dot < newDot {
				copy(num[dot:], num[dot+1:newDot+1])
			} else if newDot < dot {
				copy(num[newDot+1:], num[newDot:dot])
			}
			num[newDot] = '.'
		}
	} else {
		// case 4: print number with negative exponent
		// find new end, considering moving numbers to the front, removing the dot and increasing the length of the exponent
		newEnd := end
		if dot == start {
			newEnd = start + n
		} else {
			newEnd--
		}
		newEnd += 2 + lenIntExp

		exp := intExp
		lenExp := lenIntExp
		if newEnd < len(num) {
			// it saves space to convert the decimal to an integer and decrease the exponent
			if dot < end {
				if dot == start {
					copy(num[start:], num[end-n:end])
					end = start + n
				} else {
					copy(num[dot:], num[dot+1:end])
					end--
				}
			}
		} else {
			// it does not save space and will panic, so we revert to the original representation
			exp = origExp
			lenExp = 1
			if origExp <= -10 || 10 <= origExp {
				lenExp = strconv.LenInt(int64(origExp))
			}
		}

		num[end] = 'e'
		num[end+1] = '-'
		end += 2
		for i := end + lenExp - 1; end <= i; i-- {
			num[i] = -byte(exp%10) + '0'
			exp /= 10
		}
		end += lenExp
	}

	if neg {
		// reuse the byte in front of the mantissa for the minus sign
		start--
		num[start] = '-'
	}
	return num[start:end]
}
// UpdateErrorPosition shifts the line and column of a parse error by the
// position of offset within input, so the reported location refers to the
// surrounding document rather than the parsed fragment. Any other error is
// returned unchanged.
func UpdateErrorPosition(err error, input *parse.Input, offset int) error {
	perr, ok := err.(*parse.Error)
	if !ok {
		return err
	}
	line, column, _ := parse.Position(bytes.NewBuffer(input.Bytes()), offset)
	perr.Line += line - 1
	perr.Column += column - 1
	return perr
}
// Package css minifies CSS3 following the specifications at http://www.w3.org/TR/css-syntax-3/.
package css
import (
"bytes"
"fmt"
"io"
"math"
"sort"
"strconv"
"strings"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/css"
strconvParse "github.com/tdewolff/parse/v2/strconv"
)
// Byte-slice constants for frequently written output fragments and
// frequently compared values, predeclared to avoid per-write allocations.
var (
	spaceBytes        = []byte(" ")
	colonBytes        = []byte(":")
	semicolonBytes    = []byte(";")
	commaBytes        = []byte(",")
	leftBracketBytes  = []byte("{")
	rightBracketBytes = []byte("}")
	rightParenBytes   = []byte(")")
	urlBytes          = []byte("url(")
	varBytes          = []byte("var(")
	zeroBytes         = []byte("0")
	oneBytes          = []byte("1")
	transparentBytes  = []byte("transparent")
	blackBytes        = []byte("#0000")
	initialBytes      = []byte("initial")
	noneBytes         = []byte("none")
	autoBytes         = []byte("auto")
	leftBytes         = []byte("left")
	topBytes          = []byte("top")
	n400Bytes         = []byte("400")
	n700Bytes         = []byte("700")
	n50pBytes         = []byte("50%")
	n100pBytes        = []byte("100%")
	repeatXBytes      = []byte("repeat-x")
	repeatYBytes      = []byte("repeat-y")
	importantBytes    = []byte("!important")
	dataSchemeBytes   = []byte("data:")
)
// cssMinifier holds the state of a single minification run.
type cssMinifier struct {
	m *minify.M   // minifier collection, used for embedded resources (e.g. data URIs)
	w io.Writer   // output destination
	p *css.Parser // grammar/token source
	o *Minifier   // options (a private copy, see Minify)

	tokenBuffer []Token // reusable scratch buffer for parseDeclaration
	tokensLevel int     // recursion depth guard used by minifyTokens
}
////////////////////////////////////////////////////////////////
// Minifier is a CSS minifier.
type Minifier struct {
	KeepCSS2     bool // when set, numbers are minified without exponents (see minifyTokens)
	Precision    int  // number of significant digits
	newPrecision int  // precision for new numbers; derived from Precision in Minify
	Inline       bool // minify an inline style attribute value instead of a full stylesheet
}
// Minify minifies CSS data, it reads from r and writes to w.
// It is a convenience wrapper around a zero-value Minifier.
func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	return (&Minifier{}).Minify(m, w, r, params)
}
// Token is a parsed token with extra information for functions.
type Token struct {
	css.TokenType
	Data       []byte  // raw token bytes; for functions this is the name plus '('
	Args       []Token // only filled for functions
	Fun, Ident Hash    // only filled for functions and identifiers respectively
}
// String returns a textual representation of the token. Tokens without
// arguments are shown as "Type(data)"; function tokens are shown as their
// raw data followed by each argument's representation and a closing paren.
func (t Token) String() string {
	if len(t.Args) == 0 {
		return t.TokenType.String() + "(" + string(t.Data) + ")"
	}
	parts := make([]string, 0, len(t.Args)+2)
	parts = append(parts, string(t.Data))
	for _, arg := range t.Args {
		parts = append(parts, arg.String())
	}
	parts = append(parts, ")")
	return strings.Join(parts, "")
}
// Equal returns true if both tokens are equal, comparing type, raw data,
// and (recursively) all function arguments.
func (t Token) Equal(t2 Token) bool {
	if t.TokenType != t2.TokenType || !bytes.Equal(t.Data, t2.Data) || len(t.Args) != len(t2.Args) {
		return false
	}
	for i, arg := range t.Args {
		if !arg.Equal(t2.Args[i]) {
			return false
		}
	}
	return true
}
// IsZero return true if a dimension, percentage, or number token is zero.
func (t Token) IsZero() bool {
	// as each number is already minified, starting with a zero means it is zero
	switch t.TokenType {
	case css.DimensionToken, css.PercentageToken, css.NumberToken:
		return t.Data[0] == '0'
	default:
		return false
	}
}
// IsLength returns true if the token is a length: a dimension, the literal
// zero, or a calc-like function that may evaluate to a length.
func (t Token) IsLength() bool {
	switch t.TokenType {
	case css.DimensionToken:
		return true
	case css.NumberToken:
		return t.Data[0] == '0'
	case css.FunctionToken:
		switch ToHash(t.Data[:len(t.Data)-1]) {
		case Calc, Min, Max, Clamp, Attr, Var, Env:
			return true
		}
	}
	return false
}
// IsLengthPercentage returns true if the token is a length or percentage token.
func (t Token) IsLengthPercentage() bool {
	if t.TokenType == css.PercentageToken {
		return true
	}
	return t.IsLength()
}
////////////////////////////////////////////////////////////////
// Minify minifies CSS data, it reads from r and writes to w.
// Returns nil on a clean EOF; otherwise the parser error is returned.
func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	// work on a private copy so option adjustments below do not leak to the caller
	tmp := &Minifier{}
	*tmp = *o
	o = tmp

	o.newPrecision = o.Precision
	if o.newPrecision <= 0 || 15 < o.newPrecision {
		o.newPrecision = 15 // minimum number of digits a double can represent exactly
	}

	// the "inline" param can also request inline-style parsing
	if !o.Inline {
		o.Inline = params != nil && params["inline"] == "1"
	}

	z := parse.NewInput(r)
	defer z.Restore()

	c := &cssMinifier{
		m: m,
		w: w,
		p: css.NewParser(z, o.Inline),
		o: o,
	}
	c.minifyGrammar()

	// surface any pending writer error with an empty write
	if _, err := w.Write(nil); err != nil {
		return err
	}
	if c.p.Err() == io.EOF {
		return nil
	}
	return c.p.Err()
}
// minifyGrammar drives the parser and writes minified output per grammar unit.
// Semicolons are queued rather than written immediately so that the final one
// before a closing brace (or EOF) can be omitted.
func (c *cssMinifier) minifyGrammar() {
	semicolonQueued := false
	for {
		gt, _, data := c.p.Next()
		switch gt {
		case css.ErrorGrammar:
			if c.p.HasParseError() {
				if semicolonQueued {
					c.w.Write(semicolonBytes)
				}

				// write out the offending declaration (but save the semicolon)
				vals := c.p.Values()
				if len(vals) > 0 && vals[len(vals)-1].TokenType == css.SemicolonToken {
					vals = vals[:len(vals)-1]
					semicolonQueued = true
				}
				for _, val := range vals {
					c.w.Write(val.Data)
				}
				continue
			}
			// non-recoverable error or EOF: stop; caller inspects c.p.Err()
			return
		case css.EndAtRuleGrammar, css.EndRulesetGrammar:
			c.w.Write(rightBracketBytes)
			semicolonQueued = false
			continue
		}

		// flush the semicolon queued by the previous declaration/at-rule
		if semicolonQueued {
			c.w.Write(semicolonBytes)
			semicolonQueued = false
		}

		switch gt {
		case css.AtRuleGrammar:
			c.w.Write(data)
			values := c.p.Values()
			// rewrite @import url(...) to the shorter quoted form
			if ToHash(data[1:]) == Import && len(values) == 2 && values[1].TokenType == css.URLToken && 4 < len(values[1].Data) && values[1].Data[len(values[1].Data)-1] == ')' {
				url := values[1].Data
				if url[4] != '"' && url[4] != '\'' {
					// unquoted url: trim surrounding whitespace/newlines and add quotes in place
					a := 4
					for parse.IsWhitespace(url[a]) || parse.IsNewline(url[a]) {
						a++
					}
					b := len(url) - 2
					for a < b && (parse.IsWhitespace(url[b]) || parse.IsNewline(url[b])) {
						b--
					}
					if a == b {
						url = url[:2]
					} else {
						url = url[a-1 : b+2]
					}
					url[0] = '"'
					url[len(url)-1] = '"'
				} else {
					// already quoted: strip the url( and ) wrappers
					url = url[4 : len(url)-1]
				}
				values[1].Data = url
			}
			for _, val := range values {
				c.w.Write(val.Data)
			}
			semicolonQueued = true
		case css.BeginAtRuleGrammar:
			c.w.Write(data)
			for _, val := range c.p.Values() {
				c.w.Write(val.Data)
			}
			c.w.Write(leftBracketBytes)
		case css.QualifiedRuleGrammar:
			// a selector in a comma-separated selector list
			c.minifySelectors(data, c.p.Values())
			c.w.Write(commaBytes)
		case css.BeginRulesetGrammar:
			c.minifySelectors(data, c.p.Values())
			c.w.Write(leftBracketBytes)
		case css.DeclarationGrammar:
			c.minifyDeclaration(data, c.p.Values())
			semicolonQueued = true
		case css.CustomPropertyGrammar:
			c.w.Write(data)
			c.w.Write(colonBytes)
			value := parse.TrimWhitespace(c.p.Values()[0].Data)
			// a custom property that was whitespace-only must keep one space to stay valid
			if len(c.p.Values()[0].Data) != 0 && len(value) == 0 {
				value = spaceBytes
			}
			c.w.Write(value)
			semicolonQueued = true
		case css.CommentGrammar:
			if 5 < len(data) && data[1] == '*' && data[2] == '!' {
				// keep /*! ... */ license comments, with collapsed whitespace
				c.w.Write(data[:3])
				comment := parse.TrimWhitespace(parse.ReplaceMultipleWhitespace(data[3 : len(data)-2]))
				c.w.Write(comment)
				c.w.Write(data[len(data)-2:])
			} else if 5 < len(data) && (data[2] == '#' || data[2] == '@') {
				c.w.Write(data) // sourceMappingURL
			}
		default:
			c.w.Write(data)
		}
	}
}
// minifySelectors writes a minified selector: identifiers outside attribute
// selectors are lowercased (except class names, which are case sensitive), and
// quoted attribute values are unquoted when they are valid identifiers.
// NOTE(review): iterates c.p.Values() rather than the values parameter — they
// appear to carry the same token list; confirm against the parser contract.
func (c *cssMinifier) minifySelectors(property []byte, values []css.Token) {
	inAttr := false  // inside [...] attribute selector
	isClass := false // previous token was '.', so the next ident is a class name
	for _, val := range c.p.Values() {
		if !inAttr {
			if val.TokenType == css.IdentToken {
				if !isClass {
					parse.ToLower(val.Data)
				}
				isClass = false
			} else if val.TokenType == css.DelimToken && val.Data[0] == '.' {
				isClass = true
			} else if val.TokenType == css.LeftBracketToken {
				inAttr = true
			}
		} else {
			if val.TokenType == css.StringToken && len(val.Data) > 2 {
				// drop quotes around attribute values that are valid identifiers
				s := val.Data[1 : len(val.Data)-1]
				if css.IsIdent(s) {
					c.w.Write(s)
					continue
				}
			} else if val.TokenType == css.RightBracketToken {
				inAttr = false
			} else if val.TokenType == css.IdentToken && len(val.Data) == 1 && (val.Data[0] == 'i' || val.Data[0] == 'I') {
				// the case-insensitivity flag needs a separating space
				c.w.Write(spaceBytes)
			}
		}
		c.w.Write(val.Data)
	}
}
// parseFunction converts the raw tokens of a function call (values[0] is the
// FunctionToken) into a flat []Token argument list, recursing into nested
// function calls. It returns the arguments and the number of raw tokens
// consumed, including the closing parenthesis.
func (c *cssMinifier) parseFunction(values []css.Token) ([]Token, int) {
	i := 1
	level := 0 // nesting depth of bare parentheses (not function calls)
	args := []Token{}
	for ; i < len(values); i++ {
		tt := values[i].TokenType
		data := values[i].Data
		if tt == css.LeftParenthesisToken {
			level++
		} else if tt == css.RightParenthesisToken {
			if level == 0 {
				// the closing paren of this function call
				i++
				break
			}
			level--
		}

		if tt == css.FunctionToken {
			subArgs, di := c.parseFunction(values[i:])
			h := ToHash(parse.ToLower(parse.Copy(data[:len(data)-1]))) // TODO: use ToHashFold
			args = append(args, Token{tt, data, subArgs, h, 0})
			i += di - 1
		} else {
			var h Hash
			if tt == css.IdentToken {
				h = ToHash(parse.ToLower(parse.Copy(data))) // TODO: use ToHashFold
			}
			args = append(args, Token{tt, data, nil, 0, h})
		}
	}
	return args, i
}
// parseDeclaration converts a declaration's raw token list into []Token for
// further processing. It returns nil when the value is too complex to process:
// it contains blocks/brackets, or values are not alternated by separators
// (whitespace, comma, or '/'). Whitespace separators themselves are dropped.
func (c *cssMinifier) parseDeclaration(values []css.Token) []Token {
	// Check if this is a simple list of values separated by whitespace or commas, otherwise we'll not be processing
	prevSep := true
	tokens := c.tokenBuffer[:0] // reuse the scratch buffer across declarations
	for i := 0; i < len(values); i++ {
		tt := values[i].TokenType
		data := values[i].Data
		if tt == css.LeftParenthesisToken || tt == css.LeftBraceToken || tt == css.LeftBracketToken ||
			tt == css.RightParenthesisToken || tt == css.RightBraceToken || tt == css.RightBracketToken {
			return nil
		}

		// two adjacent non-separator tokens means this is not a simple list
		if !prevSep && tt != css.WhitespaceToken && tt != css.CommaToken && (tt != css.DelimToken || values[i].Data[0] != '/') {
			return nil
		}

		if tt == css.WhitespaceToken || tt == css.CommaToken || tt == css.DelimToken && values[i].Data[0] == '/' {
			if tt != css.WhitespaceToken {
				tokens = append(tokens, Token{tt, data, nil, 0, 0})
			}
			prevSep = true
		} else if tt == css.FunctionToken {
			args, di := c.parseFunction(values[i:])
			h := ToHash(parse.ToLower(parse.Copy(data[:len(data)-1]))) // TODO: use ToHashFold
			tokens = append(tokens, Token{tt, data, args, h, 0})
			prevSep = true
			i += di - 1
		} else {
			var h Hash
			if tt == css.IdentToken {
				h = ToHash(parse.ToLower(parse.Copy(data))) // TODO: use ToHashFold
			}
			tokens = append(tokens, Token{tt, data, nil, 0, h})
			prevSep = tt == css.URLToken
		}
	}
	c.tokenBuffer = tokens // update buffer size for memory reuse
	return tokens
}
// minifyDeclaration writes "property:value" with the value minified per-token
// and per-property. Complex values that parseDeclaration cannot handle are
// written through verbatim (with one special-case rewrite for the legacy IE
// progid alpha filter). A trailing !important is stripped first and re-appended.
func (c *cssMinifier) minifyDeclaration(property []byte, components []css.Token) {
	c.w.Write(property)
	c.w.Write(colonBytes)

	if len(components) == 0 {
		return
	}

	// Strip !important from the component list, this will be added later separately
	important := false
	if len(components) > 2 && components[len(components)-2].TokenType == css.DelimToken && components[len(components)-2].Data[0] == '!' && ToHash(components[len(components)-1].Data) == Important {
		components = components[:len(components)-2]
		important = true
	}

	prop := ToHash(property)
	values := c.parseDeclaration(components)

	// Do not process complex values (eg. containing blocks or is not alternated between whitespace/commas and flat values
	if values == nil {
		if prop == Filter && len(components) == 11 {
			// shorten progid:DXImageTransform.Microsoft.Alpha(opacity=x) to alpha(opacity=x)
			if bytes.Equal(components[0].Data, []byte("progid")) &&
				components[1].TokenType == css.ColonToken &&
				bytes.Equal(components[2].Data, []byte("DXImageTransform")) &&
				components[3].Data[0] == '.' &&
				bytes.Equal(components[4].Data, []byte("Microsoft")) &&
				components[5].Data[0] == '.' &&
				bytes.Equal(components[6].Data, []byte("Alpha(")) &&
				bytes.Equal(parse.ToLower(components[7].Data), []byte("opacity")) &&
				components[8].Data[0] == '=' &&
				components[10].Data[0] == ')' {
				components = components[6:]
				components[0].Data = []byte("alpha(")
			}
		}

		for _, component := range components {
			c.w.Write(component.Data)
		}
		if important {
			c.w.Write(importantBytes)
		}
		return
	}

	values = c.minifyTokens(prop, 0, values)
	if 0 < len(values) {
		values = c.minifyProperty(prop, values)
	}
	c.writeDeclaration(values, important)
}
// writeFunction writes a function's argument tokens, recursing into nested
// function tokens and emitting their closing parentheses.
func (c *cssMinifier) writeFunction(args []Token) {
	for i := range args {
		c.w.Write(args[i].Data)
		if args[i].TokenType == css.FunctionToken {
			c.writeFunction(args[i].Args)
			c.w.Write(rightParenBytes)
		}
	}
}
// writeDeclaration writes the minified value tokens, inserting a space between
// adjacent tokens only where no other separator (comma, '/', function close,
// url) already separates them, and appends !important when requested.
func (c *cssMinifier) writeDeclaration(values []Token, important bool) {
	prevSep := true
	for _, value := range values {
		isSlash := value.TokenType == css.DelimToken && value.Data[0] == '/'
		if !prevSep && value.TokenType != css.CommaToken && !isSlash {
			c.w.Write(spaceBytes)
		}

		c.w.Write(value.Data)
		if value.TokenType == css.FunctionToken {
			c.writeFunction(value.Args)
			c.w.Write(rightParenBytes)
		}

		prevSep = value.TokenType == css.CommaToken || isSlash ||
			value.TokenType == css.FunctionToken || value.TokenType == css.URLToken
	}
	if important {
		c.w.Write(importantBytes)
	}
}
// minifyTokens minifies individual value tokens: numbers/percentages/dimensions
// are shortened, strings and URLs cleaned, and rgb()/rgba()/hsl()/hsla() calls
// are converted to shorter color forms where possible. prop and fun give the
// enclosing property and function context. Recursion depth is capped at 100.
func (c *cssMinifier) minifyTokens(prop Hash, fun Hash, values []Token) []Token {
	if 100 < c.tokensLevel+1 {
		return values
	}
	c.tokensLevel++

	for i, value := range values {
		tt := value.TokenType
		switch tt {
		case css.NumberToken:
			if prop == Z_Index || prop == Counter_Increment || prop == Counter_Reset || prop == Orphans || prop == Widows {
				break // integers
			}
			if c.o.KeepCSS2 {
				values[i].Data = minify.Decimal(values[i].Data, c.o.Precision) // don't use exponents
			} else {
				values[i].Data = minify.Number(values[i].Data, c.o.Precision)
			}
		case css.PercentageToken:
			// minify the numeric part and re-append the percent sign
			n := len(values[i].Data) - 1
			if c.o.KeepCSS2 {
				values[i].Data = minify.Decimal(values[i].Data[:n], c.o.Precision) // don't use exponents
			} else {
				values[i].Data = minify.Number(values[i].Data[:n], c.o.Precision)
			}
			values[i].Data = append(values[i].Data, '%')
		case css.DimensionToken:
			var dim []byte
			values[i], dim = c.minifyDimension(values[i])
			if 1 < len(values[i].Data) && values[i].Data[0] == '0' && optionalZeroDimension[string(dim)] && prop != Flex && fun == 0 {
				// cut dimension for zero value, TODO: don't hardcode check for Flex and remove the dimension in minifyDimension
				values[i].Data = values[i].Data[:1]
			}
		case css.StringToken:
			values[i].Data = removeMarkupNewlines(values[i].Data)
		case css.URLToken:
			if 10 < len(values[i].Data) {
				uri := parse.TrimWhitespace(values[i].Data[4 : len(values[i].Data)-1])
				delim := byte('"')
				if 1 < len(uri) && (uri[0] == '\'' || uri[0] == '"') {
					delim = uri[0]
					uri = removeMarkupNewlines(uri)
					uri = uri[1 : len(uri)-1]
				}
				// minify embedded data URIs through the minifier collection
				if 4 < len(uri) && parse.EqualFold(uri[:5], dataSchemeBytes) {
					uri = minify.DataURI(c.m, uri)
				}
				if css.IsURLUnquoted(uri) {
					values[i].Data = append(append(urlBytes, uri...), ')')
				} else {
					values[i].Data = append(append(append(urlBytes, delim), uri...), delim, ')')
				}
			}
		case css.FunctionToken:
			values[i].Args = c.minifyTokens(prop, values[i].Fun, values[i].Args)

			fun := values[i].Fun
			args := values[i].Args
			if fun == Rgb || fun == Rgba || fun == Hsl || fun == Hsla {
				// parse the channel values; the argument list must strictly alternate
				// numeric values and separators to be considered valid
				valid := true
				vals := []float64{}
				for i, arg := range args {
					numeric := arg.TokenType == css.NumberToken || arg.TokenType == css.PercentageToken
					separator := arg.TokenType == css.CommaToken || i != 5 && arg.TokenType == css.WhitespaceToken || i == 5 && arg.TokenType == css.DelimToken && arg.Data[0] == '/'
					if i%2 == 0 && !numeric || i%2 == 1 && !separator {
						valid = false
						break
					} else if numeric {
						var d float64
						if arg.TokenType == css.PercentageToken {
							var err error
							d, err = strconv.ParseFloat(string(arg.Data[:len(arg.Data)-1]), 32) // can overflow
							if err != nil {
								valid = false
								break
							}
							d /= 100.0
							// clamp near-0 and near-1 to their exact values
							if d < minify.Epsilon {
								d = 0.0
							} else if 1.0-minify.Epsilon < d {
								d = 1.0
							}
						} else {
							var err error
							d, err = strconv.ParseFloat(string(arg.Data), 32) // can overflow
							if err != nil {
								valid = false
								break
							}
						}
						vals = append(vals, d)
					}
				}
				if !valid {
					break
				}

				a := 1.0 // alpha channel
				if len(vals) == 4 {
					if vals[0] < minify.Epsilon && vals[1] < minify.Epsilon && vals[2] < minify.Epsilon && vals[3] < minify.Epsilon {
						// fully transparent black: use the transparent keyword
						values[i] = Token{css.IdentToken, transparentBytes, nil, 0, Transparent}
						break
					} else if 1.0-minify.Epsilon < vals[3] {
						// fully opaque: drop the alpha argument and its separator
						vals = vals[:3]
						values[i].Args = values[i].Args[:len(values[i].Args)-2]
						if fun == Rgba || fun == Hsla {
							// rename rgba(/hsla( to rgb(/hsl(
							values[i].Data = values[i].Data[:len(values[i].Data)-1]
							values[i].Data[len(values[i].Data)-1] = '('
						}
					} else {
						a = vals[3]
					}
				}

				if a == 1.0 && (len(vals) == 3 || len(vals) == 4) { // only minify color if fully opaque
					if fun == Rgb || fun == Rgba {
						// normalize number channels to the 0..1 range
						for j := 0; j < 3; j++ {
							if args[j*2].TokenType == css.NumberToken {
								vals[j] /= 255.0
								if vals[j] < minify.Epsilon {
									vals[j] = 0.0
								} else if 1.0-minify.Epsilon < vals[j] {
									vals[j] = 1.0
								}
							}
						}
						values[i] = rgbToToken(vals[0], vals[1], vals[2])
						break
					} else if fun == Hsl || fun == Hsla && args[0].TokenType == css.NumberToken && args[2].TokenType == css.PercentageToken && args[4].TokenType == css.PercentageToken {
						// wrap hue into [0,1) turns and convert to an RGB token
						vals[0] /= 360.0
						_, vals[0] = math.Modf(vals[0])
						if vals[0] < 0.0 {
							vals[0] = 1.0 + vals[0]
						}
						r, g, b := css.HSL2RGB(vals[0], vals[1], vals[2])
						values[i] = rgbToToken(r, g, b)
						break
					}
				} else if len(vals) == 4 {
					args[6] = minifyNumberPercentage(args[6])
				}

				if 3 <= len(vals) && (fun == Rgb || fun == Rgba) {
					// 0%, 20%, 40%, 60%, 80% and 100% can be represented exactly as, 51, 102, 153, 204, and 255 respectively
					removePercentage := true
					for j := 0; j < 3; j++ {
						if args[j*2].TokenType != css.PercentageToken || 2.0*minify.Epsilon <= math.Mod(vals[j]+minify.Epsilon, 0.2) {
							removePercentage = false
							break
						}
					}
					if removePercentage {
						for j := 0; j < 3; j++ {
							args[j*2].TokenType = css.NumberToken
							if vals[j] < minify.Epsilon {
								args[j*2].Data = zeroBytes
							} else if math.Abs(vals[j]-0.2) < minify.Epsilon {
								args[j*2].Data = []byte("51")
							} else if math.Abs(vals[j]-0.4) < minify.Epsilon {
								args[j*2].Data = []byte("102")
							} else if math.Abs(vals[j]-0.6) < minify.Epsilon {
								args[j*2].Data = []byte("153")
							} else if math.Abs(vals[j]-0.8) < minify.Epsilon {
								args[j*2].Data = []byte("204")
							} else if math.Abs(vals[j]-1.0) < minify.Epsilon {
								args[j*2].Data = []byte("255")
							}
						}
					}
				}
			}
		}
	}
	c.tokensLevel--
	return values
}
func (c *cssMinifier) minifyProperty(prop Hash, values []Token) []Token {
// limit maximum to prevent slow recursions (e.g. for background's append)
if 100 < len(values) {
return values
}
switch prop {
case Font:
if len(values) > 1 { // must contain atleast font-size and font-family
// the font-families are separated by commas and are at the end of font
// get index for last token before font family names
i := len(values) - 1
for j, value := range values[2:] {
if value.TokenType == css.CommaToken {
i = 2 + j - 1 // identifier before first comma is a font-family
break
}
}
i--
// advance i while still at font-families when they contain spaces but no quotes
for ; i > 0; i-- { // i cannot be 0, font-family must be prepended by font-size
if values[i-1].TokenType == css.DelimToken && values[i-1].Data[0] == '/' {
break
} else if values[i].TokenType != css.IdentToken && values[i].TokenType != css.StringToken {
break
} else if h := values[i].Ident; h == Xx_Small || h == X_Small || h == Small || h == Medium || h == Large || h == X_Large || h == Xx_Large || h == Smaller || h == Larger || h == Inherit || h == Initial || h == Unset {
// inherit, initial and unset are followed by an IdentToken/StringToken, so must be for font-size
break
}
}
// font-family minified in place
values = append(values[:i+1], c.minifyProperty(Font_Family, values[i+1:])...)
// fix for IE9, IE10, IE11: font name starting with `-` is not recognized
if values[i+1].Data[0] == '-' {
v := make([]byte, len(values[i+1].Data)+2)
v[0] = '\''
copy(v[1:], values[i+1].Data)
v[len(v)-1] = '\''
values[i+1].Data = v
}
if i > 0 {
// line-height
if i > 1 && values[i-1].TokenType == css.DelimToken && values[i-1].Data[0] == '/' {
if values[i].Ident == Normal {
values = append(values[:i-1], values[i+1:]...)
}
i -= 2
}
// font-size
i--
for ; i > -1; i-- {
if values[i].Ident == Normal {
values = append(values[:i], values[i+1:]...)
} else if values[i].Ident == Bold {
values[i].TokenType = css.NumberToken
values[i].Data = n700Bytes
} else if values[i].TokenType == css.NumberToken && bytes.Equal(values[i].Data, n400Bytes) {
values = append(values[:i], values[i+1:]...)
}
}
}
}
case Font_Family:
for i, value := range values {
if value.TokenType == css.StringToken && 2 < len(value.Data) {
unquote := true
parse.ToLower(value.Data)
s := value.Data[1 : len(value.Data)-1]
if 0 < len(s) {
for _, split := range bytes.Split(s, spaceBytes) {
// if len is zero, it contains two consecutive spaces
if len(split) == 0 || !css.IsIdent(split) {
unquote = false
break
}
}
}
if unquote {
values[i].Data = s
}
}
}
case Font_Weight:
if values[0].Ident == Normal {
values[0].TokenType = css.NumberToken
values[0].Data = n400Bytes
} else if values[0].Ident == Bold {
values[0].TokenType = css.NumberToken
values[0].Data = n700Bytes
}
case Url:
for i := 0; i < len(values); i++ {
if values[i].TokenType == css.FunctionToken && len(values[i].Args) == 1 {
fun := values[i].Fun
data := values[i].Args[0].Data
if fun == Local && (data[0] == '\'' || data[0] == '"') {
if css.IsURLUnquoted(data[1 : len(data)-1]) {
data = data[1 : len(data)-1]
}
values[i].Args[0].Data = data
}
}
}
case Margin, Padding, Border_Width:
switch len(values) {
case 2:
if values[0].Equal(values[1]) {
values = values[:1]
}
case 3:
if values[0].Equal(values[1]) && values[0].Equal(values[2]) {
values = values[:1]
} else if values[0].Equal(values[2]) {
values = values[:2]
}
case 4:
if values[0].Equal(values[1]) && values[0].Equal(values[2]) && values[0].Equal(values[3]) {
values = values[:1]
} else if values[0].Equal(values[2]) && values[1].Equal(values[3]) {
values = values[:2]
} else if values[1].Equal(values[3]) {
values = values[:3]
}
}
case Border, Border_Bottom, Border_Left, Border_Right, Border_Top:
for i := 0; i < len(values); i++ {
if values[i].Ident == None || values[i].Ident == Currentcolor || values[i].Ident == Medium {
values = append(values[:i], values[i+1:]...)
i--
} else {
values[i] = minifyColor(values[i])
}
}
if len(values) == 0 {
values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
}
case Outline:
for i := 0; i < len(values); i++ {
if values[i].Ident == Invert || values[i].Ident == None || values[i].Ident == Medium {
values = append(values[:i], values[i+1:]...)
i--
} else {
values[i] = minifyColor(values[i])
}
}
if len(values) == 0 {
values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
}
case Background:
start := 0
for end := 0; end <= len(values); end++ { // loop over comma-separated lists
if end != len(values) && values[end].TokenType != css.CommaToken {
continue
} else if start == end {
start++
continue
}
// minify background-size and lowercase all identifiers
for i := start; i < end; i++ {
if values[i].TokenType == css.DelimToken && values[i].Data[0] == '/' {
// background-size consists of either [<length-percentage> | auto | cover | contain] or [<length-percentage> | auto]{2}
// we can only minify the latter
if i+1 < end && (values[i+1].TokenType == css.NumberToken || values[i+1].IsLengthPercentage() || values[i+1].Ident == Auto) {
if i+2 < end && (values[i+2].TokenType == css.NumberToken || values[i+2].IsLengthPercentage() || values[i+2].Ident == Auto) {
sizeValues := c.minifyProperty(Background_Size, values[i+1:i+3])
if len(sizeValues) == 1 && sizeValues[0].Ident == Auto {
// remove background-size if it is '/ auto' after minifying the property
values = append(values[:i], values[i+3:]...)
end -= 3
i--
} else {
values = append(values[:i+1], append(sizeValues, values[i+3:]...)...)
end -= 2 - len(sizeValues)
i += len(sizeValues) - 1
}
} else if values[i+1].Ident == Auto {
// remove background-size if it is '/ auto'
values = append(values[:i], values[i+2:]...)
end -= 2
i--
}
}
}
}
// minify all other values
iPaddingBox := -1 // position of background-origin that is padding-box
for i := start; i < end; i++ {
h := values[i].Ident
values[i] = minifyColor(values[i])
if values[i].TokenType == css.IdentToken {
if i+1 < end && values[i+1].TokenType == css.IdentToken && (h == Space || h == Round || h == Repeat || h == No_Repeat) {
if h2 := values[i+1].Ident; h2 == Space || h2 == Round || h2 == Repeat || h2 == No_Repeat {
repeatValues := c.minifyProperty(Background_Repeat, values[i:i+2])
if len(repeatValues) == 1 && repeatValues[0].Ident == Repeat {
values = append(values[:i], values[i+2:]...)
end -= 2
i--
} else {
values = append(values[:i], append(repeatValues, values[i+2:]...)...)
end -= 2 - len(repeatValues)
i += len(repeatValues) - 1
}
continue
}
} else if h == None || h == Scroll || h == Transparent {
values = append(values[:i], values[i+1:]...)
end--
i--
continue
} else if h == Border_Box || h == Padding_Box {
if iPaddingBox == -1 && h == Padding_Box { // background-origin
iPaddingBox = i
} else if iPaddingBox != -1 && h == Border_Box { // background-clip
values = append(values[:i], values[i+1:]...)
values = append(values[:iPaddingBox], values[iPaddingBox+1:]...)
end -= 2
i -= 2
}
continue
}
} else if values[i].TokenType == css.HashToken && bytes.Equal(values[i].Data, blackBytes) {
values = append(values[:i], values[i+1:]...)
end--
i--
continue
} else if values[i].TokenType == css.FunctionToken && bytes.Equal(values[i].Data, varBytes) {
continue
}
// further minify background-position and background-size combination
if values[i].TokenType == css.NumberToken || values[i].IsLengthPercentage() || h == Left || h == Right || h == Top || h == Bottom || h == Center {
j := i + 1
for ; j < len(values); j++ {
if h := values[j].Ident; h == Left || h == Right || h == Top || h == Bottom || h == Center {
continue
} else if values[j].TokenType == css.NumberToken || values[j].IsLengthPercentage() {
continue
}
break
}
positionValues := c.minifyProperty(Background_Position, values[i:j])
hasSize := j < len(values) && values[j].TokenType == css.DelimToken && values[j].Data[0] == '/'
if !hasSize && len(positionValues) == 2 && positionValues[0].IsZero() && positionValues[1].IsZero() {
if end-start == 2 {
values[i] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
values[i+1] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
i++
} else {
values = append(values[:i], values[j:]...)
end -= j - i
i--
}
} else {
if len(positionValues) == j-i {
for k, positionValue := range positionValues {
values[i+k] = positionValue
}
} else {
values = append(values[:i], append(positionValues, values[j:]...)...)
end -= j - i - len(positionValues)
}
i += len(positionValues) - 1
}
}
}
if end-start == 0 {
values = append(values[:start], append([]Token{{css.NumberToken, zeroBytes, nil, 0, 0}, {css.NumberToken, zeroBytes, nil, 0, 0}}, values[end:]...)...)
end += 2
}
start = end + 1
}
case Background_Size:
start := 0
for end := 0; end <= len(values); end++ { // loop over comma-separated lists
if end != len(values) && values[end].TokenType != css.CommaToken {
continue
} else if start == end {
start++
continue
}
if end-start == 2 && values[start+1].Ident == Auto {
values = append(values[:start+1], values[start+2:]...)
end--
}
start = end + 1
}
case Background_Repeat:
start := 0
for end := 0; end <= len(values); end++ { // loop over comma-separated lists
if end != len(values) && values[end].TokenType != css.CommaToken {
continue
} else if start == end {
start++
continue
}
if end-start == 2 && values[start].TokenType == css.IdentToken && values[start+1].TokenType == css.IdentToken {
if values[start].Ident == values[start+1].Ident {
values = append(values[:start+1], values[start+2:]...)
end--
} else if values[start].Ident == Repeat && values[start+1].Ident == No_Repeat {
values[start].Data = repeatXBytes
values[start].Ident = Repeat_X
values = append(values[:start+1], values[start+2:]...)
end--
} else if values[start].Ident == No_Repeat && values[start+1].Ident == Repeat {
values[start].Data = repeatYBytes
values[start].Ident = Repeat_Y
values = append(values[:start+1], values[start+2:]...)
end--
}
}
start = end + 1
}
case Background_Position:
start := 0
for end := 0; end <= len(values); end++ { // loop over comma-separated lists
if end != len(values) && values[end].TokenType != css.CommaToken {
continue
} else if start == end {
start++
continue
}
if end-start == 3 || end-start == 4 {
// remove zero offsets
for _, i := range []int{end - start - 1, start + 1} {
if 2 < end-start && values[i].IsZero() {
values = append(values[:i], values[i+1:]...)
end--
}
}
j := start + 1 // position of second set of horizontal/vertical values
if 2 < end-start && values[start+2].TokenType == css.IdentToken {
j = start + 2
}
b := make([]byte, 0, 4)
offsets := make([]Token, 2)
for _, i := range []int{j, start} {
if i+1 < end && i+1 != j {
if values[i+1].TokenType == css.PercentageToken {
// change right or bottom with percentage offset to left or top respectively
if values[i].Ident == Right || values[i].Ident == Bottom {
n, _ := strconvParse.ParseInt(values[i+1].Data[:len(values[i+1].Data)-1])
b = strconv.AppendInt(b[:0], 100-n, 10)
b = append(b, '%')
values[i+1].Data = b
if values[i].Ident == Right {
values[i].Data = leftBytes
values[i].Ident = Left
} else {
values[i].Data = topBytes
values[i].Ident = Top
}
}
}
if values[i].Ident == Left {
offsets[0] = values[i+1]
} else if values[i].Ident == Top {
offsets[1] = values[i+1]
}
} else if values[i].Ident == Left {
offsets[0] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
} else if values[i].Ident == Top {
offsets[1] = Token{css.NumberToken, zeroBytes, nil, 0, 0}
} else if values[i].Ident == Right {
offsets[0] = Token{css.PercentageToken, n100pBytes, nil, 0, 0}
values[i].Ident = Left
} else if values[i].Ident == Bottom {
offsets[1] = Token{css.PercentageToken, n100pBytes, nil, 0, 0}
values[i].Ident = Top
}
}
if values[start].Ident == Center || values[j].Ident == Center {
if values[start].Ident == Left || values[j].Ident == Left {
offsets = offsets[:1]
} else if values[start].Ident == Top || values[j].Ident == Top {
offsets[0] = Token{css.NumberToken, n50pBytes, nil, 0, 0}
}
}
if offsets[0].Data != nil && (len(offsets) == 1 || offsets[1].Data != nil) {
values = append(append(values[:start], offsets...), values[end:]...)
end -= end - start - len(offsets)
}
}
// removing zero offsets in the previous loop might make it eligible for the next loop
if end-start == 1 || end-start == 2 {
if end-start == 1 && (values[start].Ident == Top || values[start].Ident == Bottom) {
// we can't make this smaller, and converting to a number will break it
// (https://github.com/tdewolff/minify/issues/221#issuecomment-415419918)
break
}
if end-start == 2 && (values[start].Ident == Top || values[start].Ident == Bottom || values[start+1].Ident == Left || values[start+1].Ident == Right) {
// if it's a vertical position keyword, swap it with the next element
// since otherwise converted number positions won't be valid anymore
// (https://github.com/tdewolff/minify/issues/221#issue-353067229)
values[start], values[start+1] = values[start+1], values[start]
}
// transform keywords to lengths|percentages
for i := start; i < end; i++ {
if values[i].TokenType == css.IdentToken {
if values[i].Ident == Left || values[i].Ident == Top {
values[i].TokenType = css.NumberToken
values[i].Data = zeroBytes
values[i].Ident = 0
} else if values[i].Ident == Right || values[i].Ident == Bottom {
values[i].TokenType = css.PercentageToken
values[i].Data = n100pBytes
values[i].Ident = 0
} else if values[i].Ident == Center {
if i == start {
values[i].TokenType = css.PercentageToken
values[i].Data = n50pBytes
values[i].Ident = 0
} else {
values = append(values[:start+1], values[start+2:]...)
end--
}
}
} else if i == start+1 && values[i].TokenType == css.PercentageToken && bytes.Equal(values[i].Data, n50pBytes) {
values = append(values[:start+1], values[start+2:]...)
end--
} else if values[i].TokenType == css.PercentageToken && values[i].Data[0] == '0' {
values[i].TokenType = css.NumberToken
values[i].Data = zeroBytes
values[i].Ident = 0
}
}
}
start = end + 1
}
case Box_Shadow:
start := 0
for end := 0; end <= len(values); end++ { // loop over comma-separated lists
if end != len(values) && values[end].TokenType != css.CommaToken {
continue
} else if start == end {
start++
continue
}
if end-start == 1 && values[start].Ident == Initial {
values[start].Ident = None
values[start].Data = noneBytes
} else {
numbers := []int{}
for i := start; i < end; i++ {
if values[i].IsLength() {
numbers = append(numbers, i)
}
}
if len(numbers) == 4 && values[numbers[3]].IsZero() {
values = append(values[:numbers[3]], values[numbers[3]+1:]...)
numbers = numbers[:3]
end--
}
if len(numbers) == 3 && values[numbers[2]].IsZero() {
values = append(values[:numbers[2]], values[numbers[2]+1:]...)
end--
}
}
start = end + 1
}
case Ms_Filter:
alpha := []byte("progid:DXImageTransform.Microsoft.Alpha(Opacity=")
if values[0].TokenType == css.StringToken && 2 < len(values[0].Data) && bytes.HasPrefix(values[0].Data[1:len(values[0].Data)-1], alpha) {
values[0].Data = append(append([]byte{values[0].Data[0]}, []byte("alpha(opacity=")...), values[0].Data[1+len(alpha):]...)
}
case Color:
values[0] = minifyColor(values[0])
case Background_Color:
values[0] = minifyColor(values[0])
if !c.o.KeepCSS2 {
if values[0].Ident == Transparent {
values[0].Data = initialBytes
values[0].Ident = Initial
}
}
case Border_Color:
sameValues := true
for i := range values {
if values[i].Ident == Currentcolor {
values[i].Data = initialBytes
values[i].Ident = Initial
} else {
values[i] = minifyColor(values[i])
}
if 0 < i && sameValues && !values[0].Equal(values[i]) {
sameValues = false
}
}
if sameValues {
values = values[:1]
}
case Border_Left_Color, Border_Right_Color, Border_Top_Color, Border_Bottom_Color, Text_Decoration_Color, Text_Emphasis_Color:
if values[0].Ident == Currentcolor {
values[0].Data = initialBytes
values[0].Ident = Initial
} else {
values[0] = minifyColor(values[0])
}
case Caret_Color, Outline_Color, Fill, Stroke:
values[0] = minifyColor(values[0])
case Column_Rule:
for i := 0; i < len(values); i++ {
if values[i].Ident == Currentcolor || values[i].Ident == None || values[i].Ident == Medium {
values = append(values[:i], values[i+1:]...)
i--
} else {
values[i] = minifyColor(values[i])
}
}
if len(values) == 0 {
values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
}
case Text_Shadow:
// TODO: minify better (can be comma separated list)
for i := 0; i < len(values); i++ {
values[i] = minifyColor(values[i])
}
case Text_Decoration:
for i := 0; i < len(values); i++ {
if values[i].Ident == Currentcolor || values[i].Ident == None || values[i].Ident == Solid {
values = append(values[:i], values[i+1:]...)
i--
} else {
values[i] = minifyColor(values[i])
}
}
if len(values) == 0 {
values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
}
case Text_Emphasis:
for i := 0; i < len(values); i++ {
if values[i].Ident == Currentcolor || values[i].Ident == None {
values = append(values[:i], values[i+1:]...)
i--
} else {
values[i] = minifyColor(values[i])
}
}
if len(values) == 0 {
values = []Token{{css.IdentToken, noneBytes, nil, 0, None}}
}
case Flex:
if len(values) == 2 && values[0].TokenType == css.NumberToken {
if values[1].TokenType != css.NumberToken && values[1].IsZero() {
values = values[:1] // remove <flex-basis> if it is zero
}
} else if len(values) == 3 && values[0].TokenType == css.NumberToken && values[1].TokenType == css.NumberToken {
if len(values[0].Data) == 1 && len(values[1].Data) == 1 {
if values[2].Ident == Auto {
if values[0].Data[0] == '0' && values[1].Data[0] == '1' {
values = values[:1]
values[0].TokenType = css.IdentToken
values[0].Data = initialBytes
values[0].Ident = Initial
} else if values[0].Data[0] == '1' && values[1].Data[0] == '1' {
values = values[:1]
values[0].TokenType = css.IdentToken
values[0].Data = autoBytes
values[0].Ident = Auto
} else if values[0].Data[0] == '0' && values[1].Data[0] == '0' {
values = values[:1]
values[0].TokenType = css.IdentToken
values[0].Data = noneBytes
values[0].Ident = None
}
} else if values[1].Data[0] == '1' && values[2].IsZero() {
values = values[:1] // remove <flex-shrink> and <flex-basis> if they are 1 and 0 respectively
} else if values[2].IsZero() {
values = values[:2] // remove auto to write 2-value syntax of <flex-grow> <flex-shrink>
} else {
values[2] = minifyLengthPercentage(values[2])
}
}
}
case Flex_Basis:
if values[0].Ident == Initial {
values[0].Data = autoBytes
values[0].Ident = Auto
} else {
values[0] = minifyLengthPercentage(values[0])
}
case Order, Flex_Grow:
if values[0].Ident == Initial {
values[0].TokenType = css.NumberToken
values[0].Data = zeroBytes
values[0].Ident = 0
}
case Flex_Shrink:
if values[0].Ident == Initial {
values[0].TokenType = css.NumberToken
values[0].Data = oneBytes
values[0].Ident = 0
}
case Unicode_Range:
ranges := [][2]int{}
for _, value := range values {
if value.TokenType == css.CommaToken {
continue
} else if value.TokenType != css.UnicodeRangeToken {
return values
}
i := 2
iWildcard := 0
start := 0
for i < len(value.Data) && value.Data[i] != '-' {
start *= 16
if '0' <= value.Data[i] && value.Data[i] <= '9' {
start += int(value.Data[i] - '0')
} else if 'a' <= value.Data[i]|32 && value.Data[i]|32 <= 'f' {
start += int(value.Data[i]|32-'a') + 10
} else if iWildcard == 0 && value.Data[i] == '?' {
iWildcard = i
}
i++
}
end := start
if iWildcard != 0 {
end = start + int(math.Pow(16.0, float64(len(value.Data)-iWildcard))) - 1
} else if i < len(value.Data) && value.Data[i] == '-' {
i++
end = 0
for i < len(value.Data) {
end *= 16
if '0' <= value.Data[i] && value.Data[i] <= '9' {
end += int(value.Data[i] - '0')
} else if 'a' <= value.Data[i]|32 && value.Data[i]|32 <= 'f' {
end += int(value.Data[i]|32-'a') + 10
}
i++
}
if end <= start {
end = start
}
}
ranges = append(ranges, [2]int{start, end})
}
// sort and remove overlapping ranges
sort.Slice(ranges, func(i, j int) bool { return ranges[i][0] < ranges[j][0] })
for i := 0; i < len(ranges)-1; i++ {
if ranges[i+1][1] <= ranges[i][1] {
// next range is fully contained in the current range
ranges = append(ranges[:i+1], ranges[i+2:]...)
} else if ranges[i+1][0] <= ranges[i][1]+1 {
// next range is partially covering the current range
ranges[i][1] = ranges[i+1][1]
ranges = append(ranges[:i+1], ranges[i+2:]...)
}
}
values = values[:0]
for i, ran := range ranges {
if i != 0 {
values = append(values, Token{css.CommaToken, commaBytes, nil, 0, None})
}
if ran[0] == ran[1] {
urange := []byte(fmt.Sprintf("U+%X", ran[0]))
values = append(values, Token{css.UnicodeRangeToken, urange, nil, 0, None})
} else if ran[0] == 0 && ran[1] == 0x10FFFF {
values = append(values, Token{css.IdentToken, initialBytes, nil, 0, None})
} else {
k := 0
for k < 6 && (ran[0]>>(k*4))&0xF == 0 && (ran[1]>>(k*4))&0xF == 0xF {
k++
}
wildcards := k
for k < 6 {
if (ran[0]>>(k*4))&0xF != (ran[1]>>(k*4))&0xF {
wildcards = 0
break
}
k++
}
var urange []byte
if wildcards != 0 {
if ran[0]>>(wildcards*4) == 0 {
urange = []byte(fmt.Sprintf("U+%s", strings.Repeat("?", wildcards)))
} else {
urange = []byte(fmt.Sprintf("U+%X%s", ran[0]>>(wildcards*4), strings.Repeat("?", wildcards)))
}
} else {
urange = []byte(fmt.Sprintf("U+%X-%X", ran[0], ran[1]))
}
values = append(values, Token{css.UnicodeRangeToken, urange, nil, 0, None})
}
}
}
return values
}
// minifyColor rewrites a color token into its shortest equivalent CSS form:
// color keywords become short hex codes, hex codes become keywords or the
// 3/4-digit shorthand, and a redundant alpha channel is dropped.
func minifyColor(value Token) Token {
	switch value.TokenType {
	case css.IdentToken:
		// A color keyword with a shorter hex equivalent becomes that hex code.
		if hex, ok := ShortenColorName[value.Ident]; ok {
			value.TokenType = css.HashToken
			value.Data = hex
		}
	case css.HashToken:
		d := value.Data
		parse.ToLower(d[1:]) // hex digits are case-insensitive
		if len(d) == 9 && d[7] == d[8] {
			switch d[7] {
			case 'f':
				// Fully opaque: the alpha channel is the default, drop it.
				d = d[:7]
			case '0':
				// Fully transparent: every color is equivalent to transparent black.
				d = blackBytes
			}
		}
		if ident, ok := ShortenColorHex[string(d)]; ok {
			// A keyword (e.g. 'red') is shorter than this hex code.
			value.TokenType = css.IdentToken
			d = ident
		} else if len(d) == 7 && d[1] == d[2] && d[3] == d[4] && d[5] == d[6] {
			// '#rrggbb' with pairwise-equal digits shortens to '#rgb'.
			value.TokenType = css.HashToken
			d[2] = d[3]
			d[3] = d[5]
			d = d[:4]
		} else if len(d) == 9 && d[1] == d[2] && d[3] == d[4] && d[5] == d[6] && d[7] == d[8] {
			// '#rrggbbaa' shortens to '#rgba' (working draft Color Module Level 4).
			value.TokenType = css.HashToken
			d[2] = d[3]
			d[3] = d[5]
			d[4] = d[7]
			d = d[:5]
		}
		value.Data = d
	}
	return value
}
// minifyNumberPercentage converts between number and percentage notation when
// the other notation is shorter: 'N0%' -> '.N', '.00D...' -> '.D...%', and
// '.0D' -> 'D%'. All rewrites mutate value.Data in place.
func minifyNumberPercentage(value Token) Token {
	// assumes input already minified
	if value.TokenType == css.PercentageToken && len(value.Data) == 3 && value.Data[len(value.Data)-2] == '0' {
		// 'N0%' (e.g. '40%') becomes '.N' (e.g. '.4'): same value, one byte shorter.
		value.Data[1] = value.Data[0]
		value.Data[0] = '.'
		value.Data = value.Data[:2]
		value.TokenType = css.NumberToken
	} else if value.TokenType == css.NumberToken && 2 < len(value.Data) && value.Data[0] == '.' && value.Data[1] == '0' {
		if value.Data[2] == '0' {
			// '.00D...' scales by 100 into a percentage (e.g. '.004' -> '.4%'),
			// dropping one byte. The copy shifts the digits left over the two
			// leading zeros before the '%' is written.
			value.Data[0] = '.'
			copy(value.Data[1:], value.Data[3:])
			value.Data[len(value.Data)-2] = '%'
			value.Data = value.Data[:len(value.Data)-1]
			value.TokenType = css.PercentageToken
		} else if len(value.Data) == 3 {
			// '.0D' (e.g. '.05') becomes 'D%' (e.g. '5%'): one byte shorter.
			value.Data[0] = value.Data[2]
			value.Data[1] = '%'
			value.Data = value.Data[:2]
			value.TokenType = css.PercentageToken
		}
	}
	return value
}
// minifyLengthPercentage strips the unit from a zero-valued length or
// percentage, since '0px', '0%', etc. are all equivalent to plain '0'.
func minifyLengthPercentage(value Token) Token {
	if value.TokenType == css.NumberToken || !value.IsZero() {
		return value
	}
	value.TokenType = css.NumberToken
	value.Data = value.Data[:1] // keep only the leading '0'
	return value
}
// minifyDimension lowercases the unit suffix of a dimension token in place and
// minifies its numeric part according to the minifier options. It returns the
// updated token and the (lowercased) unit bytes; dim is nil when the token is
// not a dimension.
func (c *cssMinifier) minifyDimension(value Token) (Token, []byte) {
	// TODO: add check for zero value
	var dim []byte
	if value.TokenType == css.DimensionToken {
		// Scan backwards over the trailing ASCII-letter unit, lowercasing any
		// uppercase letters as we go; n ends up at the start of the unit.
		n := len(value.Data)
		for 0 < n {
			lower := 'a' <= value.Data[n-1] && value.Data[n-1] <= 'z'
			upper := 'A' <= value.Data[n-1] && value.Data[n-1] <= 'Z'
			if !lower && !upper {
				break
			} else if upper {
				value.Data[n-1] = value.Data[n-1] + ('a' - 'A')
			}
			n--
		}
		num := value.Data[:n]
		if c.o.KeepCSS2 {
			num = minify.Decimal(num, c.o.Precision) // don't use exponents
		} else {
			num = minify.Number(num, c.o.Precision)
		}
		dim = value.Data[n:]
		// num may alias value.Data's backing array; copy's memmove semantics
		// keep the append safe even when the regions overlap.
		value.Data = append(num, dim...)
	}
	return value, dim
	// TODO: optimize
	//if value.TokenType == css.DimensionToken {
	//	// TODO: reverse; parse dim not number
	//	n := parse.Number(value.Data)
	//	num := value.Data[:n]
	//	dim = value.Data[n:]
	//	parse.ToLower(dim)
	//	if c.o.KeepCSS2 {
	//		num = minify.Decimal(num, c.o.Precision) // don't use exponents
	//	} else {
	//		num = minify.Number(num, c.o.Precision)
	//	}
	//	// change dimension to compress number
	//	h := ToHash(dim)
	//	if h == Px || h == Pt || h == Pc || h == In || h == Mm || h == Cm || h == Q || h == Deg || h == Grad || h == Rad || h == Turn || h == S || h == Ms || h == Hz || h == Khz || h == Dpi || h == Dpcm || h == Dppx {
	//		d, _ := strconv.ParseFloat(string(num), 64) // can never fail
	//		var dimensions []Hash
	//		var multipliers []float64
	//		switch h {
	//		case Px:
	//			//dimensions = []Hash{In, Cm, Pc, Mm, Pt, Q}
	//			//multipliers = []float64{0.010416666666666667, 0.026458333333333333, 0.0625, 0.26458333333333333, 0.75, 1.0583333333333333}
	//			dimensions = []Hash{In, Pc, Pt}
	//			multipliers = []float64{0.010416666666666667, 0.0625, 0.75}
	//		case Pt:
	//			//dimensions = []Hash{In, Cm, Pc, Mm, Px, Q}
	//			//multipliers = []float64{0.013888888888888889, 0.035277777777777778, 0.083333333333333333, 0.35277777777777778, 1.3333333333333333, 1.4111111111111111}
	//			dimensions = []Hash{In, Pc, Px}
	//			multipliers = []float64{0.013888888888888889, 0.083333333333333333, 1.3333333333333333}
	//		case Pc:
	//			//dimensions = []Hash{In, Cm, Mm, Pt, Px, Q}
	//			//multipliers = []float64{0.16666666666666667, 0.42333333333333333, 4.2333333333333333, 12.0, 16.0, 16.933333333333333}
	//			dimensions = []Hash{In, Pt, Px}
	//			multipliers = []float64{0.16666666666666667, 12.0, 16.0}
	//		case In:
	//			//dimensions = []Hash{Cm, Pc, Mm, Pt, Px, Q}
	//			//multipliers = []float64{2.54, 6.0, 25.4, 72.0, 96.0, 101.6}
	//			dimensions = []Hash{Pc, Pt, Px}
	//			multipliers = []float64{6.0, 72.0, 96.0}
	//		case Cm:
	//			//dimensions = []Hash{In, Pc, Mm, Pt, Px, Q}
	//			//multipliers = []float64{0.39370078740157480, 2.3622047244094488, 10.0, 28.346456692913386, 37.795275590551181, 40.0}
	//			dimensions = []Hash{Mm, Q}
	//			multipliers = []float64{10.0, 40.0}
	//		case Mm:
	//			//dimensions = []Hash{In, Cm, Pc, Pt, Px, Q}
	//			//multipliers = []float64{0.039370078740157480, 0.1, 0.23622047244094488, 2.8346456692913386, 3.7795275590551181, 4.0}
	//			dimensions = []Hash{Cm, Q}
	//			multipliers = []float64{0.1, 4.0}
	//		case Q:
	//			//dimensions = []Hash{In, Cm, Pc, Pt, Px} // Q to mm is never smaller
	//			//multipliers = []float64{0.0098425196850393701, 0.025, 0.059055118110236220, 0.70866141732283465, 0.94488188976377953}
	//			dimensions = []Hash{Cm} // Q to mm is never smaller
	//			multipliers = []float64{0.025}
	//		case Deg:
	//			//dimensions = []Hash{Turn, Rad, Grad}
	//			//multipliers = []float64{0.0027777777777777778, 0.017453292519943296, 1.1111111111111111}
	//			dimensions = []Hash{Turn, Grad}
	//			multipliers = []float64{0.0027777777777777778, 1.1111111111111111}
	//		case Grad:
	//			//dimensions = []Hash{Turn, Rad, Deg}
	//			//multipliers = []float64{0.0025, 0.015707963267948966, 0.9}
	//			dimensions = []Hash{Turn, Deg}
	//			multipliers = []float64{0.0025, 0.9}
	//		case Turn:
	//			//dimensions = []Hash{Rad, Deg, Grad}
	//			//multipliers = []float64{6.2831853071795865, 360.0, 400.0}
	//			dimensions = []Hash{Deg, Grad}
	//			multipliers = []float64{360.0, 400.0}
	//		case Rad:
	//			//dimensions = []Hash{Turn, Deg, Grad}
	//			//multipliers = []float64{0.15915494309189534, 57.295779513082321, 63.661977236758134}
	//		case S:
	//			dimensions = []Hash{Ms}
	//			multipliers = []float64{1000.0}
	//		case Ms:
	//			dimensions = []Hash{S}
	//			multipliers = []float64{0.001}
	//		case Hz:
	//			dimensions = []Hash{Khz}
	//			multipliers = []float64{0.001}
	//		case Khz:
	//			dimensions = []Hash{Hz}
	//			multipliers = []float64{1000.0}
	//		case Dpi:
	//			dimensions = []Hash{Dppx, Dpcm}
	//			multipliers = []float64{0.010416666666666667, 0.39370078740157480}
	//		case Dpcm:
	//			//dimensions = []Hash{Dppx, Dpi}
	//			//multipliers = []float64{0.026458333333333333, 2.54}
	//			dimensions = []Hash{Dpi}
	//			multipliers = []float64{2.54}
	//		case Dppx:
	//			//dimensions = []Hash{Dpcm, Dpi}
	//			//multipliers = []float64{37.795275590551181, 96.0}
	//			dimensions = []Hash{Dpi}
	//			multipliers = []float64{96.0}
	//		}
	//		for i := range dimensions {
	//			if dimensions[i] != h { //&& (d < 1.0) == (multipliers[i] > 1.0) {
	//				b, _ := strconvParse.AppendFloat([]byte{}, d*multipliers[i], -1)
	//				if c.o.KeepCSS2 {
	//					b = minify.Decimal(b, c.o.newPrecision) // don't use exponents
	//				} else {
	//					b = minify.Number(b, c.o.newPrecision)
	//				}
	//				newDim := []byte(dimensions[i].String())
	//				if len(b)+len(newDim) < len(num)+len(dim) {
	//					num = b
	//					dim = newDim
	//				}
	//			}
	//		}
	//	}
	//	value.Data = append(num, dim...)
	//}
	//return value, dim
}
package css

// uses github.com/tdewolff/hasher
//go:generate hasher -type=Hash -file=hash.go

// Hash is a perfect-hash value for a predefined list of CSS strings; each
// constant below encodes the string's offset and length in the string table.
type Hash uint32
// Identifiers for the hashes associated with the text in the comments.
// Generated by hasher (see the go:generate directive above); each value
// encodes offset<<8 | length into _Hash_text, so do not edit by hand.
const (
	Ms_Filter                   Hash = 0xa     // -ms-filter
	Accelerator                 Hash = 0x3760b // accelerator
	Aliceblue                   Hash = 0x7a209 // aliceblue
	Align_Content               Hash = 0xd980d // align-content
	Align_Items                 Hash = 0x7ef0b // align-items
	Align_Self                  Hash = 0x8cb0a // align-self
	All                         Hash = 0x69103 // all
	Alpha                       Hash = 0x37205 // alpha
	Animation                   Hash = 0xca09  // animation
	Animation_Delay             Hash = 0x2050f // animation-delay
	Animation_Direction         Hash = 0x8e913 // animation-direction
	Animation_Duration          Hash = 0x35d12 // animation-duration
	Animation_Fill_Mode         Hash = 0x66c13 // animation-fill-mode
	Animation_Iteration_Count   Hash = 0xd4919 // animation-iteration-count
	Animation_Name              Hash = 0xca0e  // animation-name
	Animation_Play_State        Hash = 0xfc14  // animation-play-state
	Animation_Timing_Function   Hash = 0x14119 // animation-timing-function
	Antiquewhite                Hash = 0x6490c // antiquewhite
	Aquamarine                  Hash = 0x9ec0a // aquamarine
	Attr                        Hash = 0x59804 // attr
	Auto                        Hash = 0x44504 // auto
	Azimuth                     Hash = 0x15a07 // azimuth
	Background                  Hash = 0x2b0a  // background
	Background_Attachment       Hash = 0x2b15  // background-attachment
	Background_Clip             Hash = 0xb6e0f // background-clip
	Background_Color            Hash = 0x21710 // background-color
	Background_Image            Hash = 0x5ad10 // background-image
	Background_Origin           Hash = 0x17111 // background-origin
	Background_Position         Hash = 0x18e13 // background-position
	Background_Position_X       Hash = 0x18e15 // background-position-x
	Background_Position_Y       Hash = 0x1a315 // background-position-y
	Background_Repeat           Hash = 0x1b811 // background-repeat
	Background_Size             Hash = 0x1cb0f // background-size
	Behavior                    Hash = 0x1da08 // behavior
	Black                       Hash = 0x1e205 // black
	Blanchedalmond              Hash = 0x1e70e // blanchedalmond
	Blueviolet                  Hash = 0x7a70a // blueviolet
	Bold                        Hash = 0x1fc04 // bold
	Border                      Hash = 0x22706 // border
	Border_Bottom               Hash = 0x2270d // border-bottom
	Border_Bottom_Color         Hash = 0x22713 // border-bottom-color
	Border_Bottom_Style         Hash = 0x23a13 // border-bottom-style
	Border_Bottom_Width         Hash = 0x25d13 // border-bottom-width
	Border_Box                  Hash = 0x27e0a // border-box
	Border_Collapse             Hash = 0x2b60f // border-collapse
	Border_Color                Hash = 0x2d30c // border-color
	Border_Left                 Hash = 0x2df0b // border-left
	Border_Left_Color           Hash = 0x2df11 // border-left-color
	Border_Left_Style           Hash = 0x2f011 // border-left-style
	Border_Left_Width           Hash = 0x30111 // border-left-width
	Border_Right                Hash = 0x3120c // border-right
	Border_Right_Color          Hash = 0x31212 // border-right-color
	Border_Right_Style          Hash = 0x32412 // border-right-style
	Border_Right_Width          Hash = 0x33612 // border-right-width
	Border_Spacing              Hash = 0x3480e // border-spacing
	Border_Style                Hash = 0x3ab0c // border-style
	Border_Top                  Hash = 0x3b70a // border-top
	Border_Top_Color            Hash = 0x3b710 // border-top-color
	Border_Top_Style            Hash = 0x3c710 // border-top-style
	Border_Top_Width            Hash = 0x3d710 // border-top-width
	Border_Width                Hash = 0x3e70c // border-width
	Bottom                      Hash = 0x22e06 // bottom
	Box_Shadow                  Hash = 0x2850a // box-shadow
	Burlywood                   Hash = 0x3f309 // burlywood
	Cadetblue                   Hash = 0x9c609 // cadetblue
	Calc                        Hash = 0x9c304 // calc
	Caption_Side                Hash = 0x40f0c // caption-side
	Caret_Color                 Hash = 0x4240b // caret-color
	Center                      Hash = 0xdb06  // center
	Charset                     Hash = 0x62f07 // charset
	Chartreuse                  Hash = 0x42f0a // chartreuse
	Chocolate                   Hash = 0x43909 // chocolate
	Clamp                       Hash = 0x44e05 // clamp
	Clear                       Hash = 0x45d05 // clear
	Clip                        Hash = 0xb7904 // clip
	Cm                          Hash = 0x53802 // cm
	Color                       Hash = 0x2505  // color
	Column_Count                Hash = 0x4620c // column-count
	Column_Gap                  Hash = 0x6a30a // column-gap
	Column_Rule                 Hash = 0x4880b // column-rule
	Column_Rule_Color           Hash = 0x48811 // column-rule-color
	Column_Rule_Style           Hash = 0x49911 // column-rule-style
	Column_Rule_Width           Hash = 0x4aa11 // column-rule-width
	Column_Width                Hash = 0x4bb0c // column-width
	Columns                     Hash = 0x74607 // columns
	Content                     Hash = 0x5607  // content
	Cornflowerblue              Hash = 0x4c70e // cornflowerblue
	Cornsilk                    Hash = 0x4d508 // cornsilk
	Counter_Increment           Hash = 0xd5d11 // counter-increment
	Counter_Reset               Hash = 0x4690d // counter-reset
	Cue                         Hash = 0x4dd03 // cue
	Cue_After                   Hash = 0x4dd09 // cue-after
	Cue_Before                  Hash = 0x4e60a // cue-before
	Currentcolor                Hash = 0x5010c // currentcolor
	Cursive                     Hash = 0x50d07 // cursive
	Cursor                      Hash = 0x51406 // cursor
	Darkblue                    Hash = 0x1f408 // darkblue
	Darkcyan                    Hash = 0x1ff08 // darkcyan
	Darkgoldenrod               Hash = 0x3fb0d // darkgoldenrod
	Darkgray                    Hash = 0x40708 // darkgray
	Darkgreen                   Hash = 0x75c09 // darkgreen
	Darkkhaki                   Hash = 0xa1409 // darkkhaki
	Darkmagenta                 Hash = 0xce90b // darkmagenta
	Darkolivegreen              Hash = 0x6d90e // darkolivegreen
	Darkorange                  Hash = 0x7500a // darkorange
	Darkorchid                  Hash = 0xa0b0a // darkorchid
	Darksalmon                  Hash = 0xa990a // darksalmon
	Darkseagreen                Hash = 0xb110c // darkseagreen
	Darkslateblue               Hash = 0xc1c0d // darkslateblue
	Darkslategray               Hash = 0xbfa0d // darkslategray
	Darkturquoise               Hash = 0xcaa0d // darkturquoise
	Darkviolet                  Hash = 0x51a0a // darkviolet
	Deeppink                    Hash = 0x67d08 // deeppink
	Deepskyblue                 Hash = 0x4190b // deepskyblue
	Default                     Hash = 0xa2207 // default
	Deg                         Hash = 0x70103 // deg
	Direction                   Hash = 0x8d909 // direction
	Display                     Hash = 0xcce07 // display
	Document                    Hash = 0x52408 // document
	Dodgerblue                  Hash = 0x52c0a // dodgerblue
	Dpcm                        Hash = 0x53604 // dpcm
	Dpi                         Hash = 0x54f03 // dpi
	Dppx                        Hash = 0x55b04 // dppx
	Elevation                   Hash = 0x6d09  // elevation
	Empty_Cells                 Hash = 0x3910b // empty-cells
	Env                         Hash = 0x4f503 // env
	Fantasy                     Hash = 0x3a407 // fantasy
	Fill                        Hash = 0x67604 // fill
	Filter                      Hash = 0x406   // filter
	Firebrick                   Hash = 0x83509 // firebrick
	Flex                        Hash = 0x55f04 // flex
	Flex_Basis                  Hash = 0x89d0a // flex-basis
	Flex_Direction              Hash = 0x8d40e // flex-direction
	Flex_Flow                   Hash = 0xc8709 // flex-flow
	Flex_Grow                   Hash = 0x55f09 // flex-grow
	Flex_Shrink                 Hash = 0x5680b // flex-shrink
	Flex_Wrap                   Hash = 0x57309 // flex-wrap
	Float                       Hash = 0x59505 // float
	Floralwhite                 Hash = 0x5bd0b // floralwhite
	Font                        Hash = 0x25404 // font
	Font_Face                   Hash = 0x25409 // font-face
	Font_Family                 Hash = 0x5ee0b // font-family
	Font_Size                   Hash = 0x5f909 // font-size
	Font_Size_Adjust            Hash = 0x5f910 // font-size-adjust
	Font_Stretch                Hash = 0x6250c // font-stretch
	Font_Style                  Hash = 0x6360a // font-style
	Font_Variant                Hash = 0x6400c // font-variant
	Font_Weight                 Hash = 0x65b0b // font-weight
	Forestgreen                 Hash = 0x4ec0b // forestgreen
	Fuchsia                     Hash = 0x66607 // fuchsia
	Function                    Hash = 0x15208 // function
	Gainsboro                   Hash = 0xec09  // gainsboro
	Ghostwhite                  Hash = 0x2990a // ghostwhite
	Goldenrod                   Hash = 0x3ff09 // goldenrod
	Grad                        Hash = 0x1004  // grad
	Greenyellow                 Hash = 0x7600b // greenyellow
	Grid                        Hash = 0x35504 // grid
	Grid_Area                   Hash = 0x35509 // grid-area
	Grid_Auto_Columns           Hash = 0x7bb11 // grid-auto-columns
	Grid_Auto_Flow              Hash = 0x81c0e // grid-auto-flow
	Grid_Auto_Rows              Hash = 0x8640e // grid-auto-rows
	Grid_Column                 Hash = 0x69e0b // grid-column
	Grid_Column_End             Hash = 0xcdb0f // grid-column-end
	Grid_Column_Gap             Hash = 0x69e0f // grid-column-gap
	Grid_Column_Start           Hash = 0x6bd11 // grid-column-start
	Grid_Row                    Hash = 0x6ce08 // grid-row
	Grid_Row_End                Hash = 0x6ce0c // grid-row-end
	Grid_Row_Gap                Hash = 0x6e70c // grid-row-gap
	Grid_Row_Start              Hash = 0x7030e // grid-row-start
	Grid_Template               Hash = 0x7110d // grid-template
	Grid_Template_Areas         Hash = 0x71113 // grid-template-areas
	Grid_Template_Columns       Hash = 0x73815 // grid-template-columns
	Grid_Template_Rows          Hash = 0x77012 // grid-template-rows
	Height                      Hash = 0x9306  // height
	Honeydew                    Hash = 0x16008 // honeydew
	Hsl                         Hash = 0x26f03 // hsl
	Hsla                        Hash = 0x26f04 // hsla
	Hz                          Hash = 0x68502 // hz
	Ime_Mode                    Hash = 0xa1c08 // ime-mode
	Import                      Hash = 0x78d06 // import
	Important                   Hash = 0x78d09 // important
	In                          Hash = 0x4402  // in
	Include_Source              Hash = 0x1800e // include-source
	Indianred                   Hash = 0xb0909 // indianred
	Inherit                     Hash = 0x79607 // inherit
	Initial                     Hash = 0x79d07 // initial
	Invert                      Hash = 0x7e406 // invert
	Justify_Content             Hash = 0x4e0f  // justify-content
	Justify_Items               Hash = 0x6050d // justify-items
	Justify_Self                Hash = 0x82a0c // justify-self
	Keyframes                   Hash = 0x5cb09 // keyframes
	Khz                         Hash = 0x68403 // khz
	Large                       Hash = 0xa905  // large
	Larger                      Hash = 0xa906  // larger
	Lavender                    Hash = 0x27108 // lavender
	Lavenderblush               Hash = 0x2710d // lavenderblush
	Lawngreen                   Hash = 0x2ca09 // lawngreen
	Layer_Background_Color      Hash = 0x21116 // layer-background-color
	Layer_Background_Image      Hash = 0x5a716 // layer-background-image
	Layout_Flow                 Hash = 0xcf80b // layout-flow
	Layout_Grid                 Hash = 0x8050b // layout-grid
	Layout_Grid_Char            Hash = 0x80510 // layout-grid-char
	Layout_Grid_Char_Spacing    Hash = 0x80518 // layout-grid-char-spacing
	Layout_Grid_Line            Hash = 0x83e10 // layout-grid-line
	Layout_Grid_Mode            Hash = 0x85410 // layout-grid-mode
	Layout_Grid_Type            Hash = 0x88710 // layout-grid-type
	Left                        Hash = 0x2e604 // left
	Lemonchiffon                Hash = 0x24b0c // lemonchiffon
	Letter_Spacing              Hash = 0x7ae0e // letter-spacing
	Lightblue                   Hash = 0x8ba09 // lightblue
	Lightcoral                  Hash = 0x8c30a // lightcoral
	Lightcyan                   Hash = 0x8e209 // lightcyan
	Lightgoldenrodyellow        Hash = 0x8fc14 // lightgoldenrodyellow
	Lightgray                   Hash = 0x91009 // lightgray
	Lightgreen                  Hash = 0x9190a // lightgreen
	Lightpink                   Hash = 0x92309 // lightpink
	Lightsalmon                 Hash = 0x92c0b // lightsalmon
	Lightseagreen               Hash = 0x9370d // lightseagreen
	Lightskyblue                Hash = 0x9440c // lightskyblue
	Lightslateblue              Hash = 0x9500e // lightslateblue
	Lightsteelblue              Hash = 0x95e0e // lightsteelblue
	Lightyellow                 Hash = 0x96c0b // lightyellow
	Limegreen                   Hash = 0x97709 // limegreen
	Line_Break                  Hash = 0x84a0a // line-break
	Line_Height                 Hash = 0x8e0b  // line-height
	Linear_Gradient             Hash = 0x9800f // linear-gradient
	List_Style                  Hash = 0x98f0a // list-style
	List_Style_Image            Hash = 0x98f10 // list-style-image
	List_Style_Position         Hash = 0x99f13 // list-style-position
	List_Style_Type             Hash = 0x9b20f // list-style-type
	Local                       Hash = 0x9c105 // local
	Magenta                     Hash = 0xced07 // magenta
	Margin                      Hash = 0x53906 // margin
	Margin_Bottom               Hash = 0xdb10d // margin-bottom
	Margin_Left                 Hash = 0xdbd0b // margin-left
	Margin_Right                Hash = 0xb890c // margin-right
	Margin_Top                  Hash = 0x5390a // margin-top
	Marker_Offset               Hash = 0xad00d // marker-offset
	Marks                       Hash = 0xaee05 // marks
	Mask                        Hash = 0x9cf04 // mask
	Max                         Hash = 0x9d303 // max
	Max_Height                  Hash = 0x9d30a // max-height
	Max_Width                   Hash = 0x9dd09 // max-width
	Media                       Hash = 0xd4505 // media
	Medium                      Hash = 0x9e606 // medium
	Mediumaquamarine            Hash = 0x9e610 // mediumaquamarine
	Mediumblue                  Hash = 0x9f60a // mediumblue
	Mediumorchid                Hash = 0xa000c // mediumorchid
	Mediumpurple                Hash = 0xa420c // mediumpurple
	Mediumseagreen              Hash = 0xa4e0e // mediumseagreen
	Mediumslateblue             Hash = 0xa5c0f // mediumslateblue
	Mediumspringgreen           Hash = 0xa6b11 // mediumspringgreen
	Mediumturquoise             Hash = 0xa7c0f // mediumturquoise
	Mediumvioletred             Hash = 0xa8b0f // mediumvioletred
	Midnightblue                Hash = 0xaa90c // midnightblue
	Min                         Hash = 0x14d03 // min
	Min_Height                  Hash = 0xab50a // min-height
	Min_Width                   Hash = 0xabf09 // min-width
	Mintcream                   Hash = 0xac809 // mintcream
	Mistyrose                   Hash = 0xae409 // mistyrose
	Mm                          Hash = 0xaed02 // mm
	Moccasin                    Hash = 0xb0308 // moccasin
	Monospace                   Hash = 0xaa009 // monospace
	Ms                          Hash = 0x102   // ms
	Namespace                   Hash = 0xd409  // namespace
	Navajowhite                 Hash = 0x750b  // navajowhite
	No_Repeat                   Hash = 0xbf09  // no-repeat
	None                        Hash = 0x38e04 // none
	Normal                      Hash = 0x36e06 // normal
	Offset                      Hash = 0xad706 // offset
	Offset_Anchor               Hash = 0xad70d // offset-anchor
	Offset_Distance             Hash = 0xb1d0f // offset-distance
	Offset_Path                 Hash = 0xb2c0b // offset-path
	Offset_Position             Hash = 0xb370f // offset-position
	Offset_Rotate               Hash = 0xb460d // offset-rotate
	Olivedrab                   Hash = 0xb6609 // olivedrab
	Orangered                   Hash = 0x75409 // orangered
	Order                       Hash = 0x22805 // order
	Orphans                     Hash = 0x37f07 // orphans
	Outline                     Hash = 0xba707 // outline
	Outline_Color               Hash = 0xba70d // outline-color
	Outline_Style               Hash = 0xbb40d // outline-style
	Outline_Width               Hash = 0xbc10d // outline-width
	Overflow                    Hash = 0x9d08  // overflow
	Overflow_X                  Hash = 0x9d0a  // overflow-x
	Overflow_Y                  Hash = 0xbce0a // overflow-y
	Padding                     Hash = 0x45207 // padding
	Padding_Bottom              Hash = 0xb7c0e // padding-bottom
	Padding_Box                 Hash = 0x4520b // padding-box
	Padding_Left                Hash = 0xd0a0c // padding-left
	Padding_Right               Hash = 0x5420d // padding-right
	Padding_Top                 Hash = 0x57b0b // padding-top
	Page                        Hash = 0x58504 // page
	Page_Break_After            Hash = 0x58510 // page-break-after
	Page_Break_Before           Hash = 0x6ac11 // page-break-before
	Page_Break_Inside           Hash = 0x6f211 // page-break-inside
	Palegoldenrod               Hash = 0xc100d // palegoldenrod
	Palegreen                   Hash = 0xbd809 // palegreen
	Paleturquoise               Hash = 0xbe10d // paleturquoise
	Palevioletred               Hash = 0xbee0d // palevioletred
	Papayawhip                  Hash = 0xc070a // papayawhip
	Pause                       Hash = 0xc2905 // pause
	Pause_After                 Hash = 0xc290b // pause-after
	Pause_Before                Hash = 0xc340c // pause-before
	Pc                          Hash = 0x53702 // pc
	Peachpuff                   Hash = 0x89509 // peachpuff
	Pitch                       Hash = 0x55005 // pitch
	Pitch_Range                 Hash = 0x5500b // pitch-range
	Place_Content               Hash = 0xc400d // place-content
	Place_Items                 Hash = 0xc4d0b // place-items
	Place_Self                  Hash = 0xc7e0a // place-self
	Play_During                 Hash = 0xcd10b // play-during
	Position                    Hash = 0x13908 // position
	Powderblue                  Hash = 0xc9b0a // powderblue
	Progid                      Hash = 0xca506 // progid
	Pt                          Hash = 0x39302 // pt
	Px                          Hash = 0x55d02 // px
	Q                           Hash = 0x64d01 // q
	Quotes                      Hash = 0xcb706 // quotes
	Rad                         Hash = 0x903   // rad
	Radial_Gradient             Hash = 0x90f   // radial-gradient
	Repeat                      Hash = 0xc206  // repeat
	Repeat_X                    Hash = 0x1c308 // repeat-x
	Repeat_Y                    Hash = 0xc208  // repeat-y
	Rgb                         Hash = 0x2903  // rgb
	Rgba                        Hash = 0x2904  // rgba
	Richness                    Hash = 0xae08  // richness
	Right                       Hash = 0x31905 // right
	Rosybrown                   Hash = 0xf309  // rosybrown
	Round                       Hash = 0x3005  // round
	Row_Gap                     Hash = 0x6ec07 // row-gap
	Royalblue                   Hash = 0x69509 // royalblue
	Ruby_Align                  Hash = 0xd930a // ruby-align
	Ruby_Overhang               Hash = 0xe00d  // ruby-overhang
	Ruby_Position               Hash = 0x1340d // ruby-position
	S                           Hash = 0x201   // s
	Saddlebrown                 Hash = 0xb50b  // saddlebrown
	Sandybrown                  Hash = 0x3850a // sandybrown
	Sans_Serif                  Hash = 0x39b0a // sans-serif
	Scroll                      Hash = 0x12006 // scroll
	Scrollbar_3d_Light_Color    Hash = 0xd7c18 // scrollbar-3d-light-color
	Scrollbar_Arrow_Color       Hash = 0x12015 // scrollbar-arrow-color
	Scrollbar_Base_Color        Hash = 0x8a614 // scrollbar-base-color
	Scrollbar_Dark_Shadow_Color Hash = 0x5d31b // scrollbar-dark-shadow-color
	Scrollbar_Face_Color        Hash = 0x61114 // scrollbar-face-color
	Scrollbar_Highlight_Color   Hash = 0x7cb19 // scrollbar-highlight-color
	Scrollbar_Shadow_Color      Hash = 0x87116 // scrollbar-shadow-color
	Scrollbar_Track_Color       Hash = 0x72315 // scrollbar-track-color
	Seagreen                    Hash = 0x93c08 // seagreen
	Seashell                    Hash = 0x2c308 // seashell
	Serif                       Hash = 0x3a005 // serif
	Size                        Hash = 0x1d604 // size
	Slateblue                   Hash = 0x95509 // slateblue
	Slategray                   Hash = 0xbfe09 // slategray
	Small                       Hash = 0x68f05 // small
	Smaller                     Hash = 0x68f07 // smaller
	Solid                       Hash = 0x74c05 // solid
	Space                       Hash = 0x6905  // space
	Speak                       Hash = 0x78105 // speak
	Speak_Header                Hash = 0x7810c // speak-header
	Speak_Numeral               Hash = 0x7f90d // speak-numeral
	Speak_Punctuation           Hash = 0xaf211 // speak-punctuation
	Speech_Rate                 Hash = 0xc570b // speech-rate
	Springgreen                 Hash = 0xa710b // springgreen
	Steelblue                   Hash = 0x96309 // steelblue
	Stress                      Hash = 0x11b06 // stress
	Stroke                      Hash = 0xc7806 // stroke
	Supports                    Hash = 0xcbc08 // supports
	Table_Layout                Hash = 0xcf20c // table-layout
	Text_Align                  Hash = 0x10e0a // text-align
	Text_Align_Last             Hash = 0x10e0f // text-align-last
	Text_Autospace              Hash = 0x4400e // text-autospace
	Text_Decoration             Hash = 0x7e0f  // text-decoration
	Text_Decoration_Color       Hash = 0x2a115 // text-decoration-color
	Text_Decoration_Line        Hash = 0x7e14  // text-decoration-line
	Text_Decoration_Style       Hash = 0xb5115 // text-decoration-style
	Text_Decoration_Thickness   Hash = 0xc6019 // text-decoration-thickness
	Text_Emphasis               Hash = 0x170d  // text-emphasis
	Text_Emphasis_Color         Hash = 0x1713  // text-emphasis-color
	Text_Indent                 Hash = 0x3f0b  // text-indent
	Text_Justify                Hash = 0x490c  // text-justify
	Text_Kashida_Space          Hash = 0x5c12  // text-kashida-space
	Text_Overflow               Hash = 0x980d  // text-overflow
	Text_Shadow                 Hash = 0xd6d0b // text-shadow
	Text_Transform              Hash = 0xda40e // text-transform
	Text_Underline_Position     Hash = 0xdc717 // text-underline-position
	Top                         Hash = 0x3be03 // top
	Transition                  Hash = 0x4750a // transition
	Transition_Delay            Hash = 0x59a10 // transition-delay
	Transition_Duration         Hash = 0xb9413 // transition-duration
	Transition_Property         Hash = 0x47513 // transition-property
	Transition_Timing_Function  Hash = 0xa281a // transition-timing-function
	Transparent                 Hash = 0xd150b // transparent
	Turn                        Hash = 0xd1f04 // turn
	Turquoise                   Hash = 0xa8209 // turquoise
	Unicode_Bidi                Hash = 0xcc40c // unicode-bidi
	Unicode_Range               Hash = 0xd230d // unicode-range
	Unset                       Hash = 0xd3005 // unset
	Url                         Hash = 0x3f403 // url
	Var                         Hash = 0x64503 // var
	Vertical_Align              Hash = 0x7e60e // vertical-align
	Visibility                  Hash = 0x4f70a // visibility
	Voice_Family                Hash = 0xd350c // voice-family
	Volume                      Hash = 0xd4106 // volume
	White                       Hash = 0x7b05  // white
	White_Space                 Hash = 0x6500b // white-space
	Whitesmoke                  Hash = 0x5c30a // whitesmoke
	Widows                      Hash = 0xd7706 // widows
	Width                       Hash = 0x26b05 // width
	Word_Break                  Hash = 0x1670a // word-break
	Word_Spacing                Hash = 0x28e0c // word-spacing
	Word_Wrap                   Hash = 0xd0209 // word-wrap
	Writing_Mode                Hash = 0xc8f0c // writing-mode
	X_Large                     Hash = 0xa707  // x-large
	X_Small                     Hash = 0x68d07 // x-small
	Xx_Large                    Hash = 0xa608  // xx-large
	Xx_Small                    Hash = 0x68c08 // xx-small
	Yellow                      Hash = 0x76506 // yellow
	Yellowgreen                 Hash = 0x7650b // yellowgreen
	Z_Index                     Hash = 0x68607 // z-index
)
//var HashMap = map[string]Hash{
// "-ms-filter": Ms_Filter,
// "accelerator": Accelerator,
// "aliceblue": Aliceblue,
// "align-content": Align_Content,
// "align-items": Align_Items,
// "align-self": Align_Self,
// "all": All,
// "alpha": Alpha,
// "animation": Animation,
// "animation-delay": Animation_Delay,
// "animation-direction": Animation_Direction,
// "animation-duration": Animation_Duration,
// "animation-fill-mode": Animation_Fill_Mode,
// "animation-iteration-count": Animation_Iteration_Count,
// "animation-name": Animation_Name,
// "animation-play-state": Animation_Play_State,
// "animation-timing-function": Animation_Timing_Function,
// "antiquewhite": Antiquewhite,
// "aquamarine": Aquamarine,
// "attr": Attr,
// "auto": Auto,
// "azimuth": Azimuth,
// "background": Background,
// "background-attachment": Background_Attachment,
// "background-clip": Background_Clip,
// "background-color": Background_Color,
// "background-image": Background_Image,
// "background-origin": Background_Origin,
// "background-position": Background_Position,
// "background-position-x": Background_Position_X,
// "background-position-y": Background_Position_Y,
// "background-repeat": Background_Repeat,
// "background-size": Background_Size,
// "behavior": Behavior,
// "black": Black,
// "blanchedalmond": Blanchedalmond,
// "blueviolet": Blueviolet,
// "bold": Bold,
// "border": Border,
// "border-bottom": Border_Bottom,
// "border-bottom-color": Border_Bottom_Color,
// "border-bottom-style": Border_Bottom_Style,
// "border-bottom-width": Border_Bottom_Width,
// "border-box": Border_Box,
// "border-collapse": Border_Collapse,
// "border-color": Border_Color,
// "border-left": Border_Left,
// "border-left-color": Border_Left_Color,
// "border-left-style": Border_Left_Style,
// "border-left-width": Border_Left_Width,
// "border-right": Border_Right,
// "border-right-color": Border_Right_Color,
// "border-right-style": Border_Right_Style,
// "border-right-width": Border_Right_Width,
// "border-spacing": Border_Spacing,
// "border-style": Border_Style,
// "border-top": Border_Top,
// "border-top-color": Border_Top_Color,
// "border-top-style": Border_Top_Style,
// "border-top-width": Border_Top_Width,
// "border-width": Border_Width,
// "bottom": Bottom,
// "box-shadow": Box_Shadow,
// "burlywood": Burlywood,
// "cadetblue": Cadetblue,
// "calc": Calc,
// "caption-side": Caption_Side,
// "caret-color": Caret_Color,
// "center": Center,
// "charset": Charset,
// "chartreuse": Chartreuse,
// "chocolate": Chocolate,
// "clamp": Clamp,
// "clear": Clear,
// "clip": Clip,
// "cm": Cm,
// "color": Color,
// "column-count": Column_Count,
// "column-gap": Column_Gap,
// "column-rule": Column_Rule,
// "column-rule-color": Column_Rule_Color,
// "column-rule-style": Column_Rule_Style,
// "column-rule-width": Column_Rule_Width,
// "column-width": Column_Width,
// "columns": Columns,
// "content": Content,
// "cornflowerblue": Cornflowerblue,
// "cornsilk": Cornsilk,
// "counter-increment": Counter_Increment,
// "counter-reset": Counter_Reset,
// "cue": Cue,
// "cue-after": Cue_After,
// "cue-before": Cue_Before,
// "currentcolor": Currentcolor,
// "cursive": Cursive,
// "cursor": Cursor,
// "darkblue": Darkblue,
// "darkcyan": Darkcyan,
// "darkgoldenrod": Darkgoldenrod,
// "darkgray": Darkgray,
// "darkgreen": Darkgreen,
// "darkkhaki": Darkkhaki,
// "darkmagenta": Darkmagenta,
// "darkolivegreen": Darkolivegreen,
// "darkorange": Darkorange,
// "darkorchid": Darkorchid,
// "darksalmon": Darksalmon,
// "darkseagreen": Darkseagreen,
// "darkslateblue": Darkslateblue,
// "darkslategray": Darkslategray,
// "darkturquoise": Darkturquoise,
// "darkviolet": Darkviolet,
// "deeppink": Deeppink,
// "deepskyblue": Deepskyblue,
// "default": Default,
// "deg": Deg,
// "direction": Direction,
// "display": Display,
// "document": Document,
// "dodgerblue": Dodgerblue,
// "dpcm": Dpcm,
// "dpi": Dpi,
// "dppx": Dppx,
// "elevation": Elevation,
// "empty-cells": Empty_Cells,
// "env": Env,
// "fantasy": Fantasy,
// "fill": Fill,
// "filter": Filter,
// "firebrick": Firebrick,
// "flex": Flex,
// "flex-basis": Flex_Basis,
// "flex-direction": Flex_Direction,
// "flex-flow": Flex_Flow,
// "flex-grow": Flex_Grow,
// "flex-shrink": Flex_Shrink,
// "flex-wrap": Flex_Wrap,
// "float": Float,
// "floralwhite": Floralwhite,
// "font": Font,
// "font-face": Font_Face,
// "font-family": Font_Family,
// "font-size": Font_Size,
// "font-size-adjust": Font_Size_Adjust,
// "font-stretch": Font_Stretch,
// "font-style": Font_Style,
// "font-variant": Font_Variant,
// "font-weight": Font_Weight,
// "forestgreen": Forestgreen,
// "fuchsia": Fuchsia,
// "function": Function,
// "gainsboro": Gainsboro,
// "ghostwhite": Ghostwhite,
// "goldenrod": Goldenrod,
// "grad": Grad,
// "greenyellow": Greenyellow,
// "grid": Grid,
// "grid-area": Grid_Area,
// "grid-auto-columns": Grid_Auto_Columns,
// "grid-auto-flow": Grid_Auto_Flow,
// "grid-auto-rows": Grid_Auto_Rows,
// "grid-column": Grid_Column,
// "grid-column-end": Grid_Column_End,
// "grid-column-gap": Grid_Column_Gap,
// "grid-column-start": Grid_Column_Start,
// "grid-row": Grid_Row,
// "grid-row-end": Grid_Row_End,
// "grid-row-gap": Grid_Row_Gap,
// "grid-row-start": Grid_Row_Start,
// "grid-template": Grid_Template,
// "grid-template-areas": Grid_Template_Areas,
// "grid-template-columns": Grid_Template_Columns,
// "grid-template-rows": Grid_Template_Rows,
// "height": Height,
// "honeydew": Honeydew,
// "hsl": Hsl,
// "hsla": Hsla,
// "hz": Hz,
// "ime-mode": Ime_Mode,
// "import": Import,
// "important": Important,
// "in": In,
// "include-source": Include_Source,
// "indianred": Indianred,
// "inherit": Inherit,
// "initial": Initial,
// "invert": Invert,
// "justify-content": Justify_Content,
// "justify-items": Justify_Items,
// "justify-self": Justify_Self,
// "keyframes": Keyframes,
// "khz": Khz,
// "large": Large,
// "larger": Larger,
// "lavender": Lavender,
// "lavenderblush": Lavenderblush,
// "lawngreen": Lawngreen,
// "layer-background-color": Layer_Background_Color,
// "layer-background-image": Layer_Background_Image,
// "layout-flow": Layout_Flow,
// "layout-grid": Layout_Grid,
// "layout-grid-char": Layout_Grid_Char,
// "layout-grid-char-spacing": Layout_Grid_Char_Spacing,
// "layout-grid-line": Layout_Grid_Line,
// "layout-grid-mode": Layout_Grid_Mode,
// "layout-grid-type": Layout_Grid_Type,
// "left": Left,
// "lemonchiffon": Lemonchiffon,
// "letter-spacing": Letter_Spacing,
// "lightblue": Lightblue,
// "lightcoral": Lightcoral,
// "lightcyan": Lightcyan,
// "lightgoldenrodyellow": Lightgoldenrodyellow,
// "lightgray": Lightgray,
// "lightgreen": Lightgreen,
// "lightpink": Lightpink,
// "lightsalmon": Lightsalmon,
// "lightseagreen": Lightseagreen,
// "lightskyblue": Lightskyblue,
// "lightslateblue": Lightslateblue,
// "lightsteelblue": Lightsteelblue,
// "lightyellow": Lightyellow,
// "limegreen": Limegreen,
// "line-break": Line_Break,
// "line-height": Line_Height,
// "linear-gradient": Linear_Gradient,
// "list-style": List_Style,
// "list-style-image": List_Style_Image,
// "list-style-position": List_Style_Position,
// "list-style-type": List_Style_Type,
// "local": Local,
// "magenta": Magenta,
// "margin": Margin,
// "margin-bottom": Margin_Bottom,
// "margin-left": Margin_Left,
// "margin-right": Margin_Right,
// "margin-top": Margin_Top,
// "marker-offset": Marker_Offset,
// "marks": Marks,
// "mask": Mask,
// "max": Max,
// "max-height": Max_Height,
// "max-width": Max_Width,
// "media": Media,
// "medium": Medium,
// "mediumaquamarine": Mediumaquamarine,
// "mediumblue": Mediumblue,
// "mediumorchid": Mediumorchid,
// "mediumpurple": Mediumpurple,
// "mediumseagreen": Mediumseagreen,
// "mediumslateblue": Mediumslateblue,
// "mediumspringgreen": Mediumspringgreen,
// "mediumturquoise": Mediumturquoise,
// "mediumvioletred": Mediumvioletred,
// "midnightblue": Midnightblue,
// "min": Min,
// "min-height": Min_Height,
// "min-width": Min_Width,
// "mintcream": Mintcream,
// "mistyrose": Mistyrose,
// "mm": Mm,
// "moccasin": Moccasin,
// "monospace": Monospace,
// "ms": Ms,
// "namespace": Namespace,
// "navajowhite": Navajowhite,
// "no-repeat": No_Repeat,
// "none": None,
// "normal": Normal,
// "offset": Offset,
// "offset-anchor": Offset_Anchor,
// "offset-distance": Offset_Distance,
// "offset-path": Offset_Path,
// "offset-position": Offset_Position,
// "offset-rotate": Offset_Rotate,
// "olivedrab": Olivedrab,
// "orangered": Orangered,
// "order": Order,
// "orphans": Orphans,
// "outline": Outline,
// "outline-color": Outline_Color,
// "outline-style": Outline_Style,
// "outline-width": Outline_Width,
// "overflow": Overflow,
// "overflow-x": Overflow_X,
// "overflow-y": Overflow_Y,
// "padding": Padding,
// "padding-bottom": Padding_Bottom,
// "padding-box": Padding_Box,
// "padding-left": Padding_Left,
// "padding-right": Padding_Right,
// "padding-top": Padding_Top,
// "page": Page,
// "page-break-after": Page_Break_After,
// "page-break-before": Page_Break_Before,
// "page-break-inside": Page_Break_Inside,
// "palegoldenrod": Palegoldenrod,
// "palegreen": Palegreen,
// "paleturquoise": Paleturquoise,
// "palevioletred": Palevioletred,
// "papayawhip": Papayawhip,
// "pause": Pause,
// "pause-after": Pause_After,
// "pause-before": Pause_Before,
// "pc": Pc,
// "peachpuff": Peachpuff,
// "pitch": Pitch,
// "pitch-range": Pitch_Range,
// "place-content": Place_Content,
// "place-items": Place_Items,
// "place-self": Place_Self,
// "play-during": Play_During,
// "position": Position,
// "powderblue": Powderblue,
// "progid": Progid,
// "pt": Pt,
// "px": Px,
// "q": Q,
// "quotes": Quotes,
// "rad": Rad,
// "radial-gradient": Radial_Gradient,
// "repeat": Repeat,
// "repeat-x": Repeat_X,
// "repeat-y": Repeat_Y,
// "rgb": Rgb,
// "rgba": Rgba,
// "richness": Richness,
// "right": Right,
// "rosybrown": Rosybrown,
// "round": Round,
// "row-gap": Row_Gap,
// "royalblue": Royalblue,
// "ruby-align": Ruby_Align,
// "ruby-overhang": Ruby_Overhang,
// "ruby-position": Ruby_Position,
// "s": S,
// "saddlebrown": Saddlebrown,
// "sandybrown": Sandybrown,
// "sans-serif": Sans_Serif,
// "scroll": Scroll,
// "scrollbar-3d-light-color": Scrollbar_3d_Light_Color,
// "scrollbar-arrow-color": Scrollbar_Arrow_Color,
// "scrollbar-base-color": Scrollbar_Base_Color,
// "scrollbar-dark-shadow-color": Scrollbar_Dark_Shadow_Color,
// "scrollbar-face-color": Scrollbar_Face_Color,
// "scrollbar-highlight-color": Scrollbar_Highlight_Color,
// "scrollbar-shadow-color": Scrollbar_Shadow_Color,
// "scrollbar-track-color": Scrollbar_Track_Color,
// "seagreen": Seagreen,
// "seashell": Seashell,
// "serif": Serif,
// "size": Size,
// "slateblue": Slateblue,
// "slategray": Slategray,
// "small": Small,
// "smaller": Smaller,
// "solid": Solid,
// "space": Space,
// "speak": Speak,
// "speak-header": Speak_Header,
// "speak-numeral": Speak_Numeral,
// "speak-punctuation": Speak_Punctuation,
// "speech-rate": Speech_Rate,
// "springgreen": Springgreen,
// "steelblue": Steelblue,
// "stress": Stress,
// "stroke": Stroke,
// "supports": Supports,
// "table-layout": Table_Layout,
// "text-align": Text_Align,
// "text-align-last": Text_Align_Last,
// "text-autospace": Text_Autospace,
// "text-decoration": Text_Decoration,
// "text-decoration-color": Text_Decoration_Color,
// "text-decoration-line": Text_Decoration_Line,
// "text-decoration-style": Text_Decoration_Style,
// "text-decoration-thickness": Text_Decoration_Thickness,
// "text-emphasis": Text_Emphasis,
// "text-emphasis-color": Text_Emphasis_Color,
// "text-indent": Text_Indent,
// "text-justify": Text_Justify,
// "text-kashida-space": Text_Kashida_Space,
// "text-overflow": Text_Overflow,
// "text-shadow": Text_Shadow,
// "text-transform": Text_Transform,
// "text-underline-position": Text_Underline_Position,
// "top": Top,
// "transition": Transition,
// "transition-delay": Transition_Delay,
// "transition-duration": Transition_Duration,
// "transition-property": Transition_Property,
// "transition-timing-function": Transition_Timing_Function,
// "transparent": Transparent,
// "turn": Turn,
// "turquoise": Turquoise,
// "unicode-bidi": Unicode_Bidi,
// "unicode-range": Unicode_Range,
// "unset": Unset,
// "url": Url,
// "var": Var,
// "vertical-align": Vertical_Align,
// "visibility": Visibility,
// "voice-family": Voice_Family,
// "volume": Volume,
// "white": White,
// "white-space": White_Space,
// "whitesmoke": Whitesmoke,
// "widows": Widows,
// "width": Width,
// "word-break": Word_Break,
// "word-spacing": Word_Spacing,
// "word-wrap": Word_Wrap,
// "writing-mode": Writing_Mode,
// "x-large": X_Large,
// "x-small": X_Small,
// "xx-large": Xx_Large,
// "xx-small": Xx_Small,
// "yellow": Yellow,
// "yellowgreen": Yellowgreen,
// "z-index": Z_Index,
//}
// String returns the text associated with the hash as a string.
func (i Hash) String() string {
	b := i.Bytes()
	return string(b)
}
// Bytes returns the text associated with the hash.
// A Hash packs the offset into _Hash_text in its upper bits and the
// length in its low byte; out-of-range values yield an empty slice.
func (i Hash) Bytes() []byte {
	offset := uint32(i) >> 8
	length := uint32(i) & 0xff
	end := offset + length
	if end > uint32(len(_Hash_text)) {
		return []byte{}
	}
	return _Hash_text[offset:end]
}
// ToHash returns a hash Hash for a given []byte. Hash is a uint32 that is associated with the text in []byte. It returns zero if no match found.
func ToHash(s []byte) Hash {
	// Strings outside the known length range can never match.
	if len(s) == 0 || len(s) > _Hash_maxLen {
		return 0
	}
	//if 3 < len(s) {
	//	return HashMap[string(s)]
	//}
	// FNV-1a over s, seeded with the generated offset basis _Hash_hash0
	// (16777619 is the 32-bit FNV prime).
	h := uint32(_Hash_hash0)
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	// First probe: low 16 bits of h select a table slot. A slot entry has
	// the same offset<<8|length encoding as Hash; the length byte must
	// match before comparing the candidate text.
	if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				goto NEXT // mismatch: fall through to the second probe
			}
		}
		return i
	}
NEXT:
	// Second probe: upper 16 bits of h select the alternate slot.
	if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				// Both probes failed: s is not one of the known strings.
				return 0
			}
		}
		return i
	}
	return 0
}
// _Hash_hash0 is the generated FNV seed (offset basis) used by ToHash.
const _Hash_hash0 = 0x9acb0442

// _Hash_maxLen is the byte length of the longest hashed string; longer
// inputs are rejected by ToHash without hashing.
const _Hash_maxLen = 27
var _Hash_text = []byte("" +
"-ms-filteradial-gradientext-emphasis-colorgbackground-attach" +
"mentext-indentext-justify-contentext-kashida-spacelevationav" +
"ajowhitext-decoration-line-heightext-overflow-xx-largerichne" +
"ssaddlebrowno-repeat-yanimation-namespacenteruby-overhangain" +
"sborosybrownanimation-play-statext-align-lastresscrollbar-ar" +
"row-coloruby-positionanimation-timing-functionazimuthoneydew" +
"ord-breakbackground-originclude-sourcebackground-position-xb" +
"ackground-position-ybackground-repeat-xbackground-sizebehavi" +
"orblackblanchedalmondarkblueboldarkcyanimation-delayer-backg" +
"round-colorborder-bottom-colorborder-bottom-stylemonchiffont" +
"-faceborder-bottom-widthslavenderblushborder-box-shadoword-s" +
"pacinghostwhitext-decoration-colorborder-collapseashellawngr" +
"eenborder-colorborder-left-colorborder-left-styleborder-left" +
"-widthborder-right-colorborder-right-styleborder-right-width" +
"border-spacingrid-areanimation-durationormalphacceleratorpha" +
"nsandybrownonempty-cellsans-serifantasyborder-styleborder-to" +
"p-colorborder-top-styleborder-top-widthborder-widthburlywood" +
"arkgoldenrodarkgraycaption-sideepskybluecaret-colorchartreus" +
"echocolatext-autospaceclampadding-boxclearcolumn-counter-res" +
"etransition-propertycolumn-rule-colorcolumn-rule-stylecolumn" +
"-rule-widthcolumn-widthcornflowerbluecornsilkcue-aftercue-be" +
"forestgreenvisibilitycurrentcolorcursivecursordarkvioletdocu" +
"mentdodgerbluedpcmargin-topadding-rightdpitch-rangedppxflex-" +
"growflex-shrinkflex-wrapadding-topage-break-afterfloattransi" +
"tion-delayer-background-imagefloralwhitesmokeyframescrollbar" +
"-dark-shadow-colorfont-familyfont-size-adjustify-itemscrollb" +
"ar-face-colorfont-stretcharsetfont-stylefont-variantiquewhit" +
"e-spacefont-weightfuchsianimation-fill-modeeppinkhz-indexx-s" +
"malleroyalbluegrid-column-gapage-break-beforegrid-column-sta" +
"rtgrid-row-endarkolivegreengrid-row-gapage-break-insidegrid-" +
"row-startgrid-template-areascrollbar-track-colorgrid-templat" +
"e-columnsolidarkorangeredarkgreenyellowgreengrid-template-ro" +
"wspeak-headerimportantinheritinitialicebluevioletter-spacing" +
"rid-auto-columnscrollbar-highlight-colorinvertical-align-ite" +
"mspeak-numeralayout-grid-char-spacingrid-auto-flowjustify-se" +
"lfirebricklayout-grid-line-breaklayout-grid-modegrid-auto-ro" +
"wscrollbar-shadow-colorlayout-grid-typeachpufflex-basiscroll" +
"bar-base-colorlightbluelightcoralign-selflex-directionlightc" +
"yanimation-directionlightgoldenrodyellowlightgraylightgreenl" +
"ightpinklightsalmonlightseagreenlightskybluelightslateblueli" +
"ghtsteelbluelightyellowlimegreenlinear-gradientlist-style-im" +
"agelist-style-positionlist-style-typelocalcadetbluemaskmax-h" +
"eightmax-widthmediumaquamarinemediumbluemediumorchidarkorchi" +
"darkkhakime-modefaultransition-timing-functionmediumpurpleme" +
"diumseagreenmediumslatebluemediumspringgreenmediumturquoisem" +
"ediumvioletredarksalmonospacemidnightbluemin-heightmin-width" +
"mintcreamarker-offset-anchormistyrosemmarkspeak-punctuationm" +
"occasindianredarkseagreenoffset-distanceoffset-pathoffset-po" +
"sitionoffset-rotatext-decoration-styleolivedrabackground-cli" +
"padding-bottomargin-rightransition-durationoutline-coloroutl" +
"ine-styleoutline-widthoverflow-ypalegreenpaleturquoisepalevi" +
"oletredarkslategraypapayawhipalegoldenrodarkslatebluepause-a" +
"fterpause-beforeplace-contentplace-itemspeech-ratext-decorat" +
"ion-thicknesstrokeplace-selflex-flowriting-modepowderbluepro" +
"gidarkturquoisequotesupportsunicode-bidisplay-duringrid-colu" +
"mn-endarkmagentable-layout-floword-wrapadding-leftransparent" +
"urnunicode-rangeunsetvoice-familyvolumedianimation-iteration" +
"-counter-incrementext-shadowidowscrollbar-3d-light-coloruby-" +
"align-contentext-transformargin-bottomargin-leftext-underlin" +
"e-position")
// _Hash_table is the generated perfect-hash lookup table, indexed by bits of
// the FNV-1a hash computed in ToHash. Each entry packs offset/length into
// _Hash_text exactly like a Hash value.
var _Hash_table = [1 << 10]Hash{
	0x3:   0xc290b, // pause-after
	0x6:   0xd5d11, // counter-increment
	0x8:   0xcce07, // display
	0x9:   0x51a0a, // darkviolet
	0xb:   0xbf09,  // no-repeat
	0xd:   0x4402,  // in
	0x14:  0x6f211, // page-break-inside
	0x15:  0x6250c, // font-stretch
	0x19:  0x5f910, // font-size-adjust
	0x1a:  0x47513, // transition-property
	0x1c:  0x78105, // speak
	0x1f:  0x82a0c, // justify-self
	0x20:  0x61114, // scrollbar-face-color
	0x24:  0x2b60f, // border-collapse
	0x25:  0x68607, // z-index
	0x27:  0xd980d, // align-content
	0x2a:  0x99f13, // list-style-position
	0x2b:  0xcdb0f, // grid-column-end
	0x2c:  0x14119, // animation-timing-function
	0x30:  0xb0909, // indianred
	0x34:  0x97709, // limegreen
	0x35:  0xbc10d, // outline-width
	0x3f:  0x15a07, // azimuth
	0x40:  0x1e70e, // blanchedalmond
	0x41:  0x84a0a, // line-break
	0x42:  0x7a209, // aliceblue
	0x43:  0xf309,  // rosybrown
	0x46:  0xa7c0f, // mediumturquoise
	0x49:  0xd7706, // widows
	0x4b:  0xb370f, // offset-position
	0x4d:  0xd150b, // transparent
	0x4e:  0x79d07, // initial
	0x52:  0x1cb0f, // background-size
	0x55:  0x2505,  // color
	0x56:  0x59a10, // transition-delay
	0x5a:  0x750b,  // navajowhite
	0x5b:  0x7110d, // grid-template
	0x5c:  0x3b710, // border-top-color
	0x62:  0xbce0a, // overflow-y
	0x64:  0x9370d, // lightseagreen
	0x6c:  0x10e0f, // text-align-last
	0x6f:  0x8050b, // layout-grid
	0x70:  0xca09,  // animation
	0x71:  0x1da08, // behavior
	0x72:  0x5390a, // margin-top
	0x74:  0x3ab0c, // border-style
	0x78:  0x5d31b, // scrollbar-dark-shadow-color
	0x79:  0x69103, // all
	0x7a:  0x3f0b,  // text-indent
	0x7b:  0xbe10d, // paleturquoise
	0x7e:  0x58510, // page-break-after
	0x80:  0x5420d, // padding-right
	0x84:  0x7e60e, // vertical-align
	0x85:  0x50d07, // cursive
	0x8a:  0x7030e, // grid-row-start
	0x8c:  0xae08,  // richness
	0x8e:  0x3b70a, // border-top
	0x94:  0x35509, // grid-area
	0x95:  0x85410, // layout-grid-mode
	0x96:  0xaee05, // marks
	0x97:  0x64d01, // q
	0x98:  0x78d09, // important
	0x9c:  0x406,   // filter
	0x9d:  0xa8b0f, // mediumvioletred
	0xa5:  0xc570b, // speech-rate
	0xa8:  0x53702, // pc
	0xab:  0x90f,   // radial-gradient
	0xae:  0x11b06, // stress
	0xb4:  0x6050d, // justify-items
	0xb7:  0x9500e, // lightslateblue
	0xba:  0x35504, // grid
	0xbb:  0xb0308, // moccasin
	0xbe:  0xd0209, // word-wrap
	0xc0:  0x6d90e, // darkolivegreen
	0xc5:  0xc6019, // text-decoration-thickness
	0xc7:  0xdb06,  // center
	0xc8:  0x2a115, // text-decoration-color
	0xcb:  0xabf09, // min-width
	0xce:  0x5ee0b, // font-family
	0xd1:  0xa1c08, // ime-mode
	0xd3:  0x3d710, // border-top-width
	0xd4:  0x53906, // margin
	0xd9:  0x4880b, // column-rule
	0xda:  0x98f0a, // list-style
	0xdf:  0x6ce0c, // grid-row-end
	0xe4:  0x2050f, // animation-delay
	0xe8:  0x4aa11, // column-rule-width
	0xec:  0x57309, // flex-wrap
	0xed:  0xced07, // magenta
	0xee:  0x88710, // layout-grid-type
	0xef:  0x4520b, // padding-box
	0xf0:  0x7e14,  // text-decoration-line
	0xf2:  0x4dd09, // cue-after
	0xf4:  0x8640e, // grid-auto-rows
	0xf5:  0x7650b, // yellowgreen
	0xf8:  0x89509, // peachpuff
	0xf9:  0x74607, // columns
	0xfa:  0x22805, // order
	0xfb:  0x3120c, // border-right
	0x100: 0x1800e, // include-source
	0x104: 0xc2905, // pause
	0x105: 0x1fc04, // bold
	0x106: 0xcc40c, // unicode-bidi
	0x108: 0x67604, // fill
	0x109: 0x75c09, // darkgreen
	0x10b: 0x45d05, // clear
	0x10c: 0x67d08, // deeppink
	0x110: 0x8e913, // animation-direction
	0x112: 0x1b811, // background-repeat
	0x117: 0xca506, // progid
	0x11d: 0x8a614, // scrollbar-base-color
	0x11e: 0xa,     // -ms-filter
	0x11f: 0x2ca09, // lawngreen
	0x120: 0x51406, // cursor
	0x121: 0x44e05, // clamp
	0x123: 0x48811, // column-rule-color
	0x128: 0x40f0c, // caption-side
	0x12a: 0xc9b0a, // powderblue
	0x12b: 0xdc717, // text-underline-position
	0x12d: 0x72315, // scrollbar-track-color
	0x131: 0x81c0e, // grid-auto-flow
	0x132: 0x7810c, // speak-header
	0x133: 0x25409, // font-face
	0x136: 0xa710b, // springgreen
	0x13a: 0xc7e0a, // place-self
	0x13d: 0xc206,  // repeat
	0x13e: 0x9800f, // linear-gradient
	0x142: 0x5010c, // currentcolor
	0x145: 0xad706, // offset
	0x14a: 0x69e0f, // grid-column-gap
	0x14c: 0x6905,  // space
	0x14e: 0x39b0a, // sans-serif
	0x14f: 0x6360a, // font-style
	0x153: 0x66607, // fuchsia
	0x154: 0xb7904, // clip
	0x155: 0xae409, // mistyrose
	0x158: 0x9d08,  // overflow
	0x15d: 0xc7806, // stroke
	0x162: 0x80510, // layout-grid-char
	0x163: 0xa420c, // mediumpurple
	0x165: 0x4f503, // env
	0x168: 0x4690d, // counter-reset
	0x16b: 0x5cb09, // keyframes
	0x16f: 0x7b05,  // white
	0x172: 0x1004,  // grad
	0x174: 0xdb10d, // margin-bottom
	0x175: 0x31212, // border-right-color
	0x177: 0x25404, // font
	0x178: 0xc100d, // palegoldenrod
	0x179: 0x73815, // grid-template-columns
	0x17a: 0x7e0f,  // text-decoration
	0x17e: 0x89d0a, // flex-basis
	0x186: 0x7ef0b, // align-items
	0x189: 0x4bb0c, // column-width
	0x18a: 0x3c710, // border-top-style
	0x18b: 0x1d604, // size
	0x18c: 0xd4505, // media
	0x191: 0xb7c0e, // padding-bottom
	0x194: 0x2df11, // border-left-color
	0x195: 0x7a70a, // blueviolet
	0x198: 0x92c0b, // lightsalmon
	0x19d: 0x27108, // lavender
	0x19e: 0x5a716, // layer-background-image
	0x1a0: 0x6500b, // white-space
	0x1a3: 0xe00d,  // ruby-overhang
	0x1a4: 0x24b0c, // lemonchiffon
	0x1a5: 0x3be03, // top
	0x1a9: 0x2c308, // seashell
	0x1aa: 0x7ae0e, // letter-spacing
	0x1ac: 0x2b0a,  // background
	0x1af: 0x64503, // var
	0x1b0: 0xaed02, // mm
	0x1b6: 0x12015, // scrollbar-arrow-color
	0x1b8: 0xda40e, // text-transform
	0x1b9: 0x65b0b, // font-weight
	0x1ba: 0x53802, // cm
	0x1bb: 0x12006, // scroll
	0x1c0: 0x21710, // background-color
	0x1c1: 0x2710d, // lavenderblush
	0x1c6: 0xb5115, // text-decoration-style
	0x1c9: 0x79607, // inherit
	0x1cf: 0x2e604, // left
	0x1d0: 0x6490c, // antiquewhite
	0x1d4: 0xb6609, // olivedrab
	0x1da: 0x2990a, // ghostwhite
	0x1dd: 0x91009, // lightgray
	0x1e2: 0x26f04, // hsla
	0x1e3: 0x26f03, // hsl
	0x1e4: 0xbd809, // palegreen
	0x1e5: 0x4190b, // deepskyblue
	0x1e8: 0xac809, // mintcream
	0x1ea: 0x7e406, // invert
	0x1eb: 0x6400c, // font-variant
	0x1ec: 0x8fc14, // lightgoldenrodyellow
	0x1ee: 0x62f07, // charset
	0x1ef: 0xc8f0c, // writing-mode
	0x1f0: 0x5c30a, // whitesmoke
	0x1f5: 0x9d0a,  // overflow-x
	0x1f6: 0xaa90c, // midnightblue
	0x1f7: 0xcb706, // quotes
	0x1f8: 0x22706, // border
	0x1fa: 0x42f0a, // chartreuse
	0x1fc: 0xba707, // outline
	0x1fd: 0xa281a, // transition-timing-function
	0x1fe: 0xcbc08, // supports
	0x204: 0x1670a, // word-break
	0x205: 0xaa009, // monospace
	0x206: 0x2850a, // box-shadow
	0x209: 0x5680b, // flex-shrink
	0x20f: 0xd0a0c, // padding-left
	0x214: 0xc4d0b, // place-items
	0x216: 0xc070a, // papayawhip
	0x217: 0x17111, // background-origin
	0x218: 0x52408, // document
	0x219: 0x52c0a, // dodgerblue
	0x21c: 0x9440c, // lightskyblue
	0x21e: 0x6bd11, // grid-column-start
	0x221: 0x30111, // border-left-width
	0x224: 0x68c08, // xx-small
	0x226: 0x1f408, // darkblue
	0x229: 0x25d13, // border-bottom-width
	0x22a: 0x98f10, // list-style-image
	0x22d: 0x44504, // auto
	0x230: 0x1e205, // black
	0x231: 0xaf211, // speak-punctuation
	0x232: 0x13908, // position
	0x234: 0xc340c, // pause-before
	0x236: 0x95e0e, // lightsteelblue
	0x23a: 0xcd10b, // play-during
	0x23f: 0x83509, // firebrick
	0x249: 0x6ce08, // grid-row
	0x24a: 0x55d02, // px
	0x24c: 0x1a315, // background-position-y
	0x251: 0xd1f04, // turn
	0x256: 0xba70d, // outline-color
	0x257: 0x9c304, // calc
	0x258: 0xd4919, // animation-iteration-count
	0x259: 0xad70d, // offset-anchor
	0x25b: 0xa4e0e, // mediumseagreen
	0x25e: 0x4620c, // column-count
	0x263: 0x10e0a, // text-align
	0x266: 0x66c13, // animation-fill-mode
	0x267: 0x32412, // border-right-style
	0x268: 0xa707,  // x-large
	0x269: 0x8d40e, // flex-direction
	0x26a: 0x4f70a, // visibility
	0x26f: 0xb2c0b, // offset-path
	0x270: 0x27e0a, // border-box
	0x276: 0x70103, // deg
	0x278: 0x1713,  // text-emphasis-color
	0x27f: 0xc1c0d, // darkslateblue
	0x283: 0x55f09, // flex-grow
	0x285: 0x8e209, // lightcyan
	0x28a: 0x102,   // ms
	0x28d: 0xa906,  // larger
	0x28e: 0xa990a, // darksalmon
	0x292: 0x2f011, // border-left-style
	0x293: 0xa8209, // turquoise
	0x294: 0x3a407, // fantasy
	0x296: 0xec09,  // gainsboro
	0x297: 0x201,   // s
	0x298: 0x23a13, // border-bottom-style
	0x299: 0xce90b, // darkmagenta
	0x29b: 0xb50b,  // saddlebrown
	0x2a0: 0x59505, // float
	0x2a3: 0x6ec07, // row-gap
	0x2a5: 0xd4106, // volume
	0x2a6: 0xab50a, // min-height
	0x2a7: 0x77012, // grid-template-rows
	0x2a9: 0x3760b, // accelerator
	0x2b0: 0x68f05, // small
	0x2b1: 0x59804, // attr
	0x2b2: 0x28e0c, // word-spacing
	0x2b3: 0x35d12, // animation-duration
	0x2b5: 0x4dd03, // cue
	0x2b6: 0x95509, // slateblue
	0x2b8: 0x38e04, // none
	0x2b9: 0x6a30a, // column-gap
	0x2ba: 0x4e0f,  // justify-content
	0x2bb: 0x5607,  // content
	0x2bd: 0x54f03, // dpi
	0x2be: 0x87116, // scrollbar-shadow-color
	0x2bf: 0x78d06, // import
	0x2c0: 0xc8709, // flex-flow
	0x2c1: 0x69509, // royalblue
	0x2c3: 0x9c609, // cadetblue
	0x2c4: 0x490c,  // text-justify
	0x2cb: 0x8c30a, // lightcoral
	0x2cf: 0xb890c, // margin-right
	0x2d2: 0x76506, // yellow
	0x2d3: 0x26b05, // width
	0x2d6: 0x14d03, // min
	0x2da: 0x1340d, // ruby-position
	0x2dc: 0x40708, // darkgray
	0x2e2: 0x69e0b, // grid-column
	0x2e4: 0xa1409, // darkkhaki
	0x2e5: 0xc400d, // place-content
	0x2e7: 0xbee0d, // palevioletred
	0x2ea: 0x5bd0b, // floralwhite
	0x2eb: 0xc208,  // repeat-y
	0x2ee: 0x980d,  // text-overflow
	0x2f1: 0xca0e,  // animation-name
	0x2fb: 0x7cb19, // scrollbar-highlight-color
	0x2fe: 0x5500b, // pitch-range
	0x302: 0x3005,  // round
	0x305: 0x4c70e, // cornflowerblue
	0x307: 0x7f90d, // speak-numeral
	0x308: 0x9e606, // medium
	0x30a: 0x170d,  // text-emphasis
	0x30d: 0x9dd09, // max-width
	0x311: 0x36e06, // normal
	0x312: 0x68403, // khz
	0x315: 0x2903,  // rgb
	0x316: 0x8ba09, // lightblue
	0x317: 0x8d909, // direction
	0x31a: 0xd350c, // voice-family
	0x31c: 0x3480e, // border-spacing
	0x321: 0x6d09,  // elevation
	0x323: 0x1c308, // repeat-x
	0x324: 0x83e10, // layout-grid-line
	0x326: 0xa000c, // mediumorchid
	0x32b: 0xa6b11, // mediumspringgreen
	0x32d: 0xa905,  // large
	0x32e: 0xd930a, // ruby-align
	0x330: 0xbfa0d, // darkslategray
	0x332: 0x5c12,  // text-kashida-space
	0x334: 0xbb40d, // outline-style
	0x336: 0x3a005, // serif
	0x337: 0x4240b, // caret-color
	0x33a: 0x37205, // alpha
	0x33c: 0x71113, // grid-template-areas
	0x33d: 0x49911, // column-rule-style
	0x33f: 0xcf80b, // layout-flow
	0x340: 0x31905, // right
	0x341: 0x3e70c, // border-width
	0x343: 0xb6e0f, // background-clip
	0x344: 0xd230d, // unicode-range
	0x345: 0x74c05, // solid
	0x346: 0x2df0b, // border-left
	0x348: 0x9ec0a, // aquamarine
	0x349: 0x3850a, // sandybrown
	0x34a: 0x16008, // honeydew
	0x34b: 0x75409, // orangered
	0x34c: 0xb110c, // darkseagreen
	0x34d: 0x37f07, // orphans
	0x34e: 0x6e70c, // grid-row-gap
	0x351: 0x22e06, // bottom
	0x359: 0x9c105, // local
	0x35c: 0x8cb0a, // align-self
	0x35e: 0x33612, // border-right-width
	0x360: 0x2b15,  // background-attachment
	0x364: 0x9190a, // lightgreen
	0x366: 0x39302, // pt
	0x368: 0x4400e, // text-autospace
	0x36b: 0x3f403, // url
	0x36c: 0x68502, // hz
	0x371: 0x9306,  // height
	0x372: 0x5ad10, // background-image
	0x377: 0x903,   // rad
	0x37c: 0x21116, // layer-background-color
	0x37d: 0x1ff08, // darkcyan
	0x382: 0x18e13, // background-position
	0x384: 0x9d303, // max
	0x38c: 0xa608,  // xx-large
	0x38d: 0x3f309, // burlywood
	0x38f: 0xd7c18, // scrollbar-3d-light-color
	0x390: 0x3ff09, // goldenrod
	0x392: 0x92309, // lightpink
	0x393: 0x8e0b,  // line-height
	0x396: 0x22713, // border-bottom-color
	0x398: 0x80518, // layout-grid-char-spacing
	0x39c: 0x2904,  // rgba
	0x3a1: 0x9f60a, // mediumblue
	0x3a3: 0x9d30a, // max-height
	0x3a4: 0x7bb11, // grid-auto-columns
	0x3a5: 0xa0b0a, // darkorchid
	0x3a9: 0x7600b, // greenyellow
	0x3ae: 0x96c0b, // lightyellow
	0x3b1: 0x4750a, // transition
	0x3b3: 0x4e60a, // cue-before
	0x3b6: 0x15208, // function
	0x3b9: 0x96309, // steelblue
	0x3be: 0xa5c0f, // mediumslateblue
	0x3bf: 0xcaa0d, // darkturquoise
	0x3c0: 0x43909, // chocolate
	0x3c3: 0x5f909, // font-size
	0x3c5: 0x55f04, // flex
	0x3c7: 0xd3005, // unset
	0x3c8: 0xd6d0b, // text-shadow
	0x3ca: 0x4ec0b, // forestgreen
	0x3cc: 0xbfe09, // slategray
	0x3cd: 0x6ac11, // page-break-before
	0x3ce: 0x55b04, // dppx
	0x3d0: 0x2270d, // border-bottom
	0x3d3: 0xb1d0f, // offset-distance
	0x3d4: 0x3fb0d, // darkgoldenrod
	0x3d6: 0x53604, // dpcm
	0x3d8: 0x7500a, // darkorange
	0x3dc: 0xb9413, // transition-duration
	0x3de: 0x2d30c, // border-color
	0x3df: 0x18e15, // background-position-x
	0x3e0: 0x55005, // pitch
	0x3e2: 0xdbd0b, // margin-left
	0x3e3: 0x58504, // page
	0x3e5: 0x57b0b, // padding-top
	0x3e7: 0xb460d, // offset-rotate
	0x3e8: 0x93c08, // seagreen
	0x3e9: 0x4d508, // cornsilk
	0x3ea: 0x68f07, // smaller
	0x3ec: 0xcf20c, // table-layout
	0x3ed: 0xfc14,  // animation-play-state
	0x3ef: 0xa2207, // default
	0x3f0: 0x68d07, // x-small
	0x3f3: 0x9e610, // mediumaquamarine
	0x3f4: 0xad00d, // marker-offset
	0x3f9: 0xd409,  // namespace
	0x3fa: 0x9cf04, // mask
	0x3fb: 0x45207, // padding
	0x3fd: 0x9b20f, // list-style-type
	0x3ff: 0x3910b, // empty-cells
}
package css
import (
"encoding/hex"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/css"
)
// removeMarkupNewlines removes all escaped newlines (\\\r\n, \\\r or \\\n)
// from data, compacting the remaining bytes in place and returning the
// (possibly shortened) slice.
func removeMarkupNewlines(data []byte) []byte {
	// remove any \\\r\n \\\r \\\n
	// NOTE(review): the scan starts at index 1 and stops two bytes before the
	// end — presumably data is a delimited token whose first/last bytes
	// cannot begin an escape; confirm against callers.
	for i := 1; i < len(data)-2; i++ {
		if data[i] == '\\' && (data[i+1] == '\n' || data[i+1] == '\r') {
			// encountered first replacee, now start to move bytes to the front
			// From here on, i is the write position and j the read position.
			j := i + 2
			if data[i+1] == '\r' && len(data) > i+2 && data[i+2] == '\n' {
				j++ // \\\r\n: also skip the \n
			}
			for ; j < len(data); j++ {
				if data[j] == '\\' && len(data) > j+1 && (data[j+1] == '\n' || data[j+1] == '\r') {
					// Skip another escaped newline (two or three bytes).
					if data[j+1] == '\r' && len(data) > j+2 && data[j+2] == '\n' {
						j++
					}
					j++
				} else {
					// Surviving byte: compact it towards the front.
					data[i] = data[j]
					i++
				}
			}
			data = data[:i] // truncate to the compacted length
			break
		}
	}
	return data
}
// rgbToToken converts RGB channel values in the interval [0.0, 1.0] to a CSS
// color token: an ident token when ShortenColorHex maps the hex form to a
// shorter name, otherwise a hash token, shortened from #rrggbb to #rgb when
// each channel's two hex digits are equal.
func rgbToToken(r, g, b float64) Token {
	// r, g, b are in interval [0.0, 1.0]
	// +0.5 rounds to the nearest byte value.
	rgb := []byte{byte((r * 255.0) + 0.5), byte((g * 255.0) + 0.5), byte((b * 255.0) + 0.5)}
	val := make([]byte, 7)
	val[0] = '#'
	hex.Encode(val[1:], rgb)
	parse.ToLower(val)
	if s, ok := ShortenColorHex[string(val[:7])]; ok {
		// A named color replacement exists for this hex value.
		return Token{css.IdentToken, s, nil, 0, 0}
	} else if val[1] == val[2] && val[3] == val[4] && val[5] == val[6] {
		// All three channels have doubled digits: shorten #rrggbb to #rgb.
		val[2] = val[3]
		val[3] = val[5]
		val = val[:4]
	} else {
		val = val[:7]
	}
	return Token{css.HashToken, val, nil, 0, 0}
}
package html
import (
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/html"
)
// Token is a single token unit with an attribute value (if given) and hash of the data.
type Token struct {
	html.TokenType
	Hash        Hash   // ToHash of Text; zero for tokens that are not tags or attributes
	Data        []byte // raw token data as returned by the lexer
	Text        []byte // tag or attribute name text from the lexer
	AttrVal     []byte // attribute value with surrounding quotes stripped; nil otherwise
	Traits      traits // tag/attribute traits looked up via Hash; zero if unknown
	Offset      int    // byte offset in the input; for attributes it points at the value
	HasTemplate bool   // whether the lexer reported template syntax in this token
}
// TokenBuffer is a buffer that allows for token look-ahead.
type TokenBuffer struct {
	r          *parse.Input // input, used to compute token offsets
	l          *html.Lexer  // underlying lexer producing the tokens
	buf        []Token      // tokens read ahead; buf[pos:] are not yet consumed
	pos        int          // index into buf of the next token to shift
	attrBuffer []*Token     // scratch slice reused by Attributes to avoid allocations
}
// NewTokenBuffer returns a new TokenBuffer reading tokens from l with
// offsets taken from r.
func NewTokenBuffer(r *parse.Input, l *html.Lexer) *TokenBuffer {
	z := TokenBuffer{}
	z.r = r
	z.l = l
	// Start with a small lookahead buffer; Peek grows it on demand.
	z.buf = make([]Token, 0, 8)
	return &z
}
// read fills t with the next token from the lexer, resolving hash and traits
// for tag and attribute tokens.
func (z *TokenBuffer) read(t *Token) {
	t.Offset = z.r.Offset()
	t.TokenType, t.Data = z.l.Next()
	t.Text = z.l.Text()
	t.HasTemplate = z.l.HasTemplate()
	if t.TokenType == html.AttributeToken {
		// Advance Offset past the attribute name and '=' (plus one extra byte,
		// presumably the preceding space) so it points at the value.
		t.Offset += 1 + len(t.Text) + 1
		t.AttrVal = z.l.AttrVal()
		if 1 < len(t.AttrVal) && (t.AttrVal[0] == '"' || t.AttrVal[0] == '\'') {
			t.Offset++
			t.AttrVal = t.AttrVal[1 : len(t.AttrVal)-1] // quotes will be readded in attribute loop if necessary
		}
		t.Hash = ToHash(t.Text)
		t.Traits = attrMap[t.Hash]
	} else if t.TokenType == html.StartTagToken || t.TokenType == html.EndTagToken {
		t.AttrVal = nil
		t.Hash = ToHash(t.Text)
		t.Traits = tagMap[t.Hash] // zero if not exist
	} else {
		// All other token types carry no attribute value, hash or traits.
		t.AttrVal = nil
		t.Hash = 0
		t.Traits = 0
	}
}
// Peek returns the ith element and possibly does an allocation.
// Peeking past an error will panic.
func (z *TokenBuffer) Peek(pos int) *Token {
	pos += z.pos // absolute index into buf
	if pos >= len(z.buf) {
		if len(z.buf) > 0 && z.buf[len(z.buf)-1].TokenType == html.ErrorToken {
			// Lexing ended; keep returning the trailing error token.
			return &z.buf[len(z.buf)-1]
		}
		c := cap(z.buf)
		d := len(z.buf) - z.pos // tokens still buffered, to be kept
		p := pos - z.pos + 1    // required peek length
		var buf []Token
		if 2*p > c {
			// Grow: 2*c+p guarantees room for the kept tokens plus the lookahead.
			buf = make([]Token, 0, 2*c+p)
		} else {
			buf = z.buf
		}
		// Move the unread tokens to the front. Slicing buf[:d] beyond its
		// length is valid because d <= p <= cap.
		copy(buf[:d], z.buf[z.pos:])
		buf = buf[:p]
		pos -= z.pos
		// Read new tokens until the requested position is filled, stopping
		// early at an error token.
		for i := d; i < p; i++ {
			z.read(&buf[i])
			if buf[i].TokenType == html.ErrorToken {
				buf = buf[:i+1]
				pos = i // clamp the request to the error token
				break
			}
		}
		z.pos, z.buf = 0, buf
	}
	return &z.buf[pos]
}
// Shift returns the first element and advances position.
func (z *TokenBuffer) Shift() *Token {
	if z.pos >= len(z.buf) {
		// Buffer exhausted: read directly into the first slot, reslicing
		// within the buffer's capacity, bypassing Peek.
		t := &z.buf[:1][0]
		z.read(t)
		return t
	}
	t := &z.buf[z.pos]
	z.pos++
	return t
}
// Attributes extracts the given attribute hashes from a tag.
// It returns, in the same order as the requested hashes, pointers to the
// matching token data, or nil for attributes that are not present.
func (z *TokenBuffer) Attributes(hashes ...Hash) []*Token {
	// Peek ahead to count the attribute tokens following the current tag.
	n := 0
	for {
		if t := z.Peek(n); t.TokenType != html.AttributeToken {
			break
		}
		n++
	}
	// Reuse attrBuffer when it has enough capacity to avoid an allocation.
	if len(hashes) > cap(z.attrBuffer) {
		z.attrBuffer = make([]*Token, len(hashes))
	} else {
		z.attrBuffer = z.attrBuffer[:len(hashes)]
		for i := range z.attrBuffer {
			z.attrBuffer[i] = nil
		}
	}
	// Match each buffered attribute against the requested hashes.
	for i := z.pos; i < z.pos+n; i++ {
		attr := &z.buf[i]
		for j, hash := range hashes {
			if hash == attr.Hash {
				z.attrBuffer[j] = attr
			}
		}
	}
	return z.attrBuffer
}
package html
// generated by hasher -type=Hash -file=hash.go; DO NOT EDIT, except for adding more constants to the list and rerun go generate
// uses github.com/tdewolff/hasher
//go:generate hasher -type=Hash -file=hash.go
// Hash defines perfect hashes for a predefined list of strings.
// The upper 24 bits encode an offset into _Hash_text and the low 8 bits the
// string length (see String).
type Hash uint32
// Unique hash definitions to be used instead of strings.
// Generated by hasher; each value packs offset<<8 | length into _Hash_text.
const (
	A                        Hash = 0x1     // a
	Abbr                     Hash = 0x40004 // abbr
	About                    Hash = 0x5     // about
	Accept                   Hash = 0xc06   // accept
	Accept_Charset           Hash = 0xc0e   // accept-charset
	Accesskey                Hash = 0x2c09  // accesskey
	Acronym                  Hash = 0x3507  // acronym
	Action                   Hash = 0x26006 // action
	Address                  Hash = 0x6d07  // address
	Allow                    Hash = 0x31f05 // allow
	Allowfullscreen          Hash = 0x31f0f // allowfullscreen
	Amp_Boilerplate          Hash = 0x5e0f  // amp-boilerplate
	Applet                   Hash = 0xee06  // applet
	Area                     Hash = 0x2c304 // area
	Article                  Hash = 0x22507 // article
	As                       Hash = 0x2102  // as
	Aside                    Hash = 0x9205  // aside
	Async                    Hash = 0x8a05  // async
	Audio                    Hash = 0x9d05  // audio
	Autocapitalize           Hash = 0xc30e  // autocapitalize
	Autocomplete             Hash = 0xd10c  // autocomplete
	Autofocus                Hash = 0xe309  // autofocus
	Autoplay                 Hash = 0xfc08  // autoplay
	B                        Hash = 0x101   // b
	Base                     Hash = 0x2004  // base
	Basefont                 Hash = 0x2008  // basefont
	Bb                       Hash = 0x40102 // bb
	Bdi                      Hash = 0x8303  // bdi
	Bdo                      Hash = 0x3dc03 // bdo
	Big                      Hash = 0x12f03 // big
	Blocking                 Hash = 0x13208 // blocking
	Blockquote               Hash = 0x13a0a // blockquote
	Body                     Hash = 0x804   // body
	Br                       Hash = 0x14b02 // br
	Button                   Hash = 0x14406 // button
	Canvas                   Hash = 0x8e06  // canvas
	Caption                  Hash = 0x23707 // caption
	Capture                  Hash = 0x10d07 // capture
	Center                   Hash = 0x24f06 // center
	Charset                  Hash = 0x1307  // charset
	Checked                  Hash = 0x37707 // checked
	Cite                     Hash = 0x14d04 // cite
	Class                    Hash = 0x15a05 // class
	Code                     Hash = 0x17604 // code
	Col                      Hash = 0x17f03 // col
	Colgroup                 Hash = 0x17f08 // colgroup
	Color                    Hash = 0x19e05 // color
	Cols                     Hash = 0x1a304 // cols
	Colspan                  Hash = 0x1a307 // colspan
	Content                  Hash = 0x1b107 // content
	Contenteditable          Hash = 0x1b10f // contenteditable
	Controls                 Hash = 0x1cc08 // controls
	Coords                   Hash = 0x1e306 // coords
	Crossorigin              Hash = 0x2160b // crossorigin
	Data                     Hash = 0xad04  // data
	Datalist                 Hash = 0xad08  // datalist
	Datatype                 Hash = 0x11908 // datatype
	Datetime                 Hash = 0x28508 // datetime
	Dd                       Hash = 0x6e02  // dd
	Decoding                 Hash = 0x9508  // decoding
	Default                  Hash = 0x17807 // default
	Defer                    Hash = 0x4405  // defer
	Del                      Hash = 0x1f203 // del
	Details                  Hash = 0x20b07 // details
	Dfn                      Hash = 0x16a03 // dfn
	Dialog                   Hash = 0x28d06 // dialog
	Dir                      Hash = 0x8403  // dir
	Disabled                 Hash = 0x19208 // disabled
	Div                      Hash = 0x19903 // div
	Dl                       Hash = 0x1c302 // dl
	Draggable                Hash = 0x1da09 // draggable
	Dt                       Hash = 0x40902 // dt
	Em                       Hash = 0xdc02  // em
	Embed                    Hash = 0x16605 // embed
	Enctype                  Hash = 0x26a07 // enctype
	Enterkeyhint             Hash = 0x2500c // enterkeyhint
	Fetchpriority            Hash = 0x1220d // fetchpriority
	Fieldset                 Hash = 0x22c08 // fieldset
	Figcaption               Hash = 0x2340a // figcaption
	Figure                   Hash = 0x24506 // figure
	Font                     Hash = 0x2404  // font
	Footer                   Hash = 0x1a06  // footer
	For                      Hash = 0x25c03 // for
	Form                     Hash = 0x25c04 // form
	Formaction               Hash = 0x25c0a // formaction
	Formenctype              Hash = 0x2660b // formenctype
	Formmethod               Hash = 0x2710a // formmethod
	Formnovalidate           Hash = 0x27b0e // formnovalidate
	Formtarget               Hash = 0x2930a // formtarget
	Frame                    Hash = 0x16e05 // frame
	Frameset                 Hash = 0x16e08 // frameset
	H1                       Hash = 0x2d502 // h1
	H2                       Hash = 0x38602 // h2
	H3                       Hash = 0x39502 // h3
	H4                       Hash = 0x40b02 // h4
	H5                       Hash = 0x29d02 // h5
	H6                       Hash = 0x29f02 // h6
	Head                     Hash = 0x36c04 // head
	Header                   Hash = 0x36c06 // header
	Headers                  Hash = 0x36c07 // headers
	Height                   Hash = 0x2a106 // height
	Hgroup                   Hash = 0x2b506 // hgroup
	Hidden                   Hash = 0x2cc06 // hidden
	High                     Hash = 0x2d204 // high
	Hr                       Hash = 0x2d702 // hr
	Href                     Hash = 0x2d704 // href
	Hreflang                 Hash = 0x2d708 // hreflang
	Html                     Hash = 0x2a504 // html
	Http_Equiv               Hash = 0x2df0a // http-equiv
	I                        Hash = 0x2801  // i
	Id                       Hash = 0x9402  // id
	Iframe                   Hash = 0x2f206 // iframe
	Image                    Hash = 0x30005 // image
	Imagesizes               Hash = 0x3000a // imagesizes
	Imagesrcset              Hash = 0x30d0b // imagesrcset
	Img                      Hash = 0x31803 // img
	Inert                    Hash = 0x10805 // inert
	Inlist                   Hash = 0x21f06 // inlist
	Input                    Hash = 0x3d05  // input
	Inputmode                Hash = 0x3d09  // inputmode
	Ins                      Hash = 0x31b03 // ins
	Is                       Hash = 0xb202  // is
	Ismap                    Hash = 0x32e05 // ismap
	Itemid                   Hash = 0x2fa06 // itemid
	Itemprop                 Hash = 0x14e08 // itemprop
	Itemref                  Hash = 0x34507 // itemref
	Itemscope                Hash = 0x35709 // itemscope
	Itemtype                 Hash = 0x36108 // itemtype
	Kbd                      Hash = 0x8203  // kbd
	Kind                     Hash = 0xaa04  // kind
	Label                    Hash = 0x1c405 // label
	Lang                     Hash = 0x2db04 // lang
	Legend                   Hash = 0x1be06 // legend
	Li                       Hash = 0xb102  // li
	Link                     Hash = 0x1c804 // link
	List                     Hash = 0xb104  // list
	Loading                  Hash = 0x3ad07 // loading
	Loop                     Hash = 0x2a804 // loop
	Low                      Hash = 0x32103 // low
	Main                     Hash = 0x3b04  // main
	Map                      Hash = 0xed03  // map
	Mark                     Hash = 0x7f04  // mark
	Marquee                  Hash = 0x3e407 // marquee
	Math                     Hash = 0x36904 // math
	Max                      Hash = 0x37e03 // max
	Maxlength                Hash = 0x37e09 // maxlength
	Media                    Hash = 0x28b05 // media
	Menu                     Hash = 0x2f604 // menu
	Menuitem                 Hash = 0x2f608 // menuitem
	Meta                     Hash = 0x5004  // meta
	Meter                    Hash = 0x38805 // meter
	Method                   Hash = 0x27506 // method
	Min                      Hash = 0x38d03 // min
	Minlength                Hash = 0x38d09 // minlength
	Multiple                 Hash = 0x39708 // multiple
	Muted                    Hash = 0x39f05 // muted
	Name                     Hash = 0x4e04  // name
	Nav                      Hash = 0xbc03  // nav
	Nobr                     Hash = 0x14904 // nobr
	Noembed                  Hash = 0x16407 // noembed
	Noframes                 Hash = 0x16c08 // noframes
	Nomodule                 Hash = 0x1a908 // nomodule
	Noscript                 Hash = 0x23d08 // noscript
	Novalidate               Hash = 0x27f0a // novalidate
	Object                   Hash = 0xa106  // object
	Ol                       Hash = 0x18002 // ol
	Open                     Hash = 0x35d04 // open
	Optgroup                 Hash = 0x2aa08 // optgroup
	Optimum                  Hash = 0x3de07 // optimum
	Option                   Hash = 0x2ec06 // option
	Output                   Hash = 0x206   // output
	P                        Hash = 0x501   // p
	Param                    Hash = 0x7b05  // param
	Pattern                  Hash = 0xb607  // pattern
	Picture                  Hash = 0x18607 // picture
	Ping                     Hash = 0x2b104 // ping
	Plaintext                Hash = 0x2ba09 // plaintext
	Playsinline              Hash = 0x1000b // playsinline
	Popover                  Hash = 0x33207 // popover
	Popovertarget            Hash = 0x3320d // popovertarget
	Popovertargetaction      Hash = 0x33213 // popovertargetaction
	Portal                   Hash = 0x3f406 // portal
	Poster                   Hash = 0x41006 // poster
	Pre                      Hash = 0x3a403 // pre
	Prefix                   Hash = 0x3a406 // prefix
	Preload                  Hash = 0x3aa07 // preload
	Profile                  Hash = 0x3b407 // profile
	Progress                 Hash = 0x3bb08 // progress
	Property                 Hash = 0x15208 // property
	Q                        Hash = 0x11401 // q
	Rb                       Hash = 0x1f02  // rb
	Readonly                 Hash = 0x2c408 // readonly
	Referrerpolicy           Hash = 0x3490e // referrerpolicy
	Rel                      Hash = 0x3ab03 // rel
	Required                 Hash = 0x11208 // required
	Resource                 Hash = 0x24908 // resource
	Rev                      Hash = 0x18b03 // rev
	Reversed                 Hash = 0x18b08 // reversed
	Rows                     Hash = 0x4804  // rows
	Rowspan                  Hash = 0x4807  // rowspan
	Rp                       Hash = 0x6702  // rp
	Rt                       Hash = 0x10b02 // rt
	Rtc                      Hash = 0x10b03 // rtc
	Ruby                     Hash = 0x8604  // ruby
	S                        Hash = 0x1701  // s
	Samp                     Hash = 0x5d04  // samp
	Sandbox                  Hash = 0x7307  // sandbox
	Scope                    Hash = 0x35b05 // scope
	Script                   Hash = 0x23f06 // script
	Section                  Hash = 0x15e07 // section
	Select                   Hash = 0x1d306 // select
	Selected                 Hash = 0x1d308 // selected
	Shadowrootdelegatesfocus Hash = 0x1e818 // shadowrootdelegatesfocus
	Shadowrootmode           Hash = 0x1ff0e // shadowrootmode
	Shape                    Hash = 0x21105 // shape
	Size                     Hash = 0x30504 // size
	Sizes                    Hash = 0x30505 // sizes
	Slot                     Hash = 0x30904 // slot
	Small                    Hash = 0x31d05 // small
	Source                   Hash = 0x24b06 // source
	Span                     Hash = 0x4b04  // span
	Spellcheck               Hash = 0x3720a // spellcheck
	Src                      Hash = 0x31203 // src
	Srclang                  Hash = 0x3c207 // srclang
	Srcset                   Hash = 0x31206 // srcset
	Start                    Hash = 0x22305 // start
	Step                     Hash = 0xb304  // step
	Strike                   Hash = 0x3c906 // strike
	Strong                   Hash = 0x3cf06 // strong
	Style                    Hash = 0x3d505 // style
	Sub                      Hash = 0x3da03 // sub
	Summary                  Hash = 0x3eb07 // summary
	Sup                      Hash = 0x3f203 // sup
	Svg                      Hash = 0x3fa03 // svg
	Tabindex                 Hash = 0x5208  // tabindex
	Table                    Hash = 0x1bb05 // table
	Target                   Hash = 0x29706 // target
	Tbody                    Hash = 0x705   // tbody
	Td                       Hash = 0x1f102 // td
	Template                 Hash = 0xdb08  // template
	Text                     Hash = 0x2bf04 // text
	Textarea                 Hash = 0x2bf08 // textarea
	Tfoot                    Hash = 0x1905  // tfoot
	Th                       Hash = 0x27702 // th
	Thead                    Hash = 0x36b05 // thead
	Time                     Hash = 0x28904 // time
	Title                    Hash = 0x2705  // title
	Tr                       Hash = 0xa602  // tr
	Track                    Hash = 0xa605  // track
	Translate                Hash = 0xf309  // translate
	Tt                       Hash = 0xb802  // tt
	Type                     Hash = 0x11d04 // type
	Typeof                   Hash = 0x11d06 // typeof
	U                        Hash = 0x301   // u
	Ul                       Hash = 0x17c02 // ul
	Usemap                   Hash = 0xea06  // usemap
	Value                    Hash = 0xbe05  // value
	Var                      Hash = 0x19b03 // var
	Video                    Hash = 0x2e805 // video
	Vocab                    Hash = 0x3fd05 // vocab
	Wbr                      Hash = 0x40403 // wbr
	Width                    Hash = 0x40705 // width
	Wrap                     Hash = 0x40d04 // wrap
	Xmlns                    Hash = 0x5905  // xmlns
	Xmp                      Hash = 0x7903  // xmp
)
// String returns the hash' name.
// The upper 24 bits of the hash are an offset into _Hash_text and the low 8
// bits the name's length.
func (i Hash) String() string {
	start := uint32(i >> 8)
	n := uint32(i & 0xff)
	if start+n > uint32(len(_Hash_text)) {
		// Out-of-range hash value: return the empty string rather than panic.
		return ""
	}
	return _Hash_text[start : start+n]
}
// ToHash returns the hash whose name is s. It returns zero if there is no
// such hash. It is case sensitive.
func ToHash(s []byte) Hash {
	if len(s) == 0 || len(s) > _Hash_maxLen {
		return 0
	}
	// FNV-1a hash of the input (16777619 is the 32-bit FNV prime).
	h := uint32(_Hash_hash0)
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	// First probe: the low bits of h index _Hash_table. An entry's low byte
	// is the candidate length; its upper bits are the offset into _Hash_text.
	if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				goto NEXT
			}
		}
		return i
	}
NEXT:
	// Second probe: the upper 16 bits of h select an alternative slot.
	if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				return 0
			}
		}
		return i
	}
	return 0
}
// _Hash_hash0 is the FNV offset basis used by ToHash.
const _Hash_hash0 = 0x51243bbc

// _Hash_maxLen is the maximum length of a hashed string; longer inputs can
// never match (see ToHash).
const _Hash_maxLen = 24

// _Hash_text is the concatenated (overlapping) text of all hashed strings;
// each Hash value encodes an offset and length into it (see Hash.String).
const _Hash_text = "aboutputbodyaccept-charsetfooterbasefontitleaccesskeyacronym" +
	"ainputmodeferowspanametabindexmlnsamp-boilerplateaddressandb" +
	"oxmparamarkbdirubyasyncanvasidecodingaudiobjectrackindatalis" +
	"tepatternavalueautocapitalizeautocompletemplateautofocusemap" +
	"pletranslateautoplaysinlinertcapturequiredatatypeofetchprior" +
	"itybigblockingblockquotebuttonobrcitempropertyclassectionoem" +
	"bedfnoframesetcodefaultcolgroupictureversedisabledivarcolorc" +
	"olspanomodulecontenteditablegendlabelinkcontrolselectedragga" +
	"blecoordshadowrootdelegatesfocushadowrootmodetailshapecrosso" +
	"riginlistarticlefieldsetfigcaptionoscriptfiguresourcenterkey" +
	"hintformactionformenctypeformmethodformnovalidatetimedialogf" +
	"ormtargeth5h6heightmlooptgroupinghgrouplaintextareadonlyhidd" +
	"enhigh1hreflanghttp-equivideoptioniframenuitemidimagesizeslo" +
	"timagesrcsetimginsmallowfullscreenismapopovertargetactionite" +
	"mreferrerpolicyitemscopenitemtypematheaderspellcheckedmaxlen" +
	"gth2meterminlength3multiplemutedprefixpreloadingprofileprogr" +
	"essrclangstrikestrongstylesubdoptimumarqueesummarysuportalsv" +
	"gvocabbrwbrwidth4wraposter"
var _Hash_table = [1 << 9]Hash{
0x0: 0x4405, // defer
0x5: 0x18002, // ol
0x6: 0x3720a, // spellcheck
0x7: 0x40b02, // h4
0x8: 0x40705, // width
0x9: 0x9402, // id
0xb: 0x14904, // nobr
0xc: 0x31d05, // small
0xf: 0x2b506, // hgroup
0x10: 0x27702, // th
0x15: 0x24f06, // center
0x18: 0xd10c, // autocomplete
0x1b: 0x2c304, // area
0x1e: 0x17f03, // col
0x1f: 0x2a106, // height
0x21: 0x4b04, // span
0x22: 0x37e03, // max
0x23: 0x3cf06, // strong
0x24: 0x501, // p
0x29: 0x24b06, // source
0x2c: 0x8e06, // canvas
0x2d: 0x2c09, // accesskey
0x2e: 0x18607, // picture
0x30: 0x3a403, // pre
0x31: 0x5d04, // samp
0x34: 0x40902, // dt
0x36: 0x30505, // sizes
0x37: 0x1a908, // nomodule
0x39: 0x2a504, // html
0x3a: 0x31203, // src
0x3c: 0x28d06, // dialog
0x3e: 0x3ab03, // rel
0x40: 0x1a06, // footer
0x43: 0x30d0b, // imagesrcset
0x46: 0x3c906, // strike
0x47: 0x2e805, // video
0x4a: 0x2d702, // hr
0x4b: 0x36108, // itemtype
0x4c: 0x1c804, // link
0x4e: 0x6702, // rp
0x4f: 0x2801, // i
0x50: 0xee06, // applet
0x51: 0x17f08, // colgroup
0x53: 0x1905, // tfoot
0x54: 0xc06, // accept
0x57: 0x14d04, // cite
0x58: 0x1307, // charset
0x59: 0x17604, // code
0x5a: 0x4e04, // name
0x5b: 0x2bf04, // text
0x5d: 0x31f05, // allow
0x5e: 0x36c04, // head
0x61: 0x16605, // embed
0x62: 0x3fa03, // svg
0x63: 0x3fd05, // vocab
0x64: 0x5e0f, // amp-boilerplate
0x65: 0x38805, // meter
0x67: 0x3320d, // popovertarget
0x69: 0x3b04, // main
0x6a: 0x41006, // poster
0x6c: 0x1c302, // dl
0x6e: 0x26006, // action
0x71: 0x17807, // default
0x72: 0x3d05, // input
0x74: 0xb202, // is
0x75: 0x27506, // method
0x79: 0x7903, // xmp
0x7a: 0x101, // b
0x7b: 0x21f06, // inlist
0x7c: 0x25c0a, // formaction
0x7e: 0x39708, // multiple
0x80: 0x1f203, // del
0x81: 0x26a07, // enctype
0x83: 0x27b0e, // formnovalidate
0x84: 0x2404, // font
0x85: 0x11d06, // typeof
0x86: 0x2d704, // href
0x87: 0x13a0a, // blockquote
0x88: 0x4807, // rowspan
0x89: 0x3aa07, // preload
0x8a: 0x12f03, // big
0x8c: 0x38d09, // minlength
0x90: 0x1bb05, // table
0x91: 0x39f05, // muted
0x92: 0x3e407, // marquee
0x94: 0x3507, // acronym
0x96: 0x40d04, // wrap
0x98: 0x14b02, // br
0x9a: 0x10b02, // rt
0x9e: 0xa602, // tr
0x9f: 0x35709, // itemscope
0xa4: 0xad04, // data
0xa5: 0x29706, // target
0xac: 0x11908, // datatype
0xae: 0xb304, // step
0xb3: 0x1cc08, // controls
0xb5: 0xbe05, // value
0xb6: 0x2ba09, // plaintext
0xb7: 0x1da09, // draggable
0xc0: 0x8a05, // async
0xc2: 0x2a804, // loop
0xc3: 0x28904, // time
0xc6: 0x2004, // base
0xc7: 0x23f06, // script
0xce: 0x32103, // low
0xcf: 0x3dc03, // bdo
0xd1: 0x18b03, // rev
0xd2: 0x1e306, // coords
0xd3: 0x8403, // dir
0xd4: 0x2f608, // menuitem
0xd6: 0x22507, // article
0xd8: 0x11d04, // type
0xda: 0x18b08, // reversed
0xdb: 0x23707, // caption
0xdc: 0x35d04, // open
0xdd: 0x1701, // s
0xe0: 0x2705, // title
0xe1: 0x9508, // decoding
0xe3: 0xc0e, // accept-charset
0xe4: 0x15a05, // class
0xe5: 0x3f203, // sup
0xe6: 0xdb08, // template
0xe7: 0x16c08, // noframes
0xe8: 0x3ad07, // loading
0xeb: 0xa106, // object
0xee: 0x3da03, // sub
0xef: 0x2fa06, // itemid
0xf0: 0x30904, // slot
0xf1: 0x8604, // ruby
0xf4: 0x1f102, // td
0xf5: 0x11208, // required
0xf9: 0x16e05, // frame
0xfc: 0x2102, // as
0xfd: 0x37e09, // maxlength
0xff: 0x31f0f, // allowfullscreen
0x101: 0x2160b, // crossorigin
0x102: 0xed03, // map
0x104: 0x6e02, // dd
0x105: 0x705, // tbody
0x107: 0x2d502, // h1
0x109: 0x5004, // meta
0x10a: 0x1, // a
0x10c: 0x16a03, // dfn
0x10e: 0x34507, // itemref
0x110: 0x38d03, // min
0x111: 0x28508, // datetime
0x114: 0xdc02, // em
0x115: 0x7f04, // mark
0x119: 0x2d708, // hreflang
0x11a: 0x3de07, // optimum
0x11c: 0x1220d, // fetchpriority
0x11d: 0x39502, // h3
0x11e: 0x5905, // xmlns
0x11f: 0x19903, // div
0x121: 0x40403, // wbr
0x128: 0x2bf08, // textarea
0x129: 0x3d505, // style
0x12a: 0x3f406, // portal
0x12b: 0x1b107, // content
0x12d: 0x19b03, // var
0x12f: 0x40004, // abbr
0x133: 0x31803, // img
0x138: 0x35b05, // scope
0x13b: 0x30504, // size
0x13e: 0x29f02, // h6
0x141: 0xfc08, // autoplay
0x142: 0x2c408, // readonly
0x143: 0x3d09, // inputmode
0x144: 0x19208, // disabled
0x145: 0x4804, // rows
0x149: 0x3490e, // referrerpolicy
0x14a: 0x1c405, // label
0x14b: 0x36c06, // header
0x14c: 0xad08, // datalist
0x14d: 0xe309, // autofocus
0x14e: 0xb607, // pattern
0x150: 0x2cc06, // hidden
0x151: 0x5, // about
0x152: 0x14406, // button
0x154: 0x2f206, // iframe
0x155: 0x1d308, // selected
0x156: 0x3c207, // srclang
0x15b: 0xb102, // li
0x15c: 0x22305, // start
0x15d: 0x7307, // sandbox
0x15e: 0x31b03, // ins
0x162: 0x1a307, // colspan
0x163: 0x1ff0e, // shadowrootmode
0x164: 0xb104, // list
0x166: 0x5208, // tabindex
0x169: 0x3b407, // profile
0x16b: 0x301, // u
0x16c: 0x23d08, // noscript
0x16e: 0x2660b, // formenctype
0x16f: 0x16e08, // frameset
0x170: 0x28b05, // media
0x174: 0x2008, // basefont
0x176: 0x2b104, // ping
0x177: 0x3bb08, // progress
0x178: 0x206, // output
0x17a: 0x36904, // math
0x17b: 0x2930a, // formtarget
0x17d: 0x7b05, // param
0x180: 0x13208, // blocking
0x185: 0x37707, // checked
0x188: 0x32e05, // ismap
0x18a: 0x38602, // h2
0x18c: 0x2df0a, // http-equiv
0x18e: 0x10d07, // capture
0x190: 0x2db04, // lang
0x195: 0x27f0a, // novalidate
0x197: 0x1a304, // cols
0x198: 0x804, // body
0x199: 0xbc03, // nav
0x19a: 0x1b10f, // contenteditable
0x19b: 0x15e07, // section
0x19e: 0x14e08, // itemprop
0x19f: 0x15208, // property
0x1a1: 0xc30e, // autocapitalize
0x1a4: 0x3eb07, // summary
0x1a6: 0x1000b, // playsinline
0x1a9: 0x8303, // bdi
0x1ab: 0x29d02, // h5
0x1ac: 0x6d07, // address
0x1b0: 0x2d204, // high
0x1b1: 0x33207, // popover
0x1b3: 0xa605, // track
0x1b6: 0x8203, // kbd
0x1b7: 0x11401, // q
0x1b8: 0x2340a, // figcaption
0x1b9: 0x30005, // image
0x1ba: 0x25c04, // form
0x1c1: 0x3000a, // imagesizes
0x1c4: 0x1e818, // shadowrootdelegatesfocus
0x1c5: 0x2ec06, // option
0x1c6: 0x9d05, // audio
0x1c8: 0x40102, // bb
0x1c9: 0x16407, // noembed
0x1cc: 0x10805, // inert
0x1cf: 0x1d306, // select
0x1d1: 0x22c08, // fieldset
0x1d2: 0x31206, // srcset
0x1d3: 0x2f604, // menu
0x1d5: 0x36c07, // headers
0x1dd: 0x1be06, // legend
0x1de: 0xaa04, // kind
0x1e0: 0x24908, // resource
0x1e2: 0xf309, // translate
0x1e4: 0x2aa08, // optgroup
0x1e6: 0x33213, // popovertargetaction
0x1e7: 0x2710a, // formmethod
0x1e9: 0xb802, // tt
0x1ea: 0x36b05, // thead
0x1eb: 0x17c02, // ul
0x1ee: 0x3a406, // prefix
0x1ef: 0x19e05, // color
0x1f1: 0x21105, // shape
0x1f3: 0x25c03, // for
0x1f4: 0x2500c, // enterkeyhint
0x1f7: 0xea06, // usemap
0x1f8: 0x1f02, // rb
0x1fa: 0x20b07, // details
0x1fb: 0x10b03, // rtc
0x1fc: 0x9205, // aside
0x1fe: 0x24506, // figure
}
// Package html minifies HTML5 following the specifications at http://www.w3.org/TR/html5/syntax.html.
package html
import (
"bytes"
"fmt"
"io"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/buffer"
"github.com/tdewolff/parse/v2/html"
)
// Byte-slice and parameter values allocated once and reused throughout the
// minifier, avoiding per-token allocations.
var (
	gtBytes         = []byte(">")
	isBytes         = []byte("=")
	spaceBytes      = []byte(" ")
	doctypeBytes    = []byte("<!doctype html>")
	jsMimeBytes     = []byte("application/javascript")
	cssMimeBytes    = []byte("text/css")
	htmlMimeBytes   = []byte("text/html")
	svgMimeBytes    = []byte("image/svg+xml")
	formMimeBytes   = []byte("application/x-www-form-urlencoded")
	mathMimeBytes   = []byte("application/mathml+xml")
	dataSchemeBytes = []byte("data:")
	jsSchemeBytes   = []byte("javascript:")
	httpBytes       = []byte("http")
	radioBytes      = []byte("radio")
	onBytes         = []byte("on")
	textBytes       = []byte("text")
	noneBytes       = []byte("none")
	submitBytes     = []byte("submit")
	allBytes        = []byte("all")
	rectBytes       = []byte("rect")
	dataBytes       = []byte("data")
	getBytes        = []byte("get")
	autoBytes       = []byte("auto")
	oneBytes        = []byte("one")
	inlineParams    = map[string]string{"inline": "1"}
)
////////////////////////////////////////////////////////////////
// Commonly used template delimiter pairs; assign one of these to
// Minifier.TemplateDelims to have template actions passed through untouched.
var GoTemplateDelims = [2]string{"{{", "}}"}
var HandlebarsTemplateDelims = [2]string{"{{", "}}"}
var MustacheTemplateDelims = [2]string{"{{", "}}"}
var EJSTemplateDelims = [2]string{"<%", "%>"}
var ASPTemplateDelims = [2]string{"<%", "%>"}
var PHPTemplateDelims = [2]string{"<?", "?>"}
// Minifier is an HTML minifier.
type Minifier struct {
	KeepComments            bool // preserve all comments
	KeepConditionalComments bool // Deprecated: use KeepSpecialComments instead.
	KeepSpecialComments     bool // preserve IE conditional comments ([if ...]/[endif]) and SSI comments (starting with #)
	KeepDefaultAttrVals     bool // keep attribute values that equal the default (e.g. type=text on input)
	KeepDocumentTags        bool // keep html, head and body tags even when they carry no attributes
	KeepEndTags             bool // keep optional end tags (tr, td, li, p, ...)
	KeepQuotes              bool // always quote attribute values
	KeepWhitespace          bool // do not trim whitespace around tags

	// TemplateDelims are the delimiters of template syntax (e.g. GoTemplateDelims)
	// whose actions are passed through without minification.
	TemplateDelims [2]string
}
// Minify minifies HTML data, it reads from r and writes to w. It delegates
// to a zero-value Minifier, i.e. all Keep* options disabled.
func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	var o Minifier
	return o.Minify(m, w, r, params)
}
// Minify minifies HTML data, it reads from r and writes to w.
// It collapses whitespace between tokens, removes omissible tags and default
// attribute values, and dispatches embedded content (svg, mathml, css, js,
// data URIs) to the minifiers registered on m for their mimetypes. The params
// argument is unused.
func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]string) error {
	var rawTagHash Hash        // tag whose raw content is pending (script/style/iframe), 0 when none
	var rawTagMediatype []byte // value of the type attribute on that raw tag, if any
	if o.KeepConditionalComments {
		fmt.Println("DEPRECATED: KeepConditionalComments is replaced by KeepSpecialComments")
		o.KeepSpecialComments = true
		o.KeepConditionalComments = false // omit next warning
	}
	omitSpace := true // if true the next leading space is omitted
	inPre := false
	attrMinifyBuffer := buffer.NewWriter(make([]byte, 0, 64))
	attrByteBuffer := make([]byte, 0, 64)
	z := parse.NewInput(r)
	defer z.Restore()
	l := html.NewTemplateLexer(z, o.TemplateDelims)
	tb := NewTokenBuffer(z, l)
	for {
		t := *tb.Shift()
		switch t.TokenType {
		case html.ErrorToken:
			// an empty write carries no data but surfaces a pending writer error
			if _, err := w.Write(nil); err != nil {
				return err
			}
			if l.Err() == io.EOF {
				return nil
			}
			return l.Err()
		case html.DoctypeToken:
			w.Write(doctypeBytes)
		case html.CommentToken:
			if o.KeepComments {
				w.Write(t.Data)
			} else if o.KeepSpecialComments {
				if 6 < len(t.Text) && (bytes.HasPrefix(t.Text, []byte("[if ")) || bytes.HasSuffix(t.Text, []byte("[endif]")) || bytes.HasSuffix(t.Text, []byte("[endif]--"))) {
					// [if ...] is always 7 or more characters, [endif] is only encountered for downlevel-revealed
					// see https://msdn.microsoft.com/en-us/library/ms537512(v=vs.85).aspx#syntax
					if bytes.HasPrefix(t.Data, []byte("<!--[if ")) && bytes.HasSuffix(t.Data, []byte("<![endif]-->")) { // downlevel-hidden
						begin := bytes.IndexByte(t.Data, '>') + 1
						end := len(t.Data) - len("<![endif]-->")
						if begin < end {
							w.Write(t.Data[:begin])
							// recursively minify the HTML inside the conditional comment
							if err := o.Minify(m, w, buffer.NewReader(t.Data[begin:end]), nil); err != nil {
								return minify.UpdateErrorPosition(err, z, t.Offset)
							}
							w.Write(t.Data[end:])
						} else {
							w.Write(t.Data) // malformed
						}
					} else {
						w.Write(t.Data) // downlevel-revealed or short downlevel-hidden
					}
				} else if 1 < len(t.Text) && t.Text[0] == '#' {
					// SSI tags
					w.Write(t.Data)
				}
			}
		case html.SvgToken:
			if err := m.MinifyMimetype(svgMimeBytes, w, buffer.NewReader(t.Data), inlineParams); err != nil {
				if err != minify.ErrNotExist {
					return minify.UpdateErrorPosition(err, z, t.Offset)
				}
				w.Write(t.Data) // no SVG minifier registered; pass through
			}
			omitSpace = false
		case html.MathToken:
			if err := m.MinifyMimetype(mathMimeBytes, w, buffer.NewReader(t.Data), nil); err != nil {
				if err != minify.ErrNotExist {
					return minify.UpdateErrorPosition(err, z, t.Offset)
				}
				w.Write(t.Data) // no MathML minifier registered; pass through
			}
			omitSpace = false
		case html.TextToken:
			if t.HasTemplate {
				// template actions are passed through untouched
				w.Write(t.Data)
				omitSpace = parse.IsWhitespace(t.Data[len(t.Data)-1])
			} else if rawTagHash != 0 {
				// raw content of script/style/iframe: dispatch to the matching minifier
				if rawTagHash == Style || rawTagHash == Script || rawTagHash == Iframe {
					var mimetype []byte
					var params map[string]string
					if rawTagHash == Iframe {
						mimetype = htmlMimeBytes
					} else if 0 < len(rawTagMediatype) {
						mimetype, params = parse.Mediatype(rawTagMediatype)
					} else if rawTagHash == Script {
						mimetype = jsMimeBytes
					} else if rawTagHash == Style {
						mimetype = cssMimeBytes
					}
					if err := m.MinifyMimetype(mimetype, w, buffer.NewReader(t.Data), params); err != nil {
						if err != minify.ErrNotExist {
							return minify.UpdateErrorPosition(err, z, t.Offset)
						}
						w.Write(t.Data)
					}
				} else {
					w.Write(t.Data)
				}
			} else if inPre {
				w.Write(t.Data)
				// omitSpace = true after block element
			} else {
				t.Data = parse.ReplaceMultipleWhitespaceAndEntities(t.Data, EntitiesMap, TextRevEntitiesMap)
				// whitespace removal; trim left
				if omitSpace && parse.IsWhitespace(t.Data[0]) {
					t.Data = t.Data[1:]
				}
				// whitespace removal; trim right
				omitSpace = false
				if len(t.Data) == 0 {
					omitSpace = true
				} else if parse.IsWhitespace(t.Data[len(t.Data)-1]) {
					omitSpace = true
					i := 0
					for {
						next := tb.Peek(i)
						// trim if EOF, text token with leading whitespace or block token
						if next.TokenType == html.ErrorToken {
							t.Data = t.Data[:len(t.Data)-1]
							omitSpace = false
							break
						} else if next.TokenType == html.TextToken && !parse.IsAllWhitespace(next.Data) {
							// stop looking when text encountered
							break
						} else if next.TokenType == html.StartTagToken || next.TokenType == html.EndTagToken {
							if o.KeepWhitespace {
								break
							}
							// remove when followed by a block tag
							if next.Traits&blockTag != 0 {
								t.Data = t.Data[:len(t.Data)-1]
								omitSpace = false
								break
							} else if next.TokenType == html.StartTagToken {
								break
							}
						}
						i++
					}
				}
				w.Write(t.Data)
			}
		case html.StartTagToken, html.EndTagToken:
			rawTagHash = 0
			hasAttributes := false
			if t.TokenType == html.StartTagToken {
				if next := tb.Peek(0); next.TokenType == html.AttributeToken {
					hasAttributes = true
				}
				if t.Traits&rawTag != 0 {
					// ignore empty script and style tags
					if !hasAttributes && (t.Hash == Script || t.Hash == Style) {
						if next := tb.Peek(1); next.TokenType == html.EndTagToken {
							tb.Shift()
							tb.Shift()
							break
						}
					}
					rawTagHash = t.Hash
					rawTagMediatype = nil
					// do not minify content of <style amp-boilerplate>
					if hasAttributes && t.Hash == Style {
						if attrs := tb.Attributes(Amp_Boilerplate); attrs[0] != nil {
							rawTagHash = 0
						}
					}
				}
			} else if t.Hash == Template {
				omitSpace = true // EndTagToken
			}
			if t.Hash == Pre {
				inPre = t.TokenType == html.StartTagToken
			}
			// remove superfluous tags, except for html, head and body tags when KeepDocumentTags is set
			if !hasAttributes && (!o.KeepDocumentTags && (t.Hash == Html || t.Hash == Head || t.Hash == Body) || t.Hash == Colgroup) {
				break
			} else if t.TokenType == html.EndTagToken {
				omitEndTag := false
				if !o.KeepEndTags {
					if t.Hash == Thead || t.Hash == Tbody || t.Hash == Tfoot || t.Hash == Tr || t.Hash == Th ||
						t.Hash == Td || t.Hash == Option || t.Hash == Dd || t.Hash == Dt || t.Hash == Li ||
						t.Hash == Rb || t.Hash == Rt || t.Hash == Rtc || t.Hash == Rp {
						omitEndTag = true // omit end tags
					} else if t.Hash == P {
						// </p> may be omitted depending on what follows
						i := 0
						for {
							next := tb.Peek(i)
							i++
							// continue if text token is empty or whitespace
							if next.TokenType == html.TextToken && parse.IsAllWhitespace(next.Data) {
								continue
							}
							if next.TokenType == html.ErrorToken || next.TokenType == html.EndTagToken && next.Traits&keepPTag == 0 || next.TokenType == html.StartTagToken && next.Traits&omitPTag != 0 {
								omitEndTag = true // omit p end tag
							}
							break
						}
					} else if t.Hash == Optgroup {
						// </optgroup> may be omitted unless an option follows
						i := 0
						for {
							next := tb.Peek(i)
							i++
							// continue if text token
							if next.TokenType == html.TextToken {
								continue
							}
							if next.TokenType == html.ErrorToken || next.Hash != Option {
								omitEndTag = true // omit optgroup end tag
							}
							break
						}
					}
				}
				if !omitEndTag {
					if o.KeepWhitespace || t.Traits&objectTag != 0 {
						omitSpace = false
					} else if t.Traits&blockTag != 0 {
						omitSpace = true // omit spaces after block elements
					}
					// truncate the end tag to `</name>`, dropping anything after the name
					if 3+len(t.Text) < len(t.Data) {
						t.Data[2+len(t.Text)] = '>'
						t.Data = t.Data[:3+len(t.Text)]
					}
					w.Write(t.Data)
				}
				// skip text in select and optgroup tags
				if t.Hash == Option || t.Hash == Optgroup {
					if next := tb.Peek(0); next.TokenType == html.TextToken {
						tb.Shift()
					}
				}
				break
			}
			if o.KeepWhitespace || t.Traits&objectTag != 0 {
				omitSpace = false
			} else if t.Traits&blockTag != 0 {
				omitSpace = true // omit spaces after block elements
			}
			w.Write(t.Data)
			if hasAttributes {
				if t.Hash == Meta {
					attrs := tb.Attributes(Content, Http_Equiv, Charset, Name)
					if content := attrs[0]; content != nil {
						if httpEquiv := attrs[1]; httpEquiv != nil {
							httpEquiv.AttrVal = parse.TrimWhitespace(httpEquiv.AttrVal)
							// rewrite http-equiv=content-type content=text/html;charset=utf-8 to charset=utf-8
							if charset := attrs[2]; charset == nil && parse.EqualFold(httpEquiv.AttrVal, []byte("content-type")) {
								content.AttrVal = minify.Mediatype(content.AttrVal)
								if bytes.Equal(content.AttrVal, []byte("text/html;charset=utf-8")) {
									httpEquiv.Text = nil
									content.Text = []byte("charset")
									content.Hash = Charset
									content.AttrVal = []byte("utf-8")
								}
							}
						}
						if name := attrs[3]; name != nil {
							name.AttrVal = parse.TrimWhitespace(name.AttrVal)
							if parse.EqualFold(name.AttrVal, []byte("keywords")) {
								content.AttrVal = bytes.ReplaceAll(content.AttrVal, []byte(", "), []byte(","))
							} else if parse.EqualFold(name.AttrVal, []byte("viewport")) {
								content.AttrVal = bytes.ReplaceAll(content.AttrVal, []byte(" "), []byte(""))
								for i := 0; i < len(content.AttrVal); i++ {
									if content.AttrVal[i] == '=' && i+2 < len(content.AttrVal) {
										i++
										// minify the number following '=' in place, shifting the tail left
										if n := parse.Number(content.AttrVal[i:]); 0 < n {
											minNum := minify.Number(content.AttrVal[i:i+n], -1)
											if len(minNum) < n {
												copy(content.AttrVal[i:i+len(minNum)], minNum)
												copy(content.AttrVal[i+len(minNum):], content.AttrVal[i+n:])
												content.AttrVal = content.AttrVal[:len(content.AttrVal)+len(minNum)-n]
											}
											i += len(minNum)
										}
										i-- // mitigate for-loop increase
									}
								}
							}
						}
					}
				} else if t.Hash == Script {
					// drop charset when src is set
					attrs := tb.Attributes(Src, Charset)
					if attrs[0] != nil && attrs[1] != nil {
						attrs[1].Text = nil
					}
				} else if t.Hash == Input {
					attrs := tb.Attributes(Type, Value)
					if t, value := attrs[0], attrs[1]; t != nil && value != nil {
						isRadio := parse.EqualFold(t.AttrVal, radioBytes)
						if !isRadio && len(value.AttrVal) == 0 {
							value.Text = nil
						} else if isRadio && parse.EqualFold(value.AttrVal, onBytes) {
							value.Text = nil
						}
					}
				} else if t.Hash == A {
					// drop name when it duplicates id
					attrs := tb.Attributes(Id, Name)
					if id, name := attrs[0], attrs[1]; id != nil && name != nil {
						if bytes.Equal(id.AttrVal, name.AttrVal) {
							name.Text = nil
						}
					}
				}
				// write attributes
				for {
					attr := *tb.Shift()
					if attr.TokenType != html.AttributeToken {
						break
					} else if attr.Text == nil {
						continue // removed attribute
					} else if attr.HasTemplate {
						w.Write(attr.Data)
						continue // don't minify attributes that contain templates
					}
					val := attr.AttrVal
					if attr.Traits&trimAttr != 0 {
						val = parse.ReplaceMultipleWhitespaceAndEntities(val, EntitiesMap, nil)
						val = parse.TrimWhitespace(val)
					} else {
						val = parse.ReplaceEntities(val, EntitiesMap, nil)
					}
					if t.Traits != 0 {
						if len(val) == 0 && (attr.Hash == Class ||
							attr.Hash == Dir ||
							attr.Hash == Id ||
							attr.Hash == Name ||
							attr.Hash == Action && t.Hash == Form) {
							continue // omit empty attribute values
						}
						if rawTagHash != 0 && attr.Hash == Type {
							// remember the mediatype for the raw content that follows
							rawTagMediatype = parse.Copy(val)
						}
						if attr.Hash == Enctype ||
							attr.Hash == Formenctype ||
							attr.Hash == Accept ||
							attr.Hash == Type && (t.Hash == A || t.Hash == Link || t.Hash == Embed || t.Hash == Object || t.Hash == Source || t.Hash == Script) {
							val = minify.Mediatype(val)
						}
						// default attribute values can be omitted
						if !o.KeepDefaultAttrVals && (attr.Hash == Type && (t.Hash == Script && jsMimetypes[string(parse.ToLower(parse.Copy(val)))] ||
							t.Hash == Style && parse.EqualFold(val, cssMimeBytes) ||
							t.Hash == Link && parse.EqualFold(val, cssMimeBytes) ||
							t.Hash == Input && parse.EqualFold(val, textBytes) ||
							t.Hash == Button && parse.EqualFold(val, submitBytes)) ||
							attr.Hash == Method && parse.EqualFold(val, getBytes) ||
							attr.Hash == Enctype && parse.EqualFold(val, formMimeBytes) ||
							attr.Hash == Colspan && bytes.Equal(val, oneBytes) ||
							attr.Hash == Rowspan && bytes.Equal(val, oneBytes) ||
							attr.Hash == Shape && parse.EqualFold(val, rectBytes) ||
							attr.Hash == Span && bytes.Equal(val, oneBytes) ||
							attr.Hash == Media && t.Hash == Style && parse.EqualFold(val, allBytes)) {
							continue
						}
						if attr.Hash == Style {
							// CSS minifier for attribute inline code
							val = parse.TrimWhitespace(val)
							attrMinifyBuffer.Reset()
							if err := m.MinifyMimetype(cssMimeBytes, attrMinifyBuffer, buffer.NewReader(val), inlineParams); err == nil {
								val = attrMinifyBuffer.Bytes()
							} else if err != minify.ErrNotExist {
								return minify.UpdateErrorPosition(err, z, attr.Offset)
							}
							if len(val) == 0 {
								continue
							}
						} else if 2 < len(attr.Text) && attr.Text[0] == 'o' && attr.Text[1] == 'n' {
							// JS minifier for attribute inline code
							val = parse.TrimWhitespace(val)
							if 11 <= len(val) && parse.EqualFold(val[:11], jsSchemeBytes) {
								val = val[11:]
							}
							attrMinifyBuffer.Reset()
							if err := m.MinifyMimetype(jsMimeBytes, attrMinifyBuffer, buffer.NewReader(val), inlineParams); err == nil {
								val = attrMinifyBuffer.Bytes()
							} else if err != minify.ErrNotExist {
								return minify.UpdateErrorPosition(err, z, attr.Offset)
							}
							if len(val) == 0 {
								continue
							}
						} else if attr.Traits&urlAttr != 0 { // anchors are already handled
							val = parse.TrimWhitespace(val)
							if 5 < len(val) {
								if parse.EqualFold(val[:4], httpBytes) {
									// drop the scheme when it matches the document URL's scheme
									if val[4] == ':' {
										if m.URL != nil && m.URL.Scheme == "http" {
											val = val[5:]
										} else {
											parse.ToLower(val[:4])
										}
									} else if (val[4] == 's' || val[4] == 'S') && val[5] == ':' {
										if m.URL != nil && m.URL.Scheme == "https" {
											val = val[6:]
										} else {
											parse.ToLower(val[:5])
										}
									}
								} else if parse.EqualFold(val[:5], dataSchemeBytes) {
									val = minify.DataURI(m, val)
								}
							}
						}
					}
					w.Write(spaceBytes)
					w.Write(attr.Text)
					if 0 < len(val) && attr.Traits&booleanAttr == 0 {
						w.Write(isBytes)
						// use double quotes for RDFa attributes
						isXML := attr.Hash == Vocab || attr.Hash == Typeof || attr.Hash == Property || attr.Hash == Resource || attr.Hash == Prefix || attr.Hash == Content || attr.Hash == About || attr.Hash == Rev || attr.Hash == Datatype || attr.Hash == Inlist
						// no quotes if possible, else prefer single or double depending on which occurs more often in value
						var quote byte
						if 0 < len(attr.Data) && (attr.Data[len(attr.Data)-1] == '\'' || attr.Data[len(attr.Data)-1] == '"') {
							quote = attr.Data[len(attr.Data)-1]
						}
						val = html.EscapeAttrVal(&attrByteBuffer, val, quote, o.KeepQuotes || isXML)
						w.Write(val)
					}
				}
			} else {
				_ = tb.Shift() // StartTagClose
			}
			w.Write(gtBytes)
			// skip text in select and optgroup tags
			if t.Hash == Select || t.Hash == Optgroup {
				if next := tb.Peek(0); next.TokenType == html.TextToken && !next.HasTemplate {
					tb.Shift()
				}
			}
			// keep space after phrasing tags (<i>, <span>, ...) FontAwesome etc.
			if t.TokenType == html.StartTagToken && t.Traits == normalTag {
				if next := tb.Peek(0); next.Hash == t.Hash && next.TokenType == html.EndTagToken {
					omitSpace = false
				}
			}
		}
	}
}
// Package js minifies ECMAScript 2021 following the language specification at https://tc39.es/ecma262/.
package js
import (
"bytes"
"io"
"sort"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/js"
)
// blockType classifies the context of a statement list when it is passed to
// optimizeStmtList.
type blockType int

const (
	defaultBlock   blockType = iota // regular block or clause body
	functionBlock                   // function body or the program top level
	iterationBlock                  // loop body
)
// Minifier is a JS minifier.
type Minifier struct {
	Precision           int  // number of significant digits
	KeepVarNames        bool // passed (negated) to the renamer; presumably keeps original variable names when set
	useAlphabetVarNames bool // renamer naming-scheme toggle; passed (negated) to newRenamer
	Version             int  // highest ECMAScript version to target; 0 means no limit (see minVersion)
}
// minVersion reports whether features of the given ECMAScript version may be
// used; a Version of 0 imposes no limit.
func (o *Minifier) minVersion(version int) bool {
	if o.Version == 0 {
		return true
	}
	return version <= o.Version
}
// Minify minifies JS data, it reads from r and writes to w. It delegates to
// a Minifier with default options.
func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	defaultMinifier := &Minifier{}
	return defaultMinifier.Minify(m, w, r, params)
}
// Minify minifies JS data, it reads from r and writes to w.
// params may carry "inline": "1" to parse the input as inline code; the
// *minify.M argument is unused. It parses the input into an AST, hoists and
// renames variables, optimizes the statement list, and writes each statement.
func (o *Minifier) Minify(_ *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	z := parse.NewInput(r)
	ast, err := js.Parse(z, js.Options{
		WhileToFor: true,
		Inline:     params != nil && params["inline"] == "1",
	})
	if err != nil {
		return err
	}
	// shared write-state for all minifyXxx methods
	m := &jsMinifier{
		o:       o,
		w:       w,
		renamer: newRenamer(!o.KeepVarNames, !o.useAlphabetVarNames),
	}
	m.hoistVars(&ast.BlockStmt)
	ast.List = optimizeStmtList(ast.List, functionBlock)
	for _, item := range ast.List {
		m.writeSemicolon()
		m.minifyStmt(item)
	}
	// an empty write carries no data but surfaces a pending writer error
	if _, err := w.Write(nil); err != nil {
		return err
	}
	return nil
}
// expectExpr describes the syntactic position of the expression being
// minified; used to avoid ambiguous output (see jsMinifier.expectExpr).
type expectExpr int

const (
	expectAny      expectExpr = iota // no special syntactic context
	expectExprStmt                   // in statement
	expectExprBody                   // in arrow function body
)
// jsMinifier holds the output writer and the write-state needed to emit
// minified tokens with the minimal separators between them.
type jsMinifier struct {
	o *Minifier // options

	w              io.Writer
	prev           []byte     // last token written; consulted for spacing decisions
	needsSemicolon bool       // write a semicolon if required
	needsSpace     bool       // write a space if next token is an identifier
	expectExpr     expectExpr // avoid ambiguous syntax such as an expression starting with function
	groupedStmt    bool       // avoid ambiguous syntax by grouping the expression statement
	inFor          bool       // set while emitting a for-statement header
	spaceBefore    byte       // if the next token starts with this byte, a space is written first
	renamer        *renamer   // variable renamer, configured from KeepVarNames
}
// write emits b, inserting a separating space beforehand when the pending
// state requires one, and records b as the previously written token.
// Callers guarantee 0 < len(b).
func (m *jsMinifier) write(b []byte) {
	needSep := m.spaceBefore == b[0] || m.needsSpace && js.IsIdentifierContinue(b)
	if needSep {
		m.w.Write(spaceBytes)
	}
	m.w.Write(b)
	m.prev = b
	m.needsSpace = false
	m.expectExpr = expectAny
	m.spaceBefore = 0
}
// writeSpaceAfterIdent writes a space when the previous token ended in an
// identifier character or was a regular expression (to prevent confusion with
// its tag).
func (m *jsMinifier) writeSpaceAfterIdent() {
	prevIsRegExp := 1 < len(m.prev) && m.prev[0] == '/'
	if prevIsRegExp || js.IsIdentifierEnd(m.prev) {
		m.w.Write(spaceBytes)
	}
}
// writeSpaceBeforeIdent requests a space before the next token if that token
// starts with an identifier character; write performs the actual emission.
func (m *jsMinifier) writeSpaceBeforeIdent() {
	m.needsSpace = true
}
// writeSpaceBefore requests a space before the next token when that token
// starts with byte c; write performs the actual emission. Presumably this
// prevents adjacent tokens from merging (e.g. two '+' tokens) — see write.
func (m *jsMinifier) writeSpaceBefore(c byte) {
	m.spaceBefore = c
}
// requireSemicolon marks that a semicolon must be written before the next
// statement; writeSemicolon performs the actual write.
func (m *jsMinifier) requireSemicolon() {
	m.needsSemicolon = true
}
// writeSemicolon writes the pending semicolon, if any, and clears the
// pending-space state.
func (m *jsMinifier) writeSemicolon() {
	if !m.needsSemicolon {
		return
	}
	m.w.Write(semicolonBytes)
	m.needsSemicolon = false
	m.needsSpace = false
}
// minifyStmt writes the minified form of a single statement, dispatching on
// the statement's concrete AST node type. It updates the pending-semicolon
// and spacing state so that consecutive statements are separated correctly.
//
// Fixes relative to the previous revision: the switch-clause loop used
// `for i, _ := range` (redundant blank identifier, staticcheck S1005), and
// the import-default branch re-tested `stmt.Default != nil` inside a block
// already guarded by that same condition (dead condition, now a plain else).
func (m *jsMinifier) minifyStmt(i js.IStmt) {
	switch stmt := i.(type) {
	case *js.ExprStmt:
		m.expectExpr = expectExprStmt
		m.minifyExpr(stmt.Value, js.OpExpr)
		if m.groupedStmt {
			m.write(closeParenBytes)
			m.groupedStmt = false
		}
		m.requireSemicolon()
	case *js.VarDecl:
		m.minifyVarDecl(stmt, false)
		m.requireSemicolon()
	case *js.IfStmt:
		hasIf := !isEmptyStmt(stmt.Body)
		hasElse := !isEmptyStmt(stmt.Else)
		if !hasIf && !hasElse {
			break
		}
		m.write(ifOpenBytes)
		m.minifyExpr(stmt.Cond, js.OpExpr)
		m.write(closeParenBytes)
		if !hasIf && hasElse {
			m.requireSemicolon()
		} else if hasIf {
			if hasElse && endsInIf(stmt.Body) {
				// prevent: if(a){if(b)c}else d; => if(a)if(b)c;else d;
				m.write(openBraceBytes)
				m.minifyStmt(stmt.Body)
				m.write(closeBraceBytes)
				m.needsSemicolon = false
			} else {
				m.minifyStmt(stmt.Body)
			}
		}
		if hasElse {
			m.writeSemicolon()
			m.write(elseBytes)
			m.writeSpaceBeforeIdent()
			m.minifyStmt(stmt.Else)
		}
	case *js.BlockStmt:
		m.renamer.renameScope(stmt.Scope)
		m.minifyBlockStmt(stmt)
	case *js.ReturnStmt:
		m.write(returnBytes)
		m.writeSpaceBeforeIdent()
		m.minifyExpr(stmt.Value, js.OpExpr)
		m.requireSemicolon()
	case *js.LabelledStmt:
		m.write(stmt.Label)
		m.write(colonBytes)
		m.minifyStmtOrBlock(stmt.Value, defaultBlock)
	case *js.BranchStmt:
		m.write(stmt.Type.Bytes())
		if stmt.Label != nil {
			m.write(spaceBytes)
			m.write(stmt.Label)
		}
		m.requireSemicolon()
	case *js.WithStmt:
		m.write(withOpenBytes)
		m.minifyExpr(stmt.Cond, js.OpExpr)
		m.write(closeParenBytes)
		m.minifyStmtOrBlock(stmt.Body, defaultBlock)
	case *js.DoWhileStmt:
		m.write(doBytes)
		m.writeSpaceBeforeIdent()
		m.minifyStmtOrBlock(stmt.Body, iterationBlock)
		m.writeSemicolon()
		m.write(whileOpenBytes)
		m.minifyExpr(stmt.Cond, js.OpExpr)
		m.write(closeParenBytes)
	case *js.WhileStmt:
		m.write(whileOpenBytes)
		m.minifyExpr(stmt.Cond, js.OpExpr)
		m.write(closeParenBytes)
		m.minifyStmtOrBlock(stmt.Body, iterationBlock)
	case *js.ForStmt:
		stmt.Body.List = optimizeStmtList(stmt.Body.List, iterationBlock)
		m.renamer.renameScope(stmt.Body.Scope)
		m.write(forOpenBytes)
		m.inFor = true
		if decl, ok := stmt.Init.(*js.VarDecl); ok {
			m.minifyVarDecl(decl, true)
		} else {
			m.minifyExpr(stmt.Init, js.OpLHS)
		}
		m.inFor = false
		m.write(semicolonBytes)
		m.minifyExpr(stmt.Cond, js.OpExpr)
		m.write(semicolonBytes)
		m.minifyExpr(stmt.Post, js.OpExpr)
		m.write(closeParenBytes)
		m.minifyBlockAsStmt(stmt.Body)
	case *js.ForInStmt:
		stmt.Body.List = optimizeStmtList(stmt.Body.List, iterationBlock)
		m.renamer.renameScope(stmt.Body.Scope)
		m.write(forOpenBytes)
		m.inFor = true
		if decl, ok := stmt.Init.(*js.VarDecl); ok {
			m.minifyVarDecl(decl, false)
		} else {
			m.minifyExpr(stmt.Init, js.OpLHS)
		}
		m.inFor = false
		m.writeSpaceAfterIdent()
		m.write(inBytes)
		m.writeSpaceBeforeIdent()
		m.minifyExpr(stmt.Value, js.OpExpr)
		m.write(closeParenBytes)
		m.minifyBlockAsStmt(stmt.Body)
	case *js.ForOfStmt:
		stmt.Body.List = optimizeStmtList(stmt.Body.List, iterationBlock)
		m.renamer.renameScope(stmt.Body.Scope)
		if stmt.Await {
			m.write(forAwaitOpenBytes)
		} else {
			m.write(forOpenBytes)
		}
		m.inFor = true
		if decl, ok := stmt.Init.(*js.VarDecl); ok {
			m.minifyVarDecl(decl, false)
		} else {
			m.minifyExpr(stmt.Init, js.OpLHS)
		}
		m.inFor = false
		m.writeSpaceAfterIdent()
		m.write(ofBytes)
		m.writeSpaceBeforeIdent()
		m.minifyExpr(stmt.Value, js.OpAssign)
		m.write(closeParenBytes)
		m.minifyBlockAsStmt(stmt.Body)
	case *js.SwitchStmt:
		m.write(switchOpenBytes)
		m.minifyExpr(stmt.Init, js.OpExpr)
		m.write(closeParenOpenBracketBytes)
		m.needsSemicolon = false
		for i := range stmt.List {
			stmt.List[i].List = optimizeStmtList(stmt.List[i].List, defaultBlock)
		}
		m.renamer.renameScope(stmt.Scope)
		for _, clause := range stmt.List {
			m.writeSemicolon()
			m.write(clause.TokenType.Bytes())
			if clause.Cond != nil {
				m.writeSpaceBeforeIdent()
				m.minifyExpr(clause.Cond, js.OpExpr)
			}
			m.write(colonBytes)
			for _, item := range clause.List {
				m.writeSemicolon()
				m.minifyStmt(item)
			}
		}
		m.write(closeBraceBytes)
		m.needsSemicolon = false
	case *js.ThrowStmt:
		m.write(throwBytes)
		m.writeSpaceBeforeIdent()
		m.minifyExpr(stmt.Value, js.OpExpr)
		m.requireSemicolon()
	case *js.TryStmt:
		m.write(tryBytes)
		stmt.Body.List = optimizeStmtList(stmt.Body.List, defaultBlock)
		m.renamer.renameScope(stmt.Body.Scope)
		m.minifyBlockStmt(stmt.Body)
		if stmt.Catch != nil {
			m.write(catchBytes)
			stmt.Catch.List = optimizeStmtList(stmt.Catch.List, defaultBlock)
			// when targeting ES2019+ a catch binding with no further uses can
			// be dropped entirely (optional catch binding)
			if v, ok := stmt.Binding.(*js.Var); ok && v.Uses == 1 && m.o.minVersion(2019) {
				stmt.Catch.Scope.Declared = stmt.Catch.Scope.Declared[1:]
				stmt.Binding = nil
			}
			m.renamer.renameScope(stmt.Catch.Scope)
			if stmt.Binding != nil {
				m.write(openParenBytes)
				m.minifyBinding(stmt.Binding)
				m.write(closeParenBytes)
			}
			m.minifyBlockStmt(stmt.Catch)
		}
		if stmt.Finally != nil {
			m.write(finallyBytes)
			stmt.Finally.List = optimizeStmtList(stmt.Finally.List, defaultBlock)
			m.renamer.renameScope(stmt.Finally.Scope)
			m.minifyBlockStmt(stmt.Finally)
		}
	case *js.FuncDecl:
		m.minifyFuncDecl(stmt, false)
	case *js.ClassDecl:
		m.minifyClassDecl(stmt)
	case *js.DebuggerStmt:
		m.write(debuggerBytes)
		m.requireSemicolon()
	case *js.EmptyStmt:
	case *js.ImportStmt:
		if stmt.Default != nil || stmt.List == nil || 0 < len(stmt.List) {
			m.write(importBytes)
			if stmt.Default != nil {
				m.write(spaceBytes)
				m.write(stmt.Default)
				if stmt.List != nil {
					m.write(commaBytes)
				} else {
					m.write(spaceBytes)
				}
			}
			if len(stmt.List) == 1 && len(stmt.List[0].Name) == 1 && stmt.List[0].Name[0] == '*' {
				// namespace import: import * as x from "..."
				m.writeSpaceBeforeIdent()
				m.minifyAlias(stmt.List[0])
				if stmt.Default != nil || len(stmt.List) != 0 {
					m.write(spaceBytes)
				}
			} else if stmt.List != nil {
				m.write(openBraceBytes)
				for i, item := range stmt.List {
					if i != 0 {
						m.write(commaBytes)
					}
					m.minifyAlias(item)
				}
				m.write(closeBraceBytes)
			}
			if stmt.Default != nil || stmt.List != nil {
				m.write(fromBytes)
			}
			m.write(minifyString(stmt.Module, false))
			m.requireSemicolon()
		}
	case *js.ExportStmt:
		m.write(exportBytes)
		if stmt.Decl != nil {
			if stmt.Default {
				m.write(spaceDefaultBytes)
				m.writeSpaceBeforeIdent()
				m.minifyExpr(stmt.Decl, js.OpAssign)
				_, isHoistable := stmt.Decl.(*js.FuncDecl)
				_, isClass := stmt.Decl.(*js.ClassDecl)
				if !isHoistable && !isClass {
					m.requireSemicolon()
				}
			} else {
				m.writeSpaceBeforeIdent()
				m.minifyStmt(stmt.Decl.(js.IStmt)) // can only be variable, function, or class decl
			}
		} else {
			if len(stmt.List) == 1 && (len(stmt.List[0].Name) == 1 && stmt.List[0].Name[0] == '*' || stmt.List[0].Name == nil && len(stmt.List[0].Binding) == 1 && stmt.List[0].Binding[0] == '*') {
				// star re-export: export * [as x] from "..."
				m.writeSpaceBeforeIdent()
				m.minifyAlias(stmt.List[0])
				if stmt.Module != nil && stmt.List[0].Name != nil {
					m.write(spaceBytes)
				}
			} else if 0 < len(stmt.List) {
				m.write(openBraceBytes)
				for i, item := range stmt.List {
					if i != 0 {
						m.write(commaBytes)
					}
					m.minifyAlias(item)
				}
				m.write(closeBraceBytes)
			}
			if stmt.Module != nil {
				m.write(fromBytes)
				m.write(minifyString(stmt.Module, false))
			}
			m.requireSemicolon()
		}
	case *js.DirectivePrologueStmt:
		// normalize the directive's quotes to double quotes in place
		stmt.Value[0] = '"'
		stmt.Value[len(stmt.Value)-1] = '"'
		m.write(stmt.Value)
		m.requireSemicolon()
	case *js.Comment:
		// bang comment
		m.write(stmt.Value)
		if stmt.Value[1] == '/' {
			m.write(newlineBytes)
		}
	}
}
// minifyBlockStmt writes a statement list surrounded by braces, clearing the
// pending semicolon at both boundaries.
func (m *jsMinifier) minifyBlockStmt(stmt *js.BlockStmt) {
	m.write(openBraceBytes)
	m.needsSemicolon = false
	for i := 0; i < len(stmt.List); i++ {
		m.writeSemicolon()
		m.minifyStmt(stmt.List[i])
	}
	m.write(closeBraceBytes)
	m.needsSemicolon = false
}
// minifyBlockAsStmt writes a block in statement position: braces are dropped
// for a single statement and an empty block becomes a bare semicolon. Braces
// must be kept when the block declares lexical (let/const/class) variables.
// Assumes the scope has already been renamed.
func (m *jsMinifier) minifyBlockAsStmt(blockStmt *js.BlockStmt) {
	lexical := false
	declared := blockStmt.Scope.Declared[blockStmt.Scope.NumForDecls:]
	for _, v := range declared {
		if v.Decl == js.LexicalDecl {
			lexical = true
			break
		}
	}
	switch {
	case lexical || 1 < len(blockStmt.List):
		m.minifyBlockStmt(blockStmt)
	case len(blockStmt.List) == 1:
		m.minifyStmt(blockStmt.List[0])
	default:
		m.write(semicolonBytes)
		m.needsSemicolon = false
	}
}
// minifyStmtOrBlock minifies a single statement or a block in statement
// position, optimizing the statement list first.
func (m *jsMinifier) minifyStmtOrBlock(i js.IStmt, blockType blockType) {
	if blockStmt, isBlock := i.(*js.BlockStmt); isBlock {
		blockStmt.List = optimizeStmtList(blockStmt.List, blockType)
		m.renamer.renameScope(blockStmt.Scope)
		m.minifyBlockAsStmt(blockStmt)
		return
	}
	// optimizeStmtList can in some cases expand one stmt to two shorter stmts
	list := optimizeStmtList([]js.IStmt{i}, blockType)
	switch len(list) {
	case 0:
		m.write(semicolonBytes)
		m.needsSemicolon = false
	case 1:
		m.minifyStmt(list[0])
	default:
		m.minifyBlockStmt(&js.BlockStmt{List: list, Scope: js.Scope{}})
	}
}
// minifyAlias writes an import/export alias as '[name as] binding', minifying
// string-typed names and bindings.
func (m *jsMinifier) minifyAlias(alias js.Alias) {
	if name := alias.Name; name != nil {
		if name[0] == '"' || name[0] == '\'' {
			m.write(minifyString(name, false))
		} else {
			m.write(name)
		}
		// no space needed between '*' and 'as'
		if !bytes.Equal(name, starBytes) {
			m.write(spaceBytes)
		}
		m.write(asSpaceBytes)
	}
	if binding := alias.Binding; binding != nil {
		if binding[0] == '"' || binding[0] == '\'' {
			m.write(minifyString(binding, false))
		} else {
			m.write(binding)
		}
	}
}
// minifyParams writes a parenthesized parameter list. When removeUnused is set
// and there is no rest parameter, trailing parameters that are plain variables
// with no other uses are dropped from the end of the list.
func (m *jsMinifier) minifyParams(params js.Params, removeUnused bool) {
	// remove unused parameters from the end
	j := len(params.List)
	if removeUnused && params.Rest == nil {
		for ; 0 < j; j-- {
			// stop at the first parameter that is not a plain variable or is still used
			// (was `!ok || ok && 1 < v.Uses`; the `ok &&` was redundant after `!ok ||`)
			if v, ok := params.List[j-1].Binding.(*js.Var); !ok || 1 < v.Uses {
				break
			}
		}
	}
	m.write(openParenBytes)
	for i, item := range params.List[:j] {
		if i != 0 {
			m.write(commaBytes)
		}
		m.minifyBindingElement(item)
	}
	if params.Rest != nil {
		if len(params.List) != 0 {
			m.write(commaBytes)
		}
		m.write(ellipsisBytes)
		m.minifyBinding(params.Rest)
	}
	m.write(closeParenBytes)
}
// minifyArguments writes a parenthesized, comma-separated call argument list,
// prefixing spread arguments with '...'.
func (m *jsMinifier) minifyArguments(args js.Args) {
	m.write(openParenBytes)
	for i := range args.List {
		if 0 < i {
			m.write(commaBytes)
		}
		arg := args.List[i]
		if arg.Rest {
			m.write(ellipsisBytes)
		}
		m.minifyExpr(arg.Value, js.OpAssign)
	}
	m.write(closeParenBytes)
}
// minifyVarDecl writes a variable declaration list. A TokenType of ErrorToken
// marks hoisted declarations whose keyword is omitted; with onlyDefines set,
// only items that carry an initializer are written. For 'var' declarations the
// list is first reordered, presumably to improve compression — TODO confirm.
func (m *jsMinifier) minifyVarDecl(decl *js.VarDecl, onlyDefines bool) {
	if len(decl.List) == 0 {
		return
	} else if decl.TokenType == js.ErrorToken {
		// remove 'var' when hoisting variables
		first := true
		for _, item := range decl.List {
			if item.Default != nil || !onlyDefines {
				if !first {
					m.write(commaBytes)
				}
				m.minifyBindingElement(item)
				first = false
			}
		}
	} else {
		if decl.TokenType == js.VarToken {
			// stable sort keeps the relative order of equal elements
			sort.SliceStable(decl.List, func(i, j int) bool {
				if decl.List[i].Default == nil && decl.List[j].Default == nil {
					// sort single-length variables names
					// TODO: why not all names? Why need identOrder?
					if a, ok := decl.List[i].Binding.(*js.Var); ok && len(a.Data) == 1 {
						if b, ok := decl.List[j].Binding.(*js.Var); ok && len(b.Data) == 1 {
							return m.renamer.identOrder[a.Data[0]] < m.renamer.identOrder[b.Data[0]]
						}
					}
				} else if decl.List[i].Default == nil {
					if _, ok := decl.List[j].Binding.(*js.Var); j != 0 || ok {
						// move non-define declarations to the front, except for the first array/object
						return true
					}
				}
				return false
			})
		}
		m.write(decl.TokenType.Bytes())
		m.writeSpaceBeforeIdent()
		for i, item := range decl.List {
			if i != 0 {
				m.write(commaBytes)
			}
			m.minifyBindingElement(item)
		}
	}
}
// minifyFuncDecl writes a function declaration or expression. inExpr indicates
// expression position, which controls whether an unused function name may be
// dropped and when the body scope is renamed relative to writing the name.
func (m *jsMinifier) minifyFuncDecl(decl *js.FuncDecl, inExpr bool) {
	parentRename := m.renamer.rename
	// renaming is disabled inside bodies that contain 'with' or when the user kept var names
	m.renamer.rename = !decl.Body.Scope.HasWith && !m.o.KeepVarNames
	m.hoistVars(&decl.Body)
	decl.Body.List = optimizeStmtList(decl.Body.List, functionBlock)
	if decl.Async {
		m.write(asyncSpaceBytes)
	}
	m.write(functionBytes)
	if decl.Generator {
		m.write(starBytes)
	}
	// TODO: remove function name, really necessary?
	//if decl.Name != nil && decl.Name.Uses == 1 {
	//	scope := decl.Body.Scope
	//	for i, vorig := range scope.Declared {
	//		if decl.Name == vorig {
	//			scope.Declared = append(scope.Declared[:i], scope.Declared[i+1:]...)
	//		}
	//	}
	//}
	if inExpr {
		// in expression position the scope is renamed before the name is
		// written — presumably so a renamed function name is emitted; TODO confirm
		m.renamer.renameScope(decl.Body.Scope)
	}
	// the name is dropped in expression position when it is used at most once
	if decl.Name != nil && (!inExpr || 1 < decl.Name.Uses) {
		if !decl.Generator {
			m.write(spaceBytes)
		}
		m.write(decl.Name.Data)
	}
	if !inExpr {
		// in statement position rename only after the declared name was written
		m.renamer.renameScope(decl.Body.Scope)
	}
	m.minifyParams(decl.Params, true)
	m.minifyBlockStmt(&decl.Body)
	m.renamer.rename = parentRename
}
// minifyMethodDecl writes a method declaration (class member or object
// literal method): modifiers, name, parameters, and body.
func (m *jsMinifier) minifyMethodDecl(decl *js.MethodDecl) {
	parentRename := m.renamer.rename
	m.renamer.rename = !decl.Body.Scope.HasWith && !m.o.KeepVarNames
	m.hoistVars(&decl.Body)
	decl.Body.List = optimizeStmtList(decl.Body.List, functionBlock)
	if decl.Static {
		m.write(staticBytes)
		m.writeSpaceBeforeIdent()
	}
	switch {
	case decl.Async:
		m.write(asyncBytes)
		if decl.Generator {
			m.write(starBytes)
		} else {
			m.writeSpaceBeforeIdent()
		}
	case decl.Generator:
		m.write(starBytes)
	case decl.Get:
		m.write(getBytes)
		m.writeSpaceBeforeIdent()
	case decl.Set:
		m.write(setBytes)
		m.writeSpaceBeforeIdent()
	}
	m.minifyPropertyName(decl.Name)
	m.renamer.renameScope(decl.Body.Scope)
	// setter parameters must not be removed even when unused
	m.minifyParams(decl.Params, !decl.Set)
	m.minifyBlockStmt(&decl.Body)
	m.renamer.rename = parentRename
}
// minifyArrowFunc writes an arrow function, dropping parameter parentheses
// for a single simple parameter and removing body braces when the body
// reduces to expression statements followed by a value-returning return.
func (m *jsMinifier) minifyArrowFunc(decl *js.ArrowFunc) {
	parentRename := m.renamer.rename
	m.renamer.rename = !decl.Body.Scope.HasWith && !m.o.KeepVarNames
	m.hoistVars(&decl.Body)
	decl.Body.List = optimizeStmtList(decl.Body.List, functionBlock)
	m.renamer.renameScope(decl.Body.Scope)
	if decl.Async {
		m.write(asyncBytes)
	}
	removeParens := false
	// parentheses can be dropped for exactly one plain-variable parameter without default
	if decl.Params.Rest == nil && len(decl.Params.List) == 1 && decl.Params.List[0].Default == nil {
		if decl.Params.List[0].Binding == nil {
			removeParens = true
		} else if _, ok := decl.Params.List[0].Binding.(*js.Var); ok {
			removeParens = true
		}
	}
	if removeParens {
		if decl.Async && decl.Params.List[0].Binding != nil {
			// add space after async in: async a => ...
			m.write(spaceBytes)
		}
		m.minifyBindingElement(decl.Params.List[0])
	} else {
		parentInFor := m.inFor
		m.inFor = false
		m.minifyParams(decl.Params, true)
		m.inFor = parentInFor
	}
	m.write(arrowBytes)
	removeBraces := false
	if 0 < len(decl.Body.List) {
		returnStmt, isReturn := decl.Body.List[len(decl.Body.List)-1].(*js.ReturnStmt)
		if isReturn && returnStmt.Value != nil {
			// merge expression statements to final return statement, remove function body braces
			var list []js.IExpr
			removeBraces = true
			for _, item := range decl.Body.List[:len(decl.Body.List)-1] {
				if expr, isExpr := item.(*js.ExprStmt); isExpr {
					list = append(list, expr.Value)
				} else {
					// any non-expression statement forces keeping the braces
					removeBraces = false
					break
				}
			}
			if removeBraces {
				list = append(list, returnStmt.Value)
				expr := list[0]
				if 0 < len(list) { // NOTE(review): always true here (value just appended); kept as-is
					if 1 < len(list) {
						expr = &js.CommaExpr{list}
					}
					expr = &js.GroupExpr{X: expr}
				}
				m.expectExpr = expectExprBody
				m.minifyExpr(expr, js.OpAssign)
				if m.groupedStmt {
					m.write(closeParenBytes)
					m.groupedStmt = false
				}
			}
		} else if isReturn && returnStmt.Value == nil {
			// remove empty return
			decl.Body.List = decl.Body.List[:len(decl.Body.List)-1]
		}
	}
	if !removeBraces {
		m.minifyBlockStmt(&decl.Body)
	}
	m.renamer.rename = parentRename
}
// minifyClassDecl writes a class declaration: name, extends clause, and the
// member list (static blocks, methods, and field definitions).
func (m *jsMinifier) minifyClassDecl(decl *js.ClassDecl) {
	m.write(classBytes)
	if decl.Name != nil {
		m.write(spaceBytes)
		m.write(decl.Name.Data)
	}
	if decl.Extends != nil {
		m.write(spaceExtendsBytes)
		m.writeSpaceBeforeIdent()
		m.minifyExpr(decl.Extends, js.OpLHS)
	}
	m.write(openBraceBytes)
	m.needsSemicolon = false
	for _, member := range decl.List {
		m.writeSemicolon()
		switch {
		case member.StaticBlock != nil:
			m.write(staticBytes)
			m.minifyBlockStmt(member.StaticBlock)
		case member.Method != nil:
			m.minifyMethodDecl(member.Method)
		default:
			// field definition
			if member.Static {
				m.write(staticBytes)
				// 'static' needs separation from a plain identifier field name
				if !member.Name.IsComputed() && member.Name.Literal.TokenType == js.IdentifierToken {
					m.write(spaceBytes)
				}
			}
			m.minifyPropertyName(member.Name)
			if member.Init != nil {
				m.write(equalBytes)
				m.minifyExpr(member.Init, js.OpAssign)
			}
			m.requireSemicolon()
		}
	}
	m.write(closeBraceBytes)
	m.needsSemicolon = false
}
// minifyPropertyName writes a property name: computed names between brackets,
// string names minified, and any other literal verbatim.
func (m *jsMinifier) minifyPropertyName(name js.PropertyName) {
	switch {
	case name.IsComputed():
		m.write(openBracketBytes)
		m.minifyExpr(name.Computed, js.OpAssign)
		m.write(closeBracketBytes)
	case name.Literal.TokenType == js.StringToken:
		m.write(minifyString(name.Literal.Data, false))
	default:
		m.write(name.Literal.Data)
	}
}
// minifyProperty writes an object-literal property: spread, shorthand, or
// 'key: value', with an optional '=init' (used in destructuring patterns).
func (m *jsMinifier) minifyProperty(property js.Property) {
	// property.Name is always set in ObjectLiteral
	if property.Spread {
		m.write(ellipsisBytes)
	} else if property.Name != nil {
		// write 'old-name:' before the value unless it is shorthand for a
		// variable whose (possibly renamed) identifier still matches the key
		v, isVar := property.Value.(*js.Var)
		if !isVar || !property.Name.IsIdent(v.Name()) {
			m.minifyPropertyName(*property.Name)
			m.write(colonBytes)
		}
	}
	m.minifyExpr(property.Value, js.OpAssign)
	if property.Init != nil {
		m.write(equalBytes)
		m.minifyExpr(property.Init, js.OpAssign)
	}
}
// minifyBindingElement writes a binding element with its optional default
// value ('binding=default'); a nil binding writes nothing.
func (m *jsMinifier) minifyBindingElement(element js.BindingElement) {
	if element.Binding == nil {
		return
	}
	parentInFor := m.inFor
	m.inFor = false
	m.minifyBinding(element.Binding)
	m.inFor = parentInFor
	if element.Default != nil {
		m.write(equalBytes)
		m.minifyExpr(element.Default, js.OpAssign)
	}
}
// minifyBinding writes a binding target: a plain variable, an array pattern,
// or an object pattern, each with an optional rest element.
func (m *jsMinifier) minifyBinding(ibinding js.IBinding) {
	switch binding := ibinding.(type) {
	case *js.Var:
		m.write(binding.Data)
	case *js.BindingArray:
		m.write(openBracketBytes)
		for i := range binding.List {
			if 0 < i {
				m.write(commaBytes)
			}
			m.minifyBindingElement(binding.List[i])
		}
		if binding.Rest != nil {
			if len(binding.List) != 0 {
				m.write(commaBytes)
			}
			m.write(ellipsisBytes)
			m.minifyBinding(binding.Rest)
		}
		m.write(closeBracketBytes)
	case *js.BindingObject:
		m.write(openBraceBytes)
		for i := range binding.List {
			if 0 < i {
				m.write(commaBytes)
			}
			item := binding.List[i]
			// item.Key is always set; write 'old-name:' for computed keys and
			// whenever the bound variable's name does not match the key (the
			// binding name may have been renamed)
			writeKey := item.Key.IsComputed()
			if !writeKey {
				v, isVar := item.Value.Binding.(*js.Var)
				writeKey = !isVar || !item.Key.IsIdent(v.Data)
			}
			if writeKey {
				m.minifyPropertyName(*item.Key)
				m.write(colonBytes)
			}
			m.minifyBindingElement(item.Value)
		}
		if binding.Rest != nil {
			if len(binding.List) != 0 {
				m.write(commaBytes)
			}
			m.write(ellipsisBytes)
			m.write(binding.Rest.Data)
		}
		m.write(closeBraceBytes)
	}
}
// minifyExpr writes an expression, applying minification rewrites as it goes.
// prec is the minimum operator precedence the surrounding context requires;
// it decides whether grouping parentheses must be kept or may be dropped.
// State such as m.inFor, m.expectExpr, and m.groupedStmt guards contexts where
// certain tokens ('in', leading '{', 'let[') would change the parse.
func (m *jsMinifier) minifyExpr(i js.IExpr, prec js.OpPrec) {
	// pre-optimize conditional and unary expressions before dispatching
	if cond, ok := i.(*js.CondExpr); ok {
		i = m.optimizeCondExpr(cond, prec)
	} else if unary, ok := i.(*js.UnaryExpr); ok {
		i = optimizeUnaryExpr(unary, prec)
	}
	switch expr := i.(type) {
	case *js.Var:
		// follow rename links to the final variable
		for expr.Link != nil {
			expr = expr.Link
		}
		data := expr.Data
		if bytes.Equal(data, undefinedBytes) { // TODO: only if not defined
			// undefined => void 0 (parenthesized when precedence demands)
			if js.OpUnary < prec {
				m.write(groupedVoidZeroBytes)
			} else {
				m.write(voidZeroBytes)
			}
		} else if bytes.Equal(data, infinityBytes) { // TODO: only if not defined
			// Infinity => 1/0 (parenthesized when precedence demands)
			if js.OpMul < prec {
				m.write(groupedOneDivZeroBytes)
			} else {
				m.write(oneDivZeroBytes)
			}
		} else {
			m.write(data)
		}
	case *js.LiteralExpr:
		// numbers are reformatted per token type; booleans become !0/!1
		if expr.TokenType == js.DecimalToken || expr.TokenType == js.IntegerToken {
			m.write(decimalNumber(expr.Data, m.o.Precision))
		} else if expr.TokenType == js.BinaryToken {
			m.write(binaryNumber(expr.Data, m.o.Precision))
		} else if expr.TokenType == js.OctalToken {
			m.write(octalNumber(expr.Data, m.o.Precision))
		} else if expr.TokenType == js.HexadecimalToken {
			m.write(hexadecimalNumber(expr.Data, m.o.Precision))
		} else if expr.TokenType == js.TrueToken {
			if js.OpUnary < prec {
				m.write(groupedNotZeroBytes)
			} else {
				m.write(notZeroBytes)
			}
		} else if expr.TokenType == js.FalseToken {
			if js.OpUnary < prec {
				m.write(groupedNotOneBytes)
			} else {
				m.write(notOneBytes)
			}
		} else if expr.TokenType == js.StringToken {
			m.write(minifyString(expr.Data, m.o.minVersion(2015)))
		} else if expr.TokenType == js.RegExpToken {
			// </script>/ => < /script>/
			if 0 < len(m.prev) && m.prev[len(m.prev)-1] == '<' && bytes.HasPrefix(expr.Data, regExpScriptBytes) {
				m.write(spaceBytes)
			}
			m.write(minifyRegExp(expr.Data))
		} else {
			m.write(expr.Data)
		}
	case *js.BinaryExpr:
		mergeBinaryExpr(expr)
		if expr.X == nil {
			// mergeBinaryExpr may collapse the expression into Y only
			m.minifyExpr(expr.Y, prec)
			break
		}
		precLeft := binaryLeftPrecMap[expr.Op]
		// convert (a,b)&&c into a,b&&c but not a=(b,c)&&d into a=(b,c&&d)
		if prec <= js.OpExpr {
			if group, ok := expr.X.(*js.GroupExpr); ok {
				if comma, ok := group.X.(*js.CommaExpr); ok && js.OpAnd <= exprPrec(comma.List[len(comma.List)-1]) {
					expr.X = group.X
					precLeft = js.OpExpr
				}
			}
		}
		if expr.Op == js.InstanceofToken || expr.Op == js.InToken {
			// 'in' inside a for-header must be parenthesized to avoid a for-in parse
			group := expr.Op == js.InToken && m.inFor
			if group {
				m.write(openParenBytes)
			}
			m.minifyExpr(expr.X, precLeft)
			m.writeSpaceAfterIdent()
			m.write(expr.Op.Bytes())
			m.writeSpaceBeforeIdent()
			m.minifyExpr(expr.Y, binaryRightPrecMap[expr.Op])
			if group {
				m.write(closeParenBytes)
			}
		} else {
			// TODO: has effect on GZIP?
			//if expr.Op == js.EqEqToken || expr.Op == js.NotEqToken || expr.Op == js.EqEqEqToken || expr.Op == js.NotEqEqToken {
			//	// switch a==const for const==a, such as typeof a=="undefined" for "undefined"==typeof a (GZIP improvement)
			//	if _, ok := expr.Y.(*js.LiteralExpr); ok {
			//		expr.X, expr.Y = expr.Y, expr.X
			//	}
			//}
			if v, not, ok := isUndefinedOrNullVar(expr); ok {
				// change a===null||a===undefined to a==null
				op := js.EqEqToken
				if not {
					op = js.NotEqToken
				}
				expr = &js.BinaryExpr{op, v, &js.LiteralExpr{js.NullToken, nullBytes}}
			}
			m.minifyExpr(expr.X, precLeft)
			if expr.Op == js.GtToken && m.prev[len(m.prev)-1] == '-' {
				// 0 < len(m.prev) always
				// avoid '->' sequence: a- >b => a- > b
				m.write(spaceBytes)
			} else if expr.Op == js.EqEqEqToken || expr.Op == js.NotEqEqToken {
				// typeof x === "..." may use loose equality: typeof always yields a string
				if left, ok := expr.X.(*js.UnaryExpr); ok && left.Op == js.TypeofToken {
					if right, ok := expr.Y.(*js.LiteralExpr); ok && right.TokenType == js.StringToken {
						if expr.Op == js.EqEqEqToken {
							expr.Op = js.EqEqToken
						} else {
							expr.Op = js.NotEqToken
						}
					}
				} else if right, ok := expr.Y.(*js.UnaryExpr); ok && right.Op == js.TypeofToken {
					if left, ok := expr.X.(*js.LiteralExpr); ok && left.TokenType == js.StringToken {
						if expr.Op == js.EqEqEqToken {
							expr.Op = js.EqEqToken
						} else {
							expr.Op = js.NotEqToken
						}
					}
				}
			}
			m.write(expr.Op.Bytes())
			if expr.Op == js.AddToken {
				// +++ => + ++
				m.writeSpaceBefore('+')
			} else if expr.Op == js.SubToken {
				// --- => - --
				m.writeSpaceBefore('-')
			} else if expr.Op == js.DivToken {
				// // => / /
				m.writeSpaceBefore('/')
			}
			m.minifyExpr(expr.Y, binaryRightPrecMap[expr.Op])
		}
	case *js.UnaryExpr:
		if expr.Op == js.PostIncrToken || expr.Op == js.PostDecrToken {
			m.minifyExpr(expr.X, unaryPrecMap[expr.Op])
			m.write(expr.Op.Bytes())
		} else {
			isLtNot := expr.Op == js.NotToken && 0 < len(m.prev) && m.prev[len(m.prev)-1] == '<'
			m.write(expr.Op.Bytes())
			if expr.Op == js.DeleteToken || expr.Op == js.VoidToken || expr.Op == js.TypeofToken || expr.Op == js.AwaitToken {
				m.writeSpaceBeforeIdent()
			} else if expr.Op == js.PosToken {
				// +++ => + ++
				m.writeSpaceBefore('+')
			} else if expr.Op == js.NegToken || isLtNot {
				// --- => - --
				// <!-- => <! --
				m.writeSpaceBefore('-')
			} else if expr.Op == js.NotToken {
				// fold constant operands of '!' to !0 / !1
				if lit, ok := expr.X.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken {
					if len(lit.Data) == 2 {
						// !"" => !0
						m.write(zeroBytes)
					} else {
						// !"string" => !1
						m.write(oneBytes)
					}
					break
				} else if ok && lit.TokenType == js.RegExpToken {
					// !/regexp/ => !1
					m.write(oneBytes)
					break
				} else if ok && (lit.TokenType == js.DecimalToken || lit.TokenType == js.IntegerToken) {
					// !123 => !1 (except for !0)
					if lit.Data[len(lit.Data)-1] == 'n' {
						// strip BigInt suffix before numeric normalization
						lit.Data = lit.Data[:len(lit.Data)-1]
					}
					if num := minify.Number(lit.Data, m.o.Precision); len(num) == 1 && num[0] == '0' {
						m.write(zeroBytes)
					} else {
						m.write(oneBytes)
					}
					break
				}
			}
			m.minifyExpr(expr.X, unaryPrecMap[expr.Op])
		}
	case *js.DotExpr:
		optionalLeft := false
		if group, ok := expr.X.(*js.GroupExpr); ok {
			if lit, ok := group.X.(*js.LiteralExpr); ok && (lit.TokenType == js.DecimalToken || lit.TokenType == js.IntegerToken) {
				// (number).prop — integers need an extra dot so the first dot
				// is not parsed as a decimal point
				if lit.TokenType == js.DecimalToken {
					m.write(minify.Number(lit.Data, m.o.Precision))
				} else {
					m.write(lit.Data)
					m.write(dotBytes)
				}
				m.write(dotBytes)
				m.write(expr.Y.Data)
				break
			} else if dot, ok := group.X.(*js.DotExpr); ok {
				optionalLeft = dot.Optional
			} else if call, ok := group.X.(*js.CallExpr); ok {
				optionalLeft = call.Optional
			}
		}
		if js.OpMember <= prec || optionalLeft {
			m.minifyExpr(expr.X, js.OpMember)
		} else {
			m.minifyExpr(expr.X, js.OpCall)
		}
		if expr.Optional {
			m.write(questionBytes)
		} else if last := m.prev[len(m.prev)-1]; '0' <= last && last <= '9' {
			// 0 < len(m.prev) always
			isInteger := true
			for _, c := range m.prev[:len(m.prev)-1] {
				if c < '0' || '9' < c {
					isInteger = false
					break
				}
			}
			if isInteger {
				// prevent previous integer
				m.write(dotBytes)
			}
		}
		m.write(dotBytes)
		m.write(expr.Y.Data)
	case *js.GroupExpr:
		if cond, ok := expr.X.(*js.CondExpr); ok {
			expr.X = m.optimizeCondExpr(cond, js.OpExpr)
		}
		// drop the parentheses when the inner precedence makes them redundant
		precInside := exprPrec(expr.X)
		if prec <= precInside || precInside == js.OpCoalesce && prec == js.OpBitOr {
			m.minifyExpr(expr.X, prec)
		} else {
			parentInFor := m.inFor
			m.inFor = false
			m.write(openParenBytes)
			m.minifyExpr(expr.X, js.OpExpr)
			m.write(closeParenBytes)
			m.inFor = parentInFor
		}
	case *js.ArrayExpr:
		parentInFor := m.inFor
		m.inFor = false
		m.write(openBracketBytes)
		for i, item := range expr.List {
			if i != 0 {
				m.write(commaBytes)
			}
			if item.Spread {
				m.write(ellipsisBytes)
			}
			m.minifyExpr(item.Value, js.OpAssign)
		}
		// a trailing elision needs its comma preserved
		if 0 < len(expr.List) && expr.List[len(expr.List)-1].Value == nil {
			m.write(commaBytes)
		}
		m.write(closeBracketBytes)
		m.inFor = parentInFor
	case *js.ObjectExpr:
		parentInFor := m.inFor
		m.inFor = false
		// in statement position '{' would start a block; wrap in parentheses
		groupedStmt := m.expectExpr != expectAny
		if groupedStmt {
			m.write(openParenBracketBytes)
		} else {
			m.write(openBraceBytes)
		}
		for i, item := range expr.List {
			if i != 0 {
				m.write(commaBytes)
			}
			m.minifyProperty(item)
		}
		m.write(closeBraceBytes)
		if groupedStmt {
			m.groupedStmt = true
		}
		m.inFor = parentInFor
	case *js.TemplateExpr:
		if expr.Tag != nil {
			if prec < js.OpMember {
				m.minifyExpr(expr.Tag, js.OpCall)
			} else {
				m.minifyExpr(expr.Tag, js.OpMember)
			}
			if expr.Optional {
				m.write(optChainBytes)
			}
		}
		parentInFor := m.inFor
		m.inFor = false
		for _, item := range expr.List {
			// untagged templates may have their escapes rewritten; tagged
			// templates must keep raw text intact
			if expr.Tag == nil {
				m.write(replaceEscapes(item.Value, '`', 1, 2))
			} else {
				m.write(item.Value)
			}
			m.minifyExpr(item.Expr, js.OpExpr)
		}
		if expr.Tag == nil {
			m.write(replaceEscapes(expr.Tail, '`', 1, 1))
		} else {
			m.write(expr.Tail)
		}
		m.inFor = parentInFor
	case *js.NewExpr:
		if expr.Args == nil && js.OpLHS < prec && prec != js.OpNew {
			// parenthesize argument-less 'new' when a following '(' would bind to it
			m.write(openNewBytes)
			m.writeSpaceBeforeIdent()
			m.minifyExpr(expr.X, js.OpNew)
			m.write(closeParenBytes)
		} else {
			m.write(newBytes)
			m.writeSpaceBeforeIdent()
			if expr.Args != nil {
				m.minifyExpr(expr.X, js.OpMember)
				m.minifyArguments(*expr.Args)
			} else {
				m.minifyExpr(expr.X, js.OpNew)
			}
		}
	case *js.NewTargetExpr:
		m.write(newTargetBytes)
		m.writeSpaceBeforeIdent()
	case *js.ImportMetaExpr:
		// in statement position 'import' would start an import statement
		if m.expectExpr == expectExprStmt {
			m.write(openParenBytes)
			m.groupedStmt = true
		}
		m.write(importMetaBytes)
		m.writeSpaceBeforeIdent()
	case *js.YieldExpr:
		m.write(yieldBytes)
		m.writeSpaceBeforeIdent()
		if expr.X != nil {
			if expr.Generator {
				m.write(starBytes)
				m.minifyExpr(expr.X, js.OpAssign)
			} else if v, ok := expr.X.(*js.Var); !ok || !bytes.Equal(v.Name(), undefinedBytes) { // TODO: only if not defined
				// 'yield undefined' equals bare 'yield'; skip the operand
				m.minifyExpr(expr.X, js.OpAssign)
			}
		}
	case *js.CallExpr:
		m.minifyExpr(expr.X, js.OpCall)
		parentInFor := m.inFor
		m.inFor = false
		if expr.Optional {
			m.write(optChainBytes)
		}
		m.minifyArguments(expr.Args)
		m.inFor = parentInFor
	case *js.IndexExpr:
		if m.expectExpr == expectExprStmt {
			// 'let[' in statement position parses as a declaration; prefix '!'
			if v, ok := expr.X.(*js.Var); ok && bytes.Equal(v.Name(), letBytes) {
				m.write(notBytes)
			}
		}
		if prec < js.OpMember {
			m.minifyExpr(expr.X, js.OpCall)
		} else {
			m.minifyExpr(expr.X, js.OpMember)
		}
		if expr.Optional {
			m.write(optChainBytes)
		}
		// a["b"] => a.b when the key is a valid identifier; a["0"] => a[0]
		if lit, ok := expr.Y.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken && 2 < len(lit.Data) {
			if isIdent := js.AsIdentifierName(lit.Data[1 : len(lit.Data)-1]); isIdent {
				m.write(dotBytes)
				m.write(lit.Data[1 : len(lit.Data)-1])
				break
			} else if isNum := js.AsDecimalLiteral(lit.Data[1 : len(lit.Data)-1]); isNum {
				m.write(openBracketBytes)
				m.write(minify.Number(lit.Data[1:len(lit.Data)-1], 0))
				m.write(closeBracketBytes)
				break
			}
		}
		parentInFor := m.inFor
		m.inFor = false
		m.write(openBracketBytes)
		m.minifyExpr(expr.Y, js.OpExpr)
		m.write(closeBracketBytes)
		m.inFor = parentInFor
	case *js.CondExpr:
		m.minifyExpr(expr.Cond, js.OpCoalesce)
		m.write(questionBytes)
		m.minifyExpr(expr.X, js.OpAssign)
		m.write(colonBytes)
		m.minifyExpr(expr.Y, js.OpAssign)
	case *js.VarDecl:
		m.minifyVarDecl(expr, true) // happens in for statement or when vars were hoisted
	case *js.FuncDecl:
		// in statement position 'function' would start a declaration; wrap or negate
		grouped := m.expectExpr == expectExprStmt && prec != js.OpExpr
		if grouped {
			m.write(openParenBytes)
		} else if m.expectExpr == expectExprStmt {
			m.write(notBytes)
		}
		parentInFor, parentGroupedStmt := m.inFor, m.groupedStmt
		m.inFor, m.groupedStmt = false, false
		m.minifyFuncDecl(expr, true)
		m.inFor, m.groupedStmt = parentInFor, parentGroupedStmt
		if grouped {
			m.write(closeParenBytes)
		}
	case *js.ArrowFunc:
		parentGroupedStmt := m.groupedStmt
		m.groupedStmt = false
		m.minifyArrowFunc(expr)
		m.groupedStmt = parentGroupedStmt
	case *js.MethodDecl:
		parentGroupedStmt := m.groupedStmt
		m.groupedStmt = false
		m.minifyMethodDecl(expr) // only happens in object literal
		m.groupedStmt = parentGroupedStmt
	case *js.ClassDecl:
		// in statement position 'class' would start a declaration; negate
		if m.expectExpr == expectExprStmt {
			m.write(notBytes)
		}
		parentInFor, parentGroupedStmt := m.inFor, m.groupedStmt
		m.inFor, m.groupedStmt = false, false
		m.minifyClassDecl(expr)
		m.inFor, m.groupedStmt = parentInFor, parentGroupedStmt
	case *js.CommaExpr:
		for i, item := range expr.List {
			if i != 0 {
				m.write(commaBytes)
			}
			m.minifyExpr(item, js.OpAssign)
		}
	}
}
package js
import (
"github.com/tdewolff/parse/v2/js"
)
// optimizeStmt rewrites a single statement into a shorter equivalent:
// if/else chains become &&/||/?: expressions where both branches are
// expressions, returns, or throws; hoisted var declarations collapse to
// expression or empty statements; and single-statement blocks are unwrapped.
// Returns the (possibly replaced) statement.
func optimizeStmt(i js.IStmt) js.IStmt {
	// convert if/else into expression statement, and optimize blocks
	if ifStmt, ok := i.(*js.IfStmt); ok {
		if ifStmt.Body != nil {
			ifStmt.Body = optimizeStmt(ifStmt.Body)
		}
		if ifStmt.Else != nil {
			ifStmt.Else = optimizeStmt(ifStmt.Else)
		}
		hasIf := !isEmptyStmt(ifStmt.Body)
		hasElse := !isEmptyStmt(ifStmt.Else)
		// if(!a)b;else c => if(a)c;else b — drop the negation by swapping branches
		if unaryExpr, ok := ifStmt.Cond.(*js.UnaryExpr); ok && unaryExpr.Op == js.NotToken && hasElse {
			ifStmt.Cond = unaryExpr.X
			ifStmt.Body, ifStmt.Else = ifStmt.Else, ifStmt.Body
			hasIf, hasElse = hasElse, hasIf
		}
		if !hasIf && !hasElse {
			// both branches empty: keep the condition only if it has side effects
			if hasSideEffects(ifStmt.Cond) {
				return &js.ExprStmt{Value: ifStmt.Cond}
			}
			return &js.EmptyStmt{}
		} else if hasIf && !hasElse {
			if X, isExprBody := ifStmt.Body.(*js.ExprStmt); isExprBody {
				// if(!a)b => a||b ; if(a)b => a&&b
				if unaryExpr, ok := ifStmt.Cond.(*js.UnaryExpr); ok && unaryExpr.Op == js.NotToken {
					left := groupExpr(unaryExpr.X, binaryLeftPrecMap[js.OrToken])
					right := groupExpr(X.Value, binaryRightPrecMap[js.OrToken])
					return &js.ExprStmt{&js.BinaryExpr{js.OrToken, left, right}}
				}
				left := groupExpr(ifStmt.Cond, binaryLeftPrecMap[js.AndToken])
				right := groupExpr(X.Value, binaryRightPrecMap[js.AndToken])
				return &js.ExprStmt{&js.BinaryExpr{js.AndToken, left, right}}
			} else if X, isIfStmt := ifStmt.Body.(*js.IfStmt); isIfStmt && isEmptyStmt(X.Else) {
				// if(a)if(b)c => if(a&&b)c
				left := groupExpr(ifStmt.Cond, binaryLeftPrecMap[js.AndToken])
				right := groupExpr(X.Cond, binaryRightPrecMap[js.AndToken])
				ifStmt.Cond = &js.BinaryExpr{js.AndToken, left, right}
				ifStmt.Body = X.Body
				return ifStmt
			}
		} else if !hasIf && hasElse {
			// if(a);else b => a||b
			if X, isExprElse := ifStmt.Else.(*js.ExprStmt); isExprElse {
				left := groupExpr(ifStmt.Cond, binaryLeftPrecMap[js.OrToken])
				right := groupExpr(X.Value, binaryRightPrecMap[js.OrToken])
				return &js.ExprStmt{&js.BinaryExpr{js.OrToken, left, right}}
			}
		} else if hasIf && hasElse {
			// if(a)b;else c => a?b:c when both branches are expressions
			XExpr, isExprBody := ifStmt.Body.(*js.ExprStmt)
			YExpr, isExprElse := ifStmt.Else.(*js.ExprStmt)
			if isExprBody && isExprElse {
				return &js.ExprStmt{condExpr(ifStmt.Cond, XExpr.Value, YExpr.Value)}
			}
			// if(a)return b;else return c => return a?b:c (and the void-less variant)
			XReturn, isReturnBody := ifStmt.Body.(*js.ReturnStmt)
			YReturn, isReturnElse := ifStmt.Else.(*js.ReturnStmt)
			if isReturnBody && isReturnElse {
				if XReturn.Value == nil && YReturn.Value == nil {
					return &js.ReturnStmt{commaExpr(ifStmt.Cond, &js.UnaryExpr{
						Op: js.VoidToken,
						X:  &js.LiteralExpr{js.NumericToken, zeroBytes},
					})}
				} else if XReturn.Value != nil && YReturn.Value != nil {
					return &js.ReturnStmt{condExpr(ifStmt.Cond, XReturn.Value, YReturn.Value)}
				}
				// mixed value/no-value returns cannot be merged
				return ifStmt
			}
			// if(a)throw b;else throw c => throw a?b:c
			XThrow, isThrowBody := ifStmt.Body.(*js.ThrowStmt)
			YThrow, isThrowElse := ifStmt.Else.(*js.ThrowStmt)
			if isThrowBody && isThrowElse {
				return &js.ThrowStmt{condExpr(ifStmt.Cond, XThrow.Value, YThrow.Value)}
			}
		}
	} else if decl, ok := i.(*js.VarDecl); ok {
		// TODO: remove function name in var name=function name(){}
		//for _, item := range decl.List {
		//	if v, ok := item.Binding.(*js.Var); ok && item.Default != nil {
		//		if fun, ok := item.Default.(*js.FuncDecl); ok && fun.Name != nil && bytes.Equal(v.Data, fun.Name.Data) {
		//			scope := fun.Body.Scope
		//			for i, vorig := range scope.Declared {
		//				if fun.Name == vorig {
		//					scope.Declared = append(scope.Declared[:i], scope.Declared[i+1:]...)
		//				}
		//			}
		//			scope.AddUndeclared(v)
		//			v.Uses += fun.Name.Uses - 1
		//			fun.Name.Link = v
		//			fun.Name = nil
		//		}
		//	}
		//}
		if decl.TokenType == js.ErrorToken {
			// convert hoisted var declaration to expression or empty (if there are no defines) statement
			for _, item := range decl.List {
				if item.Default != nil {
					return &js.ExprStmt{Value: decl}
				}
			}
			return &js.EmptyStmt{}
		}
		// TODO: remove unused declarations
		//for i := 0; i < len(decl.List); i++ {
		//	if v, ok := decl.List[i].Binding.(*js.Var); ok && v.Uses < 2 {
		//		decl.List = append(decl.List[:i], decl.List[i+1:]...)
		//		i--
		//	}
		//}
		//if len(decl.List) == 0 {
		//	return &js.EmptyStmt{}
		//}
		return decl
	} else if blockStmt, ok := i.(*js.BlockStmt); ok {
		// merge body and remove braces if it is not a lexical declaration
		blockStmt.List = optimizeStmtList(blockStmt.List, defaultBlock)
		if len(blockStmt.List) == 1 {
			if _, ok := blockStmt.List[0].(*js.ClassDecl); ok {
				// a class declaration alone in a block has no observable effect
				return &js.EmptyStmt{}
			} else if varDecl, ok := blockStmt.List[0].(*js.VarDecl); ok && varDecl.TokenType != js.VarToken {
				// remove let or const declaration in otherwise empty scope, but keep assignments
				exprs := []js.IExpr{}
				for _, item := range varDecl.List {
					if item.Default != nil && hasSideEffects(item.Default) {
						exprs = append(exprs, item.Default)
					}
				}
				if len(exprs) == 0 {
					return &js.EmptyStmt{}
				} else if len(exprs) == 1 {
					return &js.ExprStmt{exprs[0]}
				}
				return &js.ExprStmt{&js.CommaExpr{exprs}}
			}
			return optimizeStmt(blockStmt.List[0])
		} else if len(blockStmt.List) == 0 {
			return &js.EmptyStmt{}
		}
		return blockStmt
	}
	return i
}
func optimizeStmtList(list []js.IStmt, blockType blockType) []js.IStmt {
// merge expression statements as well as if/else statements followed by flow control statements
if len(list) == 0 {
return list
}
j := 0 // write index
for i := 0; i < len(list); i++ { // read index
if ifStmt, ok := list[i].(*js.IfStmt); ok && !isEmptyStmt(ifStmt.Else) {
// if(!a)b;else c => if(a)c; else b
if unary, ok := ifStmt.Cond.(*js.UnaryExpr); ok && unary.Op == js.NotToken && isFlowStmt(lastStmt(ifStmt.Else)) {
ifStmt.Cond = unary.X
ifStmt.Body, ifStmt.Else = ifStmt.Else, ifStmt.Body
}
if isFlowStmt(lastStmt(ifStmt.Body)) {
// if body ends in flow statement (return, throw, break, continue), we can remove the else statement and put its body in the current scope
if blockStmt, ok := ifStmt.Else.(*js.BlockStmt); ok {
blockStmt.Scope.Unscope()
list = append(list[:i+1], append(blockStmt.List, list[i+1:]...)...)
} else {
list = append(list[:i+1], append([]js.IStmt{ifStmt.Else}, list[i+1:]...)...)
}
ifStmt.Else = nil
}
}
list[i] = optimizeStmt(list[i])
if _, ok := list[i].(*js.EmptyStmt); ok {
k := i + 1
for ; k < len(list); k++ {
if _, ok := list[k].(*js.EmptyStmt); !ok {
break
}
}
list = append(list[:i], list[k:]...)
i--
continue
}
if 0 < i {
// merge expression statements with expression, return, and throw statements
if left, ok := list[i-1].(*js.ExprStmt); ok {
if right, ok := list[i].(*js.ExprStmt); ok {
right.Value = commaExpr(left.Value, right.Value)
j--
} else if returnStmt, ok := list[i].(*js.ReturnStmt); ok && returnStmt.Value != nil {
returnStmt.Value = commaExpr(left.Value, returnStmt.Value)
j--
} else if throwStmt, ok := list[i].(*js.ThrowStmt); ok {
throwStmt.Value = commaExpr(left.Value, throwStmt.Value)
j--
} else if forStmt, ok := list[i].(*js.ForStmt); ok {
// TODO: only merge lhs expression that don't have 'in' or 'of' keywords (slow to check?)
if forStmt.Init == nil {
forStmt.Init = left.Value
j--
} else if decl, ok := forStmt.Init.(*js.VarDecl); ok && len(decl.List) == 0 {
forStmt.Init = left.Value
j--
} else if ok && (decl.TokenType == js.VarToken || decl.TokenType == js.ErrorToken) {
// this is the second VarDecl, so we are hoisting var declarations, which means the forInit variables are already in 'left'
if merge := mergeVarDeclExprStmt(decl, left, true); merge {
j--
}
}
} else if whileStmt, ok := list[i].(*js.WhileStmt); ok {
// TODO: only merge lhs expression that don't have 'in' or 'of' keywords (slow to check?)
var body *js.BlockStmt
if blockStmt, ok := whileStmt.Body.(*js.BlockStmt); ok {
body = blockStmt
} else {
body = &js.BlockStmt{}
body.List = []js.IStmt{whileStmt.Body}
}
list[i] = &js.ForStmt{Init: left.Value, Cond: whileStmt.Cond, Post: nil, Body: body}
j--
} else if switchStmt, ok := list[i].(*js.SwitchStmt); ok {
switchStmt.Init = commaExpr(left.Value, switchStmt.Init)
j--
} else if withStmt, ok := list[i].(*js.WithStmt); ok {
withStmt.Cond = commaExpr(left.Value, withStmt.Cond)
j--
} else if ifStmt, ok := list[i].(*js.IfStmt); ok {
ifStmt.Cond = commaExpr(left.Value, ifStmt.Cond)
j--
} else if varDecl, ok := list[i].(*js.VarDecl); ok && varDecl.TokenType == js.VarToken {
if merge := mergeVarDeclExprStmt(varDecl, left, true); merge {
j--
}
}
} else if left, ok := list[i-1].(*js.VarDecl); ok {
if right, ok := list[i].(*js.VarDecl); ok && left.TokenType == right.TokenType {
// merge const and let declarations, or non-hoisted var declarations
right.List = append(left.List, right.List...)
j--
// remove from vardecls list of scope
scope := left.Scope.Func
for i, decl := range scope.VarDecls {
if left == decl {
scope.VarDecls = append(scope.VarDecls[:i], scope.VarDecls[i+1:]...)
break
}
}
} else if left.TokenType == js.VarToken {
if exprStmt, ok := list[i].(*js.ExprStmt); ok {
// pull in assignments to variables into the declaration, e.g. var a;a=5 => var a=5
if merge := mergeVarDeclExprStmt(left, exprStmt, false); merge {
list[i] = list[i-1]
j--
}
} else if forStmt, ok := list[i].(*js.ForStmt); ok {
// TODO: only merge lhs expression that don't have 'in' or 'of' keywords (slow to check?)
if forStmt.Init == nil {
forStmt.Init = left
j--
} else if decl, ok := forStmt.Init.(*js.VarDecl); ok && decl.TokenType == js.ErrorToken && !hasDefines(decl) {
forStmt.Init = left
j--
} else if ok && (decl.TokenType == js.VarToken || decl.TokenType == js.ErrorToken) {
// this is the second VarDecl, so we are hoisting var declarations, which means the forInit variables are already in 'left'
mergeVarDecls(left, decl, false)
decl.TokenType = js.VarToken
forStmt.Init = left
j--
}
} else if whileStmt, ok := list[i].(*js.WhileStmt); ok {
// TODO: only merge lhs expression that don't have 'in' or 'of' keywords (slow to check?)
var body *js.BlockStmt
if blockStmt, ok := whileStmt.Body.(*js.BlockStmt); ok {
body = blockStmt
} else {
body = &js.BlockStmt{}
body.List = []js.IStmt{whileStmt.Body}
}
list[i] = &js.ForStmt{Init: left, Cond: whileStmt.Cond, Post: nil, Body: body}
j--
}
}
}
}
list[j] = list[i]
// merge if/else with return/throw when followed by return/throw
MergeIfReturnThrow:
if 0 < j {
// separate from expression merging in case of: if(a)return b;b=c;return d
if ifStmt, ok := list[j-1].(*js.IfStmt); ok && isEmptyStmt(ifStmt.Body) != isEmptyStmt(ifStmt.Else) {
// either the if body is empty or the else body is empty. In case where both bodies have return/throw, we already rewrote that if statement to an return/throw statement
if returnStmt, ok := list[j].(*js.ReturnStmt); ok {
if returnStmt.Value == nil {
if left, ok := ifStmt.Body.(*js.ReturnStmt); ok && left.Value == nil {
list[j-1] = &js.ExprStmt{Value: ifStmt.Cond}
} else if left, ok := ifStmt.Else.(*js.ReturnStmt); ok && left.Value == nil {
list[j-1] = &js.ExprStmt{Value: ifStmt.Cond}
}
} else {
if left, ok := ifStmt.Body.(*js.ReturnStmt); ok && left.Value != nil {
returnStmt.Value = condExpr(ifStmt.Cond, left.Value, returnStmt.Value)
list[j-1] = returnStmt
j--
goto MergeIfReturnThrow
} else if left, ok := ifStmt.Else.(*js.ReturnStmt); ok && left.Value != nil {
returnStmt.Value = condExpr(ifStmt.Cond, returnStmt.Value, left.Value)
list[j-1] = returnStmt
j--
goto MergeIfReturnThrow
}
}
} else if throwStmt, ok := list[j].(*js.ThrowStmt); ok {
if left, ok := ifStmt.Body.(*js.ThrowStmt); ok {
throwStmt.Value = condExpr(ifStmt.Cond, left.Value, throwStmt.Value)
list[j-1] = throwStmt
j--
goto MergeIfReturnThrow
} else if left, ok := ifStmt.Else.(*js.ThrowStmt); ok {
throwStmt.Value = condExpr(ifStmt.Cond, throwStmt.Value, left.Value)
list[j-1] = throwStmt
j--
goto MergeIfReturnThrow
}
}
}
}
j++
}
// remove superfluous return or continue
if 0 < j {
if blockType == functionBlock {
if returnStmt, ok := list[j-1].(*js.ReturnStmt); ok {
if returnStmt.Value == nil || isUndefined(returnStmt.Value) {
j--
} else if commaExpr, ok := returnStmt.Value.(*js.CommaExpr); ok && isUndefined(commaExpr.List[len(commaExpr.List)-1]) {
// rewrite function f(){return a,void 0} => function f(){a}
if len(commaExpr.List) == 2 {
list[j-1] = &js.ExprStmt{Value: commaExpr.List[0]}
} else {
commaExpr.List = commaExpr.List[:len(commaExpr.List)-1]
}
}
}
} else if blockType == iterationBlock {
if branchStmt, ok := list[j-1].(*js.BranchStmt); ok && branchStmt.Type == js.ContinueToken && branchStmt.Label == nil {
j--
}
}
}
return list[:j]
}
package js
import (
"bytes"
"encoding/hex"
stdStrconv "strconv"
"unicode/utf8"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2/js"
"github.com/tdewolff/parse/v2/strconv"
)
// Byte slices for frequently written tokens and keywords, allocated once at
// package initialization so the writer never allocates for them.
var (
	spaceBytes = []byte(" ")
	newlineBytes = []byte("\n")
	starBytes = []byte("*")
	colonBytes = []byte(":")
	semicolonBytes = []byte(";")
	commaBytes = []byte(",")
	dotBytes = []byte(".")
	ellipsisBytes = []byte("...")
	openBraceBytes = []byte("{")
	closeBraceBytes = []byte("}")
	openParenBytes = []byte("(")
	closeParenBytes = []byte(")")
	openBracketBytes = []byte("[")
	closeBracketBytes = []byte("]")
	openParenBracketBytes = []byte("({")
	closeParenOpenBracketBytes = []byte("){")
	notBytes = []byte("!")
	questionBytes = []byte("?")
	equalBytes = []byte("=")
	optChainBytes = []byte("?.")
	arrowBytes = []byte("=>")
	zeroBytes = []byte("0")
	oneBytes = []byte("1")
	letBytes = []byte("let")
	getBytes = []byte("get")
	setBytes = []byte("set")
	asyncBytes = []byte("async")
	functionBytes = []byte("function")
	staticBytes = []byte("static")
	ifOpenBytes = []byte("if(")
	elseBytes = []byte("else")
	withOpenBytes = []byte("with(")
	doBytes = []byte("do")
	whileOpenBytes = []byte("while(")
	forOpenBytes = []byte("for(")
	forAwaitOpenBytes = []byte("for await(")
	inBytes = []byte("in")
	ofBytes = []byte("of")
	switchOpenBytes = []byte("switch(")
	throwBytes = []byte("throw")
	tryBytes = []byte("try")
	catchBytes = []byte("catch")
	finallyBytes = []byte("finally")
	importBytes = []byte("import")
	exportBytes = []byte("export")
	fromBytes = []byte("from")
	returnBytes = []byte("return")
	classBytes = []byte("class")
	asSpaceBytes = []byte("as ")
	asyncSpaceBytes = []byte("async ")
	spaceDefaultBytes = []byte(" default")
	spaceExtendsBytes = []byte(" extends")
	yieldBytes = []byte("yield")
	newBytes = []byte("new")
	openNewBytes = []byte("(new")
	newTargetBytes = []byte("new.target")
	importMetaBytes = []byte("import.meta")
	// minified replacements for common values
	nanBytes = []byte("NaN")
	undefinedBytes = []byte("undefined")
	infinityBytes = []byte("Infinity")
	nullBytes = []byte("null")
	voidZeroBytes = []byte("void 0")
	groupedVoidZeroBytes = []byte("(void 0)")
	oneDivZeroBytes = []byte("1/0")
	groupedOneDivZeroBytes = []byte("(1/0)")
	notZeroBytes = []byte("!0")
	groupedNotZeroBytes = []byte("(!0)")
	notOneBytes = []byte("!1")
	groupedNotOneBytes = []byte("(!1)")
	debuggerBytes = []byte("debugger")
	regExpScriptBytes = []byte("/script>")
)
// isEmptyStmt reports whether stmt is effectively empty: nil, an empty
// statement, or a block containing only effectively-empty statements.
func isEmptyStmt(stmt js.IStmt) bool {
	switch s := stmt.(type) {
	case nil:
		return true
	case *js.EmptyStmt:
		return true
	case *js.BlockStmt:
		// a block is empty when every contained statement is empty
		for _, inner := range s.List {
			if !isEmptyStmt(inner) {
				return false
			}
		}
		return true
	}
	return false
}
// isFlowStmt reports whether stmt transfers control: return, throw, or a
// break/continue branch statement.
func isFlowStmt(stmt js.IStmt) bool {
	switch stmt.(type) {
	case *js.ReturnStmt, *js.ThrowStmt, *js.BranchStmt:
		return true
	}
	return false
}
// lastStmt returns the final statement of stmt, descending into the trailing
// statement of non-empty blocks.
func lastStmt(stmt js.IStmt) js.IStmt {
	for {
		block, isBlock := stmt.(*js.BlockStmt)
		if !isBlock || len(block.List) == 0 {
			return stmt
		}
		stmt = block.List[len(block.List)-1]
	}
}
// endsInIf reports whether the trailing statement of istmt is an if statement
// without an else clause, following blocks, labels, and loop/with bodies.
func endsInIf(istmt js.IStmt) bool {
	switch s := istmt.(type) {
	case *js.IfStmt:
		if s.Else != nil {
			return endsInIf(s.Else)
		}
		// no else: it counts only when it still minifies to an if statement
		_, isIf := optimizeStmt(s).(*js.IfStmt)
		return isIf
	case *js.BlockStmt:
		if n := len(s.List); 0 < n {
			return endsInIf(s.List[n-1])
		}
	case *js.LabelledStmt:
		return endsInIf(s.Value)
	case *js.WithStmt:
		return endsInIf(s.Body)
	case *js.WhileStmt:
		return endsInIf(s.Body)
	case *js.ForStmt:
		return endsInIf(s.Body)
	case *js.ForInStmt:
		return endsInIf(s.Body)
	case *js.ForOfStmt:
		return endsInIf(s.Body)
	}
	return false
}
// precedence maps for the precedence inside the operation; i.e. the minimum
// precedence required of an operand in that position (an operand of lower
// precedence must be parenthesized)
var unaryPrecMap = map[js.TokenType]js.OpPrec{
	js.PostIncrToken: js.OpLHS,
	js.PostDecrToken: js.OpLHS,
	js.PreIncrToken: js.OpUnary,
	js.PreDecrToken: js.OpUnary,
	js.NotToken: js.OpUnary,
	js.BitNotToken: js.OpUnary,
	js.TypeofToken: js.OpUnary,
	js.VoidToken: js.OpUnary,
	js.DeleteToken: js.OpUnary,
	js.PosToken: js.OpUnary,
	js.NegToken: js.OpUnary,
	js.AwaitToken: js.OpUnary,
}

// binaryLeftPrecMap gives the precedence required of the left-hand operand.
var binaryLeftPrecMap = map[js.TokenType]js.OpPrec{
	js.EqToken: js.OpLHS,
	js.MulEqToken: js.OpLHS,
	js.DivEqToken: js.OpLHS,
	js.ModEqToken: js.OpLHS,
	js.ExpEqToken: js.OpLHS,
	js.AddEqToken: js.OpLHS,
	js.SubEqToken: js.OpLHS,
	js.LtLtEqToken: js.OpLHS,
	js.GtGtEqToken: js.OpLHS,
	js.GtGtGtEqToken: js.OpLHS,
	js.BitAndEqToken: js.OpLHS,
	js.BitXorEqToken: js.OpLHS,
	js.BitOrEqToken: js.OpLHS,
	js.ExpToken: js.OpUpdate,
	js.MulToken: js.OpMul,
	js.DivToken: js.OpMul,
	js.ModToken: js.OpMul,
	js.AddToken: js.OpAdd,
	js.SubToken: js.OpAdd,
	js.LtLtToken: js.OpShift,
	js.GtGtToken: js.OpShift,
	js.GtGtGtToken: js.OpShift,
	js.LtToken: js.OpCompare,
	js.LtEqToken: js.OpCompare,
	js.GtToken: js.OpCompare,
	js.GtEqToken: js.OpCompare,
	js.InToken: js.OpCompare,
	js.InstanceofToken: js.OpCompare,
	js.EqEqToken: js.OpEquals,
	js.NotEqToken: js.OpEquals,
	js.EqEqEqToken: js.OpEquals,
	js.NotEqEqToken: js.OpEquals,
	js.BitAndToken: js.OpBitAnd,
	js.BitXorToken: js.OpBitXor,
	js.BitOrToken: js.OpBitOr,
	js.AndToken: js.OpAnd,
	js.OrToken: js.OpOr,
	js.NullishToken: js.OpBitOr, // or OpCoalesce
	js.CommaToken: js.OpExpr,
}

// binaryRightPrecMap gives the precedence required of the right-hand operand;
// it is one level stricter than the operation itself for left-associative
// operators.
var binaryRightPrecMap = map[js.TokenType]js.OpPrec{
	js.EqToken: js.OpAssign,
	js.MulEqToken: js.OpAssign,
	js.DivEqToken: js.OpAssign,
	js.ModEqToken: js.OpAssign,
	js.ExpEqToken: js.OpAssign,
	js.AddEqToken: js.OpAssign,
	js.SubEqToken: js.OpAssign,
	js.LtLtEqToken: js.OpAssign,
	js.GtGtEqToken: js.OpAssign,
	js.GtGtGtEqToken: js.OpAssign,
	js.BitAndEqToken: js.OpAssign,
	js.BitXorEqToken: js.OpAssign,
	js.BitOrEqToken: js.OpAssign,
	js.ExpToken: js.OpExp,
	js.MulToken: js.OpExp,
	js.DivToken: js.OpExp,
	js.ModToken: js.OpExp,
	js.AddToken: js.OpMul,
	js.SubToken: js.OpMul,
	js.LtLtToken: js.OpAdd,
	js.GtGtToken: js.OpAdd,
	js.GtGtGtToken: js.OpAdd,
	js.LtToken: js.OpShift,
	js.LtEqToken: js.OpShift,
	js.GtToken: js.OpShift,
	js.GtEqToken: js.OpShift,
	js.InToken: js.OpShift,
	js.InstanceofToken: js.OpShift,
	js.EqEqToken: js.OpCompare,
	js.NotEqToken: js.OpCompare,
	js.EqEqEqToken: js.OpCompare,
	js.NotEqEqToken: js.OpCompare,
	js.BitAndToken: js.OpEquals,
	js.BitXorToken: js.OpBitAnd,
	js.BitOrToken: js.OpBitXor,
	js.AndToken: js.OpAnd, // changes order in AST but not in execution
	js.OrToken: js.OpOr, // changes order in AST but not in execution
	js.NullishToken: js.OpBitOr, // or OpCoalesce
	js.CommaToken: js.OpAssign,
}

// precedence maps of the operation itself; i.e. the precedence of the
// resulting expression
var unaryOpPrecMap = map[js.TokenType]js.OpPrec{
	js.PostIncrToken: js.OpUpdate,
	js.PostDecrToken: js.OpUpdate,
	js.PreIncrToken: js.OpUpdate,
	js.PreDecrToken: js.OpUpdate,
	js.NotToken: js.OpUnary,
	js.BitNotToken: js.OpUnary,
	js.TypeofToken: js.OpUnary,
	js.VoidToken: js.OpUnary,
	js.DeleteToken: js.OpUnary,
	js.PosToken: js.OpUnary,
	js.NegToken: js.OpUnary,
	js.AwaitToken: js.OpUnary,
}

// binaryOpPrecMap gives the precedence of the binary operation itself.
var binaryOpPrecMap = map[js.TokenType]js.OpPrec{
	js.EqToken: js.OpAssign,
	js.MulEqToken: js.OpAssign,
	js.DivEqToken: js.OpAssign,
	js.ModEqToken: js.OpAssign,
	js.ExpEqToken: js.OpAssign,
	js.AddEqToken: js.OpAssign,
	js.SubEqToken: js.OpAssign,
	js.LtLtEqToken: js.OpAssign,
	js.GtGtEqToken: js.OpAssign,
	js.GtGtGtEqToken: js.OpAssign,
	js.BitAndEqToken: js.OpAssign,
	js.BitXorEqToken: js.OpAssign,
	js.BitOrEqToken: js.OpAssign,
	js.ExpToken: js.OpExp,
	js.MulToken: js.OpMul,
	js.DivToken: js.OpMul,
	js.ModToken: js.OpMul,
	js.AddToken: js.OpAdd,
	js.SubToken: js.OpAdd,
	js.LtLtToken: js.OpShift,
	js.GtGtToken: js.OpShift,
	js.GtGtGtToken: js.OpShift,
	js.LtToken: js.OpCompare,
	js.LtEqToken: js.OpCompare,
	js.GtToken: js.OpCompare,
	js.GtEqToken: js.OpCompare,
	js.InToken: js.OpCompare,
	js.InstanceofToken: js.OpCompare,
	js.EqEqToken: js.OpEquals,
	js.NotEqToken: js.OpEquals,
	js.EqEqEqToken: js.OpEquals,
	js.NotEqEqToken: js.OpEquals,
	js.BitAndToken: js.OpBitAnd,
	js.BitXorToken: js.OpBitXor,
	js.BitOrToken: js.OpBitOr,
	js.AndToken: js.OpAnd,
	js.OrToken: js.OpOr,
	js.NullishToken: js.OpCoalesce,
	js.CommaToken: js.OpExpr,
}
// exprPrec returns the operator precedence of the expression itself.
func exprPrec(iexpr js.IExpr) js.OpPrec {
	switch e := iexpr.(type) {
	case *js.Var, *js.LiteralExpr, *js.ArrayExpr, *js.ObjectExpr, *js.FuncDecl, *js.ClassDecl:
		return js.OpPrimary
	case *js.UnaryExpr:
		return unaryOpPrecMap[e.Op]
	case *js.BinaryExpr:
		return binaryOpPrecMap[e.Op]
	case *js.NewExpr:
		// new without an argument list binds weaker than member access
		if e.Args == nil {
			return js.OpNew
		}
		return js.OpMember
	case *js.TemplateExpr:
		// an untagged template literal is a primary expression
		if e.Tag == nil {
			return js.OpPrimary
		}
		return e.Prec
	case *js.DotExpr:
		return e.Prec
	case *js.IndexExpr:
		return e.Prec
	case *js.NewTargetExpr, *js.ImportMetaExpr:
		return js.OpMember
	case *js.CallExpr:
		return js.OpCall
	case *js.CondExpr, *js.YieldExpr, *js.ArrowFunc:
		return js.OpAssign
	case *js.GroupExpr:
		// parentheses carry the precedence of their contents
		return exprPrec(e.X)
	}
	return js.OpExpr // CommaExpr
}
// hasSideEffects reports whether evaluating i could have observable side
// effects, meaning it cannot safely be dropped from the output. Unknown
// expression types conservatively return true.
// NOTE(review): the comment below states variable usage and indexing have no
// side effects, yet Var, DotExpr, and IndexExpr all return true (conservative:
// property access may trigger getters/proxies, an undeclared variable throws)
// — confirm which is intended.
func hasSideEffects(i js.IExpr) bool {
	// assume that variable usage and that the index operator themselves have no side effects
	switch expr := i.(type) {
	case *js.Var:
		return true
	case *js.LiteralExpr, *js.FuncDecl, *js.ClassDecl, *js.ArrowFunc, *js.NewTargetExpr, *js.ImportMetaExpr:
		return false
	case *js.NewExpr, *js.CallExpr, *js.YieldExpr:
		return true
	case *js.GroupExpr:
		return hasSideEffects(expr.X)
	case *js.DotExpr:
		return true
	case *js.IndexExpr:
		return true
	case *js.CondExpr:
		return hasSideEffects(expr.Cond) || hasSideEffects(expr.X) || hasSideEffects(expr.Y)
	case *js.CommaExpr:
		// falls through to 'return true' below when no item has side effects
		for _, item := range expr.List {
			if hasSideEffects(item) {
				return true
			}
		}
	case *js.ArrayExpr:
		for _, item := range expr.List {
			if hasSideEffects(item.Value) {
				return true
			}
		}
		return false
	case *js.ObjectExpr:
		// value, default initializer, and computed property name may each run code
		for _, item := range expr.List {
			if hasSideEffects(item.Value) || item.Init != nil && hasSideEffects(item.Init) || item.Name != nil && item.Name.IsComputed() && hasSideEffects(item.Name.Computed) {
				return true
			}
		}
		return false
	case *js.TemplateExpr:
		if hasSideEffects(expr.Tag) {
			return true
		}
		for _, item := range expr.List {
			if hasSideEffects(item.Expr) {
				return true
			}
		}
		return false
	case *js.UnaryExpr:
		// delete and ++/-- mutate state; otherwise depends on the operand
		if expr.Op == js.DeleteToken || expr.Op == js.PreIncrToken || expr.Op == js.PreDecrToken || expr.Op == js.PostIncrToken || expr.Op == js.PostDecrToken {
			return true
		}
		return hasSideEffects(expr.X)
	case *js.BinaryExpr:
		// only assignment operators mutate state
		return binaryOpPrecMap[expr.Op] == js.OpAssign
	}
	return true
}
// TODO: use in more cases
// groupExpr parenthesizes i when its own precedence is below prec, unless it
// is already a group. The pair (OpCoalesce inside, OpBitOr required) is
// exempt; see the precedence maps where NullishToken is listed as OpBitOr.
func groupExpr(i js.IExpr, prec js.OpPrec) js.IExpr {
	if _, isGroup := i.(*js.GroupExpr); isGroup {
		return i
	}
	inner := exprPrec(i)
	if prec <= inner || inner == js.OpCoalesce && prec == js.OpBitOr {
		return i
	}
	return &js.GroupExpr{X: i}
}
// TODO: use in more cases
// condExpr builds cond?x:y, grouping each part as its position requires. When
// cond is a comma expression the conditional is folded onto its last element
// so the comma list stays flat: (a,b)?x:y => a,b?x:y.
func condExpr(cond, x, y js.IExpr) js.IExpr {
	build := func(c js.IExpr) *js.CondExpr {
		return &js.CondExpr{
			Cond: groupExpr(c, js.OpCoalesce),
			X:    groupExpr(x, js.OpAssign),
			Y:    groupExpr(y, js.OpAssign),
		}
	}
	if comma, ok := cond.(*js.CommaExpr); ok {
		last := len(comma.List) - 1
		comma.List[last] = build(comma.List[last])
		return comma
	}
	return build(cond)
}
// commaExpr joins x and y into a single comma expression, flattening either
// operand when it already is one.
func commaExpr(x, y js.IExpr) js.IExpr {
	left, isComma := x.(*js.CommaExpr)
	if !isComma {
		left = &js.CommaExpr{List: []js.IExpr{x}}
	}
	if right, isComma := y.(*js.CommaExpr); isComma {
		left.List = append(left.List, right.List...)
		return left
	}
	left.List = append(left.List, y)
	return left
}
// innerExpr strips any number of surrounding group (parenthesis) expressions.
func innerExpr(i js.IExpr) js.IExpr {
	group, isGroup := i.(*js.GroupExpr)
	for isGroup {
		i = group.X
		group, isGroup = i.(*js.GroupExpr)
	}
	return i
}
// finalExpr returns the expression that determines the value of i: groups are
// stripped, a comma expression yields its last element, and an assignment
// yields its target.
func finalExpr(i js.IExpr) js.IExpr {
	i = innerExpr(i)
	if comma, isComma := i.(*js.CommaExpr); isComma {
		i = comma.List[len(comma.List)-1]
	}
	binary, isBinary := i.(*js.BinaryExpr)
	if isBinary && binary.Op == js.EqToken {
		return binary.X // the assignment target carries the assigned value
	}
	return i
}
// isTrue reports whether i is the true literal or a negation !x of a
// statically falsy expression.
func isTrue(i js.IExpr) bool {
	switch e := innerExpr(i).(type) {
	case *js.LiteralExpr:
		return e.TokenType == js.TrueToken
	case *js.UnaryExpr:
		if e.Op == js.NotToken {
			falsy, _ := isFalsy(e.X)
			return falsy
		}
	}
	return false
}
// isFalse reports whether i is the false literal or a negation !x of a
// statically truthy expression.
func isFalse(i js.IExpr) bool {
	switch e := innerExpr(i).(type) {
	case *js.LiteralExpr:
		return e.TokenType == js.FalseToken
	case *js.UnaryExpr:
		if e.Op == js.NotToken {
			truthy, _ := isTruthy(e.X)
			return truthy
		}
	}
	return false
}
// isEqualExpr reports whether a and b are trivially the same expression;
// currently only variables are recognized, compared by name after stripping
// parentheses.
func isEqualExpr(a, b js.IExpr) bool {
	va, okA := innerExpr(a).(*js.Var)
	vb, okB := innerExpr(b).(*js.Var)
	if okA && okB {
		return bytes.Equal(va.Name(), vb.Name())
	}
	// TODO: use reflect.DeepEqual?
	return false
}
// toNullishExpr rewrites a conditional expression that tests a variable
// against null/undefined into a shorter ES2020 form and reports success:
//
//	a==null ? b : a           =>  a??b
//	a==null ? undefined : a.b =>  a?.b   (also for calls, indexing, templates)
func toNullishExpr(condExpr *js.CondExpr) (js.IExpr, bool) {
	if v, not, ok := isUndefinedOrNullVar(condExpr.Cond); ok {
		left, right := condExpr.X, condExpr.Y
		if not {
			// condition matches when v is NOT null/undefined; normalize so
			// left is the null branch and right the non-null branch
			left, right = right, left
		}
		if isEqualExpr(v, right) {
			// convert conditional expression to nullish: a==null?b:a => a??b
			return &js.BinaryExpr{js.NullishToken, groupExpr(right, binaryLeftPrecMap[js.NullishToken]), groupExpr(left, binaryRightPrecMap[js.NullishToken])}, true
		} else if isUndefined(left) {
			// convert conditional expression to optional expr: a==null?undefined:a.b => a?.b
			// walk down the call/member chain of the non-null branch to find
			// its base expression and the link directly above that base
			expr := right
			var parent js.IExpr
			for {
				prevExpr := expr
				if callExpr, ok := expr.(*js.CallExpr); ok {
					expr = callExpr.X
				} else if dotExpr, ok := expr.(*js.DotExpr); ok {
					expr = dotExpr.X
				} else if indexExpr, ok := expr.(*js.IndexExpr); ok {
					expr = indexExpr.X
				} else if templateExpr, ok := expr.(*js.TemplateExpr); ok {
					expr = templateExpr.Tag
				} else {
					break
				}
				parent = prevExpr
			}
			if parent != nil && isEqualExpr(v, expr) {
				// mark the link directly above the tested variable as optional (?.)
				if callExpr, ok := parent.(*js.CallExpr); ok {
					callExpr.Optional = true
				} else if dotExpr, ok := parent.(*js.DotExpr); ok {
					dotExpr.Optional = true
				} else if indexExpr, ok := parent.(*js.IndexExpr); ok {
					indexExpr.Optional = true
				} else if templateExpr, ok := parent.(*js.TemplateExpr); ok {
					templateExpr.Optional = true
				}
				return right, true
			}
		}
	}
	return nil, false
}
// isUndefinedOrNullVar matches conditions that test a single variable against
// null/undefined. Recognized shapes: v==null (loose equality covers both
// null and undefined) and pairs like v===null||v===undefined (each comparison
// may be == or ===); negated forms use !=/!== combined with &&. It returns
// the tested variable, whether the test is negated (i.e. matches when v is
// NOT null/undefined), and whether a pattern was recognized.
func isUndefinedOrNullVar(i js.IExpr) (*js.Var, bool, bool) {
	i = innerExpr(i)
	if binary, ok := i.(*js.BinaryExpr); ok && (binary.Op == js.OrToken || binary.Op == js.AndToken) {
		// paired form: v==null||v==undefined, or negated v!=null&&v!=undefined
		eqEqOp := js.EqEqToken
		eqEqEqOp := js.EqEqEqToken
		if binary.Op == js.AndToken {
			eqEqOp = js.NotEqToken
			eqEqEqOp = js.NotEqEqToken
		}
		left, isBinaryX := innerExpr(binary.X).(*js.BinaryExpr)
		right, isBinaryY := innerExpr(binary.Y).(*js.BinaryExpr)
		if isBinaryX && isBinaryY && (left.Op == eqEqOp || left.Op == eqEqEqOp) && (right.Op == eqEqOp || right.Op == eqEqEqOp) {
			var leftVar, rightVar *js.Var
			// the variable may be on either side of each comparison
			if v, ok := left.X.(*js.Var); ok && isUndefinedOrNull(left.Y) {
				leftVar = v
			} else if v, ok := left.Y.(*js.Var); ok && isUndefinedOrNull(left.X) {
				leftVar = v
			}
			if v, ok := right.X.(*js.Var); ok && isUndefinedOrNull(right.Y) {
				rightVar = v
			} else if v, ok := right.Y.(*js.Var); ok && isUndefinedOrNull(right.X) {
				rightVar = v
			}
			// both comparisons must test the same variable
			if leftVar != nil && leftVar == rightVar {
				return leftVar, binary.Op == js.AndToken, true
			}
		}
	} else if ok && (binary.Op == js.EqEqToken || binary.Op == js.NotEqToken) {
		// single loose comparison: v==null or v!=null
		var variable *js.Var
		if v, ok := binary.X.(*js.Var); ok && isUndefinedOrNull(binary.Y) {
			variable = v
		} else if v, ok := binary.Y.(*js.Var); ok && isUndefinedOrNull(binary.X) {
			variable = v
		}
		if variable != nil {
			return variable, binary.Op == js.NotEqToken, true
		}
	}
	return nil, false, false
}
// isUndefinedOrNull reports whether i is the null literal or evaluates to
// undefined (see isUndefined).
func isUndefinedOrNull(i js.IExpr) bool {
	i = innerExpr(i)
	lit, isLit := i.(*js.LiteralExpr)
	if isLit {
		return lit.TokenType == js.NullToken
	}
	return isUndefined(i)
}
// isUndefined reports whether i evaluates to undefined: either the variable
// named undefined or a void expression without side effects.
func isUndefined(i js.IExpr) bool {
	switch e := innerExpr(i).(type) {
	case *js.Var:
		// TODO: only if not defined (i.e. undefined is not shadowed)
		return bytes.Equal(e.Name(), undefinedBytes)
	case *js.UnaryExpr:
		if e.Op == js.VoidToken {
			return !hasSideEffects(e.X)
		}
	}
	return false
}
// isTruthy returns whether i is statically truthy and whether its truthiness
// could be determined at all; (false, true) means it is known falsy.
func isTruthy(i js.IExpr) (bool, bool) {
	falsy, known := isFalsy(i)
	if !known {
		return false, false
	}
	return !falsy, true
}
// isFalsy returns whether i is statically falsy and whether its truthiness
// could be determined at all; (false, true) means it is known truthy and
// (false, false) means unknown.
func isFalsy(i js.IExpr) (bool, bool) {
	negated := false
	group, isGroup := i.(*js.GroupExpr)
	unary, isUnary := i.(*js.UnaryExpr)
	// unwrap groups and ! operators, tracking the net negation
	for isGroup || isUnary && unary.Op == js.NotToken {
		if isGroup {
			i = group.X
		} else {
			i = unary.X
			negated = !negated
		}
		group, isGroup = i.(*js.GroupExpr)
		unary, isUnary = i.(*js.UnaryExpr)
	}
	if lit, ok := i.(*js.LiteralExpr); ok {
		tt := lit.TokenType
		d := lit.Data
		if tt == js.FalseToken || tt == js.NullToken || tt == js.StringToken && len(lit.Data) == 0 {
			return !negated, true // falsy
		} else if tt == js.TrueToken || tt == js.StringToken {
			return negated, true // truthy
		} else if tt == js.DecimalToken || tt == js.BinaryToken || tt == js.OctalToken || tt == js.HexadecimalToken || tt == js.IntegerToken {
			// a numeric literal is falsy iff its value is zero: scan for a
			// significant digit, skipping the 0x/0b/0o prefix, zeros, and the
			// decimal point
			isHex := tt == js.HexadecimalToken
			for _, c := range d {
				if c == 'n' || !isHex && (c == 'e' || c == 'E') {
					// BigInt suffix or decimal exponent: an all-zero mantissa
					// stays zero regardless of the exponent. In hexadecimal
					// literals e/E are digits (e.g. 0xe == 14), so the
					// exponent break must not apply there.
					break
				} else if c != '0' && c != '.' && c != 'x' && c != 'X' && c != 'b' && c != 'B' && c != 'o' && c != 'O' {
					return negated, true // truthy
				}
			}
			return !negated, true // falsy
		}
	} else if isUndefined(i) {
		return !negated, true // falsy
	} else if v, ok := i.(*js.Var); ok && bytes.Equal(v.Name(), nanBytes) {
		return !negated, true // falsy
	}
	return false, false // unknown
}
// isBooleanExpr reports whether expr is guaranteed to evaluate to a boolean:
// a ! unary, a comparison/equality, &&/|| of two boolean operands, a
// true/false literal, or a parenthesized boolean expression.
func isBooleanExpr(expr js.IExpr) bool {
	switch e := expr.(type) {
	case *js.UnaryExpr:
		return e.Op == js.NotToken
	case *js.BinaryExpr:
		prec := binaryOpPrecMap[e.Op]
		if prec == js.OpAnd || prec == js.OpOr {
			// && and || yield one of their operands; both must be boolean
			return isBooleanExpr(e.X) && isBooleanExpr(e.Y)
		}
		return prec == js.OpCompare || prec == js.OpEquals
	case *js.LiteralExpr:
		return e.TokenType == js.TrueToken || e.TokenType == js.FalseToken
	case *js.GroupExpr:
		return isBooleanExpr(e.X)
	}
	return false
}
// invertBooleanOp returns the logical negation of an (in)equality operator,
// or ErrorToken when op is not one of ==, !=, ===, !==.
func invertBooleanOp(op js.TokenType) js.TokenType {
	switch op {
	case js.EqEqToken:
		return js.NotEqToken
	case js.NotEqToken:
		return js.EqEqToken
	case js.EqEqEqToken:
		return js.NotEqEqToken
	case js.NotEqEqToken:
		return js.EqEqEqToken
	}
	return js.ErrorToken
}
// optimizeBooleanExpr returns an expression with the boolean value of expr
// (inverted when invert is set), minimizing output size:
//   - inverting an equality comparison flips its operator (a==b => a!=b),
//   - an already-boolean expression passes through (grouped as prec requires),
//   - anything else is coerced with a double negation !!expr.
//
// prec is the precedence required by the surrounding expression.
func optimizeBooleanExpr(expr js.IExpr, invert bool, prec js.OpPrec) js.IExpr {
	if invert {
		// unary !(boolean) has already been handled
		if binaryExpr, ok := expr.(*js.BinaryExpr); ok && binaryOpPrecMap[binaryExpr.Op] == js.OpEquals {
			binaryExpr.Op = invertBooleanOp(binaryExpr.Op)
			return expr
		}
		return optimizeUnaryExpr(&js.UnaryExpr{Op: js.NotToken, X: groupExpr(expr, js.OpUnary)}, prec)
	}
	if isBooleanExpr(expr) {
		return groupExpr(expr, prec)
	}
	return &js.UnaryExpr{Op: js.NotToken, X: &js.UnaryExpr{Op: js.NotToken, X: groupExpr(expr, js.OpUnary)}}
}
// optimizeUnaryExpr simplifies a ! unary expression for the required parent
// precedence prec: redundant double negations over booleans are removed,
// !(a==b) becomes a!=b, and a negated &&/|| is rewritten with De Morgan's law
// when the rewrite saves output bytes.
func optimizeUnaryExpr(expr *js.UnaryExpr, prec js.OpPrec) js.IExpr {
	if expr.Op == js.NotToken {
		invert := true
		var expr2 js.IExpr = expr.X
		// unwrap nested ! operators and groups, tracking the net negation
		for {
			if unary, ok := expr2.(*js.UnaryExpr); ok && unary.Op == js.NotToken {
				invert = !invert
				expr2 = unary.X
			} else if group, ok := expr2.(*js.GroupExpr); ok {
				expr2 = group.X
			} else {
				break
			}
		}
		if !invert && isBooleanExpr(expr2) {
			// even number of ! around a boolean expression: drop them all
			return groupExpr(expr2, prec)
		} else if binary, ok := expr2.(*js.BinaryExpr); ok && invert {
			if binaryOpPrecMap[binary.Op] == js.OpEquals {
				// !(a==b) => a!=b (and ===/!== likewise)
				binary.Op = invertBooleanOp(binary.Op)
				return groupExpr(binary, prec)
			} else if binary.Op == js.AndToken || binary.Op == js.OrToken {
				// De Morgan: the new operator is the dual of the old one
				op := js.AndToken
				if binary.Op == js.AndToken {
					op = js.OrToken
				}
				precInside := binaryOpPrecMap[op]
				needsGroup := precInside < prec && (precInside != js.OpCoalesce || prec != js.OpBitOr)
				// rewrite !(a||b) to !a&&!b
				// rewrite !(a==0||b==0) to a!=0&&b!=0
				// score counts bytes saved; only rewrite when positive
				score := 3 // savings if rewritten (group parentheses and not-token)
				if needsGroup {
					score -= 2
				}
				score -= 2 // add two not-tokens for left and right
				// == and === can become != and !==
				var isEqX, isEqY bool
				if binaryExpr, ok := binary.X.(*js.BinaryExpr); ok && binaryOpPrecMap[binaryExpr.Op] == js.OpEquals {
					score += 1
					isEqX = true
				}
				if binaryExpr, ok := binary.Y.(*js.BinaryExpr); ok && binaryOpPrecMap[binaryExpr.Op] == js.OpEquals {
					score += 1
					isEqY = true
				}
				// add group if it wasn't already there
				var needsGroupX, needsGroupY bool
				if !isEqX && binaryLeftPrecMap[binary.Op] <= exprPrec(binary.X) && exprPrec(binary.X) < js.OpUnary {
					score -= 2
					needsGroupX = true
				}
				if !isEqY && binaryRightPrecMap[binary.Op] <= exprPrec(binary.Y) && exprPrec(binary.Y) < js.OpUnary {
					score -= 2
					needsGroupY = true
				}
				// remove group
				if op == js.OrToken {
					if exprPrec(binary.X) == js.OpOr {
						score += 2
					}
					if exprPrec(binary.Y) == js.OpAnd {
						score += 2
					}
				}
				if 0 < score {
					// apply the rewrite decided above, in place
					binary.Op = op
					if isEqX {
						binary.X.(*js.BinaryExpr).Op = invertBooleanOp(binary.X.(*js.BinaryExpr).Op)
					}
					if isEqY {
						binary.Y.(*js.BinaryExpr).Op = invertBooleanOp(binary.Y.(*js.BinaryExpr).Op)
					}
					if needsGroupX {
						binary.X = &js.GroupExpr{binary.X}
					}
					if needsGroupY {
						binary.Y = &js.GroupExpr{binary.Y}
					}
					if !isEqX {
						binary.X = &js.UnaryExpr{js.NotToken, binary.X}
					}
					if !isEqY {
						binary.Y = &js.UnaryExpr{js.NotToken, binary.Y}
					}
					if needsGroup {
						return &js.GroupExpr{binary}
					}
					return binary
				}
			}
		}
	}
	return expr
}
// optimizeCondExpr shrinks a conditional expression cond?x:y using a cascade
// of rewrites: constant-folding a statically known condition, a?a:b => a||b,
// a?b:a => a&&b, a?b:b => (a,b), nullish/optional chaining (ES2020+), merging
// identical single-argument calls, boolean bodies => !/&&/||, and nested
// conditionals with equal false branches. prec is the precedence required by
// the parent expression.
func (m *jsMinifier) optimizeCondExpr(expr *js.CondExpr, prec js.OpPrec) js.IExpr {
	// remove double negative !! in condition, or switch cases for single negative !
	if unary1, ok := expr.Cond.(*js.UnaryExpr); ok && unary1.Op == js.NotToken {
		if unary2, ok := unary1.X.(*js.UnaryExpr); ok && unary2.Op == js.NotToken {
			if isBooleanExpr(unary2.X) {
				expr.Cond = unary2.X
			}
		} else {
			// single !: drop it and swap the branches instead
			expr.Cond = unary1.X
			expr.X, expr.Y = expr.Y, expr.X
		}
	}
	finalCond := finalExpr(expr.Cond)
	if truthy, ok := isTruthy(expr.Cond); truthy && ok {
		// if condition is truthy
		return expr.X
	} else if !truthy && ok {
		// if condition is falsy
		return expr.Y
	} else if isEqualExpr(finalCond, expr.X) && (exprPrec(finalCond) < js.OpAssign || binaryLeftPrecMap[js.OrToken] <= exprPrec(finalCond)) && (exprPrec(expr.Y) < js.OpAssign || binaryRightPrecMap[js.OrToken] <= exprPrec(expr.Y)) {
		// if condition is equal to true body
		// for higher prec we need to add group parenthesis, and for lower prec we have parenthesis anyways. This only is shorter if len(expr.X) >= 3. isEqualExpr only checks for literal variables, which is a name will be minified to a one or two character name.
		return &js.BinaryExpr{js.OrToken, groupExpr(expr.Cond, binaryLeftPrecMap[js.OrToken]), expr.Y}
	} else if isEqualExpr(finalCond, expr.Y) && (exprPrec(finalCond) < js.OpAssign || binaryLeftPrecMap[js.AndToken] <= exprPrec(finalCond)) && (exprPrec(expr.X) < js.OpAssign || binaryRightPrecMap[js.AndToken] <= exprPrec(expr.X)) {
		// if condition is equal to false body
		// for higher prec we need to add group parenthesis, and for lower prec we have parenthesis anyways. This only is shorter if len(expr.X) >= 3. isEqualExpr only checks for literal variables, which is a name will be minified to a one or two character name.
		return &js.BinaryExpr{js.AndToken, groupExpr(expr.Cond, binaryLeftPrecMap[js.AndToken]), expr.X}
	} else if isEqualExpr(expr.X, expr.Y) {
		// if true and false bodies are equal
		return groupExpr(&js.CommaExpr{[]js.IExpr{expr.Cond, expr.X}}, prec)
	} else if nullishExpr, ok := toNullishExpr(expr); ok && m.o.minVersion(2020) {
		// no need to check whether left/right need to add groups, as the space saving is always more
		return nullishExpr
	} else {
		// a?f(b):f(c) => f(a?b:c) for identical single-argument callees
		callX, isCallX := expr.X.(*js.CallExpr)
		callY, isCallY := expr.Y.(*js.CallExpr)
		if isCallX && isCallY && len(callX.Args.List) == 1 && len(callY.Args.List) == 1 && !callX.Args.List[0].Rest && !callY.Args.List[0].Rest && isEqualExpr(callX.X, callY.X) {
			expr.X = callX.Args.List[0].Value
			expr.Y = callY.Args.List[0].Value
			return &js.CallExpr{callX.X, js.Args{[]js.Arg{{expr, false}}}, false} // recompress the conditional expression inside
		}
		// shorten when true and false bodies are true and false
		trueX, falseX := isTrue(expr.X), isFalse(expr.X)
		trueY, falseY := isTrue(expr.Y), isFalse(expr.Y)
		if trueX && falseY || falseX && trueY {
			// a?true:false => !!a, a?false:true => !a
			return optimizeBooleanExpr(expr.Cond, falseX, prec)
		} else if trueX || trueY {
			// trueX != trueY
			cond := optimizeBooleanExpr(expr.Cond, trueY, binaryLeftPrecMap[js.OrToken])
			if trueY {
				return &js.BinaryExpr{js.OrToken, cond, groupExpr(expr.X, binaryRightPrecMap[js.OrToken])}
			} else {
				return &js.BinaryExpr{js.OrToken, cond, groupExpr(expr.Y, binaryRightPrecMap[js.OrToken])}
			}
		} else if falseX || falseY {
			// falseX != falseY
			cond := optimizeBooleanExpr(expr.Cond, falseX, binaryLeftPrecMap[js.AndToken])
			if falseX {
				return &js.BinaryExpr{js.AndToken, cond, groupExpr(expr.Y, binaryRightPrecMap[js.AndToken])}
			} else {
				return &js.BinaryExpr{js.AndToken, cond, groupExpr(expr.X, binaryRightPrecMap[js.AndToken])}
			}
		} else if condExpr, ok := expr.X.(*js.CondExpr); ok && isEqualExpr(expr.Y, condExpr.Y) {
			// nested conditional expression with same false bodies
			return &js.CondExpr{&js.BinaryExpr{js.AndToken, groupExpr(expr.Cond, binaryLeftPrecMap[js.AndToken]), groupExpr(condExpr.Cond, binaryRightPrecMap[js.AndToken])}, condExpr.X, expr.Y}
		} else if prec <= js.OpExpr {
			// regular conditional expression
			// convert (a,b)?c:d => a,b?c:d
			if group, ok := expr.Cond.(*js.GroupExpr); ok {
				if comma, ok := group.X.(*js.CommaExpr); ok && js.OpCoalesce <= exprPrec(comma.List[len(comma.List)-1]) {
					expr.Cond = comma.List[len(comma.List)-1]
					comma.List[len(comma.List)-1] = expr
					return comma // recompress the conditional expression inside
				}
			}
		}
	}
	return expr
}
// isHexDigit reports whether b is an ASCII hexadecimal digit (0-9, a-f, A-F).
func isHexDigit(b byte) bool {
	switch {
	case '0' <= b && b <= '9':
		return true
	case 'a' <= b && b <= 'f':
		return true
	case 'A' <= b && b <= 'F':
		return true
	}
	return false
}
// mergeBinaryExpr concatenates adjacent string literals in a (possibly
// nested) + expression in place, e.g. "a"+"b" => "ab", even when string
// additions are interleaved with other additions.
func mergeBinaryExpr(expr *js.BinaryExpr) {
	// merge string concatenations which may be intertwined with other additions
	var ok bool
	for expr.Op == js.AddToken {
		if lit, ok := expr.Y.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken {
			left := expr
			strings := []*js.LiteralExpr{lit} // literals collected right-to-left
			n := len(lit.Data) - 2            // total content length excluding quotes
			// walk down the left spine collecting consecutive string literals
			for left.Op == js.AddToken {
				if 50 < len(strings) {
					return // limit recursion
				}
				if lit, ok := left.X.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken {
					strings = append(strings, lit)
					n += len(lit.Data) - 2
					left.X = nil
					break
				} else if newLeft, ok := left.X.(*js.BinaryExpr); ok {
					if lit, ok := newLeft.Y.(*js.LiteralExpr); ok && lit.TokenType == js.StringToken {
						strings = append(strings, lit)
						n += len(lit.Data) - 2
						left = newLeft
						continue
					}
				}
				break
			}
			if 1 < len(strings) {
				// unescaped quotes will be repaired in minifyString later on
				// build: leftmost literal (keeping its opening quote), the
				// middle contents, then the rightmost content and its quote
				b := make([]byte, 0, n+2)
				b = append(b, strings[len(strings)-1].Data[:len(strings[len(strings)-1].Data)-1]...)
				for i := len(strings) - 2; 0 < i; i-- {
					b = append(b, strings[i].Data[1:len(strings[i].Data)-1]...)
				}
				b = append(b, strings[0].Data[1:]...)
				// make the closing quote match the opening quote character
				b[len(b)-1] = b[0]
				expr.X = left.X
				expr.Y.(*js.LiteralExpr).Data = b
			}
		}
		// continue down the left spine for further merge opportunities
		if expr, ok = expr.X.(*js.BinaryExpr); !ok {
			break
		}
	}
}
// minifyString rewrites a quoted string literal b (including its surrounding
// quotes) to use the quote character needing the fewest escapes; a backtick
// template is considered when allowTemplate is set. The chosen quotes are
// written in place and superfluous escapes are stripped via replaceEscapes.
func minifyString(b []byte, allowTemplate bool) []byte {
	if len(b) < 3 {
		return []byte("\"\"")
	}
	// switch quotes if more optimal
	// count characters that would need escaping under each quote choice,
	// including ones currently written as octal/hex/unicode escapes
	singleQuotes := 0
	doubleQuotes := 0
	backtickQuotes := 0
	newlines := 0
	dollarSigns := 0
	for i := 1; i < len(b)-1; i++ {
		if b[i] == '\'' {
			singleQuotes++
		} else if b[i] == '"' {
			doubleQuotes++
		} else if b[i] == '`' {
			backtickQuotes++
		} else if b[i] == '$' && i+1 < len(b) && b[i+1] == '{' {
			// ${ would start an interpolation inside a template
			dollarSigns++
		} else if b[i] == '\\' && i+1 < len(b) {
			if b[i+1] == 'n' {
				newlines++
			} else if '1' <= b[i+1] && b[i+1] <= '9' && i+2 < len(b) {
				// legacy octal escapes: \12 = \n, \42 = ", \47 = ', \140 = `
				if b[i+1] == '1' && b[i+2] == '2' {
					newlines++
				} else if b[i+1] == '4' && b[i+2] == '2' {
					doubleQuotes++
				} else if b[i+1] == '4' && b[i+2] == '7' {
					singleQuotes++
				} else if i+3 < len(b) && b[i+1] == '1' && b[i+2] == '4' && b[i+3] == '0' {
					backtickQuotes++
				}
			} else if b[i+1] == 'x' && i+3 < len(b) {
				// hex escapes: \x0a = \n, \x22 = ", \x27 = ', \x60 = `
				if b[i+2] == '0' && b[i+3]|0x20 == 'a' {
					newlines++
				} else if b[i+2] == '2' && b[i+3] == '2' {
					doubleQuotes++
				} else if b[i+2] == '2' && b[i+3] == '7' {
					singleQuotes++
				} else if b[i+2] == '6' && b[i+3] == '0' {
					backtickQuotes++
				}
			} else if b[i+1] == 'u' && i+5 < len(b) && b[i+2] == '0' && b[i+3] == '0' {
				// four-digit unicode escapes: \u000a, \u0022, \u0027, \u0060
				if b[i+4] == '0' && b[i+5]|0x20 == 'a' {
					newlines++
				} else if b[i+4] == '2' && b[i+5] == '2' {
					doubleQuotes++
				} else if b[i+4] == '2' && b[i+5] == '7' {
					singleQuotes++
				} else if b[i+4] == '6' && b[i+5] == '0' {
					backtickQuotes++
				}
			} else if b[i+1] == 'u' && i+4 < len(b) && b[i+2] == '{' {
				// braced unicode escapes: skip leading zeros, e.g. \u{0022}
				j := i + 3
				for j < len(b) && b[j] == '0' {
					j++
				}
				if j+1 < len(b) && b[j]|0x20 == 'a' && b[j+1] == '}' {
					newlines++
				} else if j+2 < len(b) && b[j+2] == '}' {
					if b[j] == '2' && b[j+1] == '2' {
						doubleQuotes++
					} else if b[j] == '2' && b[j+1] == '7' {
						singleQuotes++
					} else if b[j] == '6' && b[j+1] == '0' {
						backtickQuotes++
					}
				}
			}
		}
	}
	quote := byte('"') // default to " for better GZIP compression
	quotes := doubleQuotes
	// NOTE(review): the first branch re-assigns the default '"' and is
	// redundant; kept for symmetry with the second branch
	if doubleQuotes < singleQuotes {
		quote = byte('"')
	} else if singleQuotes < doubleQuotes {
		quote = byte('\'')
		quotes = singleQuotes
	}
	// a template wins when escaping backticks and ${ costs less than
	// escaping quotes plus writing \n for newlines
	if allowTemplate && backtickQuotes+dollarSigns < quotes+newlines {
		quote = byte('`')
	}
	b[0] = quote
	b[len(b)-1] = quote
	// strip unnecessary escapes
	return replaceEscapes(b, quote, 1, 1)
}
// replaceEscapes removes escape sequences that are unnecessary for the given
// quote character from the string or template literal in b, rewriting the
// buffer in place where possible (it may grow when an extra backslash must be
// inserted). prefix and suffix give the byte lengths of the opening and
// closing delimiters. It returns the resulting slice.
func replaceEscapes(b []byte, quote byte, prefix, suffix int) []byte {
	// strip unnecessary escapes
	j := 0     // write head of the compacted output
	start := 0 // begin of the span that still has to be copied to j
	for i := prefix; i < len(b)-suffix; i++ {
		if c := b[i]; c == '\\' {
			c = b[i+1]
			if c == quote || c == '\\' || c == 'r' || quote != '`' && c == 'n' || c == '0' && (len(b)-suffix <= i+2 || b[i+2] < '0' || '7' < b[i+2]) {
				// keep escape sequence
				i++
				continue
			}
			n := 1 // number of characters to skip
			if c == '\n' || c == '\r' || c == 0xE2 && i+3 < len(b)-1 && b[i+2] == 0x80 && (b[i+3] == 0xA8 || b[i+3] == 0xA9) {
				// line continuations (LF, CR, CRLF, U+2028, U+2029) can be dropped entirely
				if c == 0xE2 {
					n = 4
				} else if c == '\r' && i+2 < len(b)-1 && b[i+2] == '\n' {
					n = 3
				} else {
					n = 2
				}
			} else if c == 'x' {
				if i+3 < len(b)-1 && isHexDigit(b[i+2]) && b[i+2] < '8' && isHexDigit(b[i+3]) && (!(b[i+2] == '0' && b[i+3] == '0') || i+3 == len(b) || b[i+3] != '\\' && (b[i+3] < '0' && '7' < b[i+3])) {
					// don't convert \x00 to \0 if it may be an octal number
					// hexadecimal escapes: decode the byte in place
					_, _ = hex.Decode(b[i:i+1:i+1], b[i+2:i+4])
					n = 4
					if b[i] == '\\' || b[i] == quote || b[i] == '\r' || quote != '`' && b[i] == '\n' || b[i] == 0 {
						// decoded byte must itself stay escaped
						if b[i] == '\n' {
							b[i+1] = 'n'
						} else if b[i] == '\r' {
							b[i+1] = 'r'
						} else {
							b[i+1] = b[i]
						}
						b[i] = '\\'
						i++
						n--
					}
					i++
					n--
				} else {
					i++
					continue
				}
			} else if c == 'u' && i+2 < len(b) {
				// unicode escapes: \uXXXX or \u{X...}
				l := i + 2
				if b[i+2] == '{' {
					l++
				}
				r := l
				for ; r < len(b) && (b[i+2] == '{' || r < l+4); r++ {
					if b[r] < '0' || '9' < b[r] && b[r] < 'A' || 'F' < b[r] && b[r] < 'a' || 'f' < b[r] {
						break
					}
				}
				if b[i+2] == '{' && (6 < r-l || len(b) <= r || b[r] != '}') || b[i+2] != '{' && r-l != 4 {
					// malformed escape, leave as-is
					i++
					continue
				}
				num, err := stdStrconv.ParseInt(string(b[l:r]), 16, 32)
				if err != nil || 0x10FFFF <= num {
					i++
					continue
				}
				n = 2 + r - l
				if b[i+2] == '{' {
					n += 2
				}
				if num == 0 {
					// don't convert NULL to literal NULL (gives JS parsing problems)
					if r == len(b) || b[r] != '\\' && (b[r] < '0' && '7' < b[r]) {
						b[i+1] = '0'
						i += 2
						n -= 2
					} else {
						// don't convert NULL to \0 (may be an octal number)
						b[i+1] = 'x'
						b[i+2] = '0'
						b[i+3] = '0'
						i += 4
						n -= 4
					}
				} else if num != 13 && (quote == '`' || num != 10) {
					// decode unicode character to UTF-8 and put at the end of the escape sequence
					// then skip the first part of the escape sequence until the decoded character
					m := utf8.RuneLen(rune(num))
					if m == -1 {
						i++
						continue
					} else if num < 256 && quote == byte(num) {
						// decoded character equals the quote, keep it escaped
						b[i] = '\\'
						i++
						n--
					}
					utf8.EncodeRune(b[i:], rune(num))
					i += m
					n -= m
				} else {
					// CR/LF must remain escaped; use the short \r or \n form
					if num == 10 {
						b[i+1] = 'n'
					} else {
						b[i+1] = 'r'
					}
					i += 2
					n -= 2
				}
			} else if '0' <= c && c <= '7' {
				// octal escapes (legacy), \0 already handled (quote != `)
				num := c - '0'
				n++
				if i+2 < len(b)-1 && '0' <= b[i+2] && b[i+2] <= '7' {
					num = num*8 + b[i+2] - '0'
					n++
					if num < 32 && i+3 < len(b)-1 && '0' <= b[i+3] && b[i+3] <= '7' {
						num = num*8 + b[i+3] - '0'
						n++
					}
				}
				b[i] = num
				if num == 0 || num == '\\' || num == quote || num == '\r' || quote != '`' && num == '\n' {
					// decoded byte must itself stay escaped
					if num == 0 {
						b[i+1] = '0'
					} else if num == '\n' {
						b[i+1] = 'n'
					} else if num == '\r' {
						b[i+1] = 'r'
					} else {
						b[i+1] = b[i]
					}
					b[i] = '\\'
					i++
					n--
				}
				i++
				n--
			} else if quote == '`' && c == 'n' {
				// template literals may contain literal newlines
				b[i] = '\n'
				i++
			} else if c == 't' {
				b[i] = '\t'
				i++
			} else if c == 'f' {
				b[i] = '\f'
				i++
			} else if c == 'v' {
				b[i] = '\v'
				i++
			} else if c == 'b' {
				b[i] = '\b'
				i++
			}
			// remove unnecessary escape character, anything but 0x00, 0x0A, 0x0D, \, ' or "
			if start != 0 {
				j += copy(b[j:], b[start:i])
			} else {
				j = i
			}
			start = i + n
			i += n - 1
		} else if c == quote || c == '$' && quote == '`' && (i+1 < len(b) && b[i+1] == '{' || i+2 < len(b) && b[i+1] == '\\' && b[i+2] == '{') {
			// may not be escaped properly when changing quotes
			if j < start {
				// avoid append
				j += copy(b[j:], b[start:i])
				b[j] = '\\'
				j++
				start = i
			} else {
				b = append(append(b[:i], '\\'), b[i:]...)
				i++
				b[i] = c // was overwritten above
			}
		} else if c == '<' && 9 <= len(b)-1-i {
			// escape "</script" so an inline script in HTML is not terminated early
			if b[i+1] == '\\' && 10 <= len(b)-1-i && bytes.Equal(b[i+2:i+10], []byte("/script>")) {
				i += 9
			} else if bytes.Equal(b[i+1:i+9], []byte("/script>")) {
				i++
				if j < start {
					// avoid append
					j += copy(b[j:], b[start:i])
					b[j] = '\\'
					j++
					start = i
				} else {
					b = append(append(b[:i], '\\'), b[i:]...)
					i++
					b[i] = '/' // was overwritten above
				}
			}
		}
	}
	if start != 0 {
		// flush the remaining span of the compacted output
		j += copy(b[j:], b[start:])
		return b[:j]
	}
	return b
}
// regexpEscapeTable marks the bytes that, when preceded by a backslash OUTSIDE
// a character class, form a meaningful regular expression escape and must
// therefore keep their backslash when minifying.
var regexpEscapeTable = [256]bool{
	// ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, true, false, false, false, // $
	true, true, true, true, false, false, true, true, // (, ), *, +, ., /
	true, true, true, true, true, true, true, true, // 0, 1, 2, 3, 4, 5, 6, 7
	true, true, false, false, false, false, false, true, // 8, 9, ?
	false, false, true, false, true, false, false, false, // B, D
	false, false, false, false, false, false, false, false,
	true, false, false, true, false, false, false, true, // P, S, W
	false, false, false, true, true, true, true, false, // [, \, ], ^
	false, false, true, true, true, false, true, false, // b, c, d, f
	false, false, false, true, false, false, true, false, // k, n
	true, false, true, true, true, true, true, true, // p, r, s, t, u, v, w
	true, false, false, true, true, true, false, false, // x, {, |, }
	// non-ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
}
// regexpClassEscapeTable marks the bytes that, when preceded by a backslash
// INSIDE a [...] character class, form a meaningful escape and must therefore
// keep their backslash when minifying.
var regexpClassEscapeTable = [256]bool{
	// ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	true, true, true, true, true, true, true, true, // 0, 1, 2, 3, 4, 5, 6, 7
	true, true, false, false, false, false, false, false, // 8, 9
	false, false, false, false, true, false, false, false, // D
	false, false, false, false, false, false, false, false,
	true, false, false, true, false, false, false, true, // P, S, W
	false, false, false, false, true, true, false, false, // \, ]
	false, false, true, true, true, false, true, false, // b, c, d, f
	false, false, false, false, false, false, true, false, // n
	true, false, true, true, true, true, true, true, // p, r, s, t, u, v, w
	true, false, false, false, false, false, false, false, // x
	// non-ASCII
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
}
// minifyRegExp removes unnecessary backslash escapes from the regular
// expression literal in b (delimiters included), consulting the escape tables
// above to decide which escapes carry meaning inside and outside character
// classes. The slice is modified in place.
func minifyRegExp(b []byte) []byte {
	inClass := false // currently inside a [...] character class
	afterDash := 0   // positions advanced since the class start or the last range dash
	iClass := 0      // index of the '[' that opened the current class
	for i := 1; i < len(b)-1; i++ {
		if inClass {
			afterDash++
		}
		if b[i] == '\\' {
			c := b[i+1]
			escape := true
			if inClass {
				// '-' needs its escape only when it could form a range; '^' only right after '['
				escape = regexpClassEscapeTable[c] || c == '-' && 2 < afterDash && i+2 < len(b) && b[i+2] != ']' || c == '^' && i == iClass+1
			} else {
				escape = regexpEscapeTable[c]
			}
			if !escape {
				// drop the superfluous backslash
				b = append(b[:i], b[i+1:]...)
				if inClass && 2 < afterDash && c == '-' {
					afterDash = 0
				} else if inClass && c == '^' {
					afterDash = 1
				}
			} else {
				i++
			}
		} else if b[i] == '[' {
			if b[i+1] == '^' {
				i++
			}
			afterDash = 1
			inClass = true
			iClass = i
		} else if inClass && b[i] == ']' {
			inClass = false
		} else if b[i] == '/' {
			// closing delimiter of the regular expression body
			break
		} else if inClass && 2 < afterDash && b[i] == '-' {
			afterDash = 0
		}
	}
	return b
}
// removeUnderscoresAndSuffix removes all numeric-separator underscores from b
// in place and strips a trailing 'n' BigInt suffix when present. It returns
// the cleaned slice and whether the 'n' suffix was found.
func removeUnderscoresAndSuffix(b []byte) ([]byte, bool) {
	// Compact in a single pass; the original element-by-element
	// append(b[:i], b[i+1:]...) shifted the whole tail for every underscore,
	// which is quadratic for literals with many separators.
	j := 0
	for i := 0; i < len(b); i++ {
		if b[i] != '_' {
			b[j] = b[i]
			j++
		}
	}
	b = b[:j]
	if 0 < len(b) && b[len(b)-1] == 'n' {
		return b[:len(b)-1], true
	}
	return b, false
}
// decimalNumber minifies a decimal number literal with the given precision.
// BigInt literals (trailing 'n') are returned with separators removed only.
func decimalNumber(b []byte, prec int) []byte {
	b, isBigInt := removeUnderscoresAndSuffix(b)
	if isBigInt {
		// BigInts cannot be passed through minify.Number; restore the suffix
		return append(b, 'n')
	}
	return minify.Number(b, prec)
}
// binaryNumber converts a binary literal (0b...) to its decimal representation
// when the value fits an int64; prec is passed through to minify.Number.
// A BigInt 'n' suffix is preserved.
func binaryNumber(b []byte, prec int) []byte {
	var suffix bool
	b, suffix = removeUnderscoresAndSuffix(b)
	if len(b) <= 2 || 65 < len(b) {
		// more than 63 binary digits cannot fit an int64; keep the literal,
		// restoring the BigInt suffix that removeUnderscoresAndSuffix stripped
		// (previously the 'n' was silently dropped here, turning a BigInt into
		// a Number literal)
		if suffix {
			return append(b, 'n')
		}
		return b
	}
	// parse the binary digits after "0b"
	var n int64
	for _, c := range b[2:] {
		n *= 2
		n += int64(c - '0')
	}
	// write the decimal representation back into b (always shorter than binary)
	i := strconv.LenInt(n) - 1
	b = b[:i+1]
	for 0 <= i {
		b[i] = byte('0' + n%10)
		n /= 10
		i--
	}
	if suffix {
		return append(b, 'n')
	}
	return minify.Number(b, prec)
}
// octalNumber converts an octal literal (0o...) to its decimal representation
// when the value fits an int64; prec is passed through to minify.Number.
// A BigInt 'n' suffix is preserved.
func octalNumber(b []byte, prec int) []byte {
	var suffix bool
	b, suffix = removeUnderscoresAndSuffix(b)
	if len(b) <= 2 || 23 < len(b) {
		// more than 21 octal digits cannot fit an int64; keep the literal,
		// restoring the BigInt suffix that removeUnderscoresAndSuffix stripped
		// (previously the 'n' was silently dropped here, turning a BigInt into
		// a Number literal)
		if suffix {
			return append(b, 'n')
		}
		return b
	}
	// parse the octal digits after "0o"
	var n int64
	for _, c := range b[2:] {
		n *= 8
		n += int64(c - '0')
	}
	// write the decimal representation back into b (never longer than octal)
	i := strconv.LenInt(n) - 1
	b = b[:i+1]
	for 0 <= i {
		b[i] = byte('0' + n%10)
		n /= 10
		i--
	}
	if suffix {
		return append(b, 'n')
	}
	return minify.Number(b, prec)
}
// hexadecimalNumber converts a hexadecimal literal (0x...) to its decimal
// representation when that is not longer; prec is passed through to
// minify.Number. A BigInt 'n' suffix is preserved.
func hexadecimalNumber(b []byte, prec int) []byte {
	var suffix bool
	b, suffix = removeUnderscoresAndSuffix(b)
	if len(b) <= 2 || 12 < len(b) || len(b) == 12 && ('D' < b[2] && b[2] <= 'F' || 'd' < b[2]) {
		// decimal representation would be longer (or the value overflows the
		// size heuristic); keep the literal, restoring the BigInt suffix that
		// removeUnderscoresAndSuffix stripped (previously the 'n' was silently
		// dropped here, turning a BigInt into a Number literal)
		if suffix {
			return append(b, 'n')
		}
		return b
	}
	// parse the hex digits after "0x"
	var n int64
	for _, c := range b[2:] {
		n *= 16
		if c <= '9' {
			n += int64(c - '0')
		} else if c <= 'F' {
			n += 10 + int64(c-'A')
		} else {
			n += 10 + int64(c-'a')
		}
	}
	// write the decimal representation back into b
	i := strconv.LenInt(n) - 1
	b = b[:i+1]
	for 0 <= i {
		b[i] = byte('0' + n%10)
		n /= 10
		i--
	}
	if suffix {
		return append(b, 'n')
	}
	return minify.Number(b, prec)
}
package js
import (
"bytes"
"sort"
"github.com/tdewolff/parse/v2/js"
)
// identStartLen is the number of characters allowed as the first character of
// a generated identifier; identContinueLen counts those allowed afterwards.
const identStartLen = 54
const identContinueLen = 64

// renamer generates short replacement names for declared variables.
type renamer struct {
	identStart    []byte // characters usable as the first character of a name
	identContinue []byte // characters usable after the first character
	identOrder    map[byte]int // rank of each identStart character
	reserved      map[string]struct{} // names that must never be generated (keywords etc.)
	rename        bool // whether renaming is enabled at all
}
// newRenamer creates a renamer. When useCharFreq is set, the identifier
// alphabets are ordered by character frequency measured over JS samples so
// common names compress better.
func newRenamer(rename, useCharFreq bool) *renamer {
	reserved := make(map[string]struct{}, len(js.Keywords))
	for name := range js.Keywords {
		reserved[name] = struct{}{}
	}
	var identStart, identContinue []byte
	if useCharFreq {
		// sorted based on character frequency of a collection of JS samples
		identStart = []byte("etnsoiarclduhmfpgvbjy_wOxCEkASMFTzDNLRPHIBV$WUKqYGXQZJ")
		identContinue = []byte("etnsoiarcldu14023hm8f6pg57v9bjy_wOxCEkASMFTzDNLRPHIBV$WUKqYGXQZJ")
	} else {
		identStart = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$")
		identContinue = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$0123456789")
	}
	if len(identStart) != identStartLen || len(identContinue) != identContinueLen {
		panic("bad identStart or identContinue lengths")
	}
	identOrder := make(map[byte]int, identStartLen)
	for i, c := range identStart {
		identOrder[c] = i
	}
	return &renamer{
		identStart:    identStart,
		identContinue: identContinue,
		identOrder:    identOrder,
		reserved:      reserved,
		rename:        rename,
	}
}
// renameScope assigns new short names to all variables declared in scope,
// skipping candidate names that collide with reserved words or with
// undeclared (global) variables.
func (r *renamer) renameScope(scope js.Scope) {
	if !r.rename {
		return
	}
	// keep function argument declaration order to improve GZIP compression
	sort.Sort(js.VarsByUses(scope.Declared[scope.NumFuncArgs:]))
	next := 0
	for _, v := range scope.Declared {
		for {
			v.Data = r.getName(v.Data, next)
			next++
			if !r.isReserved(v.Data, scope.Undeclared) {
				break
			}
		}
	}
}
// isReserved reports whether name is a reserved word or collides with an
// undeclared variable in scope.
func (r *renamer) isReserved(name []byte, undeclared js.VarArray) bool {
	if 1 < len(name) { // there are no keywords or known globals that are one character long
		if _, isReserved := r.reserved[string(name)]; isReserved {
			return true
		}
	}
	for _, v := range undeclared {
		// follow links to the variable's final definition
		for v.Link != nil {
			v = v.Link
		}
		if bytes.Equal(name, v.Data) {
			return true
		}
	}
	return false
}
// getIndex returns the ordinal of name in the generated-name sequence, or -1
// when name contains a character outside the identifier alphabets. It is the
// inverse of getName.
func (r *renamer) getIndex(name []byte) int {
	// interpret name as a mixed-radix number: identStartLen choices for the
	// first character and identContinueLen for every later character
	index := 0
	for i := len(name) - 1; 0 <= i; i-- {
		chars := r.identContinue
		if i == 0 {
			chars = r.identStart
			index *= identStartLen
		} else {
			index *= identContinueLen
		}
		k := bytes.IndexByte(chars, name[i])
		if k == -1 {
			return -1
		}
		index += k
	}
	// add the count of all shorter names that precede this length
	offset := identStartLen
	for n := 0; n < len(name)-1; n++ {
		index += offset
		offset *= identContinueLen
	}
	return index
}
// getName returns the index-th generated variable name, reusing name's backing
// array when it has enough capacity.
// Generate new names for variables where the last character is (a-zA-Z$_) and others are (a-zA-Z).
// Thus we can have 54 one-character names and 52*54=2808 two-character names for every branch leaf.
// That is sufficient for virtually all input.
func (r *renamer) getName(name []byte, index int) []byte {
	// one character
	if index < identStartLen {
		name[0] = r.identStart[index]
		return name[:1]
	}
	index -= identStartLen

	// two characters or more: find the required length by subtracting the
	// number of names of each shorter length
	length := 2
	offset := identStartLen * identContinueLen
	for offset <= index {
		index -= offset
		offset *= identContinueLen
		length++
	}

	if cap(name) < length {
		name = make([]byte, length)
	} else {
		name = name[:length]
	}
	// decode index as a mixed-radix number into the name characters
	name[0] = r.identStart[index%identStartLen]
	index /= identStartLen
	for i := 1; i < length; i++ {
		name[i] = r.identContinue[index%identContinueLen]
		index /= identContinueLen
	}
	return name
}
////////////////////////////////////////////////////////////////
// hasDefines reports whether any binding in the variable declaration carries
// an initializer.
func hasDefines(v *js.VarDecl) bool {
	for i := range v.List {
		if v.List[i].Default != nil {
			return true
		}
	}
	return false
}
// bindingVars collects every variable declared by the given binding, including
// those nested inside array and object destructuring patterns, in declaration
// order.
func bindingVars(ibinding js.IBinding) (vs []*js.Var) {
	switch b := ibinding.(type) {
	case *js.Var:
		vs = append(vs, b)
	case *js.BindingArray:
		for i := range b.List {
			if b.List[i].Binding != nil {
				vs = append(vs, bindingVars(b.List[i].Binding)...)
			}
		}
		if b.Rest != nil {
			vs = append(vs, bindingVars(b.Rest)...)
		}
	case *js.BindingObject:
		for i := range b.List {
			if b.List[i].Value.Binding != nil {
				vs = append(vs, bindingVars(b.List[i].Value.Binding)...)
			}
		}
		if b.Rest != nil {
			// rest binding of an object pattern is always a plain variable
			vs = append(vs, b.Rest)
		}
	}
	return
}
// addDefinition adds binding (with optional initializer value) to the variable
// declaration list decl, removing any existing uninitialized declarations of
// the same variables so each variable remains declared exactly once. If
// forward is set, binding=value comes before decl in the source; the new
// element is then prepended instead of appended.
func addDefinition(decl *js.VarDecl, binding js.IBinding, value js.IExpr, forward bool) {
	if decl.TokenType != js.ErrorToken {
		// see if not already defined in variable declaration list
		// if forward is set, binding=value comes before decl, otherwise the reverse holds true
		vars := bindingVars(binding)

		// remove variables in destination
	RemoveVarsLoop:
		for _, vbind := range vars {
			for i, item := range decl.List {
				if v, ok := item.Binding.(*js.Var); ok && item.Default == nil && v == vbind {
					v.Uses--
					decl.List = append(decl.List[:i], decl.List[i+1:]...)
					continue RemoveVarsLoop
				}
			}

			if value != nil {
				// variable declaration must be somewhere else, find and remove it
				for _, decl2 := range decl.Scope.Func.VarDecls {
					if !decl2.InForInOf {
						for i, item := range decl2.List {
							if v, ok := item.Binding.(*js.Var); ok && item.Default == nil && v == vbind {
								v.Uses--
								decl2.List = append(decl2.List[:i], decl2.List[i+1:]...)
								continue RemoveVarsLoop
							}
						}
					}
				}
			}
		}
	}

	// add declaration to destination
	item := js.BindingElement{Binding: binding, Default: value}
	if forward {
		decl.List = append([]js.BindingElement{item}, decl.List...)
	} else {
		decl.List = append(decl.List, item)
	}
}
// mergeVarDecls moves all declarations from src into dst, emptying src. If
// forward is set, src comes first in the source and dst after, otherwise the
// order is reversed.
func mergeVarDecls(dst, src *js.VarDecl, forward bool) {
	if forward {
		// reverse src.List so the loop below can walk front-to-back; sometimes
		// addDefinition removes another declaration from the src list
		for lo, hi := 0, len(src.List)-1; lo < hi; lo, hi = lo+1, hi-1 {
			src.List[lo], src.List[hi] = src.List[hi], src.List[lo]
		}
	}
	// re-evaluate len(src.List) every iteration: addDefinition may shrink it
	for i := 0; i < len(src.List); i++ {
		addDefinition(dst, src.List[i].Binding, src.List[i].Default, forward)
	}
	src.List = src.List[:0]
}
// mergeVarDeclExprStmt merges a variable declaration with an assignment
// expression statement. If forward is set then expr comes first in the source
// and decl after, otherwise the order is reversed. It returns whether the
// expression statement was merged completely (and can thus be removed).
func mergeVarDeclExprStmt(decl *js.VarDecl, exprStmt *js.ExprStmt, forward bool) bool {
	if decl2, ok := exprStmt.Value.(*js.VarDecl); ok {
		// this happens when a variable declarations is converted to an expression due to hoisting
		mergeVarDecls(decl, decl2, forward)
		return true
	} else if commaExpr, ok := exprStmt.Value.(*js.CommaExpr); ok {
		n := 0 // number of comma-expression items merged into decl
		for i := 0; i < len(commaExpr.List); i++ {
			item := commaExpr.List[i]
			if forward {
				// when expr precedes decl, merge from the end of the list
				item = commaExpr.List[len(commaExpr.List)-i-1]
			}
			if src, ok := item.(*js.VarDecl); ok {
				// this happens when a variable declarations is converted to an expression due to hoisting
				mergeVarDecls(decl, src, forward)
				n++
				continue
			} else if binaryExpr, ok := item.(*js.BinaryExpr); ok && binaryExpr.Op == js.EqToken {
				if v, ok := binaryExpr.X.(*js.Var); ok && v.Decl == js.VariableDecl {
					addDefinition(decl, v, binaryExpr.Y, forward)
					n++
					continue
				}
			}
			// stop at the first item that cannot be merged
			break
		}
		merge := n == len(commaExpr.List)
		// strip the merged items from the comma expression
		if !forward {
			commaExpr.List = commaExpr.List[n:]
		} else {
			commaExpr.List = commaExpr.List[:len(commaExpr.List)-n]
		}
		return merge
	} else if binaryExpr, ok := exprStmt.Value.(*js.BinaryExpr); ok && binaryExpr.Op == js.EqToken {
		if v, ok := binaryExpr.X.(*js.Var); ok && v.Decl == js.VariableDecl {
			addDefinition(decl, v, binaryExpr.Y, forward)
			return true
		}
	}
	return false
}
// countHoistLength estimates the number of bytes the variable names of
// ibinding occupy in a hoisted declaration list (each name plus a comma).
func (m *jsMinifier) countHoistLength(ibinding js.IBinding) int {
	vs := bindingVars(ibinding)
	if !m.o.KeepVarNames {
		return len(vs) * 2 // assume that var name will be of length one, +1 for the comma
	}
	n := 0
	for _, v := range vs {
		n += len(v.Data) + 1 // +1 for the comma when added to other declaration
	}
	return n
}
// hoistVars hoists all variable declarations in the current module/function
// scope to the variable declaration that reduces file size the most. All other
// declarations are converted to expressions and their variable names are
// copied to the only remaining declaration. This is possible because an
// ArrayBindingPattern and ObjectBindingPattern can be converted to an
// ArrayLiteral or ObjectLiteral respectively, as they are supersets of the
// BindingPatterns.
func (m *jsMinifier) hoistVars(body *js.BlockStmt) {
	if 1 < len(body.Scope.VarDecls) {
		// Select which variable declarations will be hoisted (convert to expression) and which not
		best := 0
		scores := make([]int, len(body.Scope.VarDecls)) // savings if hoisting target
		hoist := make([]bool, len(body.Scope.VarDecls))
		for i, varDecl := range body.Scope.VarDecls {
			hoist[i] = true
			if varDecl.InForInOf {
				// variable names in for-in or for-of cannot be removed
				continue
			}
			n := 0          // total number of vars with decls
			score := 3      // "var"
			nArrays := 0    // of which lhs arrays
			nObjects := 0   // of which lhs objects
			hasDefinitions := false
			for j, item := range varDecl.List {
				if item.Default != nil {
					// move arrays/objects to the front (saves a space)
					if _, ok := item.Binding.(*js.BindingObject); ok {
						if j != 0 && nArrays == 0 && nObjects == 0 {
							varDecl.List[0], varDecl.List[j] = varDecl.List[j], varDecl.List[0]
						}
						nObjects++
					} else if _, ok := item.Binding.(*js.BindingArray); ok {
						if j != 0 && nArrays == 0 && nObjects == 0 {
							varDecl.List[0], varDecl.List[j] = varDecl.List[j], varDecl.List[0]
						}
						nArrays++
					}
					score -= m.countHoistLength(item.Binding) // var names and commas
					hasDefinitions = true
					n++
				}
			}
			if nArrays == 0 && nObjects == 0 {
				score++ // required space after var
			}
			if !hasDefinitions && varDecl.InFor {
				score-- // semicolon can be reused
			}
			if nObjects != 0 && !varDecl.InFor && nObjects == n {
				// required parenthesis around braces to not confound it with a block statement
				score -= 2
			}
			if score < scores[best] || body.Scope.VarDecls[best].InForInOf {
				// select var decl that reduces the least when hoist target
				best = i
			}
			if score < 0 {
				// don't hoist if it increases the amount of characters
				hoist[i] = false
			}
			scores[i] = score
		}
		if body.Scope.VarDecls[best].InForInOf {
			// no savings possible
			return
		}

		decl := body.Scope.VarDecls[best]
		if 10000 < len(decl.List) {
			// NOTE(review): bail out on very long declaration lists — presumably
			// a guard against quadratic work below; confirm
			return
		}
		hoist[best] = false

		// get original declarations
		orig := []*js.Var{}
		for _, item := range decl.List {
			orig = append(orig, bindingVars(item.Binding)...)
		}

		// hoist other variable declarations in this function scope but don't initialize yet
		j := 0
		for i, varDecl := range body.Scope.VarDecls {
			if hoist[i] {
				// converted to an expression by clearing the token type
				varDecl.TokenType = js.ErrorToken
				for _, item := range varDecl.List {
					refs := bindingVars(item.Binding)
					bindingElements := make([]js.BindingElement, 0, len(refs))
				DeclaredLoop:
					for _, ref := range refs {
						// skip variables already declared in the target declaration
						for _, v := range orig {
							if ref == v {
								continue DeclaredLoop
							}
						}
						bindingElements = append(bindingElements, js.BindingElement{Binding: ref, Default: nil})
						orig = append(orig, ref)

						// register the name as undeclared in all scopes up to the function scope
						s := decl.Scope
						for s != nil && s != s.Func {
							s.AddUndeclared(ref)
							s = s.Parent
						}
						if item.Default != nil {
							ref.Uses++
						}
					}
					if i < best {
						// prepend
						decl.List = append(decl.List[:j], append(bindingElements, decl.List[j:]...)...)
						j += len(bindingElements)
					} else {
						// append
						decl.List = append(decl.List, bindingElements...)
					}
				}
			}
		}

		// rearrange to put array/object first
		var prevRefs []*js.Var
	BeginArrayObject:
		for i, item := range decl.List {
			refs := bindingVars(item.Binding)
			if _, ok := item.Binding.(*js.Var); !ok {
				if i != 0 {
					// only swap forward if its initializer does not read a
					// variable assigned by an earlier element
					interferes := false
					if item.Default != nil {
					InterferenceLoop:
						for _, ref := range refs {
							for _, v := range prevRefs {
								if ref == v {
									interferes = true
									break InterferenceLoop
								}
							}
						}
					}
					if !interferes {
						decl.List[0], decl.List[i] = decl.List[i], decl.List[0]
						break BeginArrayObject
					}
				} else {
					break BeginArrayObject
				}
			}
			if item.Default != nil {
				prevRefs = append(prevRefs, refs...)
			}
		}
	}
}
// Package json minifies JSON following the specifications at http://json.org/.
package json
import (
"io"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/json"
)
var (
	commaBytes     = []byte(",") // separator between array items / object members
	colonBytes     = []byte(":") // separator between object key and value
	zeroBytes      = []byte("0") // restored leading zero for numbers like ".5"
	minusZeroBytes = []byte("-0") // restored leading zero for numbers like "-.5"
)
////////////////////////////////////////////////////////////////

// Minifier is a JSON minifier.
type Minifier struct {
	Precision   int  // number of significant digits
	KeepNumbers bool // prevent numbers from being minified
}
// Minify minifies JSON data, it reads from r and writes to w.
// It is a convenience wrapper around a zero-valued Minifier.
func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	var o Minifier
	return o.Minify(m, w, r, params)
}
// Minify minifies JSON data, it reads from r and writes to w.
func (o *Minifier) Minify(_ *minify.M, w io.Writer, r io.Reader, _ map[string]string) error {
	skipComma := true // no separator before the very first token
	z := parse.NewInput(r)
	defer z.Restore()

	p := json.NewParser(z)
	for {
		state := p.State()
		gt, text := p.Next()
		if gt == json.ErrorGrammar {
			// flush pending output; an EOF parser error means we are done
			if _, err := w.Write(nil); err != nil {
				return err
			}
			if p.Err() != io.EOF {
				return p.Err()
			}
			return nil
		}

		// emit the separator (comma or colon) that precedes this token;
		// NOTE(review): intermediate Write errors are ignored here — presumably
		// surfaced by the final Write(nil) above; confirm
		if !skipComma && gt != json.EndObjectGrammar && gt != json.EndArrayGrammar {
			if state == json.ObjectKeyState || state == json.ArrayState {
				w.Write(commaBytes)
			} else if state == json.ObjectValueState {
				w.Write(colonBytes)
			}
		}
		skipComma = gt == json.StartObjectGrammar || gt == json.StartArrayGrammar

		if !o.KeepNumbers && 0 < len(text) && ('0' <= text[0] && text[0] <= '9' || text[0] == '-') {
			text = minify.Number(text, o.Precision)
			// JSON requires a digit before the decimal point; restore the zero
			// that minify.Number may have stripped
			if text[0] == '.' {
				w.Write(zeroBytes)
			} else if 1 < len(text) && text[0] == '-' && text[1] == '.' {
				text = text[1:]
				w.Write(minusZeroBytes)
			}
		}
		w.Write(text)
	}
}
// Package minify relates MIME type to minifiers. Several minifiers are provided in the subpackages.
package minify
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"regexp"
"strings"
"sync"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/buffer"
)
// Warning is used to report usage warnings such as using a deprecated feature.
var Warning = log.New(os.Stderr, "WARNING: ", 0)

// ErrNotExist is returned when no minifier exists for a given mimetype.
var ErrNotExist = errors.New("minifier does not exist for mimetype")

// ErrClosedWriter is returned when writing to a closed writer.
var ErrClosedWriter = errors.New("write on closed writer") // TODO: DEPRECATED, remove
////////////////////////////////////////////////////////////////
// MinifierFunc is a function that implements Minifier.
type MinifierFunc func(*M, io.Writer, io.Reader, map[string]string) error

// Minify calls f(m, w, r, params).
func (f MinifierFunc) Minify(m *M, w io.Writer, r io.Reader, params map[string]string) error {
	return f(m, w, r, params)
}

// Minifier is the interface for minifiers.
// The *M parameter is used for minifying embedded resources, such as JS within HTML.
type Minifier interface {
	Minify(*M, io.Writer, io.Reader, map[string]string) error
}
////////////////////////////////////////////////////////////////

// patternMinifier pairs a mimetype regular expression with a Minifier.
type patternMinifier struct {
	pattern *regexp.Regexp
	Minifier
}

// cmdMinifier delegates minification to an external command.
type cmdMinifier struct {
	cmd *exec.Cmd
}

// cmdArgExtension matches a file extension directly following $in/$out in a
// command argument.
var cmdArgExtension = regexp.MustCompile(`^\.[0-9a-zA-Z]+`)
// Minify implements the Minifier interface by running an external command.
// Occurrences of $in and $out (optionally followed by a file extension) in the
// command's arguments are replaced by temporary file names; otherwise the
// command's stdin and stdout are connected to r and w directly.
func (c *cmdMinifier) Minify(_ *M, w io.Writer, r io.Reader, _ map[string]string) error {
	cmd := &exec.Cmd{}
	*cmd = *c.cmd // concurrency safety

	var in, out *os.File
	for i, arg := range cmd.Args {
		if j := strings.Index(arg, "$in"); j != -1 {
			var err error
			ext := cmdArgExtension.FindString(arg[j+3:])
			if in, err = ioutil.TempFile("", "minify-in-*"+ext); err != nil {
				return err
			}
			// previously the temporary file was neither closed nor removed,
			// leaking a file descriptor and disk space on every invocation
			defer os.Remove(in.Name())
			defer in.Close()
			cmd.Args[i] = arg[:j] + in.Name() + arg[j+3+len(ext):]
		} else if j := strings.Index(arg, "$out"); j != -1 {
			var err error
			ext := cmdArgExtension.FindString(arg[j+4:])
			if out, err = ioutil.TempFile("", "minify-out-*"+ext); err != nil {
				return err
			}
			defer os.Remove(out.Name())
			defer out.Close()
			cmd.Args[i] = arg[:j] + out.Name() + arg[j+4+len(ext):]
		}
	}

	if in == nil {
		cmd.Stdin = r
	} else if _, err := io.Copy(in, r); err != nil {
		return err
	}
	if out == nil {
		cmd.Stdout = w
	} else {
		// copy the command's output file to w after the command has run
		// (deferred last, so it executes before the Close/Remove defers above)
		defer io.Copy(w, out)
	}
	stderr := &bytes.Buffer{}
	cmd.Stderr = stderr

	err := cmd.Run()
	if _, ok := err.(*exec.ExitError); ok {
		if stderr.Len() != 0 {
			err = fmt.Errorf("%s", stderr.String())
		}
		err = fmt.Errorf("command %s failed: %w", cmd.Path, err)
	}
	return err
}
////////////////////////////////////////////////////////////////

// M holds a map of mimetype => function to allow recursive minifier calls of the minifier functions.
type M struct {
	mutex   sync.RWMutex // guards literal and pattern
	literal map[string]Minifier // minifiers registered for an exact mimetype
	pattern []patternMinifier // minifiers registered for a mimetype regular expression
	URL *url.URL // NOTE(review): presumably the base URL for resolving relative URLs; not used in this chunk — confirm
}
// New returns a new M with empty minifier registries.
func New() *M {
	return &M{
		literal: map[string]Minifier{},
		pattern: []patternMinifier{},
	}
}
// Add adds a minifier to the mimetype => function map (unsafe for concurrent use).
func (m *M) Add(mimetype string, minifier Minifier) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.literal[mimetype] = minifier
}
// AddFunc adds a minify function to the mimetype => function map (unsafe for concurrent use).
func (m *M) AddFunc(mimetype string, minifier MinifierFunc) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.literal[mimetype] = minifier
}
// AddRegexp adds a minifier to the mimetype => function map (unsafe for concurrent use).
func (m *M) AddRegexp(pattern *regexp.Regexp, minifier Minifier) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.pattern = append(m.pattern, patternMinifier{pattern, minifier})
}
// AddFuncRegexp adds a minify function to the mimetype => function map (unsafe for concurrent use).
func (m *M) AddFuncRegexp(pattern *regexp.Regexp, minifier MinifierFunc) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.pattern = append(m.pattern, patternMinifier{pattern, minifier})
}
// AddCmd adds a minify function to the mimetype => function map (unsafe for concurrent use) that executes a command to process the minification.
// It allows the use of external tools like ClosureCompiler, UglifyCSS, etc. for a specific mimetype.
func (m *M) AddCmd(mimetype string, cmd *exec.Cmd) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.literal[mimetype] = &cmdMinifier{cmd}
}
// AddCmdRegexp adds a minify function to the mimetype => function map (unsafe for concurrent use) that executes a command to process the minification.
// It allows the use of external tools like ClosureCompiler, UglifyCSS, etc. for a specific mimetype regular expression.
func (m *M) AddCmdRegexp(pattern *regexp.Regexp, cmd *exec.Cmd) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	m.pattern = append(m.pattern, patternMinifier{pattern, &cmdMinifier{cmd}})
}
// Match returns the pattern and minifier that gets matched with the mediatype.
// It returns nil when no matching minifier exists.
// It has the same matching algorithm as Minify.
func (m *M) Match(mediatype string) (string, map[string]string, MinifierFunc) {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	// split the mediatype into mimetype and parameters
	mimetype, params := parse.Mediatype([]byte(mediatype))
	if minifier, ok := m.literal[string(mimetype)]; ok { // string conversion is optimized away
		return string(mimetype), params, minifier.Minify
	}

	// fall back to minifiers registered by regular expression
	for _, minifier := range m.pattern {
		if minifier.pattern.Match(mimetype) {
			return minifier.pattern.String(), params, minifier.Minify
		}
	}
	return string(mimetype), params, nil
}
// Minify minifies the content of a Reader and writes it to a Writer (safe for concurrent use).
// An error is returned when no such mimetype exists (ErrNotExist) or when an error occurred in the minifier function.
// Mediatype may take the form of 'text/plain', 'text/*', '*/*' or 'text/plain; charset=UTF-8; version=2.0'.
func (m *M) Minify(mediatype string, w io.Writer, r io.Reader) error {
	// split the mediatype into mimetype and parameter map, then delegate
	mt, params := parse.Mediatype([]byte(mediatype))
	return m.MinifyMimetype(mt, w, r, params)
}
// MinifyMimetype minifies the content of a Reader and writes it to a Writer (safe for concurrent use).
// It is a lower level version of Minify and requires the mediatype to be split up into mimetype and parameters.
// It is mostly used internally by minifiers because it is faster (no need to convert a byte-slice to string and vice versa).
func (m *M) MinifyMimetype(mimetype []byte, w io.Writer, r io.Reader, params map[string]string) error {
	m.mutex.RLock()
	defer m.mutex.RUnlock()

	// exact-mimetype minifiers take precedence
	minifier, ok := m.literal[string(mimetype)] // string conversion is optimized away
	if ok {
		return minifier.Minify(m, w, r, params)
	}
	// otherwise try minifiers registered by regular expression
	for i := range m.pattern {
		if m.pattern[i].pattern.Match(mimetype) {
			return m.pattern[i].Minify(m, w, r, params)
		}
	}
	return ErrNotExist
}
// Bytes minifies an array of bytes (safe for concurrent use). When an error occurs it returns the original array and the error.
// It returns an error when no such mimetype exists (ErrNotExist) or any error occurred in the minifier function.
func (m *M) Bytes(mediatype string, v []byte) ([]byte, error) {
	in := buffer.NewReader(v)
	out := buffer.NewWriter(make([]byte, 0, len(v)))
	if err := m.Minify(mediatype, out, in); err != nil {
		return v, err
	}
	return out.Bytes(), nil
}
// String minifies a string (safe for concurrent use). When an error occurs it returns the original string and the error.
// It returns an error when no such mimetype exists (ErrNotExist) or any error occurred in the minifier function.
func (m *M) String(mediatype string, v string) (string, error) {
	in := buffer.NewReader([]byte(v))
	out := buffer.NewWriter(make([]byte, 0, len(v)))
	if err := m.Minify(mediatype, out, in); err != nil {
		return v, err
	}
	return string(out.Bytes()), nil
}
// Reader wraps a Reader interface and minifies the stream.
// Errors from the minifier are returned by the reader.
func (m *M) Reader(mediatype string, r io.Reader) io.Reader {
	pr, pw := io.Pipe()
	// minify in the background; the pipe propagates errors to the reader
	go func() {
		err := m.Minify(mediatype, pw, r)
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.Close()
	}()
	return pr
}
// writer makes sure that errors from the minifier are passed down through Close (can be blocking).
type writer struct {
	io.WriteCloser
	wg     sync.WaitGroup // waits for the background minifier goroutine to finish
	closed bool           // whether Close has already been called
	err    error          // error reported by the minifier goroutine
}
// Close must be called when writing has finished. It returns the error from the minifier.
func (z *writer) Close() error {
	if z.closed {
		return nil
	}
	z.closed = true
	// close the underlying writer first so the minifier sees EOF, then wait
	// for the minifier goroutine to record its error
	closeErr := z.WriteCloser.Close()
	z.wg.Wait()
	if z.err != nil {
		return z.err
	}
	return closeErr
}
// Writer wraps a Writer interface and minifies the stream.
// Errors from the minifier are returned by Close on the writer.
// The writer must be closed explicitly.
func (m *M) Writer(mediatype string, w io.Writer) io.WriteCloser {
	pr, pw := io.Pipe()
	z := &writer{WriteCloser: pw}
	z.wg.Add(1)
	// minify in the background; the error is surfaced by (*writer).Close
	go func() {
		defer z.wg.Done()
		defer pr.Close()
		z.err = m.Minify(mediatype, w, pr)
	}()
	return z
}
// responseWriter wraps an http.ResponseWriter and makes sure that errors from the minifier are passed down through Close (can be blocking).
// All writes to the response writer are intercepted and minified on the fly.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
type responseWriter struct {
	http.ResponseWriter
	z         io.Writer // destination set lazily on the first Write (minifier pipe or pass-through)
	m         *M
	mediatype string // initial mediatype guess, derived from the request URI extension
}
// WriteHeader intercepts any header writes and removes the Content-Length
// header, since minification changes the body length.
func (w *responseWriter) WriteHeader(status int) {
	header := w.ResponseWriter.Header()
	header.Del("Content-Length")
	w.ResponseWriter.WriteHeader(status)
}
// Write intercepts any writes to the response writer.
// The first write will extract the Content-Type as the mediatype. Otherwise it falls back to the RequestURI extension.
func (w *responseWriter) Write(b []byte) (int, error) {
	if w.z == nil {
		// first write: decide whether and how to minify
		if mediatype := w.ResponseWriter.Header().Get("Content-Type"); mediatype != "" {
			w.mediatype = mediatype
		}
		if _, params, minifier := w.m.Match(w.mediatype); minifier != nil {
			// pipe all writes through the minifier into the underlying ResponseWriter;
			// the goroutine's error is surfaced by (*responseWriter).Close
			pr, pw := io.Pipe()
			z := &writer{pw, sync.WaitGroup{}, false, nil}
			z.wg.Add(1)
			go func() {
				defer z.wg.Done()
				defer pr.Close()
				if err := minifier(w.m, w.ResponseWriter, pr, params); err != nil {
					z.err = err
				}
			}()
			w.z = z
		} else {
			// no minifier registered for this mediatype: write through unchanged
			w.z = w.ResponseWriter
		}
	}
	return w.z.Write(b)
}
// Close must be called when writing has finished. It returns the error from the minifier.
func (w *responseWriter) Close() error {
	// io.Closer has the same method set as interface{ Close() error };
	// w.z is only a closer when a minifying writer was installed.
	closer, ok := w.z.(io.Closer)
	if !ok {
		return nil
	}
	return closer.Close()
}
// ResponseWriter minifies any writes to the http.ResponseWriter.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
// Minification might be slower than just sending the original file! Caching is advised.
func (m *M) ResponseWriter(w http.ResponseWriter, r *http.Request) *responseWriter {
	// Guess the mediatype from the URI extension; the first Write may
	// override it with the Content-Type header.
	ext := path.Ext(r.RequestURI)
	return &responseWriter{ResponseWriter: w, m: m, mediatype: mime.TypeByExtension(ext)}
}
// Middleware provides a middleware function that minifies content on the fly by intercepting writes to http.ResponseWriter.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
// Minification might be slower than just sending the original file! Caching is advised.
func (m *M) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mw := m.ResponseWriter(w, r)
		next.ServeHTTP(mw, r)
		// Errors from the minifier are deliberately dropped here;
		// use MiddlewareWithError to handle them.
		_ = mw.Close()
	})
}
// MiddlewareWithError provides a middleware function that minifies content on the fly by intercepting writes to http.ResponseWriter. The error function allows handling minification errors.
// http.ResponseWriter loses all functionality such as Pusher, Hijacker, Flusher, ...
// Minification might be slower than just sending the original file! Caching is advised.
func (m *M) MiddlewareWithError(next http.Handler, errorFunc func(w http.ResponseWriter, r *http.Request, err error)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mw := m.ResponseWriter(w, r)
		next.ServeHTTP(mw, r)
		// Close blocks until the minifier goroutine finishes and
		// reports its error, if any, to the supplied callback.
		err := mw.Close()
		if err != nil {
			errorFunc(w, r, err)
		}
	})
}
package svg
import (
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/xml"
minifyXML "github.com/tdewolff/minify/v2/xml"
)
// Token is a single token unit with an attribute value (if given) and hash of the data.
type Token struct {
	xml.TokenType
	Hash    Hash   // perfect hash of Text for known SVG names, 0 when unknown
	Data    []byte // raw token bytes as emitted by the lexer
	Text    []byte // tag or attribute name within Data
	AttrVal []byte // attribute value with quotes stripped and whitespace/entities normalized (attributes only)
	Offset  int    // byte offset in the input; for attributes, points at the value
}
// TokenBuffer is a buffer that allows for token look-ahead.
type TokenBuffer struct {
	r          *parse.Input // input stream, used to compute token offsets
	l          *xml.Lexer   // underlying XML lexer
	buf        []Token      // buffered tokens read ahead of pos
	pos        int          // index of the next token to Shift from buf
	attrBuffer []*Token     // reusable scratch result for Attributes
}
// NewTokenBuffer returns a new TokenBuffer.
func NewTokenBuffer(r *parse.Input, l *xml.Lexer) *TokenBuffer {
	tb := &TokenBuffer{r: r, l: l}
	// Start with room for a small look-ahead window.
	tb.buf = make([]Token, 0, 8)
	return tb
}
// read fills t with the next token from the lexer, normalizing attribute
// values and computing the token's byte offset in the input.
func (z *TokenBuffer) read(t *Token) {
	t.Offset = z.r.Offset()
	t.TokenType, t.Data = z.l.Next()
	t.Text = z.l.Text()
	if t.TokenType == xml.AttributeToken {
		// skip the attribute name, '=' and the opening quote so Offset
		// points at the value
		t.Offset += 1 + len(t.Text) + 1
		t.AttrVal = z.l.AttrVal()
		if len(t.AttrVal) > 1 && (t.AttrVal[0] == '"' || t.AttrVal[0] == '\'') {
			t.Offset++
			t.AttrVal = t.AttrVal[1 : len(t.AttrVal)-1] // quotes will be readded in attribute loop if necessary
			t.AttrVal = parse.ReplaceMultipleWhitespaceAndEntities(t.AttrVal, minifyXML.EntitiesMap, nil)
			t.AttrVal = parse.TrimWhitespace(t.AttrVal)
		}
		t.Hash = ToHash(t.Text)
	} else if t.TokenType == xml.StartTagToken || t.TokenType == xml.EndTagToken {
		t.AttrVal = nil
		t.Hash = ToHash(t.Text)
	} else {
		// other token kinds carry no name to hash
		t.AttrVal = nil
		t.Hash = 0
	}
}
// Peek returns the ith element and possibly does an allocation.
// Peeking past an error will panic.
func (z *TokenBuffer) Peek(pos int) *Token {
	pos += z.pos
	if pos >= len(z.buf) {
		// once an error token is buffered, keep returning it instead of
		// reading further
		if len(z.buf) > 0 && z.buf[len(z.buf)-1].TokenType == xml.ErrorToken {
			return &z.buf[len(z.buf)-1]
		}
		c := cap(z.buf)
		d := len(z.buf) - z.pos
		p := pos - z.pos + 1 // required peek length
		var buf []Token
		if 2*p > c {
			// grow; unread tokens are copied to the front below
			buf = make([]Token, 0, 2*c+p)
		} else {
			// enough capacity: reuse the buffer and compact in place
			buf = z.buf
		}
		// move the d unread tokens to the start (overlapping copy is safe
		// since the destination precedes the source)
		copy(buf[:d], z.buf[z.pos:])
		buf = buf[:p]
		pos -= z.pos
		for i := d; i < p; i++ {
			z.read(&buf[i])
			if buf[i].TokenType == xml.ErrorToken {
				// truncate at the error and return it for this peek
				buf = buf[:i+1]
				pos = i
				break
			}
		}
		z.pos, z.buf = 0, buf
	}
	return &z.buf[pos]
}
// Shift returns the first element and advances position.
func (z *TokenBuffer) Shift() *Token {
	if z.pos >= len(z.buf) {
		// buffer exhausted: read directly into the first slot without
		// growing (requires cap(z.buf) >= 1, guaranteed by NewTokenBuffer)
		t := &z.buf[:1][0]
		z.read(t)
		return t
	}
	t := &z.buf[z.pos]
	z.pos++
	return t
}
// Attributes extracts the given attribute hashes from a tag.
// It returns in the same order pointers to the requested token data or nil.
func (z *TokenBuffer) Attributes(hashes ...Hash) []*Token {
	// count the attribute tokens that immediately follow
	n := 0
	for z.Peek(n).TokenType == xml.AttributeToken {
		n++
	}
	// size the result buffer and clear any previous results
	if cap(z.attrBuffer) < len(hashes) {
		z.attrBuffer = make([]*Token, len(hashes))
	} else {
		z.attrBuffer = z.attrBuffer[:len(hashes)]
		for i := range z.attrBuffer {
			z.attrBuffer[i] = nil
		}
	}
	// match each buffered attribute against the requested hashes
	for i := 0; i < n; i++ {
		attr := &z.buf[z.pos+i]
		for j, hash := range hashes {
			if attr.Hash == hash {
				z.attrBuffer[j] = attr
			}
		}
	}
	return z.attrBuffer
}
package svg
// generated by hasher -type=Hash -file=hash.go; DO NOT EDIT, except for adding more constants to the list and rerun go generate
// uses github.com/tdewolff/hasher
//go:generate hasher -type=Hash -file=hash.go
// Hash defines perfect hashes for a predefined list of strings
// Each value encodes a location in _Hash_text: the upper bits are the byte
// offset and the low 8 bits are the length (see String).
type Hash uint32
// Unique hash definitions to be used instead of strings
// Generated code: each constant is offset<<8 | length into _Hash_text.
const (
	A                            Hash = 0x101   // a
	Alignment_Baseline           Hash = 0x2e12  // alignment-baseline
	BaseProfile                  Hash = 0xb     // baseProfile
	Baseline_Shift               Hash = 0x380e  // baseline-shift
	Buffered_Rendering           Hash = 0x5212  // buffered-rendering
	Clip                         Hash = 0x6404  // clip
	Clip_Path                    Hash = 0x6409  // clip-path
	Clip_Rule                    Hash = 0x8009  // clip-rule
	Color                        Hash = 0xd805  // color
	Color_Interpolation          Hash = 0xd813  // color-interpolation
	Color_Interpolation_Filters  Hash = 0xd81b  // color-interpolation-filters
	Color_Profile                Hash = 0x1f70d // color-profile
	Color_Rendering              Hash = 0x2320f // color-rendering
	ContentScriptType            Hash = 0xa011  // contentScriptType
	ContentStyleType             Hash = 0xb110  // contentStyleType
	Cursor                       Hash = 0xc106  // cursor
	D                            Hash = 0x5901  // d
	Defs                         Hash = 0x35d04 // defs
	Direction                    Hash = 0x30009 // direction
	Display                      Hash = 0x9807  // display
	Dominant_Baseline            Hash = 0x19211 // dominant-baseline
	Enable_Background            Hash = 0x8811  // enable-background
	FeImage                      Hash = 0x14507 // feImage
	Fill                         Hash = 0xc904  // fill
	Fill_Opacity                 Hash = 0x3310c // fill-opacity
	Fill_Rule                    Hash = 0xc909  // fill-rule
	Filter                       Hash = 0xec06  // filter
	Flood_Color                  Hash = 0xd20b  // flood-color
	Flood_Opacity                Hash = 0x1050d // flood-opacity
	Font                         Hash = 0x11404 // font
	Font_Family                  Hash = 0x1140b // font-family
	Font_Size                    Hash = 0x11f09 // font-size
	Font_Size_Adjust             Hash = 0x11f10 // font-size-adjust
	Font_Stretch                 Hash = 0x1370c // font-stretch
	Font_Style                   Hash = 0x14c0a // font-style
	Font_Variant                 Hash = 0x1560c // font-variant
	Font_Weight                  Hash = 0x1620b // font-weight
	ForeignObject                Hash = 0x16d0d // foreignObject
	G                            Hash = 0x1601  // g
	Glyph_Orientation_Horizontal Hash = 0x1d31c // glyph-orientation-horizontal
	Glyph_Orientation_Vertical   Hash = 0x161a  // glyph-orientation-vertical
	Height                       Hash = 0x6c06  // height
	Href                         Hash = 0x14204 // href
	Image                        Hash = 0x17a05 // image
	Image_Rendering              Hash = 0x17a0f // image-rendering
	Kerning                      Hash = 0x1bc07 // kerning
	Letter_Spacing               Hash = 0x90e   // letter-spacing
	Lighting_Color               Hash = 0x1ee0e // lighting-color
	Line                         Hash = 0x3c04  // line
	Marker                       Hash = 0x18906 // marker
	Marker_End                   Hash = 0x1890a // marker-end
	Marker_Mid                   Hash = 0x1a30a // marker-mid
	Marker_Start                 Hash = 0x1ad0c // marker-start
	Mask                         Hash = 0x1b904 // mask
	Metadata                     Hash = 0x1c308 // metadata
	Missing_Glyph                Hash = 0x1cb0d // missing-glyph
	Opacity                      Hash = 0x10b07 // opacity
	Overflow                     Hash = 0x26208 // overflow
	Paint_Order                  Hash = 0x2a20b // paint-order
	Path                         Hash = 0x6904  // path
	Pattern                      Hash = 0x20407 // pattern
	Pointer_Events               Hash = 0x20b0e // pointer-events
	Points                       Hash = 0x22706 // points
	Polygon                      Hash = 0x24107 // polygon
	Polyline                     Hash = 0x24808 // polyline
	PreserveAspectRatio          Hash = 0x25013 // preserveAspectRatio
	Rect                         Hash = 0x30204 // rect
	Rx                           Hash = 0x4f02  // rx
	Ry                           Hash = 0xc602  // ry
	Script                       Hash = 0xf206  // script
	Shape_Rendering              Hash = 0x2180f // shape-rendering
	Solid_Color                  Hash = 0x22c0b // solid-color
	Solid_Opacity                Hash = 0x3600d // solid-opacity
	Stop_Color                   Hash = 0x12d0a // stop-color
	Stop_Opacity                 Hash = 0x37a0c // stop-opacity
	Stroke                       Hash = 0x27406 // stroke
	Stroke_Dasharray             Hash = 0x27410 // stroke-dasharray
	Stroke_Dashoffset            Hash = 0x28411 // stroke-dashoffset
	Stroke_Linecap               Hash = 0x2950e // stroke-linecap
	Stroke_Linejoin              Hash = 0x2ad0f // stroke-linejoin
	Stroke_Miterlimit            Hash = 0x2bc11 // stroke-miterlimit
	Stroke_Opacity               Hash = 0x2cd0e // stroke-opacity
	Stroke_Width                 Hash = 0x2db0c // stroke-width
	Style                        Hash = 0x15105 // style
	Svg                          Hash = 0x2e703 // svg
	Switch                       Hash = 0x2ea06 // switch
	Symbol                       Hash = 0x2f006 // symbol
	Text_Anchor                  Hash = 0x450b  // text-anchor
	Text_Decoration              Hash = 0x710f  // text-decoration
	Text_Rendering               Hash = 0xf70e  // text-rendering
	Type                         Hash = 0x11004 // type
	Unicode_Bidi                 Hash = 0x2f60c // unicode-bidi
	Use                          Hash = 0x30903 // use
	Vector_Effect                Hash = 0x30c0d // vector-effect
	Version                      Hash = 0x31907 // version
	ViewBox                      Hash = 0x32007 // viewBox
	Viewport_Fill                Hash = 0x3280d // viewport-fill
	Viewport_Fill_Opacity        Hash = 0x32815 // viewport-fill-opacity
	Visibility                   Hash = 0x33d0a // visibility
	White_Space                  Hash = 0x2690b // white-space
	Width                        Hash = 0x2e205 // width
	Word_Spacing                 Hash = 0x3470c // word-spacing
	Writing_Mode                 Hash = 0x3530c // writing-mode
	X                            Hash = 0x4701  // x
	X1                           Hash = 0x5002  // x1
	X2                           Hash = 0x32602 // x2
	Xml_Space                    Hash = 0x36d09 // xml:space
	Xmlns                        Hash = 0x37605 // xmlns
	Y                            Hash = 0x1801  // y
	Y1                           Hash = 0x9e02  // y1
	Y2                           Hash = 0xc702  // y2
)
// String returns the hash' name.
func (i Hash) String() string {
	// decode offset (upper bits) and length (low byte) into _Hash_text
	start, n := uint32(i>>8), uint32(i&0xff)
	if uint32(len(_Hash_text)) < start+n {
		return ""
	}
	return _Hash_text[start : start+n]
}
// ToHash returns the hash whose name is s. It returns zero if there is no
// such hash. It is case sensitive.
func ToHash(s []byte) Hash {
	if len(s) == 0 || len(s) > _Hash_maxLen {
		return 0
	}
	// FNV-1a style hash with seed _Hash_hash0
	h := uint32(_Hash_hash0)
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	// first probe: low 16 bits of h select a table slot
	if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				goto NEXT
			}
		}
		return i
	}
NEXT:
	// second probe: high 16 bits of h select the alternate slot
	if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				return 0
			}
		}
		return i
	}
	return 0
}
// Generated hash parameters and packed text; the string segments below
// overlap so that each Hash constant can slice its name out of one blob.
const _Hash_hash0 = 0x3562ea09
const _Hash_maxLen = 28
const _Hash_text = "baseProfiletter-spacinglyph-orientation-verticalignment-base" +
	"line-shiftext-anchorx1buffered-renderingclip-patheightext-de" +
	"corationclip-rulenable-backgroundisplay1contentScriptTypecon" +
	"tentStyleTypecursory2fill-ruleflood-color-interpolation-filt" +
	"erscriptext-renderingflood-opacitypefont-familyfont-size-adj" +
	"ustop-colorfont-stretchrefeImagefont-stylefont-variantfont-w" +
	"eightforeignObjectimage-renderingmarker-endominant-baselinem" +
	"arker-midmarker-startmaskerningmetadatamissing-glyph-orienta" +
	"tion-horizontalighting-color-profilepatternpointer-eventshap" +
	"e-renderingpointsolid-color-renderingpolygonpolylinepreserve" +
	"AspectRatioverflowhite-spacestroke-dasharraystroke-dashoffse" +
	"tstroke-linecapaint-orderstroke-linejoinstroke-miterlimitstr" +
	"oke-opacitystroke-widthsvgswitchsymbolunicode-bidirectionuse" +
	"vector-effectversionviewBox2viewport-fill-opacityvisibilityw" +
	"ord-spacingwriting-modefsolid-opacityxml:spacexmlnstop-opaci" +
	"ty"
// Generated perfect-hash table: 128 slots indexed by the low/high 16 bits of
// the FNV-style hash in ToHash; empty slots are zero.
var _Hash_table = [1 << 7]Hash{
	0x1:  0x9807,  // display
	0x2:  0x22706, // points
	0x3:  0x710f,  // text-decoration
	0x5:  0xc106,  // cursor
	0x6:  0x30903, // use
	0x7:  0xd81b,  // color-interpolation-filters
	0x8:  0xc602,  // ry
	0xb:  0x37a0c, // stop-opacity
	0xc:  0xd805,  // color
	0xd:  0x2f60c, // unicode-bidi
	0xe:  0x2950e, // stroke-linecap
	0xf:  0x3280d, // viewport-fill
	0x10: 0x6c06,  // height
	0x13: 0x1370c, // font-stretch
	0x14: 0x11404, // font
	0x15: 0xa011,  // contentScriptType
	0x16: 0x5002,  // x1
	0x17: 0x5901,  // d
	0x18: 0x1a30a, // marker-mid
	0x19: 0x6409,  // clip-path
	0x1a: 0x2e205, // width
	0x1b: 0x380e,  // baseline-shift
	0x1c: 0x24107, // polygon
	0x1d: 0x2e703, // svg
	0x1e: 0xc909,  // fill-rule
	0x1f: 0x19211, // dominant-baseline
	0x20: 0x2bc11, // stroke-miterlimit
	0x21: 0x2320f, // color-rendering
	0x22: 0x2f006, // symbol
	0x23: 0x2180f, // shape-rendering
	0x25: 0x1f70d, // color-profile
	0x26: 0x3470c, // word-spacing
	0x27: 0x11f10, // font-size-adjust
	0x28: 0x8009,  // clip-rule
	0x2a: 0x8811,  // enable-background
	0x2b: 0xc702,  // y2
	0x2c: 0x1bc07, // kerning
	0x2d: 0x32602, // x2
	0x2e: 0x30009, // direction
	0x2f: 0x2ad0f, // stroke-linejoin
	0x30: 0x3310c, // fill-opacity
	0x31: 0x18906, // marker
	0x33: 0x9e02,  // y1
	0x34: 0x1d31c, // glyph-orientation-horizontal
	0x35: 0x2ea06, // switch
	0x36: 0x1b904, // mask
	0x37: 0x1601,  // g
	0x38: 0x101,   // a
	0x39: 0x6404,  // clip
	0x3a: 0xb,     // baseProfile
	0x3b: 0xb110,  // contentStyleType
	0x3c: 0x1560c, // font-variant
	0x3d: 0x32815, // viewport-fill-opacity
	0x3e: 0x36d09, // xml:space
	0x41: 0x14204, // href
	0x42: 0xc904,  // fill
	0x43: 0x12d0a, // stop-color
	0x44: 0x16d0d, // foreignObject
	0x45: 0x37605, // xmlns
	0x46: 0xf206,  // script
	0x47: 0x4f02,  // rx
	0x48: 0x20407, // pattern
	0x49: 0x161a,  // glyph-orientation-vertical
	0x4a: 0x3600d, // solid-opacity
	0x4b: 0x28411, // stroke-dashoffset
	0x4c: 0x450b,  // text-anchor
	0x4d: 0x27410, // stroke-dasharray
	0x4e: 0x17a05, // image
	0x50: 0x1801,  // y
	0x51: 0x2cd0e, // stroke-opacity
	0x52: 0x25013, // preserveAspectRatio
	0x53: 0xf70e,  // text-rendering
	0x55: 0x2690b, // white-space
	0x56: 0xd813,  // color-interpolation
	0x57: 0x1620b, // font-weight
	0x58: 0x30c0d, // vector-effect
	0x59: 0x3530c, // writing-mode
	0x5a: 0x11f09, // font-size
	0x5b: 0x24808, // polyline
	0x5c: 0x1c308, // metadata
	0x5d: 0x20b0e, // pointer-events
	0x5f: 0x17a0f, // image-rendering
	0x60: 0x2db0c, // stroke-width
	0x61: 0x11004, // type
	0x62: 0x10b07, // opacity
	0x63: 0x4701,  // x
	0x64: 0x1ee0e, // lighting-color
	0x65: 0x3c04,  // line
	0x66: 0x2e12,  // alignment-baseline
	0x68: 0x90e,   // letter-spacing
	0x69: 0xd20b,  // flood-color
	0x6a: 0x1ad0c, // marker-start
	0x6b: 0x30204, // rect
	0x6c: 0x2a20b, // paint-order
	0x6e: 0x1140b, // font-family
	0x6f: 0x5212,  // buffered-rendering
	0x70: 0x1050d, // flood-opacity
	0x71: 0x33d0a, // visibility
	0x72: 0x6904,  // path
	0x73: 0x1cb0d, // missing-glyph
	0x75: 0x14507, // feImage
	0x76: 0x27406, // stroke
	0x77: 0x26208, // overflow
	0x78: 0x31907, // version
	0x79: 0x35d04, // defs
	0x7a: 0x15105, // style
	0x7b: 0xec06,  // filter
	0x7c: 0x14c0a, // font-style
	0x7d: 0x32007, // viewBox
	0x7e: 0x22c0b, // solid-color
	0x7f: 0x1890a, // marker-end
}
package svg
import (
"math"
strconvStdlib "strconv"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/strconv"
)
// PathData represents a path data string.
type PathData struct {
	o           *Minifier
	x, y        float64   // current cursor position
	x0, y0      float64   // start of the current subpath (target of Z/z)
	coords      [][]byte  // raw coordinate byte slices for the current command
	coordFloats []float64 // parsed values matching coords
	cx, cy      float64   // last control point for cubic bezier
	qx, qy      float64   // last control point for quadratic bezier
	state       PathDataState
	curBuffer   []byte // scratch output for the instruction as-is
	altBuffer   []byte // scratch output for the absolute/relative-flipped instruction
	coordBuffer []byte // scratch for float formatting
}
// PathDataState is the state of the current path.
type PathDataState struct {
	cmd            byte // last written command letter, 0 if none
	prevDigit      bool // last output token was a number
	prevDigitIsInt bool // last output number had no '.' or exponent
	prevFlag       bool // last output token was an arc boolean flag
}
// NewPathData returns a new PathData.
func NewPathData(o *Minifier) *PathData {
	// NaN marks the bezier control points as "no previous control point".
	nan := math.NaN()
	return &PathData{o: o, cx: nan, cy: nan, qx: nan, qy: nan}
}
// pathCmds is the set of valid SVG path command letters (absolute and relative).
var pathCmds = map[byte]bool{
	'M': true,
	'm': true,
	'L': true,
	'l': true,
	'H': true,
	'h': true,
	'V': true,
	'v': true,
	'Q': true,
	'q': true,
	'T': true,
	't': true,
	'C': true,
	'c': true,
	'S': true,
	's': true,
	'A': true,
	'a': true,
	'Z': true,
	'z': true,
}
// ShortenPathData takes a full pathdata string and returns a shortened version. The original string is overwritten.
// It parses all commands (M, A, Z, ...) and coordinates (numbers) and calls copyInstruction for each command.
func (p *PathData) ShortenPathData(b []byte) []byte {
	if 100000 < len(b) {
		// prevent extremely long paths for being too costly (OSS-Fuzz)
		return b
	}
	var cmd byte
	p.x, p.y = 0.0, 0.0
	p.coords = p.coords[:0]
	p.coordFloats = p.coordFloats[:0]
	p.state = PathDataState{}
	j := 0 // write cursor into b; output never outruns the read cursor i
	for i := 0; i < len(b); i++ {
		c := b[i]
		if c == ' ' || c == ',' || c == '\n' || c == '\r' || c == '\t' {
			continue
		} else if pathCmds[c] && (cmd == 0 || cmd != c || c == 'M' || c == 'm') { // any command
			// flush the previous command's collected coordinates
			if cmd != 0 {
				j += p.copyInstruction(b[j:], cmd)
			}
			cmd = c
			p.coords = p.coords[:0]
			p.coordFloats = p.coordFloats[:0]
		} else if (cmd == 'A' || cmd == 'a') && (len(p.coordFloats)%7 == 3 || len(p.coordFloats)%7 == 4) {
			// boolean flags for arc command
			if c == '1' {
				p.coords = append(p.coords, b[i:i+1])
				p.coordFloats = append(p.coordFloats, 1.0)
			} else if c == '0' {
				p.coords = append(p.coords, b[i:i+1])
				p.coordFloats = append(p.coordFloats, 0.0)
			} else {
				cmd = 0 // bad format, don't minify
			}
		} else if n := parse.Number(b[i:]); n > 0 {
			// collect both the raw bytes and the parsed value
			f, _ := strconv.ParseFloat(b[i : i+n])
			p.coords = append(p.coords, b[i:i+n])
			p.coordFloats = append(p.coordFloats, f)
			i += n - 1
		}
	}
	if cmd == 0 {
		return b
	}
	// flush the final command
	j += p.copyInstruction(b[j:], cmd)
	return b[:j]
}
// copyInstruction copies pathdata of a single command, but may be comprised of multiple sets for that command. For example, L takes two coordinates, but this function may process 2*N coordinates. Lowercase commands are relative commands, where the coordinates are relative to the previous point. Uppercase commands have absolute coordinates.
// We update p.x and p.y (the current coordinates) according to the commands given. For each set of coordinates we call shortenCurPosInstruction and shortenAltPosInstruction. The former just minifies the coordinates, the latter will inverse the lowercase/uppercase of the command, and see if the coordinates get smaller due to that. The shortest is chosen and copied to b, i.e. b is the destination and is not read from.
func (p *PathData) copyInstruction(b []byte, cmd byte) int {
	n := len(p.coords)
	if n == 0 {
		if cmd == 'Z' || cmd == 'z' {
			// close path: cursor returns to the subpath start
			p.x = p.x0
			p.y = p.y0
			b[0] = 'z'
			return 1
		}
		return 0
	}
	isRelCmd := cmd >= 'a'
	// get new cursor coordinates
	di := 0 // number of coordinates per set for this command
	if (cmd == 'M' || cmd == 'm' || cmd == 'L' || cmd == 'l' || cmd == 'T' || cmd == 't') && n%2 == 0 {
		di = 2
		// reprint M always, as the first pair is a move but subsequent pairs are L
		if cmd == 'M' || cmd == 'm' {
			p.state.cmd = byte(0)
		}
	} else if cmd == 'H' || cmd == 'h' || cmd == 'V' || cmd == 'v' {
		di = 1
	} else if (cmd == 'S' || cmd == 's' || cmd == 'Q' || cmd == 'q') && n%4 == 0 {
		di = 4
	} else if (cmd == 'C' || cmd == 'c') && n%6 == 0 {
		di = 6
	} else if (cmd == 'A' || cmd == 'a') && n%7 == 0 {
		di = 7
	} else {
		// coordinate count does not match the command: don't minify
		return 0
	}
	j := 0
	origCmd := cmd
	for i := 0; i < n; i += di {
		// subsequent coordinate pairs for M are really L
		if i > 0 && (origCmd == 'M' || origCmd == 'm') {
			origCmd = 'L' + (origCmd - 'M')
		}
		cmd = origCmd
		coords := p.coords[i : i+di]
		coordFloats := p.coordFloats[i : i+di]
		// set next coordinate
		var ax, ay float64
		if cmd == 'H' || cmd == 'h' {
			ax = coordFloats[di-1]
			if isRelCmd {
				ax += p.x
			}
			ay = p.y
		} else if cmd == 'V' || cmd == 'v' {
			ax = p.x
			ay = coordFloats[di-1]
			if isRelCmd {
				ay += p.y
			}
		} else {
			ax = coordFloats[di-2]
			ay = coordFloats[di-1]
			if isRelCmd {
				ax += p.x
				ay += p.y
			}
		}
		// switch from C to S whenever possible
		if cmd == 'C' || cmd == 'c' || cmd == 'S' || cmd == 's' {
			// reflect the previous control point (or use the cursor if none)
			if math.IsNaN(p.cx) {
				p.cx, p.cy = p.x, p.y
			} else {
				p.cx, p.cy = 2*p.x-p.cx, 2*p.y-p.cy
			}
			var cp1x, cp1y float64
			cp2x, cp2y := coordFloats[di-4], coordFloats[di-3]
			if isRelCmd {
				cp2x += p.x
				cp2y += p.y
			}
			if cmd == 'C' || cmd == 'c' {
				cp1x, cp1y = coordFloats[di-6], coordFloats[di-5]
				if isRelCmd {
					cp1x += p.x
					cp1y += p.y
				}
				// first control point equals the reflection: S suffices
				if cp1x == p.cx && cp1y == p.cy {
					if isRelCmd {
						cmd = 's'
					} else {
						cmd = 'S'
					}
					coords = coords[2:]
					coordFloats = coordFloats[2:]
				}
			} else {
				cp1x, cp1y = p.cx, p.cy
			}
			// if control points overlap begin/end points, this is a straight line
			// even though if the control points would be along the straight line, we won't minify that as the control points influence the speed along the curve (important for dashes for example)
			// only change to a lines if we start with s or S and none follow
			if (cmd == 'C' || cmd == 'c' || i == 0 && i+di >= n) && (cp1x == p.x && cp1y == p.y || cp1x == ax && cp1y == ay) && (cp2x == p.x && cp2y == p.y || cp2x == ax && cp2y == ay) {
				if isRelCmd {
					cmd = 'l'
				} else {
					cmd = 'L'
				}
				coords = coords[len(coords)-2:]
				coordFloats = coordFloats[len(coordFloats)-2:]
				cp2x, cp2y = math.NaN(), math.NaN()
			}
			p.cx, p.cy = cp2x, cp2y
		} else {
			p.cx, p.cy = math.NaN(), math.NaN()
		}
		// switch from Q to T whenever possible
		if cmd == 'Q' || cmd == 'q' || cmd == 'T' || cmd == 't' {
			// reflect the previous quadratic control point
			if math.IsNaN(p.qx) {
				p.qx, p.qy = p.x, p.y
			} else {
				p.qx, p.qy = 2*p.x-p.qx, 2*p.y-p.qy
			}
			var cpx, cpy float64
			if cmd == 'Q' || cmd == 'q' {
				cpx, cpy = coordFloats[di-4], coordFloats[di-3]
				if isRelCmd {
					cpx += p.x
					cpy += p.y
				}
				// control point equals the reflection: T suffices
				if cpx == p.qx && cpy == p.qy {
					if isRelCmd {
						cmd = 't'
					} else {
						cmd = 'T'
					}
					coords = coords[2:]
					coordFloats = coordFloats[2:]
				}
			} else {
				cpx, cpy = p.qx, p.qy
			}
			// if control point overlaps begin/end points, this is a straight line
			// even if the control point would be along the straight line, we won't minify that as the control point influences the speed along the curve (important for dashes for example)
			// only change to line if we start with t or T and none follow
			if (cmd == 'Q' || cmd == 'q' || i == 0 && i+di >= n) && (cpx == p.x && cpy == p.y || cpx == ax && cpy == ay) {
				if isRelCmd {
					cmd = 'l'
				} else {
					cmd = 'L'
				}
				coords = coords[len(coords)-2:]
				coordFloats = coordFloats[len(coordFloats)-2:]
				cpx, cpy = math.NaN(), math.NaN()
			}
			p.qx, p.qy = cpx, cpy
		} else {
			p.qx, p.qy = math.NaN(), math.NaN()
		}
		// switch from L to H or V whenever possible
		if cmd == 'L' || cmd == 'l' {
			if ax == p.x && ay == p.y {
				// zero-length line: drop the set entirely
				continue
			} else if ax == p.x {
				if isRelCmd {
					cmd = 'v'
				} else {
					cmd = 'V'
				}
				coords = coords[1:]
				coordFloats = coordFloats[1:]
			} else if ay == p.y {
				if isRelCmd {
					cmd = 'h'
				} else {
					cmd = 'H'
				}
				coords = coords[:1]
				coordFloats = coordFloats[:1]
			}
		}
		// make a current and alternated path with absolute/relative altered
		var curState, altState PathDataState
		curState = p.shortenCurPosInstruction(cmd, coords)
		if isRelCmd {
			altState = p.shortenAltPosInstruction(cmd-'a'+'A', coordFloats, p.x, p.y)
		} else {
			altState = p.shortenAltPosInstruction(cmd-'A'+'a', coordFloats, -p.x, -p.y)
		}
		// choose shortest, relative or absolute path?
		if len(p.altBuffer) < len(p.curBuffer) {
			j += copy(b[j:], p.altBuffer)
			p.state = altState
		} else {
			j += copy(b[j:], p.curBuffer)
			p.state = curState
		}
		p.x = ax
		p.y = ay
		if i == 0 && (origCmd == 'M' || origCmd == 'm') {
			// remember the subpath start for Z/z
			p.x0 = p.x
			p.y0 = p.y
		}
	}
	return j
}
// shortenCurPosInstruction only minifies the coordinates.
// It writes into p.curBuffer and returns the resulting writer state without
// committing it to p.state (the caller picks the shorter of two candidates).
func (p *PathData) shortenCurPosInstruction(cmd byte, coords [][]byte) PathDataState {
	state := p.state
	p.curBuffer = p.curBuffer[:0]
	// omit the command letter when it repeats (M is implicitly followed by L)
	if cmd != state.cmd && !(state.cmd == 'M' && cmd == 'L' || state.cmd == 'm' && cmd == 'l') {
		p.curBuffer = append(p.curBuffer, cmd)
		state.cmd = cmd
		state.prevDigit = false
		state.prevDigitIsInt = false
	}
	for i, coord := range coords {
		// Arc has boolean flags that can only be 0 or 1. copyFlag prevents from adding a dot before a zero (instead of a space). However, when the dot already was there, the command is malformed and could make the path longer than before, introducing bugs.
		if (cmd == 'A' || cmd == 'a') && (i%7 == 3 || i%7 == 4) {
			state.copyFlag(&p.curBuffer, coord[0] == '1')
			continue
		}
		coord = minify.Number(coord, p.o.Precision)
		state.copyNumber(&p.curBuffer, coord)
	}
	return state
}
// shortenAltPosInstruction toggles the command between absolute / relative coordinates and minifies the coordinates.
// x and y are the offsets to apply (the cursor position when converting to
// absolute, its negation when converting to relative). Writes into p.altBuffer.
func (p *PathData) shortenAltPosInstruction(cmd byte, coordFloats []float64, x, y float64) PathDataState {
	state := p.state
	p.altBuffer = p.altBuffer[:0]
	// omit the command letter when it repeats (M is implicitly followed by L)
	if cmd != state.cmd && !(state.cmd == 'M' && cmd == 'L' || state.cmd == 'm' && cmd == 'l') {
		p.altBuffer = append(p.altBuffer, cmd)
		state.cmd = cmd
		state.prevDigit = false
		state.prevDigitIsInt = false
	}
	for i, f := range coordFloats {
		// apply the offset to the axes each command actually uses
		if cmd == 'L' || cmd == 'l' || cmd == 'C' || cmd == 'c' || cmd == 'S' || cmd == 's' || cmd == 'Q' || cmd == 'q' || cmd == 'T' || cmd == 't' || cmd == 'M' || cmd == 'm' {
			if i%2 == 0 {
				f += x
			} else {
				f += y
			}
		} else if cmd == 'H' || cmd == 'h' {
			f += x
		} else if cmd == 'V' || cmd == 'v' {
			f += y
		} else if cmd == 'A' || cmd == 'a' {
			if i%7 == 5 {
				f += x
			} else if i%7 == 6 {
				f += y
			} else if i%7 == 3 || i%7 == 4 {
				// arc boolean flags are never offset
				state.copyFlag(&p.altBuffer, f == 1.0)
				continue
			}
		}
		p.coordBuffer = strconvStdlib.AppendFloat(p.coordBuffer[:0], f, 'g', -1, 64)
		coord := minify.Number(p.coordBuffer, p.o.newPrecision)
		state.copyNumber(&p.altBuffer, coord)
	}
	return state
}
// copyNumber will copy a number to the destination buffer, taking into account space or dot insertion to guarantee the shortest pathdata.
func (state *PathDataState) copyNumber(buffer *[]byte, coord []byte) {
	// a separator is only needed when the previous number could swallow this one
	if state.prevDigit && (coord[0] >= '0' && coord[0] <= '9' || coord[0] == '.' && state.prevDigitIsInt) {
		if coord[0] == '0' && !state.prevDigitIsInt {
			*buffer = append(*buffer, '.', '0') // aggresively add dot so subsequent numbers could drop leading space
			// prevDigit stays true and prevDigitIsInt stays false
			return
		}
		*buffer = append(*buffer, ' ')
	}
	state.prevDigit = true
	state.prevDigitIsInt = true
	if len(coord) > 2 && coord[len(coord)-2] == '0' && coord[len(coord)-1] == '0' {
		// rewrite trailing "00" as exponent "e2" in place (same length, and
		// marks the number as non-integer for the separator logic above)
		coord[len(coord)-2] = 'e'
		coord[len(coord)-1] = '2'
		state.prevDigitIsInt = false
	} else {
		for _, c := range coord {
			if c == '.' || c == 'e' || c == 'E' {
				state.prevDigitIsInt = false
				break
			}
		}
	}
	*buffer = append(*buffer, coord...)
	state.prevFlag = false
}
// copyFlag writes an arc boolean flag ('0' or '1') to the buffer, prefixing a
// space unless the previously written token was also a flag.
func (state *PathDataState) copyFlag(buffer *[]byte, flag bool) {
	c := byte('0')
	if flag {
		c = '1'
	}
	if state.prevFlag {
		// consecutive flags need no separator
		*buffer = append(*buffer, c)
	} else {
		*buffer = append(*buffer, ' ', c)
	}
	state.prevFlag = true
	state.prevDigit = false
	state.prevDigitIsInt = false
}
// Package svg minifies SVG1.1 following the specifications at http://www.w3.org/TR/SVG11/.
package svg
import (
"bytes"
"io"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/css"
minifyXML "github.com/tdewolff/minify/v2/xml"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/buffer"
"github.com/tdewolff/parse/v2/xml"
)
// Frequently written byte sequences, preallocated to avoid per-use conversions.
var (
	voidBytes     = []byte("/>")
	isBytes       = []byte("=")
	spaceBytes    = []byte(" ")
	cdataEndBytes = []byte("]]>")
	zeroBytes     = []byte("0")
	cssMimeBytes  = []byte("text/css")
	noneBytes     = []byte("none")
	urlBytes      = []byte("url(")
)
////////////////////////////////////////////////////////////////
// Minifier is an SVG minifier.
type Minifier struct {
	KeepComments bool
	Precision    int  // number of significant digits
	newPrecision int  // precision for new numbers
	Inline       bool // inline SVG (also settable via the "inline" param)
}
// Minify minifies SVG data, it reads from r and writes to w.
func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	// delegate to a zero-value Minifier with default options
	var defaults Minifier
	return defaults.Minify(m, w, r, params)
}
// Minify minifies SVG data, it reads from r and writes to w.
func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	// work on a copy so option normalization does not mutate the caller's Minifier
	tmp := &Minifier{}
	*tmp = *o
	o = tmp
	o.newPrecision = o.Precision
	if o.newPrecision <= 0 || 15 < o.newPrecision {
		o.newPrecision = 15 // minimum number of digits a double can represent exactly
	}
	if !o.Inline {
		o.Inline = params != nil && params["inline"] == "1"
	}
	var tag Hash
	defaultStyleType := cssMimeBytes
	defaultStyleParams := map[string]string(nil)
	defaultInlineStyleParams := map[string]string{"inline": "1"}
	p := NewPathData(o)
	minifyBuffer := buffer.NewWriter(make([]byte, 0, 64))
	attrByteBuffer := make([]byte, 0, 64)
	z := parse.NewInput(r)
	defer z.Restore()
	l := xml.NewLexer(z)
	tb := NewTokenBuffer(z, l)
	for {
		t := *tb.Shift()
		switch t.TokenType {
		case xml.ErrorToken:
			// a zero-length write surfaces any pending writer error
			if _, err := w.Write(nil); err != nil {
				return err
			}
			if l.Err() == io.EOF {
				return nil
			}
			return l.Err()
		case xml.CommentToken:
			if o.KeepComments {
				w.Write(t.Data)
			}
		case xml.DOCTYPEToken:
			// keep the DOCTYPE only when it has an internal subset (ends with ']')
			if len(t.Text) > 0 && t.Text[len(t.Text)-1] == ']' {
				w.Write(t.Data)
			}
		case xml.TextToken:
			t.Data = parse.ReplaceMultipleWhitespaceAndEntities(t.Data, minifyXML.EntitiesMap, nil)
			t.Data = parse.TrimWhitespace(t.Data)
			if tag == Style && len(t.Data) > 0 {
				// minify inline stylesheet content with the registered CSS minifier
				if err := m.MinifyMimetype(defaultStyleType, w, buffer.NewReader(t.Data), defaultStyleParams); err != nil {
					if err != minify.ErrNotExist {
						return minify.UpdateErrorPosition(err, z, t.Offset)
					}
					w.Write(t.Data)
				}
			} else {
				w.Write(t.Data)
			}
		case xml.CDATAToken:
			if tag == Style {
				minifyBuffer.Reset()
				if err := m.MinifyMimetype(defaultStyleType, minifyBuffer, buffer.NewReader(t.Text), defaultStyleParams); err == nil {
					// rebuild the token: 9 == len("<![CDATA[")
					t.Data = append(t.Data[:9], minifyBuffer.Bytes()...)
					t.Text = t.Data[9:]
					t.Data = append(t.Data, cdataEndBytes...)
				} else if err != minify.ErrNotExist {
					return minify.UpdateErrorPosition(err, z, t.Offset)
				}
			}
			// drop the CDATA wrapper when the content can be escaped as plain text
			var useText bool
			if t.Text, useText = xml.EscapeCDATAVal(&attrByteBuffer, t.Text); useText {
				t.Text = parse.ReplaceMultipleWhitespace(t.Text)
				t.Text = parse.TrimWhitespace(t.Text)
				w.Write(t.Text)
			} else {
				w.Write(t.Data)
			}
		case xml.StartTagPIToken:
			// drop processing instructions entirely
			for {
				if t := *tb.Shift(); t.TokenType == xml.StartTagClosePIToken || t.TokenType == xml.ErrorToken {
					break
				}
			}
		case xml.StartTagToken:
			tag = t.Hash
			if tag == Metadata {
				// nil Data marks the tag (and its subtree) for removal
				t.Data = nil
			}
			if t.Data == nil {
				skipTag(tb)
			} else {
				w.Write(t.Data)
			}
		case xml.AttributeToken:
			if t.Text == nil { // data is nil when attribute has been removed
				continue
			}
			attr := t.Hash
			val := t.AttrVal
			if n, m := parse.Dimension(val); n+m == len(val) && attr != Version { // TODO: inefficient, temporary measure
				val, _ = o.shortenDimension(val)
			}
			// drop attributes whose value equals the SVG default
			if attr == Xml_Space && bytes.Equal(val, []byte("preserve")) ||
				tag == Svg && (o.Inline && attr == Xmlns ||
					attr == Version && bytes.Equal(val, []byte("1.1")) ||
					attr == X && bytes.Equal(val, zeroBytes) ||
					attr == Y && bytes.Equal(val, zeroBytes) ||
					attr == PreserveAspectRatio && bytes.Equal(val, []byte("xMidYMid meet")) ||
					attr == BaseProfile && bytes.Equal(val, noneBytes) ||
					attr == ContentScriptType && bytes.Equal(val, []byte("application/ecmascript")) ||
					attr == ContentStyleType && bytes.Equal(val, cssMimeBytes)) ||
				tag == Style && attr == Type && bytes.Equal(val, cssMimeBytes) {
				continue
			}
			w.Write(spaceBytes)
			w.Write(t.Text)
			w.Write(isBytes)
			if tag == Svg && attr == ContentStyleType {
				val = minify.Mediatype(val)
				defaultStyleType = val
			} else if attr == Style {
				// minify style attribute content as inline CSS
				minifyBuffer.Reset()
				if err := m.MinifyMimetype(defaultStyleType, minifyBuffer, buffer.NewReader(val), defaultInlineStyleParams); err == nil {
					val = minifyBuffer.Bytes()
				} else if err != minify.ErrNotExist {
					return minify.UpdateErrorPosition(err, z, t.Offset)
				}
			} else if attr == D {
				val = p.ShortenPathData(val)
			} else if attr == ViewBox {
				// shorten each of the four viewBox numbers, separated by single spaces
				j := 0
				newVal := val[:0]
				for i := 0; i < 4; i++ {
					if i != 0 {
						if j >= len(val) || val[j] != ' ' && val[j] != ',' {
							newVal = append(newVal, val[j:]...)
							break
						}
						newVal = append(newVal, ' ')
						j++
					}
					if dim, n := o.shortenDimension(val[j:]); n > 0 {
						newVal = append(newVal, dim...)
						j += n
					} else {
						newVal = append(newVal, val[j:]...)
						break
					}
				}
				val = newVal
			} else if colorAttrMap[attr] && len(val) > 0 && (len(val) < 5 || !parse.EqualFold(val[:4], urlBytes)) {
				parse.ToLower(val)
				if val[0] == '#' {
					if name, ok := css.ShortenColorHex[string(val)]; ok {
						val = name
					} else if len(val) == 7 && val[1] == val[2] && val[3] == val[4] && val[5] == val[6] {
						// #rrggbb with doubled digits -> #rgb
						val[2] = val[3]
						val[3] = val[5]
						val = val[:4]
					}
				} else if hex, ok := css.ShortenColorName[css.ToHash(val)]; ok {
					val = hex
					// } else if len(val) > 5 && bytes.Equal(val[:4], []byte("rgb(")) && val[len(val)-1] == ')' {
					// TODO: handle rgb(x, y, z) and hsl(x, y, z)
				}
			}
			// prefer single or double quotes depending on what occurs more often in value
			val = xml.EscapeAttrVal(&attrByteBuffer, val)
			w.Write(val)
		case xml.StartTagCloseToken:
			next := tb.Peek(0)
			skipExtra := false
			if next.TokenType == xml.TextToken && parse.IsAllWhitespace(next.Data) {
				next = tb.Peek(1)
				skipExtra = true
			}
			if next.TokenType == xml.EndTagToken {
				// collapse empty tags to single void tag
				tb.Shift()
				if skipExtra {
					tb.Shift()
				}
				w.Write(voidBytes)
			} else {
				w.Write(t.Data)
			}
			if tag == ForeignObject {
				// copy foreignObject content through unminified
				printTag(w, tb, tag)
			}
		case xml.StartTagCloseVoidToken:
			tag = 0
			w.Write(t.Data)
		case xml.EndTagToken:
			tag = 0
			// strip whitespace inside the end tag: keep "</name>" only
			if len(t.Data) > 3+len(t.Text) {
				t.Data[2+len(t.Text)] = '>'
				t.Data = t.Data[:3+len(t.Text)]
			}
			w.Write(t.Data)
		}
	}
}
// shortenDimension minifies a leading number-with-unit in b. It returns the
// minified dimension and the number of bytes of b it consumed, or (b, 0) when
// b does not start with a number.
func (o *Minifier) shortenDimension(b []byte) ([]byte, int) {
	if n, m := parse.Dimension(b); n > 0 {
		unit := b[n : n+m]
		b = minify.Number(b[:n], o.Precision)
		if len(b) != 1 || b[0] != '0' { // a plain zero needs no unit at all
			if m == 2 && unit[0] == 'p' && unit[1] == 'x' {
				unit = nil // px equals the user unit in SVG and can be dropped
			} else if m > 1 { // only percentage is length 1
				parse.ToLower(unit)
			}
			b = append(b, unit...)
		}
		return b, n + m
	}
	return b, 0
}
////////////////////////////////////////////////////////////////
// printTag writes all tokens to w up to, but not including, the end tag that
// matches the given start tag; that end tag is left in the buffer for the
// caller. Nested occurrences of the same tag are tracked with a depth counter.
func printTag(w io.Writer, tb *TokenBuffer, tag Hash) {
	level := 0
	inStartTag := false
	for {
		t := *tb.Peek(0)
		switch t.TokenType {
		case xml.ErrorToken:
			return
		case xml.StartTagToken:
			// remember whether the most recent start tag matches, so a
			// following void-close can undo its level increment
			inStartTag = t.Hash == tag
			if t.Hash == tag {
				level++
			}
		case xml.StartTagCloseVoidToken:
			if inStartTag {
				if level == 0 {
					return
				}
				level-- // a self-closed matching tag opens no nesting scope
			}
		case xml.EndTagToken:
			if t.Hash == tag {
				if level == 0 {
					return // matching end tag: leave it for the caller
				}
				level--
			}
		}
		w.Write(t.Data)
		tb.Shift()
	}
}
// skipTag consumes tokens until the end of the current tag, keeping track of
// nested start tags so that only the matching close ends the skip.
func skipTag(tb *TokenBuffer) {
	depth := 0
	for {
		t := *tb.Shift()
		if t.TokenType == xml.ErrorToken {
			return
		}
		if t.TokenType == xml.EndTagToken || t.TokenType == xml.StartTagCloseVoidToken {
			if depth == 0 {
				return
			}
			depth--
		} else if t.TokenType == xml.StartTagToken {
			depth++
		}
	}
}
// +build gofuzz
package fuzz
import (
"bytes"
"io/ioutil"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/css"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	_ = css.Minify(minify.New(), ioutil.Discard, bytes.NewBuffer(data), nil)
	return 1
}
// +build gofuzz
package fuzz
import (
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	buf := parse.Copy(data) // ignore const-input error for OSS-Fuzz
	minify.DataURI(minify.New(), buf)
	return 1
}
// +build gofuzz
package fuzz
import (
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	prec := 0
	if 0 < len(data) {
		prec = int(data[0]) % 20
		data = data[1:]
	}
	// only inputs made of digits and dots reach Decimal
	for _, c := range data {
		if c != '.' && (c < '0' || '9' < c) {
			return 0
		}
	}
	data = parse.Copy(data) // ignore const-input error for OSS-Fuzz
	data = minify.Decimal(data, prec)
	return 1
}
// +build gofuzz
package fuzz
import (
"bytes"
"io/ioutil"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/html"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	_ = html.Minify(minify.New(), ioutil.Discard, bytes.NewBuffer(data), nil)
	return 1
}
// +build gofuzz
package fuzz
import (
"bytes"
"io/ioutil"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/js"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	_ = js.Minify(minify.New(), ioutil.Discard, bytes.NewBuffer(data), nil)
	return 1
}
// +build gofuzz
package fuzz
import (
"bytes"
"io/ioutil"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/json"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	_ = json.Minify(minify.New(), ioutil.Discard, bytes.NewBuffer(data), nil)
	return 1
}
// +build gofuzz
package fuzz
import (
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	buf := parse.Copy(data) // ignore const-input error for OSS-Fuzz
	minify.Mediatype(buf)
	return 1
}
// +build gofuzz
package fuzz
import (
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	prec := 0
	if 0 < len(data) {
		prec = int(data[0]) % 32
		data = data[1:]
	}
	data = parse.Copy(data) // ignore const-input error for OSS-Fuzz
	data = minify.Number(data, prec)
	return 1
}
// +build gofuzz
package fuzz
import (
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/minify/v2/svg"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	p := svg.NewPathData(&svg.Minifier{})
	buf := parse.Copy(data) // ignore const-input error for OSS-Fuzz
	p.ShortenPathData(buf)
	return 1
}
// +build gofuzz
package fuzz
import (
"bytes"
"io/ioutil"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/svg"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	_ = svg.Minify(minify.New(), ioutil.Discard, bytes.NewBuffer(data), nil)
	return 1
}
// +build gofuzz
package fuzz
import (
"bytes"
"io/ioutil"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/minify/v2/xml"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
	_ = xml.Minify(minify.New(), ioutil.Discard, bytes.NewBuffer(data), nil)
	return 1
}
package xml
import (
"github.com/tdewolff/parse/v2/xml"
)
// Token is a single token unit as produced by the lexer: the raw token bytes,
// the text portion reported by the lexer (e.g. a tag or attribute name), and
// the attribute value if the token is an attribute.
type Token struct {
	xml.TokenType          // kind of token
	Data          []byte   // raw bytes of the whole token
	Text          []byte   // text part as reported by the lexer
	AttrVal       []byte   // attribute value (quotes included when present); nil for non-attribute tokens
}
// TokenBuffer is a buffer that allows for token look-ahead.
type TokenBuffer struct {
	l   *xml.Lexer
	buf []Token // tokens read ahead; buf[pos:] are not yet consumed
	pos int     // index in buf of the next token to shift
}
// NewTokenBuffer returns a new TokenBuffer.
func NewTokenBuffer(l *xml.Lexer) *TokenBuffer {
	tb := &TokenBuffer{l: l}
	tb.buf = make([]Token, 0, 8)
	return tb
}
// read fills t with the next token from the lexer.
func (z *TokenBuffer) read(t *Token) {
	t.TokenType, t.Data = z.l.Next()
	t.Text = z.l.Text()
	t.AttrVal = nil
	if t.TokenType == xml.AttributeToken {
		t.AttrVal = z.l.AttrVal()
	}
}
// Peek returns the ith element and possibly does an allocation.
// Peeking past an error will panic.
func (z *TokenBuffer) Peek(pos int) *Token {
	pos += z.pos
	if pos >= len(z.buf) {
		// once the lexer has errored, keep returning that error token
		if len(z.buf) > 0 && z.buf[len(z.buf)-1].TokenType == xml.ErrorToken {
			return &z.buf[len(z.buf)-1]
		}
		c := cap(z.buf)
		d := len(z.buf) - z.pos // number of buffered tokens not yet shifted
		p := pos - z.pos + 1    // required peek length
		var buf []Token
		if 2*p > c {
			// lookahead exceeds half the capacity: grow the buffer
			buf = make([]Token, 0, 2*c+p)
		} else {
			buf = z.buf
		}
		copy(buf[:d], z.buf[z.pos:]) // move unshifted tokens to the front
		buf = buf[:p]
		pos -= z.pos
		for i := d; i < p; i++ {
			// fill the remainder of the lookahead with fresh tokens
			z.read(&buf[i])
			if buf[i].TokenType == xml.ErrorToken {
				// stop reading at an error; clamp pos onto the error token
				buf = buf[:i+1]
				pos = i
				break
			}
		}
		z.pos, z.buf = 0, buf
	}
	return &z.buf[pos]
}
// Shift returns the first element and advances position.
func (z *TokenBuffer) Shift() *Token {
	if z.pos >= len(z.buf) {
		// nothing buffered: read straight into the first slot, reusing its
		// capacity (buf always has capacity >= 1); buf's length is not changed,
		// so the next Shift without a Peek reuses the same slot
		t := &z.buf[:1][0]
		z.read(t)
		return t
	}
	t := &z.buf[z.pos]
	z.pos++
	return t
}
// Package xml minifies XML1.0 following the specifications at http://www.w3.org/TR/xml/.
package xml
import (
"io"
"github.com/tdewolff/minify/v2"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/xml"
)
var (
	isBytes    = []byte("=")  // written between an attribute name and its value
	spaceBytes = []byte(" ")  // written before each attribute
	voidBytes  = []byte("/>") // written when an empty element collapses to a void tag
)
////////////////////////////////////////////////////////////////
// Minifier is an XML minifier.
type Minifier struct {
	KeepWhitespace bool // keep a trailing whitespace in text before a start or end tag instead of trimming it
}
// Minify minifies XML data, it reads from r and writes to w.
func Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
	var o Minifier
	return o.Minify(m, w, r, params)
}
// Minify minifies XML data, it reads from r and writes to w.
func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]string) error {
	omitSpace := true // on true the next text token must not start with a space
	attrByteBuffer := make([]byte, 0, 64)
	z := parse.NewInput(r)
	defer z.Restore()
	l := xml.NewLexer(z)
	tb := NewTokenBuffer(l)
	for {
		t := *tb.Shift()
		if t.TokenType == xml.CDATAToken {
			// convert CDATA to regular text if smaller
			if len(t.Text) == 0 {
				continue
			} else if text, useText := xml.EscapeCDATAVal(&attrByteBuffer, t.Text); useText {
				t.Data = text
			}
		}
		switch t.TokenType {
		case xml.ErrorToken:
			// surface pending writer errors (zero-length write) before
			// distinguishing a clean EOF from a lexer error
			if _, err := w.Write(nil); err != nil {
				return err
			}
			if l.Err() == io.EOF {
				return nil
			}
			return l.Err()
		case xml.DOCTYPEToken:
			w.Write(t.Data)
		case xml.CDATAToken:
			w.Write(t.Data)
			if len(t.Text) > 0 && parse.IsWhitespace(t.Text[len(t.Text)-1]) {
				omitSpace = true
			}
		case xml.TextToken:
			t.Data = parse.ReplaceMultipleWhitespaceAndEntities(t.Data, EntitiesMap, TextRevEntitiesMap)
			// whitespace removal; trim left
			if omitSpace && parse.IsWhitespace(t.Data[0]) {
				t.Data = t.Data[1:]
			}
			// whitespace removal; trim right
			omitSpace = false
			if len(t.Data) == 0 {
				omitSpace = true
			} else if parse.IsWhitespace(t.Data[len(t.Data)-1]) {
				omitSpace = true
				i := 0
				// look ahead to decide whether the trailing space is needed
				for {
					next := tb.Peek(i)
					// trim if EOF, text token with whitespace begin or block token
					if next.TokenType == xml.ErrorToken {
						t.Data = t.Data[:len(t.Data)-1]
						omitSpace = false
						break
					} else if next.TokenType == xml.TextToken {
						// this only happens when a comment, doctype, cdata startpi tag was in between
						// remove if the text token starts with a whitespace
						if len(next.Data) > 0 && parse.IsWhitespace(next.Data[0]) {
							t.Data = t.Data[:len(t.Data)-1]
							omitSpace = false
						}
						break
					} else if next.TokenType == xml.CDATAToken {
						if len(next.Text) > 0 && parse.IsWhitespace(next.Text[0]) {
							t.Data = t.Data[:len(t.Data)-1]
							omitSpace = false
						}
						break
					} else if next.TokenType == xml.StartTagToken || next.TokenType == xml.EndTagToken {
						if !o.KeepWhitespace {
							t.Data = t.Data[:len(t.Data)-1]
							omitSpace = false
						}
						break
					}
					i++
				}
			}
			w.Write(t.Data)
		case xml.StartTagToken:
			w.Write(t.Data)
			if o.KeepWhitespace {
				omitSpace = false
			}
		case xml.StartTagPIToken:
			w.Write(t.Data)
		case xml.AttributeToken:
			w.Write(spaceBytes)
			w.Write(t.Text)
			w.Write(isBytes)
			if len(t.AttrVal) < 2 || t.AttrVal[0] != '"' || t.AttrVal[len(t.AttrVal)-1] != '"' {
				// unquoted or oddly quoted value: write as-is
				w.Write(t.AttrVal)
			} else {
				val := t.AttrVal[1 : len(t.AttrVal)-1]
				val = parse.ReplaceEntities(val, EntitiesMap, nil)
				val = xml.EscapeAttrVal(&attrByteBuffer, val) // prefer single or double quotes depending on what occurs more often in value
				w.Write(val)
			}
		case xml.StartTagCloseToken:
			next := tb.Peek(0)
			skipExtra := false
			if next.TokenType == xml.TextToken && parse.IsAllWhitespace(next.Data) {
				next = tb.Peek(1)
				skipExtra = true
			}
			if next.TokenType == xml.EndTagToken {
				// collapse empty tags to single void tag
				tb.Shift()
				if skipExtra {
					tb.Shift()
				}
				w.Write(voidBytes)
			} else {
				w.Write(t.Data)
			}
		case xml.StartTagCloseVoidToken:
			w.Write(t.Data)
		case xml.StartTagClosePIToken:
			w.Write(t.Data)
		case xml.EndTagToken:
			if len(t.Data) > 3+len(t.Text) {
				// strip whitespace after the tag name: '</name  >' -> '</name>'
				t.Data[2+len(t.Text)] = '>'
				t.Data = t.Data[:3+len(t.Text)]
			}
			w.Write(t.Data)
			if o.KeepWhitespace {
				omitSpace = false
			}
		}
	}
}
package parse
import (
"encoding/binary"
"fmt"
"io"
"math"
"os"
)
// BinaryReader is a binary big endian file format reader.
type BinaryReader struct {
	Endianness binary.ByteOrder // byte order used by the ReadUint*/ReadInt* methods
	buf        []byte
	pos        uint32
	eof        bool
}

// NewBinaryReader returns a big endian binary file format reader.
func NewBinaryReader(buf []byte) *BinaryReader {
	// positions are tracked as uint32, so larger buffers cannot be addressed
	if math.MaxUint32 < uint(len(buf)) {
		return &BinaryReader{binary.BigEndian, nil, 0, true}
	}
	return &BinaryReader{binary.BigEndian, buf, 0, false}
}

// NewBinaryReaderLE returns a little endian binary file format reader.
func NewBinaryReaderLE(buf []byte) *BinaryReader {
	r := NewBinaryReader(buf)
	r.Endianness = binary.LittleEndian
	return r
}

// Seek set the reader position in the buffer.
func (r *BinaryReader) Seek(pos uint32) error {
	if uint32(len(r.buf)) < pos {
		r.eof = true
		return io.EOF
	}
	r.pos = pos
	r.eof = false // seeking back into range clears a previous EOF
	return nil
}

// Pos returns the reader's position.
func (r *BinaryReader) Pos() uint32 {
	return r.pos
}

// Len returns the remaining length of the buffer.
func (r *BinaryReader) Len() uint32 {
	return uint32(len(r.buf)) - r.pos
}

// EOF returns true if we reached the end-of-file.
func (r *BinaryReader) EOF() bool {
	return r.eof
}

// Read complies with io.Reader.
func (r *BinaryReader) Read(b []byte) (int, error) {
	n := copy(b, r.buf[r.pos:])
	r.pos += uint32(n)
	if r.pos == uint32(len(r.buf)) {
		r.eof = true
		return n, io.EOF
	}
	return n, nil
}

// ReadBytes reads n bytes.
func (r *BinaryReader) ReadBytes(n uint32) []byte {
	if r.eof || uint32(len(r.buf))-r.pos < n {
		r.eof = true
		return nil
	}
	// full slice expression caps the result so an append on it cannot
	// overwrite the reader's underlying buffer
	buf := r.buf[r.pos : r.pos+n : r.pos+n]
	r.pos += n
	return buf
}

// ReadString reads a string of length n.
func (r *BinaryReader) ReadString(n uint32) string {
	return string(r.ReadBytes(n))
}

// ReadByte reads a single byte.
func (r *BinaryReader) ReadByte() byte {
	b := r.ReadBytes(1)
	if b == nil {
		return 0 // zero value on EOF; call EOF() to distinguish
	}
	return b[0]
}

// ReadUint8 reads a uint8.
func (r *BinaryReader) ReadUint8() uint8 {
	return r.ReadByte()
}

// ReadUint16 reads a uint16.
func (r *BinaryReader) ReadUint16() uint16 {
	b := r.ReadBytes(2)
	if b == nil {
		return 0
	}
	return r.Endianness.Uint16(b)
}

// ReadUint32 reads a uint32.
func (r *BinaryReader) ReadUint32() uint32 {
	b := r.ReadBytes(4)
	if b == nil {
		return 0
	}
	return r.Endianness.Uint32(b)
}

// ReadUint64 reads a uint64.
func (r *BinaryReader) ReadUint64() uint64 {
	b := r.ReadBytes(8)
	if b == nil {
		return 0
	}
	return r.Endianness.Uint64(b)
}

// ReadInt8 reads a int8.
func (r *BinaryReader) ReadInt8() int8 {
	return int8(r.ReadByte())
}

// ReadInt16 reads a int16.
func (r *BinaryReader) ReadInt16() int16 {
	return int16(r.ReadUint16())
}

// ReadInt32 reads a int32.
func (r *BinaryReader) ReadInt32() int32 {
	return int32(r.ReadUint32())
}

// ReadInt64 reads a int64.
func (r *BinaryReader) ReadInt64() int64 {
	return int64(r.ReadUint64())
}
// BinaryFileReader is a binary file format reader backed by an *os.File,
// reading either the whole file into memory (chunk == 0) or through a
// chunk-sized window buffer.
type BinaryFileReader struct {
	f          *os.File
	size       uint64 // total file size in bytes
	offset     uint64 // file offset of buf[0]
	Endianness binary.ByteOrder
	buf        []byte
	pos        int // position within buf
}

// NewBinaryFileReader returns a big endian binary file format reader for f.
// When chunk is zero the whole file is read into memory up front.
func NewBinaryFileReader(f *os.File, chunk int) (*BinaryFileReader, error) {
	var buf []byte
	var size uint64
	if chunk == 0 {
		var err error
		if buf, err = io.ReadAll(f); err != nil {
			return nil, err
		}
	} else {
		buf = make([]byte, 0, chunk)
	}
	if info, err := f.Stat(); err != nil {
		return nil, err
	} else {
		size = uint64(info.Size())
	}
	return &BinaryFileReader{
		f:          f,
		size:       size,
		Endianness: binary.BigEndian,
		buf:        buf,
	}, nil
}

// buffer refills buf from the file so that the bytes [pos, pos+length) are buffered.
func (r *BinaryFileReader) buffer(pos, length uint64) error {
	if pos < r.offset || r.offset+uint64(len(r.buf)) < pos+length {
		if math.MaxInt64 < pos {
			return fmt.Errorf("seek position too large")
		} else if _, err := r.f.Seek(int64(pos), 0); err != nil {
			return err
		} else if n, err := r.f.Read(r.buf[:cap(r.buf)]); err != nil {
			return err
		} else {
			// NOTE(review): a single Read may return fewer than `length`
			// bytes (short read); assumes chunk capacity and file reads make
			// that impossible in practice — verify against callers
			r.offset = pos
			r.buf = r.buf[:n]
			r.pos = 0
		}
	}
	return nil
}

// Seek set the reader position in the buffer.
func (r *BinaryFileReader) Seek(pos uint64) error {
	if r.size <= pos {
		return io.EOF
	} else if err := r.buffer(pos, 0); err != nil {
		return err
	}
	r.pos = int(pos - r.offset)
	return nil
}

// Pos returns the reader's position.
func (r *BinaryFileReader) Pos() uint64 {
	return r.offset + uint64(r.pos)
}

// Len returns the remaining length of the buffer.
func (r *BinaryFileReader) Len() uint64 {
	return r.size - r.Pos()
}

// Offset returns the offset of the buffer.
func (r *BinaryFileReader) Offset() uint64 {
	return r.offset
}

// BufferLen returns the length of the buffer.
func (r *BinaryFileReader) BufferLen() int {
	return len(r.buf)
}

// Read complies with io.Reader.
func (r *BinaryFileReader) Read(b []byte) (int, error) {
	if len(b) <= cap(r.buf) {
		// request fits the window: serve it from the buffer
		if err := r.buffer(r.offset+uint64(r.pos), uint64(len(b))); err != nil {
			return 0, err
		}
		n := copy(b, r.buf[r.pos:])
		r.pos += n
		return n, nil
	}
	// read directly from file
	if _, err := r.f.Seek(int64(r.offset)+int64(r.pos), 0); err != nil {
		return 0, err
	}
	n, err := r.f.Read(b)
	// the window is now stale; advance offset past everything consumed
	r.offset += uint64(r.pos + n)
	r.pos = 0
	r.buf = r.buf[:0]
	return n, err
}

// ReadBytes reads n bytes.
func (r *BinaryFileReader) ReadBytes(n int) []byte {
	// NOTE(review): `<` (not `<=`) sends an exact-remainder read through the
	// slow path below — presumably harmless, but verify it is intentional
	if n < len(r.buf)-r.pos {
		b := r.buf[r.pos : r.pos+n]
		r.pos += n
		return b
	}
	b := make([]byte, n)
	if _, err := r.Read(b); err != nil {
		return nil
	}
	return b
}

// ReadString reads a string of length n.
func (r *BinaryFileReader) ReadString(n int) string {
	return string(r.ReadBytes(n))
}

// ReadByte reads a single byte.
func (r *BinaryFileReader) ReadByte() byte {
	b := r.ReadBytes(1)
	if b == nil {
		return 0 // zero value on error
	}
	return b[0]
}

// ReadUint8 reads a uint8.
func (r *BinaryFileReader) ReadUint8() uint8 {
	return r.ReadByte()
}

// ReadUint16 reads a uint16.
func (r *BinaryFileReader) ReadUint16() uint16 {
	b := r.ReadBytes(2)
	if b == nil {
		return 0
	}
	return r.Endianness.Uint16(b)
}

// ReadUint32 reads a uint32.
func (r *BinaryFileReader) ReadUint32() uint32 {
	b := r.ReadBytes(4)
	if b == nil {
		return 0
	}
	return r.Endianness.Uint32(b)
}

// ReadUint64 reads a uint64.
func (r *BinaryFileReader) ReadUint64() uint64 {
	b := r.ReadBytes(8)
	if b == nil {
		return 0
	}
	return r.Endianness.Uint64(b)
}

// ReadInt8 reads a int8.
func (r *BinaryFileReader) ReadInt8() int8 {
	return int8(r.ReadByte())
}

// ReadInt16 reads a int16.
func (r *BinaryFileReader) ReadInt16() int16 {
	return int16(r.ReadUint16())
}

// ReadInt32 reads a int32.
func (r *BinaryFileReader) ReadInt32() int32 {
	return int32(r.ReadUint32())
}

// ReadInt64 reads a int64.
func (r *BinaryFileReader) ReadInt64() int64 {
	return int64(r.ReadUint64())
}
// BinaryWriter is a big endian binary file format writer.
type BinaryWriter struct {
	buf []byte
}

// NewBinaryWriter returns a big endian binary file format writer.
func NewBinaryWriter(buf []byte) *BinaryWriter {
	return &BinaryWriter{buf: buf}
}

// Len returns the buffer's length in bytes.
func (w *BinaryWriter) Len() uint32 {
	return uint32(len(w.buf))
}

// Bytes returns the buffer's bytes.
func (w *BinaryWriter) Bytes() []byte {
	return w.buf
}

// Write complies with io.Writer.
func (w *BinaryWriter) Write(b []byte) (int, error) {
	w.WriteBytes(b)
	return len(b), nil
}

// WriteBytes writes the given bytes to the buffer.
func (w *BinaryWriter) WriteBytes(v []byte) {
	w.buf = append(w.buf, v...)
}

// WriteString writes the given string to the buffer.
func (w *BinaryWriter) WriteString(v string) {
	w.buf = append(w.buf, v...)
}

// WriteByte writes the given byte to the buffer.
func (w *BinaryWriter) WriteByte(v byte) {
	w.buf = append(w.buf, v)
}

// WriteUint8 writes the given uint8 to the buffer.
func (w *BinaryWriter) WriteUint8(v uint8) {
	w.WriteByte(v)
}

// WriteUint16 writes the given uint16 to the buffer in big endian order.
func (w *BinaryWriter) WriteUint16(v uint16) {
	var b [2]byte
	binary.BigEndian.PutUint16(b[:], v)
	w.buf = append(w.buf, b[:]...)
}

// WriteUint32 writes the given uint32 to the buffer in big endian order.
func (w *BinaryWriter) WriteUint32(v uint32) {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], v)
	w.buf = append(w.buf, b[:]...)
}

// WriteUint64 writes the given uint64 to the buffer in big endian order.
func (w *BinaryWriter) WriteUint64(v uint64) {
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], v)
	w.buf = append(w.buf, b[:]...)
}

// WriteInt8 writes the given int8 to the buffer.
func (w *BinaryWriter) WriteInt8(v int8) {
	w.WriteUint8(uint8(v))
}

// WriteInt16 writes the given int16 to the buffer.
func (w *BinaryWriter) WriteInt16(v int16) {
	w.WriteUint16(uint16(v))
}

// WriteInt32 writes the given int32 to the buffer.
func (w *BinaryWriter) WriteInt32(v int32) {
	w.WriteUint32(uint32(v))
}

// WriteInt64 writes the given int64 to the buffer.
func (w *BinaryWriter) WriteInt64(v int64) {
	w.WriteUint64(uint64(v))
}
// BitmapReader is a binary bitmap reader.
type BitmapReader struct {
	buf []byte
	pos uint32 // TODO: to uint64
	eof bool
}

// NewBitmapReader returns a binary bitmap reader.
func NewBitmapReader(buf []byte) *BitmapReader {
	return &BitmapReader{buf, 0, false}
}

// Pos returns the current bit position.
func (r *BitmapReader) Pos() uint32 {
	return r.pos
}

// EOF returns if we reached the buffer's end-of-file.
func (r *BitmapReader) EOF() bool {
	return r.eof
}

// Read reads the next bit. It returns false and sets EOF when the buffer is exhausted.
func (r *BitmapReader) Read() bool {
	// Bit r.pos lives in byte r.pos/8; EOF only when that byte is beyond the
	// buffer. The previous condition used (r.pos+1)/8, which wrongly reported
	// EOF for the last bit (bit 7) of the final byte.
	if r.eof || uint32(len(r.buf)) <= r.pos/8 {
		r.eof = true
		return false
	}
	bit := r.buf[r.pos>>3]&(0x80>>(r.pos&7)) != 0
	r.pos += 1
	return bit
}
// BitmapWriter is a binary bitmap writer.
type BitmapWriter struct {
	buf []byte
	pos uint32
}

// NewBitmapWriter returns a binary bitmap writer.
func NewBitmapWriter(buf []byte) *BitmapWriter {
	return &BitmapWriter{buf, 0}
}

// Len returns the buffer's length in bytes.
func (w *BitmapWriter) Len() uint32 {
	return uint32(len(w.buf))
}

// Bytes returns the buffer's bytes.
func (w *BitmapWriter) Bytes() []byte {
	return w.buf
}

// Write writes the next bit, growing the buffer by one byte when needed.
func (w *BitmapWriter) Write(bit bool) {
	// Bit w.pos lives in byte w.pos/8; only grow when that byte is missing.
	// The previous condition used (w.pos+1)/8, which appended a spurious zero
	// byte when writing the last bit (bit 7) of the current final byte.
	if uint32(len(w.buf)) <= w.pos/8 {
		w.buf = append(w.buf, 0)
	}
	if bit {
		w.buf[w.pos>>3] = w.buf[w.pos>>3] | (0x80 >> (w.pos & 7))
	}
	w.pos += 1
}
package buffer
import (
"io"
"io/ioutil"
)
// nullBuffer is the buffer used for empty input; its single NULL byte doubles as the end-of-buffer sentinel.
var nullBuffer = []byte{0}

// Lexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
type Lexer struct {
	buf     []byte
	pos     int // index in buf
	start   int // index in buf
	err     error
	restore func() // undoes the NULL sentinel overwrite, if one was made
}

// NewLexer returns a new Lexer for a given io.Reader, and uses ioutil.ReadAll to read it into a byte slice.
// If the io.Reader implements Bytes, that is used instead.
// It will append a NULL at the end of the buffer.
func NewLexer(r io.Reader) *Lexer {
	var b []byte
	if r != nil {
		if buffer, ok := r.(interface {
			Bytes() []byte
		}); ok {
			b = buffer.Bytes()
		} else {
			var err error
			b, err = ioutil.ReadAll(r)
			if err != nil {
				return &Lexer{
					buf: nullBuffer,
					err: err,
				}
			}
		}
	}
	return NewLexerBytes(b)
}

// NewLexerBytes returns a new Lexer for a given byte slice, and appends NULL at the end.
// To avoid reallocation, make sure the capacity has room for one more byte.
func NewLexerBytes(b []byte) *Lexer {
	z := &Lexer{
		buf: b,
	}
	n := len(b)
	if n == 0 {
		z.buf = nullBuffer
	} else {
		// Append NULL to buffer, but try to avoid reallocation
		if cap(b) > n {
			// Overwrite next byte but restore when done
			b = b[:n+1]
			c := b[n]
			b[n] = 0
			z.buf = b
			z.restore = func() {
				b[n] = c
			}
		} else {
			z.buf = append(b, 0)
		}
	}
	return z
}

// Restore restores the replaced byte past the end of the buffer by NULL.
func (z *Lexer) Restore() {
	if z.restore != nil {
		z.restore()
		z.restore = nil
	}
}

// Err returns the error returned from io.Reader or io.EOF when the end has been reached.
func (z *Lexer) Err() error {
	return z.PeekErr(0)
}

// PeekErr returns the error at position pos. When pos is zero, this is the same as calling Err().
func (z *Lexer) PeekErr(pos int) error {
	if z.err != nil {
		return z.err
	} else if z.pos+pos >= len(z.buf)-1 {
		// the last byte is the NULL sentinel, not part of the input
		return io.EOF
	}
	return nil
}

// Peek returns the ith byte relative to the end position.
// Peek returns 0 when an error has occurred, Err returns the error.
func (z *Lexer) Peek(pos int) byte {
	pos += z.pos
	// no bounds check: the NULL sentinel terminates scanning loops; indexing
	// further than one byte past the input panics
	return z.buf[pos]
}

// PeekRune returns the rune and rune length of the ith byte relative to the end position.
func (z *Lexer) PeekRune(pos int) (rune, int) {
	// from unicode/utf8
	c := z.Peek(pos)
	if c < 0xC0 || z.Peek(pos+1) == 0 {
		return rune(c), 1
	} else if c < 0xE0 || z.Peek(pos+2) == 0 {
		return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
	} else if c < 0xF0 || z.Peek(pos+3) == 0 {
		return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
	}
	return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
}

// Move advances the position.
func (z *Lexer) Move(n int) {
	z.pos += n
}

// Pos returns a mark to which can be rewinded.
func (z *Lexer) Pos() int {
	return z.pos - z.start
}

// Rewind rewinds the position to the given position.
func (z *Lexer) Rewind(pos int) {
	z.pos = z.start + pos
}

// Lexeme returns the bytes of the current selection.
func (z *Lexer) Lexeme() []byte {
	// full slice expression: appending to the result cannot clobber the buffer
	return z.buf[z.start:z.pos:z.pos]
}

// Skip collapses the position to the end of the selection.
func (z *Lexer) Skip() {
	z.start = z.pos
}

// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
func (z *Lexer) Shift() []byte {
	b := z.buf[z.start:z.pos:z.pos]
	z.start = z.pos
	return b
}

// Offset returns the character position in the buffer.
func (z *Lexer) Offset() int {
	return z.pos
}

// Bytes returns the underlying buffer.
func (z *Lexer) Bytes() []byte {
	// exclude the NULL sentinel
	return z.buf[: len(z.buf)-1 : len(z.buf)-1]
}

// Reset resets position to the underlying buffer.
func (z *Lexer) Reset() {
	z.start = 0
	z.pos = 0
}
package buffer
import "io"
// Reader implements an io.Reader over a byte slice.
type Reader struct {
buf []byte
pos int
}
// NewReader returns a new Reader for a given byte slice.
func NewReader(buf []byte) *Reader {
return &Reader{
buf: buf,
}
}
// Read reads bytes into the given byte slice and returns the number of bytes read and an error if occurred.
func (r *Reader) Read(b []byte) (n int, err error) {
if len(b) == 0 {
return 0, nil
}
if r.pos >= len(r.buf) {
return 0, io.EOF
}
n = copy(b, r.buf[r.pos:])
r.pos += n
return
}
// Bytes returns the underlying byte slice.
func (r *Reader) Bytes() []byte {
return r.buf
}
// Reset resets the position of the read pointer to the beginning of the underlying byte slice.
func (r *Reader) Reset() {
r.pos = 0
}
// Len returns the length of the buffer.
func (r *Reader) Len() int {
return len(r.buf)
}
package buffer
import (
"io"
)
// block is one buffer in the pool, linked to the next block in use.
type block struct {
	buf    []byte
	next   int // index in pool plus one
	active bool
}

// bufferPool recycles byte buffers; a block stays active until the bytes it
// holds have been freed via free.
type bufferPool struct {
	pool []block
	head int // index in pool plus one
	tail int // index in pool plus one
	pos  int // byte pos in tail
}

// swap stores oldBuf in the pool and returns an empty buffer of at least the
// given capacity, reusing an inactive pooled buffer when one is available.
func (z *bufferPool) swap(oldBuf []byte, size int) []byte {
	// find new buffer that can be reused
	swap := -1
	for i := 0; i < len(z.pool); i++ {
		if !z.pool[i].active && size <= cap(z.pool[i].buf) {
			swap = i
			break
		}
	}
	if swap == -1 { // no free buffer found for reuse
		if z.tail == 0 && z.pos >= len(oldBuf) && size <= cap(oldBuf) { // but we can reuse the current buffer!
			z.pos -= len(oldBuf)
			return oldBuf[:0]
		}
		// allocate new
		z.pool = append(z.pool, block{make([]byte, 0, size), 0, true})
		swap = len(z.pool) - 1
	}
	newBuf := z.pool[swap].buf
	// put current buffer into pool
	z.pool[swap] = block{oldBuf, 0, true}
	if z.head != 0 {
		z.pool[z.head-1].next = swap + 1
	}
	z.head = swap + 1
	if z.tail == 0 {
		z.tail = swap + 1
	}
	return newBuf[:0]
}

// free marks n more bytes as consumed, deactivating pooled blocks that are
// fully consumed so they become reusable.
func (z *bufferPool) free(n int) {
	z.pos += n
	// move the tail over to next buffers
	for z.tail != 0 && z.pos >= len(z.pool[z.tail-1].buf) {
		z.pos -= len(z.pool[z.tail-1].buf)
		newTail := z.pool[z.tail-1].next
		z.pool[z.tail-1].active = false // after this, any thread may pick up the inactive buffer, so it can't be used anymore
		z.tail = newTail
	}
	if z.tail == 0 {
		z.head = 0
	}
}

// StreamLexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
type StreamLexer struct {
	r         io.Reader
	err       error
	pool      bufferPool
	buf       []byte
	start     int // index in buf
	pos       int // index in buf
	prevStart int
	free      int // bytes marked free but not yet returned to the pool
}

// NewStreamLexer returns a new StreamLexer for a given io.Reader with a 4kB estimated buffer size.
// If the io.Reader implements Bytes, that buffer is used instead.
func NewStreamLexer(r io.Reader) *StreamLexer {
	return NewStreamLexerSize(r, defaultBufSize)
}

// NewStreamLexerSize returns a new StreamLexer for a given io.Reader and estimated required buffer size.
// If the io.Reader implements Bytes, that buffer is used instead.
func NewStreamLexerSize(r io.Reader, size int) *StreamLexer {
	// if reader has the bytes in memory already, use that instead
	if buffer, ok := r.(interface {
		Bytes() []byte
	}); ok {
		return &StreamLexer{
			err: io.EOF,
			buf: buffer.Bytes(),
		}
	}
	return &StreamLexer{
		r:   r,
		buf: make([]byte, 0, size),
	}
}

// read returns the byte at pos after growing the buffer and reading in new data.
func (z *StreamLexer) read(pos int) byte {
	if z.err != nil {
		return 0
	}
	// free unused bytes
	z.pool.free(z.free)
	z.free = 0
	// get new buffer
	c := cap(z.buf)
	p := pos - z.start + 1
	if 2*p > c { // if the token is larger than half the buffer, increase buffer size
		c = 2*c + p
	}
	d := len(z.buf) - z.start
	buf := z.pool.swap(z.buf[:z.start], c)
	copy(buf[:d], z.buf[z.start:]) // copy the left-overs (unfinished token) from the old buffer
	// read in new data for the rest of the buffer
	var n int
	for pos-z.start >= d && z.err == nil {
		n, z.err = z.r.Read(buf[d:cap(buf)])
		d += n
	}
	pos -= z.start
	z.pos -= z.start
	z.start, z.buf = 0, buf[:d]
	if pos >= d {
		return 0
	}
	return z.buf[pos]
}

// Err returns the error returned from io.Reader. It may still return valid bytes for a while though.
func (z *StreamLexer) Err() error {
	if z.err == io.EOF && z.pos < len(z.buf) {
		// EOF has been reached but unconsumed bytes remain
		return nil
	}
	return z.err
}

// Free frees up bytes of length n from previously shifted tokens.
// Each call to Shift should at one point be followed by a call to Free with a length returned by ShiftLen.
func (z *StreamLexer) Free(n int) {
	z.free += n
}

// Peek returns the ith byte relative to the end position and possibly does an allocation.
// Peek returns zero when an error has occurred, Err returns the error.
// TODO: inline function
func (z *StreamLexer) Peek(pos int) byte {
	pos += z.pos
	if uint(pos) < uint(len(z.buf)) { // uint for BCE
		return z.buf[pos]
	}
	return z.read(pos)
}

// PeekRune returns the rune and rune length of the ith byte relative to the end position.
func (z *StreamLexer) PeekRune(pos int) (rune, int) {
	// from unicode/utf8
	c := z.Peek(pos)
	if c < 0xC0 {
		return rune(c), 1
	} else if c < 0xE0 {
		return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
	} else if c < 0xF0 {
		return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
	}
	return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
}

// Move advances the position.
func (z *StreamLexer) Move(n int) {
	z.pos += n
}

// Pos returns a mark to which can be rewinded.
func (z *StreamLexer) Pos() int {
	return z.pos - z.start
}

// Rewind rewinds the position to the given position.
func (z *StreamLexer) Rewind(pos int) {
	z.pos = z.start + pos
}

// Lexeme returns the bytes of the current selection.
func (z *StreamLexer) Lexeme() []byte {
	return z.buf[z.start:z.pos]
}

// Skip collapses the position to the end of the selection.
func (z *StreamLexer) Skip() {
	z.start = z.pos
}

// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
// It also returns the number of bytes we moved since the last call to Shift. This can be used in calls to Free.
func (z *StreamLexer) Shift() []byte {
	if z.pos > len(z.buf) { // make sure we peeked at least as much as we shift
		z.read(z.pos - 1)
	}
	b := z.buf[z.start:z.pos]
	z.start = z.pos
	return b
}

// ShiftLen returns the number of bytes moved since the last call to ShiftLen. This can be used in calls to Free because it takes into account multiple Shifts or Skips.
func (z *StreamLexer) ShiftLen() int {
	n := z.start - z.prevStart
	z.prevStart = z.start
	return n
}
package buffer
import (
"io"
)
// Writer implements an io.Writer over a byte slice.
type Writer struct {
	buf    []byte // destination slice; only reallocated when expand is set
	err    error  // sticky error reported by Close
	expand bool   // whether Write may grow buf beyond its capacity
}

// NewWriter returns a new Writer for a given byte slice.
func NewWriter(buf []byte) *Writer {
	return &Writer{buf: buf, expand: true}
}

// NewStaticWriter returns a new Writer for a given byte slice. It does not reallocate and expand the byte-slice.
func NewStaticWriter(buf []byte) *Writer {
	return &Writer{buf: buf, expand: false}
}

// Write writes bytes from the given byte slice and returns the number of bytes written and an error if occurred. When err != nil, n == 0.
func (w *Writer) Write(b []byte) (int, error) {
	end := len(w.buf)
	if cap(w.buf) < end+len(b) {
		if !w.expand {
			// static writer: refuse to grow and remember the error for Close
			w.err = io.EOF
			return 0, io.EOF
		}
		grown := make([]byte, end, 2*cap(w.buf)+len(b))
		copy(grown, w.buf)
		w.buf = grown
	}
	w.buf = w.buf[:end+len(b)]
	return copy(w.buf[end:], b), nil
}

// Len returns the length of the underlying byte slice.
func (w *Writer) Len() int {
	return len(w.buf)
}

// Bytes returns the underlying byte slice.
func (w *Writer) Bytes() []byte {
	return w.buf
}

// Reset empties and reuses the current buffer. Subsequent writes will overwrite the buffer, so any reference to the underlying slice is invalidated after this call.
func (w *Writer) Reset() {
	w.buf = w.buf[:0]
}

// Close returns the last error.
func (w *Writer) Close() error {
	return w.err
}
// Package parse contains a collection of parsers for various formats in its subpackages.
package parse
import (
"bytes"
"encoding/base64"
"errors"
"strconv"
)
var (
	dataSchemeBytes = []byte("data:")      // data URI scheme prefix
	base64Bytes     = []byte("base64")     // base64 marker inside a data URI mediatype
	textMimeBytes   = []byte("text/plain") // default mediatype when a data URI omits one
)

// ErrBadDataURI is returned by DataURI when the byte slice does not start with 'data:' or is too short.
var ErrBadDataURI = errors.New("not a data URI")
// Number returns the number of bytes that parse as a number of the regex format (+|-)?([0-9]+(\.[0-9]+)?|\.[0-9]+)((e|E)(+|-)?[0-9]+)?.
// It returns 0 when b does not start with a valid number.
func Number(b []byte) int {
	n := len(b)
	if n == 0 {
		return 0
	}
	pos := 0
	// optional sign; a lone sign is not a number
	if b[pos] == '+' || b[pos] == '-' {
		pos++
		if n <= pos {
			return 0
		}
	}
	// integer part
	intPart := '0' <= b[pos] && b[pos] <= '9'
	for intPart && pos < n && '0' <= b[pos] && b[pos] <= '9' {
		pos++
	}
	// fractional part; a dot must be followed by a digit
	if pos < n && b[pos] == '.' {
		pos++
		if pos < n && '0' <= b[pos] && b[pos] <= '9' {
			for pos < n && '0' <= b[pos] && b[pos] <= '9' {
				pos++
			}
		} else if intPart {
			// the dot may belong to the next token
			return pos - 1
		} else {
			return 0
		}
	} else if !intPart {
		return 0
	}
	// optional exponent; an incomplete exponent may belong to the next token
	end := pos
	if pos < n && (b[pos] == 'e' || b[pos] == 'E') {
		pos++
		if pos < n && (b[pos] == '+' || b[pos] == '-') {
			pos++
		}
		if n <= pos || b[pos] < '0' || '9' < b[pos] {
			return end
		}
		for pos < n && '0' <= b[pos] && b[pos] <= '9' {
			pos++
		}
	}
	return pos
}
// Dimension parses a byte-slice and returns the length of the number and its unit.
// The unit length is 0 when no unit follows the number.
func Dimension(b []byte) (int, int) {
	num := Number(b)
	if num == 0 || num == len(b) {
		return num, 0
	}
	c := b[num]
	if c == '%' {
		return num, 1
	}
	if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
		return num, 0
	}
	// consume the alphabetic unit name
	end := num + 1
	for end < len(b) {
		if u := b[end]; !('a' <= u && u <= 'z' || 'A' <= u && u <= 'Z') {
			break
		}
		end++
	}
	return num, end - num
}
// Mediatype parses a given mediatype and splits the mimetype from the parameters.
// It works similar to mime.ParseMediaType but is faster.
// The returned mimetype is a subslice of b; params is nil when there are no parameters.
func Mediatype(b []byte) ([]byte, map[string]string) {
	// skip leading spaces
	i := 0
	for i < len(b) && b[i] == ' ' {
		i++
	}
	b = b[i:]
	n := len(b)
	mimetype := b
	var params map[string]string
	for i := 3; i < n; i++ { // mimetype is at least three characters long
		if b[i] == ';' || b[i] == ' ' {
			mimetype = b[:i]
			if b[i] == ' ' {
				i++ // space
				for i < n && b[i] == ' ' {
					i++
				}
				// spaces not followed by a semicolon end the mediatype
				if n <= i || b[i] != ';' {
					break
				}
			}
			params = map[string]string{}
			s := string(b)
		PARAM:
			i++ // semicolon
			for i < n && s[i] == ' ' {
				i++
			}
			start := i
			// scan the parameter key up to '=', ';' or space
			for i < n && s[i] != '=' && s[i] != ';' && s[i] != ' ' {
				i++
			}
			key := s[start:i]
			for i < n && s[i] == ' ' {
				i++
			}
			if i < n && s[i] == '=' {
				i++
				for i < n && s[i] == ' ' {
					i++
				}
				start = i
				// scan the parameter value up to ';' or space
				for i < n && s[i] != ';' && s[i] != ' ' {
					i++
				}
			} else {
				start = i // no '=': the key maps to the empty string
			}
			params[key] = s[start:i]
			for i < n && s[i] == ' ' {
				i++
			}
			if i < n && s[i] == ';' {
				goto PARAM
			}
			break
		}
	}
	return mimetype, params
}
// DataURI parses the given data URI and returns the mediatype, data and ok.
// Base64 payloads are decoded; other payloads are URL-decoded.
func DataURI(dataURI []byte) ([]byte, []byte, error) {
	if len(dataURI) > 5 && bytes.Equal(dataURI[:5], dataSchemeBytes) {
		dataURI = dataURI[5:]
		inBase64 := false
		var mediatype []byte
		i := 0 // start of the current mediatype component
		for j := 0; j < len(dataURI); j++ {
			c := dataURI[j]
			if c == '=' || c == ';' || c == ',' {
				if c != '=' && bytes.Equal(TrimWhitespace(dataURI[i:j]), base64Bytes) {
					// a ';base64' flag (not a 'base64=...' parameter)
					if len(mediatype) > 0 {
						mediatype = mediatype[:len(mediatype)-1] // drop the semicolon preceding 'base64'
					}
					inBase64 = true
					i = j
				} else if c != ',' {
					mediatype = append(append(mediatype, TrimWhitespace(dataURI[i:j])...), c)
					i = j + 1
				} else {
					mediatype = append(mediatype, TrimWhitespace(dataURI[i:j])...)
				}
				if c == ',' {
					// an empty or parameter-only mediatype defaults to text/plain
					if len(mediatype) == 0 || mediatype[0] == ';' {
						mediatype = textMimeBytes
					}
					data := dataURI[j+1:]
					if inBase64 {
						decoded := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
						n, err := base64.StdEncoding.Decode(decoded, data)
						if err != nil {
							return nil, nil, err
						}
						data = decoded[:n]
					} else {
						data = DecodeURL(data)
					}
					return mediatype, data, nil
				}
			}
		}
	}
	return nil, nil, ErrBadDataURI
}
// QuoteEntity parses the given byte slice and returns the quote that got matched (' or ") and its entity length.
// TODO: deprecated
func QuoteEntity(b []byte) (quote byte, n int) {
	if len(b) < 5 || b[0] != '&' {
		return 0, 0
	}
	if b[1] == '#' {
		if b[2] == 'x' {
			// hexadecimal numeric entity: 0x22 is '"', 0x27 is '\''
			i := 3
			for i < len(b) && b[i] == '0' {
				i++ // skip leading zeros
			}
			if i+2 < len(b) && b[i] == '2' && b[i+2] == ';' {
				switch b[i+1] {
				case '2':
					return '"', i + 3
				case '7':
					return '\'', i + 3
				}
			}
		} else {
			// decimal numeric entity: 34 is '"', 39 is '\''
			i := 2
			for i < len(b) && b[i] == '0' {
				i++ // skip leading zeros
			}
			if i+2 < len(b) && b[i] == '3' && b[i+2] == ';' {
				switch b[i+1] {
				case '4':
					return '"', i + 3
				case '9':
					return '\'', i + 3
				}
			}
		}
	} else if 6 <= len(b) && b[5] == ';' {
		// named entities &quot; and &apos;
		if bytes.Equal(b[1:5], []byte("quot")) {
			return '"', 6
		} else if bytes.Equal(b[1:5], []byte("apos")) {
			return '\'', 6
		}
	}
	return 0, 0
}
// ReplaceMultipleWhitespace replaces character series of space, \n, \t, \f, \r into a single space or newline (when the serie contained a \n or \r).
// The replacement happens in-place; the returned slice is a subslice of b.
func ReplaceMultipleWhitespace(b []byte) []byte {
	j, k := 0, 0 // j is write position, k is start of next text section
	for i := 0; i < len(b); i++ {
		if IsWhitespace(b[i]) {
			start := i
			newline := IsNewline(b[i])
			i++
			for ; i < len(b) && IsWhitespace(b[i]); i++ {
				if IsNewline(b[i]) {
					newline = true
				}
			}
			// collapse the whole run into a single character at its start
			if newline {
				b[start] = '\n'
			} else {
				b[start] = ' '
			}
			if 1 < i-start { // more than one whitespace
				if j == 0 {
					j = start + 1
				} else {
					j += copy(b[j:], b[k:start+1])
				}
				k = i
			}
		}
	}
	if j == 0 {
		return b
	} else if j == 1 { // only if starts with whitespace
		b[k-1] = b[0]
		return b[k-1:]
	} else if k < len(b) {
		j += copy(b[j:], b[k:])
	}
	return b[:j]
}
// replaceEntities will replace in b at index i, assuming that b[i] == '&' and that i+3<len(b). The returned int will be the last character of the entity, so that the next iteration can safely do i++ to continue and not miss any entitites.
func replaceEntities(b []byte, i int, entitiesMap map[string][]byte, revEntitiesMap map[byte][]byte) ([]byte, int) {
	const MaxEntityLength = 31 // longest HTML entity: CounterClockwiseContourIntegral
	var r []byte
	j := i + 1
	if b[j] == '#' {
		j++
		if b[j] == 'x' {
			// hexadecimal numeric entity
			j++
			c := 0
			for ; j < len(b) && (b[j] >= '0' && b[j] <= '9' || b[j] >= 'a' && b[j] <= 'f' || b[j] >= 'A' && b[j] <= 'F'); j++ {
				if b[j] <= '9' {
					c = c<<4 + int(b[j]-'0')
				} else if b[j] <= 'F' {
					c = c<<4 + int(b[j]-'A') + 10
				} else if b[j] <= 'f' {
					c = c<<4 + int(b[j]-'a') + 10
				}
			}
			if j <= i+3 || 10000 <= c {
				// no hex digits, or value too large to shorten
				return b, j - 1
			}
			if c < 128 {
				// ASCII: replace with the raw byte
				r = []byte{byte(c)}
			} else {
				// non-ASCII: rewrite the hexadecimal entity as the shorter decimal form
				r = append(r, '&', '#')
				r = strconv.AppendInt(r, int64(c), 10)
				r = append(r, ';')
			}
		} else {
			// decimal numeric entity; only ASCII values (< 128) are replaced
			c := 0
			for ; j < len(b) && c < 128 && b[j] >= '0' && b[j] <= '9'; j++ {
				c = c*10 + int(b[j]-'0')
			}
			if j <= i+2 || 128 <= c {
				return b, j - 1
			}
			r = []byte{byte(c)}
		}
	} else {
		// named entity: scan up to the semicolon and look it up
		for ; j < len(b) && j-i-1 <= MaxEntityLength && b[j] != ';'; j++ {
		}
		if j <= i+1 || len(b) <= j {
			return b, j - 1
		}
		var ok bool
		r, ok = entitiesMap[string(b[i+1:j])]
		if !ok {
			return b, j
		}
	}
	// j is at semicolon
	n := j + 1 - i
	if j < len(b) && b[j] == ';' && 2 < n {
		if len(r) == 1 {
			// NOTE(review): revEntitiesMap presumably maps bytes that must stay
			// encoded to their preferred entity form — confirm against callers.
			if q, ok := revEntitiesMap[r[0]]; ok {
				if len(q) == len(b[i:j+1]) && bytes.Equal(q, b[i:j+1]) {
					// already in the preferred form; leave b untouched
					return b, j
				}
				r = q
			} else if r[0] == '&' {
				// check if for example &amp; is followed by something that could potentially be an entity
				k := j + 1
				if k < len(b) && (b[k] >= '0' && b[k] <= '9' || b[k] >= 'a' && b[k] <= 'z' || b[k] >= 'A' && b[k] <= 'Z' || b[k] == '#') {
					return b, k
				}
			}
		}
		// splice the replacement into b and shrink it accordingly
		copy(b[i:], r)
		copy(b[i+len(r):], b[j+1:])
		b = b[:len(b)-n+len(r)]
		return b, i + len(r) - 1
	}
	return b, i
}
// ReplaceEntities replaces all occurrences of entites (such as &quot;) to their respective unencoded bytes.
func ReplaceEntities(b []byte, entitiesMap map[string][]byte, revEntitiesMap map[byte][]byte) []byte {
	for i := 0; i < len(b); i++ {
		// an entity needs at least '&' plus three more bytes
		if b[i] != '&' || len(b) <= i+3 {
			continue
		}
		b, i = replaceEntities(b, i, entitiesMap, revEntitiesMap)
	}
	return b
}
// ReplaceMultipleWhitespaceAndEntities is a combination of ReplaceMultipleWhitespace and ReplaceEntities. It is faster than executing both sequentially.
func ReplaceMultipleWhitespaceAndEntities(b []byte, entitiesMap map[string][]byte, revEntitiesMap map[byte][]byte) []byte {
	j, k := 0, 0 // j is write position, k is start of next text section
	for i := 0; i < len(b); i++ {
		if IsWhitespace(b[i]) {
			start := i
			newline := IsNewline(b[i])
			i++
			for ; i < len(b) && IsWhitespace(b[i]); i++ {
				if IsNewline(b[i]) {
					newline = true
				}
			}
			// collapse the whitespace run into a single space or newline
			if newline {
				b[start] = '\n'
			} else {
				b[start] = ' '
			}
			if 1 < i-start { // more than one whitespace
				if j == 0 {
					j = start + 1
				} else {
					j += copy(b[j:], b[k:start+1])
				}
				k = i
			}
		}
		// i may sit on the byte right after a whitespace run; the bounds check keeps b[i] safe
		if i+3 < len(b) && b[i] == '&' {
			b, i = replaceEntities(b, i, entitiesMap, revEntitiesMap)
		}
	}
	if j == 0 {
		return b
	} else if j == 1 { // only if starts with whitespace
		b[k-1] = b[0]
		return b[k-1:]
	} else if k < len(b) {
		j += copy(b[j:], b[k:])
	}
	return b[:j]
}
// URLEncodingTable is a charmap for which characters need escaping in the URL encoding scheme
var URLEncodingTable = [256]bool{
	// ASCII
	// 0x00-0x1F: control characters are always escaped
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, false, true, true, true, true, true, false, // space, ", #, $, %, &
	false, false, false, true, true, false, false, true, // +, comma, /
	false, false, false, false, false, false, false, false,
	false, false, true, true, true, true, true, true, // :, ;, <, =, >, ?
	true, false, false, false, false, false, false, false, // @
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, true, true, true, true, false, // [, \, ], ^
	true, false, false, false, false, false, false, false, // `
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, true, true, true, false, true, // {, |, }, DEL
	// non-ASCII
	// 0x80-0xFF: every non-ASCII byte is escaped
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
}
// DataURIEncodingTable is a charmap for which characters need escaping in the Data URI encoding scheme
// Escape only non-printable characters, unicode and %, #, &.
// IE11 additionally requires encoding of \, [, ], ", <, >, `, {, }, |, ^ which is not required by Chrome, Firefox, Opera, Edge, Safari, Yandex
// To pass the HTML validator, restricted URL characters must be escaped: non-printable characters, space, <, >, #, %, "
var DataURIEncodingTable = [256]bool{
	// ASCII
	// 0x00-0x1F: control characters are always escaped
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, false, true, true, false, true, true, false, // space, ", #, %, &
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, true, false, true, false, // <, >
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, true, true, true, true, false, // [, \, ], ^
	true, false, false, false, false, false, false, false, // `
	false, false, false, false, false, false, false, false,
	false, false, false, false, false, false, false, false,
	false, false, false, true, true, true, false, true, // {, |, }, DEL
	// non-ASCII
	// 0x80-0xFF: every non-ASCII byte is escaped
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
	true, true, true, true, true, true, true, true,
}
// EncodeURL encodes bytes using the URL encoding scheme.
// Encoding happens in-place where possible; b may be reallocated to make room.
func EncodeURL(b []byte, table [256]bool) []byte {
	const hexChars = "0123456789ABCDEF"
	for i := 0; i < len(b); i++ {
		c := b[i]
		if !table[c] {
			continue
		}
		// grow by two bytes and shift the tail right to make room for %XX
		b = append(b, 0, 0)
		copy(b[i+3:], b[i+1:])
		b[i] = '%'
		b[i+1] = hexChars[c>>4]
		b[i+2] = hexChars[c&0xf]
	}
	return b
}
// DecodeURL decodes an URL encoded using the URL encoding scheme.
// Only ASCII (%00-%7F) escapes are decoded, in-place; '+' becomes a space.
func DecodeURL(b []byte) []byte {
	for i := 0; i < len(b); i++ {
		if b[i] == '+' {
			b[i] = ' '
			continue
		}
		if b[i] != '%' || len(b) <= i+2 {
			continue
		}
		v, ok := 0, true
		for _, c := range b[i+1 : i+3] {
			switch {
			case '0' <= c && c <= '9':
				v = v<<4 + int(c-'0')
			case 'a' <= c && c <= 'f':
				v = v<<4 + int(c-'a') + 10
			case 'A' <= c && c <= 'F':
				v = v<<4 + int(c-'A') + 10
			default:
				ok = false
			}
			if !ok {
				break
			}
		}
		// leave non-hex and non-ASCII escapes untouched
		if ok && v < 128 {
			b[i] = byte(v)
			b = append(b[:i+1], b[i+3:]...)
		}
	}
	return b
}
package css
// generated by hasher -type=Hash -file=hash.go; DO NOT EDIT, except for adding more constants to the list and rerun go generate
// uses github.com/tdewolff/hasher
//go:generate hasher -type=Hash -file=hash.go
// Hash defines perfect hashes for a predefined list of strings
type Hash uint32

// Unique hash definitions to be used instead of strings
// Each value encodes (offset << 8) | length into _Hash_text; see String.
const (
	Document  Hash = 0x8    // document
	Font_Face Hash = 0x809  // font-face
	Keyframes Hash = 0x1109 // keyframes
	Media     Hash = 0x2105 // media
	Page      Hash = 0x2604 // page
	Supports  Hash = 0x1908 // supports
)
// String returns the hash' name.
func (i Hash) String() string {
	start := uint32(i >> 8) // byte offset into _Hash_text
	n := uint32(i & 0xff)   // length of the name
	if start+n > uint32(len(_Hash_text)) {
		return "" // not a valid Hash value
	}
	return _Hash_text[start : start+n]
}
// ToHash returns the hash whose name is s. It returns zero if there is no
// such hash. It is case sensitive.
func ToHash(s []byte) Hash {
	if len(s) == 0 || len(s) > _Hash_maxLen {
		return 0
	}
	// FNV-style hash (xor then multiply by the FNV prime) over s
	h := uint32(_Hash_hash0)
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= 16777619
	}
	// first probe: low bits of the hash select the bucket
	if i := _Hash_table[h&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				goto NEXT
			}
		}
		return i
	}
NEXT:
	// second probe: high bits of the hash select an alternative bucket
	if i := _Hash_table[(h>>16)&uint32(len(_Hash_table)-1)]; int(i&0xff) == len(s) {
		t := _Hash_text[i>>8 : i>>8+i&0xff]
		for i := 0; i < len(s); i++ {
			if t[i] != s[i] {
				return 0
			}
		}
		return i
	}
	return 0
}
const _Hash_hash0 = 0x9acb0442 // hash seed used by ToHash
const _Hash_maxLen = 9         // length of the longest hashed string

// _Hash_text concatenates all hashed strings; Hash values index into it.
const _Hash_text = "documentfont-facekeyframesupportsmediapage"

// _Hash_table maps hash buckets to Hash values; see ToHash.
var _Hash_table = [1 << 3]Hash{
	0x1: 0x2604, // page
	0x2: 0x2105, // media
	0x3: 0x809, // font-face
	0x5: 0x1109, // keyframes
	0x6: 0x1908, // supports
	0x7: 0x8, // document
}
// Package css is a CSS3 lexer and parser following the specifications at http://www.w3.org/TR/css-syntax-3/.
package css
// TODO: \uFFFD replacement character for NULL bytes in strings for example, or atleast don't end the string early
import (
"bytes"
"io"
"strconv"
"github.com/tdewolff/parse/v2"
)
// TokenType determines the type of token, eg. a number or a semicolon.
type TokenType uint32

// TokenType values.
const (
	ErrorToken TokenType = iota // extra token when errors occur
	IdentToken
	FunctionToken // rgb( rgba( ...
	AtKeywordToken // @abc
	HashToken // #abc
	StringToken
	BadStringToken
	URLToken
	BadURLToken
	DelimToken // any unmatched character
	NumberToken // 5
	PercentageToken // 5%
	DimensionToken // 5em
	UnicodeRangeToken // U+554A
	IncludeMatchToken // ~=
	DashMatchToken // |=
	PrefixMatchToken // ^=
	SuffixMatchToken // $=
	SubstringMatchToken // *=
	ColumnToken // ||
	WhitespaceToken // space \t \r \n \f
	CDOToken // <!--
	CDCToken // -->
	ColonToken // :
	SemicolonToken // ;
	CommaToken // ,
	LeftBracketToken // [
	RightBracketToken // ]
	LeftParenthesisToken // (
	RightParenthesisToken // )
	LeftBraceToken // {
	RightBraceToken // }
	CommentToken // extra token for comments
	EmptyToken
	CustomPropertyNameToken
	CustomPropertyValueToken
)

// tokenTypeNames holds the name of each TokenType, indexed by its value.
// The order must match the iota order of the constants above.
var tokenTypeNames = [...]string{
	"Error",
	"Ident",
	"Function",
	"AtKeyword",
	"Hash",
	"String",
	"BadString",
	"URL",
	"BadURL",
	"Delim",
	"Number",
	"Percentage",
	"Dimension",
	"UnicodeRange",
	"IncludeMatch",
	"DashMatch",
	"PrefixMatch",
	"SuffixMatch",
	"SubstringMatch",
	"Column",
	"Whitespace",
	"CDO",
	"CDC",
	"Colon",
	"Semicolon",
	"Comma",
	"LeftBracket",
	"RightBracket",
	"LeftParenthesis",
	"RightParenthesis",
	"LeftBrace",
	"RightBrace",
	"Comment",
	"Empty",
	"CustomPropertyName",
	"CustomPropertyValue",
}

// String returns the string representation of a TokenType.
func (tt TokenType) String() string {
	if int(tt) < len(tokenTypeNames) {
		return tokenTypeNames[tt]
	}
	return "Invalid(" + strconv.Itoa(int(tt)) + ")"
}
////////////////////////////////////////////////////////////////
// Lexer is the state for the lexer.
type Lexer struct {
	r *parse.Input // buffered input the tokens are read from
}

// NewLexer returns a new Lexer for a given io.Reader.
func NewLexer(r *parse.Input) *Lexer {
	return &Lexer{
		r: r,
	}
}

// Err returns the error encountered during lexing, this is often io.EOF but also other errors can be returned.
func (l *Lexer) Err() error {
	return l.r.Err()
}
// Next returns the next Token. It returns ErrorToken when an error was encountered. Using Err() one can retrieve the error message.
// Dispatches on the first byte; any character no consume* accepts becomes a DelimToken.
func (l *Lexer) Next() (TokenType, []byte) {
	switch l.r.Peek(0) {
	case ' ', '\t', '\n', '\r', '\f':
		l.r.Move(1)
		for l.consumeWhitespace() {
		}
		return WhitespaceToken, l.r.Shift()
	case ':':
		l.r.Move(1)
		return ColonToken, l.r.Shift()
	case ';':
		l.r.Move(1)
		return SemicolonToken, l.r.Shift()
	case ',':
		l.r.Move(1)
		return CommaToken, l.r.Shift()
	case '(', ')', '[', ']', '{', '}':
		if t := l.consumeBracket(); t != ErrorToken {
			return t, l.r.Shift()
		}
	case '#':
		if l.consumeHashToken() {
			return HashToken, l.r.Shift()
		}
	case '"', '\'':
		if t := l.consumeString(); t != ErrorToken {
			return t, l.r.Shift()
		}
	case '.', '+':
		if t := l.consumeNumeric(); t != ErrorToken {
			return t, l.r.Shift()
		}
	case '-':
		// '-' may start a number, an identifier, '-->' or a custom property name
		if t := l.consumeNumeric(); t != ErrorToken {
			return t, l.r.Shift()
		} else if t := l.consumeIdentlike(); t != ErrorToken {
			return t, l.r.Shift()
		} else if l.consumeCDCToken() {
			return CDCToken, l.r.Shift()
		} else if l.consumeCustomVariableToken() {
			return CustomPropertyNameToken, l.r.Shift()
		}
	case '@':
		if l.consumeAtKeywordToken() {
			return AtKeywordToken, l.r.Shift()
		}
	case '$', '*', '^', '~':
		if t := l.consumeMatch(); t != ErrorToken {
			return t, l.r.Shift()
		}
	case '/':
		if l.consumeComment() {
			return CommentToken, l.r.Shift()
		}
	case '<':
		if l.consumeCDOToken() {
			return CDOToken, l.r.Shift()
		}
	case '\\':
		if t := l.consumeIdentlike(); t != ErrorToken {
			return t, l.r.Shift()
		}
	case 'u', 'U':
		// may start a unicode-range token such as U+554A
		if l.consumeUnicodeRangeToken() {
			return UnicodeRangeToken, l.r.Shift()
		} else if t := l.consumeIdentlike(); t != ErrorToken {
			return t, l.r.Shift()
		}
	case '|':
		if t := l.consumeMatch(); t != ErrorToken {
			return t, l.r.Shift()
		} else if l.consumeColumnToken() {
			return ColumnToken, l.r.Shift()
		}
	case 0:
		// a zero byte with a pending error signals the end of the input
		if l.r.Err() != nil {
			return ErrorToken, nil
		}
	default:
		if t := l.consumeNumeric(); t != ErrorToken {
			return t, l.r.Shift()
		} else if t := l.consumeIdentlike(); t != ErrorToken {
			return t, l.r.Shift()
		}
	}
	// can't be rune because consumeIdentlike consumes that as an identifier
	l.r.Move(1)
	return DelimToken, l.r.Shift()
}
////////////////////////////////////////////////////////////////
/*
The following functions follow the railroad diagrams in http://www.w3.org/TR/css3-syntax/
*/
// consumeByte consumes the next byte when it equals c.
func (l *Lexer) consumeByte(c byte) bool {
	if l.r.Peek(0) != c {
		return false
	}
	l.r.Move(1)
	return true
}
// consumeComment consumes a /* ... */ comment.
// An unterminated comment runs to the end of the input and is still accepted.
func (l *Lexer) consumeComment() bool {
	if l.r.Peek(0) != '/' || l.r.Peek(1) != '*' {
		return false
	}
	l.r.Move(2)
	for {
		c := l.r.Peek(0)
		if c == 0 && l.r.Err() != nil {
			break // EOF before '*/'
		} else if c == '*' && l.r.Peek(1) == '/' {
			l.r.Move(2)
			return true
		}
		l.r.Move(1)
	}
	return true
}
// consumeNewline consumes a single newline, treating \r\n as one newline.
func (l *Lexer) consumeNewline() bool {
	switch l.r.Peek(0) {
	case '\n', '\f':
		l.r.Move(1)
		return true
	case '\r':
		if l.r.Peek(1) == '\n' {
			l.r.Move(2) // \r\n counts as a single newline
		} else {
			l.r.Move(1)
		}
		return true
	}
	return false
}
// consumeWhitespace consumes a single whitespace byte.
func (l *Lexer) consumeWhitespace() bool {
	switch l.r.Peek(0) {
	case ' ', '\t', '\n', '\r', '\f':
		l.r.Move(1)
		return true
	}
	return false
}
// consumeDigit consumes a single decimal digit.
func (l *Lexer) consumeDigit() bool {
	if c := l.r.Peek(0); '0' <= c && c <= '9' {
		l.r.Move(1)
		return true
	}
	return false
}
// consumeHexDigit consumes a single hexadecimal digit in either case.
func (l *Lexer) consumeHexDigit() bool {
	c := l.r.Peek(0)
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		l.r.Move(1)
		return true
	}
	return false
}
// consumeEscape consumes a backslash escape: '\' followed by up to six hex
// digits (with one optional trailing whitespace), a multi-byte rune, or any
// other non-newline character. Rewinds and returns false for '\' before a
// newline or at EOF.
func (l *Lexer) consumeEscape() bool {
	if l.r.Peek(0) != '\\' {
		return false
	}
	mark := l.r.Pos()
	l.r.Move(1)
	if l.consumeNewline() {
		// a backslash before a newline is not an escape
		l.r.Rewind(mark)
		return false
	} else if l.consumeHexDigit() {
		// up to five more hex digits after the first
		for k := 1; k < 6; k++ {
			if !l.consumeHexDigit() {
				break
			}
		}
		l.consumeWhitespace() // one whitespace may terminate the hex escape
		return true
	} else {
		c := l.r.Peek(0)
		if c >= 0xC0 {
			// multi-byte UTF-8 start byte: consume the full rune
			_, n := l.r.PeekRune(0)
			l.r.Move(n)
			return true
		} else if c == 0 && l.r.Err() != nil {
			// backslash at EOF
			l.r.Rewind(mark)
			return false
		}
	}
	l.r.Move(1)
	return true
}
// consumeIdentToken consumes an identifier: an optional '-', then a name-start
// character (letter, '_', non-ASCII byte or escape), then any number of name
// characters. Rewinds and returns false when no identifier starts here.
func (l *Lexer) consumeIdentToken() bool {
	mark := l.r.Pos()
	if l.r.Peek(0) == '-' {
		l.r.Move(1)
	}
	c := l.r.Peek(0)
	if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c >= 0x80) {
		if c != '\\' || !l.consumeEscape() {
			l.r.Rewind(mark)
			return false
		}
	} else {
		l.r.Move(1)
	}
	// remaining name characters also allow digits and '-'
	for {
		c := l.r.Peek(0)
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
			if c != '\\' || !l.consumeEscape() {
				break
			}
		} else {
			l.r.Move(1)
		}
	}
	return true
}
// support custom variables, https://www.w3.org/TR/css-variables-1/
// consumeCustomVariableToken consumes a '--name' custom property name.
func (l *Lexer) consumeCustomVariableToken() bool {
	// expect to be on a '-'
	l.r.Move(1)
	if l.r.Peek(0) != '-' {
		l.r.Move(-1)
		return false
	}
	// the second '-' is consumed by consumeIdentToken as the ident prefix
	if !l.consumeIdentToken() {
		l.r.Move(-1)
		return false
	}
	return true
}
// consumeAtKeywordToken consumes an at-keyword such as @media.
func (l *Lexer) consumeAtKeywordToken() bool {
	// expect to be on an '@'
	l.r.Move(1)
	if l.consumeIdentToken() {
		return true
	}
	l.r.Move(-1)
	return false
}
// consumeHashToken consumes '#' followed by at least one name character or
// escape. Rewinds and returns false when nothing valid follows the '#'.
func (l *Lexer) consumeHashToken() bool {
	// expect to be on a '#'
	mark := l.r.Pos()
	l.r.Move(1)
	c := l.r.Peek(0)
	if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
		if c != '\\' || !l.consumeEscape() {
			l.r.Rewind(mark)
			return false
		}
	} else {
		l.r.Move(1)
	}
	// consume the remaining name characters
	for {
		c := l.r.Peek(0)
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_' || c == '-' || c >= 0x80) {
			if c != '\\' || !l.consumeEscape() {
				break
			}
		} else {
			l.r.Move(1)
		}
	}
	return true
}
// consumeNumberToken consumes a number: an optional sign, digits with an
// optional fraction, and an optional exponent. A trailing '.' or 'e' that
// cannot complete the number is left for the next token.
func (l *Lexer) consumeNumberToken() bool {
	mark := l.r.Pos()
	c := l.r.Peek(0)
	if c == '+' || c == '-' {
		l.r.Move(1)
	}
	firstDigit := l.consumeDigit()
	if firstDigit {
		for l.consumeDigit() {
		}
	}
	if l.r.Peek(0) == '.' {
		l.r.Move(1)
		if l.consumeDigit() {
			for l.consumeDigit() {
			}
		} else if firstDigit {
			// . could belong to the next token
			l.r.Move(-1)
			return true
		} else {
			l.r.Rewind(mark)
			return false
		}
	} else if !firstDigit {
		// neither digits nor a fraction: not a number
		l.r.Rewind(mark)
		return false
	}
	mark = l.r.Pos()
	c = l.r.Peek(0)
	if c == 'e' || c == 'E' {
		l.r.Move(1)
		c = l.r.Peek(0)
		if c == '+' || c == '-' {
			l.r.Move(1)
		}
		if !l.consumeDigit() {
			// e could belong to next token
			l.r.Rewind(mark)
			return true
		}
		for l.consumeDigit() {
		}
	}
	return true
}
// consumeUnicodeRangeToken consumes a unicode-range token such as U+554A,
// U+0-7F or U+4??. At most six hex digits (or trailing '?') are allowed on
// either side; otherwise the lexer rewinds and returns false.
func (l *Lexer) consumeUnicodeRangeToken() bool {
	c := l.r.Peek(0)
	if (c != 'u' && c != 'U') || l.r.Peek(1) != '+' {
		return false
	}
	mark := l.r.Pos()
	l.r.Move(2)
	// consume up to 6 hexDigits
	k := 0
	for l.consumeHexDigit() {
		k++
	}
	// either a minus or a question mark or the end is expected
	if l.consumeByte('-') {
		if k == 0 || 6 < k {
			l.r.Rewind(mark)
			return false
		}
		// consume another up to 6 hexDigits
		if l.consumeHexDigit() {
			k = 1
			for l.consumeHexDigit() {
				k++
			}
		} else {
			// a '-' must be followed by hex digits
			l.r.Rewind(mark)
			return false
		}
	} else if l.consumeByte('?') {
		// could be filled up to 6 characters with question marks or else regular hexDigits
		k++
		for l.consumeByte('?') {
			k++
		}
	}
	if k == 0 || 6 < k {
		l.r.Rewind(mark)
		return false
	}
	return true
}
// consumeColumnToken consumes the column combinator '||'.
func (l *Lexer) consumeColumnToken() bool {
	if l.r.Peek(0) != '|' || l.r.Peek(1) != '|' {
		return false
	}
	l.r.Move(2)
	return true
}
// consumeCDOToken consumes the comment-open delimiter '<!--'.
func (l *Lexer) consumeCDOToken() bool {
	if l.r.Peek(0) != '<' || l.r.Peek(1) != '!' || l.r.Peek(2) != '-' || l.r.Peek(3) != '-' {
		return false
	}
	l.r.Move(4)
	return true
}
// consumeCDCToken consumes the comment-close delimiter '-->'.
func (l *Lexer) consumeCDCToken() bool {
	if l.r.Peek(0) != '-' || l.r.Peek(1) != '-' || l.r.Peek(2) != '>' {
		return false
	}
	l.r.Move(3)
	return true
}
////////////////////////////////////////////////////////////////
// consumeMatch consumes any MatchToken.
func (l *Lexer) consumeMatch() TokenType {
	if l.r.Peek(1) != '=' {
		return ErrorToken
	}
	var tt TokenType
	switch l.r.Peek(0) {
	case '~':
		tt = IncludeMatchToken
	case '|':
		tt = DashMatchToken
	case '^':
		tt = PrefixMatchToken
	case '$':
		tt = SuffixMatchToken
	case '*':
		tt = SubstringMatchToken
	default:
		return ErrorToken
	}
	l.r.Move(2)
	return tt
}
// consumeBracket consumes any bracket token.
func (l *Lexer) consumeBracket() TokenType {
	var tt TokenType
	switch l.r.Peek(0) {
	case '(':
		tt = LeftParenthesisToken
	case ')':
		tt = RightParenthesisToken
	case '[':
		tt = LeftBracketToken
	case ']':
		tt = RightBracketToken
	case '{':
		tt = LeftBraceToken
	case '}':
		tt = RightBraceToken
	default:
		return ErrorToken
	}
	l.r.Move(1)
	return tt
}
// consumeNumeric consumes NumberToken, PercentageToken or DimensionToken.
func (l *Lexer) consumeNumeric() TokenType {
	if !l.consumeNumberToken() {
		return ErrorToken
	}
	if l.consumeByte('%') {
		return PercentageToken
	}
	if l.consumeIdentToken() {
		return DimensionToken
	}
	return NumberToken
}
// consumeString consumes a string and may return BadStringToken when a newline is encountered.
// An unterminated string at EOF still yields a StringToken.
func (l *Lexer) consumeString() TokenType {
	// assume to be on " or '
	delim := l.r.Peek(0)
	l.r.Move(1)
	for {
		c := l.r.Peek(0)
		if c == 0 && l.r.Err() != nil {
			break // EOF ends the string
		} else if c == '\n' || c == '\r' || c == '\f' {
			l.r.Move(1)
			return BadStringToken
		} else if c == delim {
			l.r.Move(1)
			break
		} else if c == '\\' {
			if !l.consumeEscape() {
				// either newline or EOF after backslash
				l.r.Move(1)
				l.consumeNewline() // an escaped newline continues the string
			}
		} else {
			l.r.Move(1)
		}
	}
	return StringToken
}
// consumeUnquotedURL consumes the body of an unquoted url(...) up to the
// closing parenthesis or EOF. Returns false on characters that are invalid in
// an unquoted URL (quotes, '(', space or control characters) unless escaped.
func (l *Lexer) consumeUnquotedURL() bool {
	for {
		c := l.r.Peek(0)
		if c == 0 && l.r.Err() != nil || c == ')' {
			break
		} else if c == '"' || c == '\'' || c == '(' || c == '\\' || c == ' ' || c <= 0x1F || c == 0x7F {
			if c != '\\' || !l.consumeEscape() {
				return false
			}
		} else {
			l.r.Move(1)
		}
	}
	return true
}
// consumeRemnantsBadURL consumes bytes of a BadUrlToken so that normal tokenization may continue.
// It stops after a closing parenthesis or when a read error (EOF) occurs.
func (l *Lexer) consumeRemnantsBadURL() {
	for {
		if l.consumeByte(')') || l.r.Err() != nil {
			return
		}
		if !l.consumeEscape() {
			l.r.Move(1)
		}
	}
}
// consumeIdentlike consumes IdentToken, FunctionToken or UrlToken.
func (l *Lexer) consumeIdentlike() TokenType {
	if l.consumeIdentToken() {
		if l.r.Peek(0) != '(' {
			return IdentToken
		} else if !parse.EqualFold(bytes.Replace(l.r.Lexeme(), []byte{'\\'}, nil, -1), []byte{'u', 'r', 'l'}) {
			// any function other than url(; backslash escapes are ignored when comparing
			l.r.Move(1)
			return FunctionToken
		}
		l.r.Move(1)
		// consume url
		for l.consumeWhitespace() {
		}
		if c := l.r.Peek(0); c == '"' || c == '\'' {
			// quoted URL
			if l.consumeString() == BadStringToken {
				l.consumeRemnantsBadURL()
				return BadURLToken
			}
		} else if !l.consumeUnquotedURL() && !l.consumeWhitespace() { // if unquoted URL fails due to encountering whitespace, continue
			l.consumeRemnantsBadURL()
			return BadURLToken
		}
		for l.consumeWhitespace() {
		}
		// EOF without ')' is tolerated; anything else is a bad URL
		if !l.consumeByte(')') && l.r.Err() != io.EOF {
			l.consumeRemnantsBadURL()
			return BadURLToken
		}
		return URLToken
	}
	return ErrorToken
}
package css
import (
"bytes"
"fmt"
"strconv"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/buffer"
)
// reusable byte slices to avoid allocating common output tokens
var wsBytes = []byte(" ")
var endBytes = []byte("}")
var emptyBytes = []byte("")
// GrammarType determines the type of grammar.
type GrammarType uint32

// GrammarType values.
const (
	ErrorGrammar GrammarType = iota // extra token when errors occur
	CommentGrammar
	AtRuleGrammar
	BeginAtRuleGrammar
	EndAtRuleGrammar
	QualifiedRuleGrammar
	BeginRulesetGrammar
	EndRulesetGrammar
	DeclarationGrammar
	TokenGrammar
	CustomPropertyGrammar
)

// grammarTypeNames holds the name of each GrammarType, indexed by its value.
// The order must match the iota order of the constants above.
var grammarTypeNames = [...]string{
	"Error",
	"Comment",
	"AtRule",
	"BeginAtRule",
	"EndAtRule",
	"QualifiedRule",
	"BeginRuleset",
	"EndRuleset",
	"Declaration",
	"Token",
	"CustomProperty",
}

// String returns the string representation of a GrammarType.
func (tt GrammarType) String() string {
	if int(tt) < len(grammarTypeNames) {
		return grammarTypeNames[tt]
	}
	return "Invalid(" + strconv.Itoa(int(tt)) + ")"
}
////////////////////////////////////////////////////////////////

// State is the state function the parser currently is in.
// The parse* methods on Parser are States; the parser keeps a stack of them.
type State func(*Parser) GrammarType
// Token is a single TokenType and its associated data.
type Token struct {
	TokenType
	Data []byte
}

// String returns a readable form of the token, e.g. Ident('foo').
func (t Token) String() string {
	return t.TokenType.String() + "('" + string(t.Data) + "')"
}
// Parser is the state for the parser.
type Parser struct {
	l      *Lexer
	state  []State // stack of state functions; the top one parses the next grammar unit
	err    string  // parse error message; empty when no parse error occurred
	errPos int     // input offset of the parse error
	buf    []Token // token values of the last grammar unit; see Values
	level  int     // NOTE(review): nesting depth, presumably — not used within this view; confirm
	data   []byte  // data of the token last returned by Next
	tt     TokenType // type of the token last returned by Next
	keepWS bool // when set, popToken does not skip whitespace tokens
	prevWS bool // whether popToken skipped whitespace before the current token
	prevEnd bool // whether a pending '}' must be emitted before reading further
	prevComment bool // whether popToken skipped a comment before the current token
}
// NewParser returns a new CSS parser from an io.Reader. isInline specifies whether this is an inline style attribute.
func NewParser(r *parse.Input, isInline bool) *Parser {
	p := &Parser{
		l:     NewLexer(r),
		state: make([]State, 0, 4),
	}
	// inline style attributes contain a declaration list instead of a stylesheet
	start := (*Parser).parseStylesheet
	if isInline {
		start = (*Parser).parseDeclarationList
	}
	p.state = append(p.state, start)
	return p
}
// HasParseError returns true if there is a parse error (and not a read error).
func (p *Parser) HasParseError() bool {
	return p.err != ""
}

// Err returns the error encountered during parsing, this is often io.EOF but also other errors can be returned.
// A parse error takes precedence over the lexer's read error.
func (p *Parser) Err() error {
	if p.err != "" {
		r := buffer.NewReader(p.l.r.Bytes())
		return parse.NewError(r, p.errPos, p.err)
	}
	return p.l.Err()
}
// Next returns the next Grammar. It returns ErrorGrammar when an error was encountered. Using Err() one can retrieve the error message.
func (p *Parser) Next() (GrammarType, TokenType, []byte) {
	p.err = ""
	if p.prevEnd {
		// emit the pending '}' before reading new tokens
		p.tt, p.data = RightBraceToken, endBytes
		p.prevEnd = false
	} else {
		p.tt, p.data = p.popToken(true)
	}
	// dispatch to the state function on top of the stack
	gt := p.state[len(p.state)-1](p)
	return gt, p.tt, p.data
}
// Offset return offset for current Grammar
func (p *Parser) Offset() int {
	return p.l.r.Offset()
}

// Values returns a slice of Tokens for the last Grammar. Only AtRuleGrammar, BeginAtRuleGrammar, BeginRulesetGrammar and Declaration will return the at-rule components, ruleset selector and declaration values respectively.
// The slice is reused between grammars; copy it if it must outlive the next call to Next.
func (p *Parser) Values() []Token {
	return p.buf
}
// popToken returns the next token from the lexer, skipping whitespace (unless
// keepWS is set) and comments, while recording in prevWS/prevComment what was
// skipped. When allowComment is true and the parser is in the top-level state,
// a comment is returned rather than skipped so it can be emitted as a grammar.
func (p *Parser) popToken(allowComment bool) (TokenType, []byte) {
	p.prevWS = false
	p.prevComment = false
	tt, data := p.l.Next()
	// precedence: (!p.keepWS && tt == WhitespaceToken) || tt == CommentToken
	for !p.keepWS && tt == WhitespaceToken || tt == CommentToken {
		if tt == WhitespaceToken {
			p.prevWS = true
		} else {
			p.prevComment = true
			if allowComment && len(p.state) == 1 {
				break
			}
		}
		tt, data = p.l.Next()
	}
	return tt, data
}
// initBuf clears the token buffer for the next grammar, keeping its capacity.
func (p *Parser) initBuf() {
	p.buf = p.buf[:0]
}

// pushBuf appends a token to the buffer of the current grammar.
func (p *Parser) pushBuf(tt TokenType, data []byte) {
	p.buf = append(p.buf, Token{TokenType: tt, Data: data})
}
////////////////////////////////////////////////////////////////

// parseStylesheet handles the top-level stylesheet grammar: CDO/CDC tokens,
// at-rules and comments; anything else starts a qualified rule (ruleset).
func (p *Parser) parseStylesheet() GrammarType {
	switch p.tt {
	case CDOToken, CDCToken:
		return TokenGrammar
	case AtKeywordToken:
		return p.parseAtRule()
	case CommentToken:
		return CommentGrammar
	case ErrorToken:
		return ErrorGrammar
	default:
		return p.parseQualifiedRule()
	}
}
// parseDeclarationList parses one entry of a declaration list, dispatching to
// at-rule, declaration or custom-property parsing, and recovering from errors
// by consuming tokens up to the next semicolon or right brace.
func (p *Parser) parseDeclarationList() GrammarType {
	if p.tt == CommentToken {
		p.tt, p.data = p.popToken(false)
	}
	// skip stray semicolons between declarations
	for p.tt == SemicolonToken {
		p.tt, p.data = p.popToken(false)
	}

	// IE hack: *color:red;
	if p.tt == DelimToken && p.data[0] == '*' {
		// fold the '*' into the following property name token
		tt, data := p.popToken(false)
		p.tt = tt
		p.data = append(p.data, data...)
	}

	if p.tt == ErrorToken {
		return ErrorGrammar
	} else if p.tt == AtKeywordToken {
		return p.parseAtRule()
	} else if p.tt == IdentToken || p.tt == DelimToken {
		return p.parseDeclaration()
	} else if p.tt == CustomPropertyNameToken {
		return p.parseCustomProperty()
	}

	// parse error
	p.initBuf()
	// rewind temporarily so errPos points at the start of the offending token
	p.l.r.Move(-len(p.data))
	p.err, p.errPos = fmt.Sprintf("unexpected token '%s' in declaration", string(p.data)), p.l.r.Offset()
	p.l.r.Move(len(p.data))

	if p.tt == RightBraceToken {
		// right brace token will occur when we've had a decl error that ended in a right brace token
		// as these are not handled by decl error, we handle it here explicitly. Normally its used to end eg. the qual rule.
		p.pushBuf(p.tt, p.data)
		return ErrorGrammar
	}
	return p.parseDeclarationError(p.tt, p.data)
}
////////////////////////////////////////////////////////////////
// parseAtRule parses an at-rule (e.g. @media, @import): it lowercases and
// hashes the rule name, buffers the prelude tokens, and on encountering a
// block pushes the state that matches the at-rule's content type.
func (p *Parser) parseAtRule() GrammarType {
	p.initBuf()
	p.data = parse.ToLower(parse.Copy(p.data))
	atRuleName := p.data
	// Skip a vendor-specific prefix such as @-moz-keyframes. Indexing byte 1
	// requires at least two bytes ('@' plus one more); the previous check of
	// len(atRuleName) > 0 could index out of range on a lone '@'.
	if 1 < len(atRuleName) && atRuleName[1] == '-' {
		if i := bytes.IndexByte(atRuleName[2:], '-'); i != -1 {
			atRuleName = atRuleName[i+2:] // skip vendor specific prefix
		}
	}
	atRule := ToHash(atRuleName[1:])

	first := true
	skipWS := false
	for {
		tt, data := p.popToken(false)
		if tt == LeftBraceToken && p.level == 0 {
			// the block content depends on the at-rule: declarations, rules, or unknown
			if atRule == Font_Face || atRule == Page {
				p.state = append(p.state, (*Parser).parseAtRuleDeclarationList)
			} else if atRule == Document || atRule == Keyframes || atRule == Media || atRule == Supports {
				p.state = append(p.state, (*Parser).parseAtRuleRuleList)
			} else {
				p.state = append(p.state, (*Parser).parseAtRuleUnknown)
			}
			return BeginAtRuleGrammar
		} else if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
			p.prevEnd = (tt == RightBraceToken)
			return AtRuleGrammar
		} else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
			p.level++
		} else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
			if p.level == 0 {
				// TODO: buggy
				p.pushBuf(tt, data)
				if 1 < len(p.state) {
					p.state = p.state[:len(p.state)-1]
				}
				p.err, p.errPos = "unexpected ending in at rule", p.l.r.Offset()
				return ErrorGrammar
			}
			p.level--
		}

		if first {
			if tt == LeftParenthesisToken || tt == LeftBracketToken {
				p.prevWS = false
			}
			first = false
		}

		// collapse whitespace: none is emitted after ',', ':' and '(' or before ')'
		if len(data) == 1 && (data[0] == ',' || data[0] == ':') {
			skipWS = true
		} else if p.prevWS && !skipWS && tt != RightParenthesisToken {
			p.pushBuf(WhitespaceToken, wsBytes)
		} else {
			skipWS = false
		}
		if tt == LeftParenthesisToken {
			skipWS = true
		}
		p.pushBuf(tt, data)
	}
}
// parseAtRuleRuleList parses the rule list inside e.g. @media or @supports.
func (p *Parser) parseAtRuleRuleList() GrammarType {
	switch p.tt {
	case RightBraceToken, ErrorToken:
		p.state = p.state[:len(p.state)-1]
		return EndAtRuleGrammar
	case AtKeywordToken:
		return p.parseAtRule()
	}
	return p.parseQualifiedRule()
}

// parseAtRuleDeclarationList parses the declaration list inside e.g. @font-face or @page.
func (p *Parser) parseAtRuleDeclarationList() GrammarType {
	// skip stray semicolons
	for p.tt == SemicolonToken {
		p.tt, p.data = p.popToken(false)
	}
	if p.tt == RightBraceToken || p.tt == ErrorToken {
		p.state = p.state[:len(p.state)-1]
		return EndAtRuleGrammar
	}
	return p.parseDeclarationList()
}
// parseAtRuleUnknown passes the tokens of an unrecognized at-rule block through
// verbatim (including whitespace), tracking nesting until the closing brace.
func (p *Parser) parseAtRuleUnknown() GrammarType {
	p.keepWS = true
	if p.tt == ErrorToken || p.tt == RightBraceToken && p.level == 0 {
		p.state = p.state[:len(p.state)-1]
		p.keepWS = false
		return EndAtRuleGrammar
	}
	switch p.tt {
	case LeftParenthesisToken, LeftBraceToken, LeftBracketToken, FunctionToken:
		p.level++
	case RightParenthesisToken, RightBraceToken, RightBracketToken:
		p.level--
	}
	return TokenGrammar
}
// parseQualifiedRule parses a ruleset selector until its block starts, buffering
// the selector tokens; it returns QualifiedRuleGrammar at each selector comma.
func (p *Parser) parseQualifiedRule() GrammarType {
	p.initBuf()

	first := true
	inAttrSel := false
	skipWS := true
	var tt TokenType
	var data []byte
	for {
		if first {
			// reuse the token that triggered this state; reset p.tt/p.data so a
			// comma-continuation re-enters here without consuming a new token
			tt, data = p.tt, p.data
			p.tt = WhitespaceToken
			p.data = emptyBytes
			first = false
		} else {
			tt, data = p.popToken(false)
		}

		if tt == LeftBraceToken && p.level == 0 {
			p.state = append(p.state, (*Parser).parseQualifiedRuleDeclarationList)
			return BeginRulesetGrammar
		} else if tt == ErrorToken {
			p.err, p.errPos = "unexpected ending in qualified rule", p.l.r.Offset()
			return ErrorGrammar
		} else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
			p.level++
		} else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
			if p.level == 0 {
				// TODO: buggy
				p.pushBuf(tt, data)
				if 1 < len(p.state) {
					p.state = p.state[:len(p.state)-1]
				}
				p.err, p.errPos = "unexpected ending in qualified rule", p.l.r.Offset()
				return ErrorGrammar
			}
			p.level--
		}

		// collapse whitespace around commas and the >, +, ~ combinators
		if len(data) == 1 && (data[0] == ',' || data[0] == '>' || data[0] == '+' || data[0] == '~') {
			if data[0] == ',' {
				return QualifiedRuleGrammar
			}
			skipWS = true
		} else if p.prevWS && !skipWS && !inAttrSel {
			p.pushBuf(WhitespaceToken, wsBytes)
		} else {
			skipWS = false
		}

		// no whitespace is emitted inside attribute selectors [ ... ]
		if tt == LeftBracketToken {
			inAttrSel = true
		} else if tt == RightBracketToken {
			inAttrSel = false
		}
		p.pushBuf(tt, data)
	}
}
// parseQualifiedRuleDeclarationList parses the declarations inside a ruleset.
func (p *Parser) parseQualifiedRuleDeclarationList() GrammarType {
	// skip stray semicolons
	for p.tt == SemicolonToken {
		p.tt, p.data = p.popToken(false)
	}
	if p.tt != RightBraceToken && p.tt != ErrorToken {
		return p.parseDeclarationList()
	}
	p.state = p.state[:len(p.state)-1]
	return EndRulesetGrammar
}
// parseDeclaration parses a property declaration (name ':' values), buffering
// the value tokens until a semicolon or the end of the enclosing block.
func (p *Parser) parseDeclaration() GrammarType {
	p.initBuf()
	p.data = parse.ToLower(parse.Copy(p.data))
	ttName, dataName := p.tt, p.data
	tt, data := p.popToken(false)
	if tt != ColonToken {
		// rewind temporarily so errPos points at the unexpected token
		p.l.r.Move(-len(data))
		p.err, p.errPos = "expected colon in declaration", p.l.r.Offset()
		p.l.r.Move(len(data))
		p.pushBuf(ttName, dataName)
		return p.parseDeclarationError(tt, data)
	}

	skipWS := true
	for {
		tt, data := p.popToken(false)
		if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
			p.prevEnd = (tt == RightBraceToken)
			return DeclarationGrammar
		} else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
			p.level++
		} else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
			if p.level == 0 {
				// TODO: buggy
				p.err, p.errPos = "unexpected ending in declaration", p.l.r.Offset()
				// reconstruct the name and colon so the error buffer holds all tokens
				p.pushBuf(ttName, dataName)
				p.pushBuf(ColonToken, []byte{':'})
				return p.parseDeclarationError(tt, data)
			}
			p.level--
		}

		// collapse whitespace around value separators
		if len(data) == 1 && (data[0] == ',' || data[0] == '/' || data[0] == ':' || data[0] == '!' || data[0] == '=') {
			skipWS = true
		} else if (p.prevWS || p.prevComment) && !skipWS {
			p.pushBuf(WhitespaceToken, wsBytes)
		} else {
			skipWS = false
		}
		p.pushBuf(tt, data)
	}
}
// parseDeclarationError recovers from a declaration parse error: it buffers
// tokens starting at the offending (tt,data) until a top-level semicolon,
// right brace or EOF, then returns ErrorGrammar.
func (p *Parser) parseDeclarationError(tt TokenType, data []byte) GrammarType {
	// we're on the offending (tt,data), keep popping tokens till we reach ;, }, or EOF
	p.tt, p.data = tt, data
	for {
		if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
			p.prevEnd = (tt == RightBraceToken)
			// keep the terminating semicolon in the buffer; a right brace is re-emitted via prevEnd
			if tt == SemicolonToken {
				p.pushBuf(tt, data)
			}
			return ErrorGrammar
		} else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
			p.level++
		} else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
			p.level--
		}

		if p.prevWS {
			p.pushBuf(WhitespaceToken, wsBytes)
		}
		p.pushBuf(tt, data)

		tt, data = p.popToken(false)
	}
}
// parseCustomProperty parses a custom property declaration (--name: ...),
// collecting the raw value bytes verbatim into a single CustomPropertyValueToken.
func (p *Parser) parseCustomProperty() GrammarType {
	p.initBuf()
	if tt, data := p.popToken(false); tt != ColonToken {
		// rewind temporarily so errPos points at the unexpected token
		p.l.r.Move(-len(data))
		p.err, p.errPos = "expected colon in custom property", p.l.r.Offset()
		p.l.r.Move(len(data))
		return ErrorGrammar
	}
	val := []byte{}
	for {
		// read straight from the lexer: whitespace and comments are kept as-is
		tt, data := p.l.Next()
		if (tt == SemicolonToken || tt == RightBraceToken) && p.level == 0 || tt == ErrorToken {
			p.prevEnd = (tt == RightBraceToken)
			p.pushBuf(CustomPropertyValueToken, val)
			return CustomPropertyGrammar
		} else if tt == LeftParenthesisToken || tt == LeftBraceToken || tt == LeftBracketToken || tt == FunctionToken {
			p.level++
		} else if tt == RightParenthesisToken || tt == RightBraceToken || tt == RightBracketToken {
			if p.level == 0 {
				// TODO: buggy
				p.pushBuf(tt, data)
				p.err, p.errPos = "unexpected ending in custom property", p.l.r.Offset()
				return ErrorGrammar
			}
			p.level--
		}
		val = append(val, data...)
	}
}
package css
import "github.com/tdewolff/parse/v2"
// IsIdent returns true if the bytes are a valid identifier.
func IsIdent(b []byte) bool {
	lex := NewLexer(parse.NewInputBytes(b))
	lex.consumeIdentToken()
	lex.r.Restore()
	// valid only when the entire input was consumed as one identifier
	return lex.r.Pos() == len(b)
}

// IsURLUnquoted returns true if the bytes are a valid unquoted URL.
func IsURLUnquoted(b []byte) bool {
	lex := NewLexer(parse.NewInputBytes(b))
	lex.consumeUnquotedURL()
	lex.r.Restore()
	// valid only when the entire input was consumed as one unquoted URL
	return lex.r.Pos() == len(b)
}
// HSL2RGB converts HSL to RGB with all of range [0,1]
// from http://www.w3.org/TR/css3-color/#hsl-color
func HSL2RGB(h, s, l float64) (float64, float64, float64) {
	var m2 float64
	if l <= 0.5 {
		m2 = l * (s + 1)
	} else {
		m2 = l + s - l*s
	}
	m1 := l*2 - m2
	r := hue2rgb(m1, m2, h+1.0/3.0)
	g := hue2rgb(m1, m2, h)
	b := hue2rgb(m1, m2, h-1.0/3.0)
	return r, g, b
}

// hue2rgb maps a single hue channel onto the interval [m1,m2].
func hue2rgb(m1, m2, h float64) float64 {
	// wrap hue into [0,1]
	if h < 0.0 {
		h += 1.0
	} else if h > 1.0 {
		h -= 1.0
	}
	switch {
	case h*6.0 < 1.0:
		return m1 + (m2-m1)*h*6.0
	case h*2.0 < 1.0:
		return m2
	case h*3.0 < 2.0:
		return m1 + (m2-m1)*(2.0/3.0-h)*6.0
	}
	return m1
}
package parse
import (
"bytes"
"fmt"
"io"
)
// Error is a parsing error returned by parser. It contains a message and an offset at which the error occurred.
type Error struct {
	Message string
	Line    int    // 1-based line number of the error
	Column  int    // 1-based column number of the error
	Context string // the entire line at which the error occurred
}
// NewError creates a new error at byte offset in r; the optional arguments a
// are applied to message via fmt.Sprintf.
func NewError(r io.Reader, offset int, message string, a ...interface{}) *Error {
	if len(a) != 0 {
		message = fmt.Sprintf(message, a...)
	}
	line, column, context := Position(r, offset)
	return &Error{
		Message: message,
		Line:    line,
		Column:  column,
		Context: context,
	}
}
// NewErrorLexer creates a new error from an active Lexer.
func NewErrorLexer(l *Input, message string, a ...interface{}) *Error {
	return NewError(bytes.NewBuffer(l.Bytes()), l.Offset(), message, a...)
}
// Position returns the line, column, and context of the error.
// Context is the entire line at which the error occurred.
func (e *Error) Position() (int, int, string) {
	return e.Line, e.Column, e.Context
}

// Error returns the error string, containing the context and line + column number.
func (e *Error) Error() string {
	return fmt.Sprintf("%s on line %d and column %d\n%s", e.Message, e.Line, e.Column, e.Context)
}
package parse
import (
"io"
"io/ioutil"
)
// nullBuffer is the buffer used for empty or erroneous input: a single NULL terminator.
var nullBuffer = []byte{0}

// Input is a buffered reader that allows peeking forward and shifting, taking an io.Input.
// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
type Input struct {
	buf     []byte // input bytes followed by a NULL sentinel byte
	pos     int    // index in buf
	start   int    // index in buf
	err     error
	restore func() // restores the byte that the NULL sentinel overwrote, if any
}
// NewInput returns a new Input for a given io.Reader and uses ioutil.ReadAll to read it into a byte slice.
// If the io.Reader implements Bytes, that is used instead. It will append a NULL at the end of the buffer.
func NewInput(r io.Reader) *Input {
	var b []byte
	if r != nil {
		// prefer a zero-copy Bytes() accessor when the reader provides one
		if buffer, ok := r.(interface{ Bytes() []byte }); ok {
			b = buffer.Bytes()
		} else {
			var err error
			if b, err = ioutil.ReadAll(r); err != nil {
				return &Input{buf: nullBuffer, err: err}
			}
		}
	}
	return NewInputBytes(b)
}
// NewInputString returns a new Input for a given string and appends NULL at the end.
func NewInputString(s string) *Input {
	return NewInputBytes([]byte(s))
}
// NewInputBytes returns a new Input for a given byte slice and appends NULL at the end.
// To avoid reallocation, make sure the capacity has room for one more byte.
func NewInputBytes(b []byte) *Input {
	z := &Input{
		buf: b,
	}
	n := len(b)
	if n == 0 {
		z.buf = nullBuffer
	} else {
		// Append NULL to buffer, but try to avoid reallocation
		if cap(b) > n {
			// Overwrite next byte but restore when done
			b = b[:n+1]
			c := b[n] // save the byte we clobber with NULL; Restore() puts it back
			b[n] = 0
			z.buf = b
			z.restore = func() {
				b[n] = c
			}
		} else {
			// no spare capacity: a copy with the NULL appended is unavoidable
			z.buf = append(b, 0)
		}
	}
	return z
}
// Restore restores the replaced byte past the end of the buffer by NULL.
func (z *Input) Restore() {
	if z.restore == nil {
		return
	}
	z.restore()
	z.restore = nil
}

// Err returns the error returned from io.Reader or io.EOF when the end has been reached.
func (z *Input) Err() error {
	return z.PeekErr(0)
}
// PeekErr returns the error at position pos. When pos is zero, this is the same as calling Err().
func (z *Input) PeekErr(pos int) error {
	switch {
	case z.err != nil:
		return z.err
	case z.pos+pos >= len(z.buf)-1:
		// at or past the NULL sentinel: end of input
		return io.EOF
	default:
		return nil
	}
}

// Peek returns the ith byte relative to the end position.
// Peek returns 0 when an error has occurred, Err returns the error.
func (z *Input) Peek(pos int) byte {
	// the buffer is NULL-terminated, so peeking at the end yields 0
	return z.buf[z.pos+pos]
}
// PeekRune returns the rune and rune length of the ith byte relative to the end position.
func (z *Input) PeekRune(pos int) (rune, int) {
	// from unicode/utf8
	c := z.Peek(pos)
	// decode UTF-8 inline; fall back to a shorter sequence when the remaining
	// buffer (excluding the trailing NULL) is too short for the full rune
	if c < 0xC0 || len(z.buf)-1-z.pos < 2 {
		return rune(c), 1
	} else if c < 0xE0 || len(z.buf)-1-z.pos < 3 {
		return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
	} else if c < 0xF0 || len(z.buf)-1-z.pos < 4 {
		return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
	}
	return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
}
// Move advances the position.
func (z *Input) Move(n int) {
	z.pos += n
}

// MoveRune advances the position by the length of the current rune.
func (z *Input) MoveRune() {
	c := z.Peek(0)
	// remaining bytes excluding the trailing NULL sentinel
	rem := len(z.buf) - 1 - z.pos
	switch {
	case c < 0xC0 || rem < 2:
		z.pos += 1
	case c < 0xE0 || rem < 3:
		z.pos += 2
	case c < 0xF0 || rem < 4:
		z.pos += 3
	default:
		z.pos += 4
	}
}
// Pos returns a mark to which can be rewinded.
func (z *Input) Pos() int {
	return z.pos - z.start
}

// Rewind rewinds the position to the given position.
func (z *Input) Rewind(pos int) {
	z.pos = pos + z.start
}

// Lexeme returns the bytes of the current selection.
func (z *Input) Lexeme() []byte {
	// full slice expression prevents appends from clobbering the buffer
	return z.buf[z.start:z.pos:z.pos]
}

// Skip collapses the position to the end of the selection.
func (z *Input) Skip() {
	z.start = z.pos
}

// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
func (z *Input) Shift() []byte {
	b := z.Lexeme()
	z.Skip()
	return b
}
// Offset returns the character position in the buffer.
func (z *Input) Offset() int {
	return z.pos
}

// Bytes returns the underlying buffer without the trailing NULL sentinel.
func (z *Input) Bytes() []byte {
	n := len(z.buf) - 1
	return z.buf[:n:n]
}

// Len returns the length of the underlying buffer without the trailing NULL sentinel.
func (z *Input) Len() int {
	return len(z.buf) - 1
}

// Reset resets position to the start of the underlying buffer.
func (z *Input) Reset() {
	z.start, z.pos = 0, 0
}
package js
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/tdewolff/parse/v2"
)
// ErrInvalidJSON is returned by the JSON methods when the JS is not valid JSON.
var ErrInvalidJSON = fmt.Errorf("invalid JSON")

// JSONer is implemented by AST nodes that can write themselves as JSON.
type JSONer interface {
	JSON(io.Writer) error
}

// AST is the full ECMAScript abstract syntax tree.
type AST struct {
	BlockStmt // module
}
// String returns a debug representation of all top-level statements, space separated.
func (ast AST) String() string {
	sb := strings.Builder{}
	for i, item := range ast.BlockStmt.List {
		if 0 < i {
			sb.WriteString(" ")
		}
		sb.WriteString(item.String())
	}
	return sb.String()
}
// JS writes JavaScript to writer.
func (ast AST) JS(w io.Writer) {
	for i, item := range ast.List {
		if 0 < i {
			w.Write([]byte("\n"))
		}
		item.JS(w)
		// a variable declaration is not self-terminating; append the semicolon
		if _, isDecl := item.(*VarDecl); isDecl {
			w.Write([]byte(";"))
		}
	}
}
// JSString returns a string of JavaScript.
func (ast AST) JSString() string {
	sb := strings.Builder{}
	ast.JS(&sb)
	return sb.String()
}
// JSON writes the JS as JSON to writer. The JS must be empty or a single
// expression statement whose expression (optionally wrapped in a group) is a
// valid JSON value; otherwise an error wrapping ErrInvalidJSON is returned.
func (ast AST) JSON(w io.Writer) error {
	// %w (instead of %v) keeps the message identical while making
	// errors.Is(err, ErrInvalidJSON) work for callers
	if 1 < len(ast.List) {
		return fmt.Errorf("%w: JS must be a single statement", ErrInvalidJSON)
	} else if len(ast.List) == 0 {
		return nil
	}
	exprStmt, ok := ast.List[0].(*ExprStmt)
	if !ok {
		return fmt.Errorf("%w: JS must be an expression statement", ErrInvalidJSON)
	}
	expr := exprStmt.Value
	if group, ok := expr.(*GroupExpr); ok {
		expr = group.X // allow parsing expr contained in group expr
	}
	val, ok := expr.(JSONer)
	if !ok {
		return fmt.Errorf("%w: JS must be a valid JSON expression", ErrInvalidJSON)
	}
	return val.JSON(w)
}
// JSONString returns a string of JSON if valid.
func (ast AST) JSONString() (string, error) {
	var sb strings.Builder
	err := ast.JSON(&sb)
	return sb.String(), err
}
////////////////////////////////////////////////////////////////

// DeclType specifies the kind of declaration.
type DeclType uint16

// DeclType values.
const (
	NoDecl       DeclType = iota // undeclared variables
	VariableDecl                 // var
	FunctionDecl                 // function
	ArgumentDecl                 // function and method arguments
	LexicalDecl                  // let, const, class
	CatchDecl                    // catch statement argument
	ExprDecl                     // function expression name or class expression name
)
// String returns the name of the declaration type, or Invalid(n) for unknown values.
func (decl DeclType) String() string {
	switch decl {
	case NoDecl:
		return "NoDecl"
	case VariableDecl:
		return "VariableDecl"
	case FunctionDecl:
		return "FunctionDecl"
	case ArgumentDecl:
		return "ArgumentDecl"
	case LexicalDecl:
		return "LexicalDecl"
	case CatchDecl:
		return "CatchDecl"
	case ExprDecl:
		return "ExprDecl"
	}
	return "Invalid(" + strconv.Itoa(int(decl)) + ")"
}
// Var is a variable, where Decl is the type of declaration and can be var|function for function scoped variables, let|const|class for block scoped variables.
type Var struct {
	Data []byte
	Link *Var // is set when merging variable uses, as in: {a} {var a} where the first links to the second, only used for undeclared variables
	Uses uint16
	Decl DeclType
}

// Name returns the variable name, following the Link chain to the canonical variable.
func (v *Var) Name() []byte {
	cur := v
	for cur.Link != nil {
		cur = cur.Link
	}
	return cur.Data
}

// Info returns a debug description of the variable including its link chain.
func (v *Var) Info() string {
	s := fmt.Sprintf("%p type=%s name='%s' uses=%d", v, v.Decl, string(v.Data), v.Uses)
	links := 0
	for v.Link != nil {
		v = v.Link
		links++
	}
	if links != 0 {
		s += fmt.Sprintf(" links=%d => %p", links, v)
	}
	return s
}

// String returns the (canonical) variable name.
func (v Var) String() string {
	return string(v.Name())
}

// JS writes JavaScript to writer.
func (v Var) JS(w io.Writer) {
	w.Write(v.Name())
}
// VarsByUses is sortable by uses in descending order.
type VarsByUses VarArray

// Len implements sort.Interface.
func (vs VarsByUses) Len() int {
	return len(vs)
}

// Swap implements sort.Interface.
func (vs VarsByUses) Swap(i, j int) {
	vs[i], vs[j] = vs[j], vs[i]
}

// Less implements sort.Interface; variables with more uses sort first.
func (vs VarsByUses) Less(i, j int) bool {
	return vs[i].Uses > vs[j].Uses
}
////////////////////////////////////////////////////////////////

// VarArray is a set of variables in scopes.
type VarArray []*Var

// String returns a debug representation of all variables, resolving link chains.
func (vs VarArray) String() string {
	sb := strings.Builder{}
	sb.WriteString("[")
	for i, v := range vs {
		if i != 0 {
			sb.WriteString(", ")
		}
		// follow the link chain to the canonical variable, counting hops
		links := 0
		for v.Link != nil {
			v = v.Link
			links++
		}
		fmt.Fprintf(&sb, "Var{%v %s %v %v}", v.Decl, string(v.Data), links, v.Uses)
	}
	sb.WriteString("]")
	return sb.String()
}
// Scope is a function or block scope with a list of variables declared and used.
type Scope struct {
	Parent, Func   *Scope   // Parent is nil for global scope
	Declared       VarArray // Link in Var are always nil
	Undeclared     VarArray
	VarDecls       []*VarDecl
	NumForDecls    uint16 // offset into Declared to mark variables used in for statements
	NumFuncArgs    uint16 // offset into Declared to mark variables used in function arguments
	NumArgUses     uint16 // offset into Undeclared to mark variables used in arguments
	IsGlobalOrFunc bool
	HasWith        bool
}

// String returns a debug representation of the declared and undeclared variables.
func (s Scope) String() string {
	return "Scope{Declared: " + s.Declared.String() + ", Undeclared: " + s.Undeclared.String() + "}"
}
// Declare declares a new variable. It returns the (possibly reused) variable
// and false when the declaration is a syntax error (conflicting redeclaration).
func (s *Scope) Declare(decl DeclType, name []byte) (*Var, bool) {
	// refer to new variable for previously undeclared symbols in the current and lower scopes
	// this happens in `{ a = 5; } var a` where both a's refer to the same variable
	curScope := s
	if decl == VariableDecl || decl == FunctionDecl {
		// find function scope for var and function declarations
		for s != s.Func {
			// make sure that `{let i;{var i}}` is an error
			if v := s.findDeclared(name, false); v != nil && v.Decl != decl && v.Decl != CatchDecl {
				return nil, false
			}
			s = s.Parent
		}
	}

	if v := s.findDeclared(name, true); v != nil {
		// variable already declared, might be an error or a duplicate declaration
		if (ArgumentDecl < v.Decl || FunctionDecl < decl) && v.Decl != ExprDecl {
			// only allow (v.Decl,decl) of: (var|function|argument,var|function), (expr,*), any other combination is a syntax error
			return nil, false
		}
		if v.Decl == ExprDecl {
			v.Decl = decl
		}
		v.Uses++
		// record the use in every scope between the use site and the declaring scope
		for s != curScope {
			curScope.AddUndeclared(v) // add variable declaration as used variable to the current scope
			curScope = curScope.Parent
		}
		return v, true
	}

	var v *Var
	// reuse variable if previously used, as in: a;var a
	if decl != ArgumentDecl { // in case of function f(a=b,b), where the first b is different from the second
		for i, uv := range s.Undeclared[s.NumArgUses:] {
			// no need to evaluate v.Link as v.Data stays the same and Link is nil in the active scope
			if 0 < uv.Uses && uv.Decl == NoDecl && bytes.Equal(name, uv.Data) {
				// must be NoDecl so that it can't be a var declaration that has been added
				v = uv
				s.Undeclared = append(s.Undeclared[:int(s.NumArgUses)+i], s.Undeclared[int(s.NumArgUses)+i+1:]...)
				break
			}
		}
	}
	if v == nil {
		// add variable to the context list and to the scope
		v = &Var{name, nil, 0, decl}
	} else {
		v.Decl = decl
	}
	v.Uses++
	s.Declared = append(s.Declared, v)
	// record the use in every scope between the use site and the declaring scope
	for s != curScope {
		curScope.AddUndeclared(v) // add variable declaration as used variable to the current scope
		curScope = curScope.Parent
	}
	return v, true
}
// Use increments the usage of a variable.
func (s *Scope) Use(name []byte) *Var {
	// prefer a declaration in the current scope
	v := s.findDeclared(name, false)
	if v == nil {
		// otherwise reuse an earlier undeclared use, or record a new one
		if v = s.findUndeclared(name); v == nil {
			v = &Var{name, nil, 0, NoDecl}
			s.Undeclared = append(s.Undeclared, v)
		}
	}
	v.Uses++
	return v
}
// findDeclared finds a declared variable in the current scope.
func (s *Scope) findDeclared(name []byte, skipForDeclared bool) *Var {
	stop := 0
	if skipForDeclared {
		// we skip the for initializer for declarations (only has effect for let/const)
		stop = int(s.NumForDecls)
	}
	// reverse order to find the inner let first in `for(let a in []){let a; {a}}`
	for i := len(s.Declared) - 1; stop <= i; i-- {
		// no need to evaluate v.Link as v.Data stays the same, and Link is always nil in Declared
		if v := s.Declared[i]; bytes.Equal(name, v.Data) {
			return v
		}
	}
	return nil
}

// findUndeclared finds an undeclared variable in the current and contained scopes.
func (s *Scope) findUndeclared(name []byte) *Var {
	for _, v := range s.Undeclared {
		// skip unused entries; Link is nil in the active scope so v.Data is canonical
		if v.Uses > 0 && bytes.Equal(name, v.Data) {
			return v
		}
	}
	return nil
}
// AddUndeclared adds an undeclared variable to the scope; this is called for
// the block scope when declaring a var in it. Duplicates are not added twice.
func (s *Scope) AddUndeclared(v *Var) {
	for _, existing := range s.Undeclared {
		if existing == v {
			return
		}
	}
	s.Undeclared = append(s.Undeclared, v) // add variable declaration as used variable to the current scope
}

// MarkForStmt marks the declared variables in current scope as for statement initializer to distinguish from declarations in body.
func (s *Scope) MarkForStmt() {
	s.NumForDecls = uint16(len(s.Declared))
	s.NumArgUses = uint16(len(s.Undeclared)) // ensures for different b's in for(var a in b){let b}
}

// MarkFuncArgs marks the declared/undeclared variables in the current scope as function arguments.
func (s *Scope) MarkFuncArgs() {
	s.NumFuncArgs = uint16(len(s.Declared))
	s.NumArgUses = uint16(len(s.Undeclared)) // ensures different b's in `function f(a=b){var b}`.
}
// HoistUndeclared copies all undeclared variables of the current scope to the parent scope.
// Existing declarations or uses in the parent are merged by linking to them.
func (s *Scope) HoistUndeclared() {
	for i, vorig := range s.Undeclared {
		// no need to evaluate vorig.Link as vorig.Data stays the same
		if 0 < vorig.Uses && vorig.Decl == NoDecl {
			if v := s.Parent.findDeclared(vorig.Data, false); v != nil {
				// check if variable is declared in parent scope
				v.Uses += vorig.Uses
				vorig.Link = v
				s.Undeclared[i] = v // point reference to existing var (to avoid many Link chains)
			} else if v := s.Parent.findUndeclared(vorig.Data); v != nil {
				// check if variable is already used before in parent scope
				v.Uses += vorig.Uses
				vorig.Link = v
				s.Undeclared[i] = v // point reference to existing var (to avoid many Link chains)
			} else {
				// add variable to the context list and to the scope's undeclared
				s.Parent.Undeclared = append(s.Parent.Undeclared, vorig)
			}
		}
	}
}
// UndeclareScope undeclares all declared variables in the current scope and adds them to the parent scope.
// Called when possible arrow func ends up being a parenthesized expression, scope is not further used.
func (s *Scope) UndeclareScope() {
	// look if the variable already exists in the parent scope, if so replace the Var pointer in original use
	for _, vorig := range s.Declared {
		// no need to evaluate vorig.Link as vorig.Data stays the same, and Link is always nil in Declared
		// vorig.Uses will be atleast 1
		if v := s.Parent.findDeclared(vorig.Data, false); v != nil {
			// check if variable has been declared in this scope
			v.Uses += vorig.Uses
			vorig.Link = v
		} else if v := s.Parent.findUndeclared(vorig.Data); v != nil {
			// check if variable is already used before in the current or lower scopes
			v.Uses += vorig.Uses
			vorig.Link = v
		} else {
			// add variable to the context list and to the scope's undeclared
			vorig.Decl = NoDecl // demote: the declaration never existed
			s.Parent.Undeclared = append(s.Parent.Undeclared, vorig)
		}
	}
	s.Declared = s.Declared[:0]
	s.Undeclared = s.Undeclared[:0]
}
// Unscope moves all declared variables of the current scope to the parent scope. Undeclared variables are already in the parent scope.
func (s *Scope) Unscope() {
	// Link is always nil in Declared and Data stays the same, so the variables
	// can be moved into the parent's declared list wholesale
	s.Parent.Declared = append(s.Parent.Declared, s.Declared...)
	s.Declared = s.Declared[:0]
	s.Undeclared = s.Undeclared[:0]
}
////////////////////////////////////////////////////////////////

// INode is an interface for AST nodes
type INode interface {
	String() string
	JS(io.Writer)
}

// IStmt is a dummy interface for statements.
// stmtNode is an unexported marker method restricting implementations to this package.
type IStmt interface {
	INode
	stmtNode()
}

// IBinding is a dummy interface for bindings.
type IBinding interface {
	INode
	bindingNode()
}

// IExpr is a dummy interface for expressions.
type IExpr interface {
	INode
	exprNode()
}
////////////////////////////////////////////////////////////////
// Comment block or line, usually a bang comment.
type Comment struct {
	Value []byte
}

// String returns a debug representation of the comment.
func (n Comment) String() string {
	return "Stmt(" + string(n.Value) + ")"
}

// JS writes JavaScript to writer.
func (n Comment) JS(w io.Writer) {
	// write through the underlying writer so comment content is not re-indented
	out := w
	if wi, ok := w.(parse.Indenter); ok {
		out = wi.Writer
	}
	out.Write(n.Value)
}
// BlockStmt is a block statement.
type BlockStmt struct {
	List []IStmt
	Scope
}

// String returns a debug representation of the block and its statements.
func (n BlockStmt) String() string {
	s := "Stmt({"
	for _, item := range n.List {
		s += " " + item.String()
	}
	return s + " })"
}

// JS writes JavaScript to writer.
func (n BlockStmt) JS(w io.Writer) {
	if len(n.List) == 0 {
		w.Write([]byte("{}"))
		return
	}

	w.Write([]byte("{"))
	// indent the block's statements one level deeper
	wi := parse.NewIndenter(w, 4)
	for _, item := range n.List {
		wi.Write([]byte("\n"))
		item.JS(wi)
		// a variable declaration is not self-terminating; append the semicolon
		if _, ok := item.(*VarDecl); ok {
			w.Write([]byte(";"))
		}
	}
	w.Write([]byte("\n}"))
}
// EmptyStmt is an empty statement (a lone semicolon).
type EmptyStmt struct{}

// String returns a debug representation of the statement.
func (n EmptyStmt) String() string {
	return "Stmt()"
}

// JS writes JavaScript to writer.
func (n EmptyStmt) JS(w io.Writer) {
	w.Write([]byte{';'})
}
// ExprStmt is an expression statement.
type ExprStmt struct {
	Value IExpr
}

// String returns a debug representation, avoiding double parentheses for grouped values.
func (n ExprStmt) String() string {
	val := n.Value.String()
	if val[0] == '(' && val[len(val)-1] == ')' {
		return "Stmt" + n.Value.String()
	}
	return "Stmt(" + n.Value.String() + ")"
}

// JS writes JavaScript to writer.
func (n ExprStmt) JS(w io.Writer) {
	// render the expression to a buffer first so it can be inspected
	buf := &bytes.Buffer{}
	wb := io.Writer(buf)
	if wi, ok := w.(parse.Indenter); ok {
		// make sure that buf is indenter if w is so as well
		// this is to prevent newlines in literals from indenting
		wb = parse.NewIndenter(wb, wi.Indent())
		w = wi.Writer
	}
	n.Value.JS(wb)
	expr := buf.Bytes()

	// a statement starting with `let` must be parenthesized to not be a declaration
	group := bytes.HasPrefix(expr, []byte("let "))
	if group {
		w.Write([]byte("("))
	}
	w.Write(expr)
	if group {
		w.Write([]byte(")"))
	}
	w.Write([]byte(";"))
}
// IfStmt is an if statement.
type IfStmt struct {
	Cond IExpr
	Body IStmt
	Else IStmt // can be nil
}

// String returns a debug representation of the if statement.
func (n IfStmt) String() string {
	s := "Stmt(if " + n.Cond.String() + " " + n.Body.String()
	if n.Else != nil {
		s += " else " + n.Else.String()
	}
	return s + ")"
}

// JS writes JavaScript to writer.
func (n IfStmt) JS(w io.Writer) {
	w.Write([]byte("if ("))
	n.Cond.JS(w)
	w.Write([]byte(")"))
	// no space before an empty statement's semicolon
	if _, ok := n.Body.(*EmptyStmt); !ok {
		w.Write([]byte(" "))
	}
	n.Body.JS(w)
	// a variable declaration is not self-terminating; append the semicolon
	if _, ok := n.Body.(*VarDecl); ok {
		w.Write([]byte(";"))
	}
	if n.Else != nil {
		w.Write([]byte(" else"))
		if _, ok := n.Else.(*EmptyStmt); !ok {
			w.Write([]byte(" "))
		}
		n.Else.JS(w)
		if _, ok := n.Else.(*VarDecl); ok {
			w.Write([]byte(";"))
		}
	}
}
// DoWhileStmt is a do-while iteration statement.
type DoWhileStmt struct {
	Cond IExpr
	Body IStmt
}

// String returns a debug representation of the do-while statement.
func (n DoWhileStmt) String() string {
	return "Stmt(do " + n.Body.String() + " while " + n.Cond.String() + ")"
}

// JS writes JavaScript to writer.
func (n DoWhileStmt) JS(w io.Writer) {
	w.Write([]byte("do"))
	// no space before an empty statement's semicolon
	if _, ok := n.Body.(*EmptyStmt); !ok {
		w.Write([]byte(" "))
	}
	n.Body.JS(w)
	// terminate a var declaration; comments get no separator before `while`
	if _, ok := n.Body.(*VarDecl); ok {
		w.Write([]byte("; "))
	} else if _, ok := n.Body.(*Comment); !ok {
		w.Write([]byte(" "))
	}
	w.Write([]byte("while ("))
	n.Cond.JS(w)
	w.Write([]byte(");"))
}
// WhileStmt is a while iteration statement.
type WhileStmt struct {
	Cond IExpr
	Body IStmt
}

// String returns a debug representation of the while statement.
func (n WhileStmt) String() string {
	return "Stmt(while " + n.Cond.String() + " " + n.Body.String() + ")"
}

// JS writes JavaScript to writer.
func (n WhileStmt) JS(w io.Writer) {
	w.Write([]byte("while ("))
	n.Cond.JS(w)
	w.Write([]byte(")"))
	// an empty body collapses to a bare semicolon
	if _, ok := n.Body.(*EmptyStmt); ok {
		w.Write([]byte(";"))
		return
	}
	w.Write([]byte(" "))
	n.Body.JS(w)
	// a variable declaration is not self-terminating; append the semicolon
	if _, ok := n.Body.(*VarDecl); ok {
		w.Write([]byte(";"))
	}
}
// ForStmt is a regular for iteration statement.
type ForStmt struct {
	Init IExpr // can be nil
	Cond IExpr // can be nil
	Post IExpr // can be nil
	Body *BlockStmt
}

// String returns a debug representation of the for statement.
func (n ForStmt) String() string {
	s := "Stmt(for"
	// print Init unless it is nil or an empty VarDecl
	if v, ok := n.Init.(*VarDecl); !ok && n.Init != nil || ok && len(v.List) != 0 {
		s += " " + n.Init.String()
	}
	s += " ;"
	if n.Cond != nil {
		s += " " + n.Cond.String()
	}
	s += " ;"
	if n.Post != nil {
		s += " " + n.Post.String()
	}
	return s + " " + n.Body.String() + ")"
}

// JS writes JavaScript to writer.
func (n ForStmt) JS(w io.Writer) {
	w.Write([]byte("for ("))
	// write Init unless it is nil or an empty VarDecl
	if v, ok := n.Init.(*VarDecl); !ok && n.Init != nil || ok && len(v.List) != 0 {
		n.Init.JS(w)
	} else {
		w.Write([]byte(" "))
	}
	w.Write([]byte("; "))
	if n.Cond != nil {
		n.Cond.JS(w)
	}
	w.Write([]byte("; "))
	if n.Post != nil {
		n.Post.JS(w)
	}
	w.Write([]byte(") "))
	n.Body.JS(w)
}
// ForInStmt is a for-in iteration statement.
type ForInStmt struct {
	Init  IExpr
	Value IExpr
	Body  *BlockStmt
}

// String returns the debug representation of the statement.
func (n ForInStmt) String() string {
	s := "Stmt(for " + n.Init.String()
	s += " in " + n.Value.String()
	return s + " " + n.Body.String() + ")"
}

// JS writes JavaScript to writer.
func (n ForInStmt) JS(w io.Writer) {
	write := func(s string) {
		w.Write([]byte(s))
	}
	write("for (")
	n.Init.JS(w)
	write(" in ")
	n.Value.JS(w)
	write(") ")
	n.Body.JS(w)
}
// ForOfStmt is a for-of iteration statement.
type ForOfStmt struct {
	Await bool
	Init  IExpr
	Value IExpr
	Body  *BlockStmt
}

func (n ForOfStmt) String() string {
	s := "Stmt(for"
	if n.Await {
		s += " await"
	}
	return s + " " + n.Init.String() + " of " + n.Value.String() + " " + n.Body.String() + ")"
}

// JS writes JavaScript to writer; emits "for await (init of value) body"
// when Await is set.
func (n ForOfStmt) JS(w io.Writer) {
	w.Write([]byte("for"))
	if n.Await {
		w.Write([]byte(" await"))
	}
	w.Write([]byte(" ("))
	n.Init.JS(w)
	w.Write([]byte(" of "))
	n.Value.JS(w)
	w.Write([]byte(") "))
	n.Body.JS(w)
}
// CaseClause is a case clause or default clause for a switch statement.
type CaseClause struct {
	TokenType       // clause keyword token; Cond == nil marks a default clause in JS output
	Cond      IExpr // can be nil
	List      []IStmt
}

func (n CaseClause) String() string {
	s := " Clause(" + n.TokenType.String()
	if n.Cond != nil {
		s += " " + n.Cond.String()
	}
	for _, item := range n.List {
		s += " " + item.String()
	}
	return s + ")"
}

// JS writes JavaScript to writer. Clause statements are written on their own
// lines, indented by four spaces via an Indenter; the semicolon terminating a
// VarDecl goes to the plain writer since it continues the current line.
func (n CaseClause) JS(w io.Writer) {
	if n.Cond != nil {
		w.Write([]byte("case "))
		n.Cond.JS(w)
	} else {
		w.Write([]byte("default"))
	}
	w.Write([]byte(":"))
	wi := parse.NewIndenter(w, 4)
	for _, item := range n.List {
		wi.Write([]byte("\n"))
		item.JS(wi)
		if _, ok := item.(*VarDecl); ok {
			w.Write([]byte(";"))
		}
	}
}
// SwitchStmt is a switch statement.
type SwitchStmt struct {
	Init IExpr
	List []CaseClause
	Scope
}

func (n SwitchStmt) String() string {
	s := "Stmt(switch " + n.Init.String()
	for _, clause := range n.List {
		s += clause.String()
	}
	return s + ")"
}

// JS writes JavaScript to writer; an empty clause list prints as "{}" with
// no inner newlines.
func (n SwitchStmt) JS(w io.Writer) {
	w.Write([]byte("switch ("))
	n.Init.JS(w)
	if len(n.List) == 0 {
		w.Write([]byte(") {}"))
		return
	}
	w.Write([]byte(") {"))
	for _, clause := range n.List {
		w.Write([]byte("\n"))
		clause.JS(w)
	}
	w.Write([]byte("\n}"))
}
// BranchStmt is a continue or break statement.
type BranchStmt struct {
	Type  TokenType
	Label []byte // can be nil
}

// String returns the debug representation of the statement.
func (n BranchStmt) String() string {
	if n.Label == nil {
		return "Stmt(" + n.Type.String() + ")"
	}
	return "Stmt(" + n.Type.String() + " " + string(n.Label) + ")"
}

// JS writes JavaScript to writer.
func (n BranchStmt) JS(w io.Writer) {
	w.Write(n.Type.Bytes())
	if label := n.Label; label != nil {
		w.Write([]byte(" "))
		w.Write(label)
	}
	w.Write([]byte(";"))
}
// ReturnStmt is a return statement.
type ReturnStmt struct {
	Value IExpr // can be nil
}

// String returns the debug representation of the statement.
func (n ReturnStmt) String() string {
	if n.Value == nil {
		return "Stmt(return)"
	}
	return "Stmt(return " + n.Value.String() + ")"
}

// JS writes JavaScript to writer.
func (n ReturnStmt) JS(w io.Writer) {
	io.WriteString(w, "return")
	if n.Value != nil {
		io.WriteString(w, " ")
		n.Value.JS(w)
	}
	io.WriteString(w, ";")
}
// WithStmt is a with statement.
type WithStmt struct {
	Cond IExpr
	Body IStmt
}

func (n WithStmt) String() string {
	return "Stmt(with " + n.Cond.String() + " " + n.Body.String() + ")"
}

// JS writes JavaScript to writer. Non-empty bodies are separated from the
// closing parenthesis by a space; a VarDecl body gets its semicolon here.
func (n WithStmt) JS(w io.Writer) {
	w.Write([]byte("with ("))
	n.Cond.JS(w)
	w.Write([]byte(")"))
	if _, ok := n.Body.(*EmptyStmt); !ok {
		w.Write([]byte(" "))
	}
	n.Body.JS(w)
	if _, ok := n.Body.(*VarDecl); ok {
		w.Write([]byte(";"))
	}
}
// LabelledStmt is a labelled statement.
type LabelledStmt struct {
	Label []byte
	Value IStmt
}

func (n LabelledStmt) String() string {
	return "Stmt(" + string(n.Label) + " : " + n.Value.String() + ")"
}

// JS writes JavaScript to writer as "label: stmt"; a VarDecl statement gets
// its terminating semicolon here.
func (n LabelledStmt) JS(w io.Writer) {
	w.Write(n.Label)
	w.Write([]byte(":"))
	if _, ok := n.Value.(*EmptyStmt); !ok {
		w.Write([]byte(" "))
	}
	n.Value.JS(w)
	if _, ok := n.Value.(*VarDecl); ok {
		w.Write([]byte(";"))
	}
}
// ThrowStmt is a throw statement.
type ThrowStmt struct {
	Value IExpr
}

// String returns the debug representation of the statement.
func (n ThrowStmt) String() string {
	s := "Stmt(throw "
	return s + n.Value.String() + ")"
}

// JS writes JavaScript to writer.
func (n ThrowStmt) JS(w io.Writer) {
	io.WriteString(w, "throw ")
	n.Value.JS(w)
	io.WriteString(w, ";")
}
// TryStmt is a try statement.
type TryStmt struct {
	Body    *BlockStmt
	Binding IBinding   // can be nil
	Catch   *BlockStmt // can be nil
	Finally *BlockStmt // can be nil
}

func (n TryStmt) String() string {
	s := "Stmt(try " + n.Body.String()
	if n.Catch != nil {
		s += " catch"
		if n.Binding != nil {
			s += " Binding(" + n.Binding.String() + ")"
		}
		s += " " + n.Catch.String()
	}
	if n.Finally != nil {
		s += " finally " + n.Finally.String()
	}
	return s + ")"
}

// JS writes JavaScript to writer. The catch binding is optional (ES2019
// optional catch binding); catch and finally blocks are emitted only when
// present.
func (n TryStmt) JS(w io.Writer) {
	w.Write([]byte("try "))
	n.Body.JS(w)
	if n.Catch != nil {
		w.Write([]byte(" catch"))
		if n.Binding != nil {
			w.Write([]byte("("))
			n.Binding.JS(w)
			w.Write([]byte(")"))
		}
		w.Write([]byte(" "))
		n.Catch.JS(w)
	}
	if n.Finally != nil {
		w.Write([]byte(" finally "))
		n.Finally.JS(w)
	}
}
// DebuggerStmt is a debugger statement.
type DebuggerStmt struct{}

// String returns the debug representation of the statement.
func (n DebuggerStmt) String() string {
	return "Stmt(debugger)"
}

// JS writes JavaScript to writer.
func (n DebuggerStmt) JS(w io.Writer) {
	io.WriteString(w, "debugger;")
}
// Alias is a name space import or import/export specifier for import/export statements.
type Alias struct {
	Name    []byte // can be nil
	Binding []byte // can be nil
}

// String returns the debug representation of the alias.
func (alias Alias) String() string {
	if alias.Name == nil {
		return string(alias.Binding)
	}
	return string(alias.Name) + " as " + string(alias.Binding)
}

// JS writes JavaScript to writer.
func (alias Alias) JS(w io.Writer) {
	if alias.Name != nil {
		w.Write(alias.Name)
		io.WriteString(w, " as ")
	}
	w.Write(alias.Binding)
}
// ImportStmt is an import statement.
type ImportStmt struct {
	List    []Alias
	Default []byte // can be nil
	Module  []byte
}

func (n ImportStmt) String() string {
	s := "Stmt(import"
	if n.Default != nil {
		s += " " + string(n.Default)
		if n.List != nil {
			s += " ,"
		}
	}
	// a single "*" alias is a namespace import and is not braced
	if len(n.List) == 1 && len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' {
		s += " " + n.List[0].String()
	} else if n.List != nil {
		s += " {"
		for i, item := range n.List {
			if i != 0 {
				s += " ,"
			}
			if item.Binding != nil {
				s += " " + item.String()
			}
		}
		s += " }"
	}
	if n.Default != nil || n.List != nil {
		s += " from"
	}
	return s + " " + string(n.Module) + ")"
}

// JS writes JavaScript to writer. Import statements are always written to the
// underlying writer, bypassing any Indenter wrapping.
func (n ImportStmt) JS(w io.Writer) {
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	w.Write([]byte("import"))
	if n.Default != nil {
		w.Write([]byte(" "))
		w.Write(n.Default)
		if n.List != nil {
			w.Write([]byte(","))
		}
	}
	// namespace import ("* as ns") is written bare, named imports are braced
	if len(n.List) == 1 && len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' {
		w.Write([]byte(" "))
		n.List[0].JS(w)
	} else if n.List != nil {
		if len(n.List) == 0 {
			w.Write([]byte(" {}"))
		} else {
			w.Write([]byte(" {"))
			for j, item := range n.List {
				if j != 0 {
					w.Write([]byte(","))
				}
				if item.Binding != nil {
					w.Write([]byte(" "))
					item.JS(w)
				}
			}
			w.Write([]byte(" }"))
		}
	}
	// "from" only appears when something was imported; otherwise this is a
	// bare side-effect import: import "module"
	if n.Default != nil || n.List != nil {
		w.Write([]byte(" from"))
	}
	w.Write([]byte(" "))
	w.Write(n.Module)
	w.Write([]byte(";"))
}
// ExportStmt is an export statement.
type ExportStmt struct {
	List    []Alias
	Module  []byte // can be nil
	Default bool
	Decl    IExpr
}

func (n ExportStmt) String() string {
	s := "Stmt(export"
	if n.Decl != nil {
		if n.Default {
			s += " default"
		}
		return s + " " + n.Decl.String() + ")"
	} else if len(n.List) == 1 && (len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' || n.List[0].Name == nil && len(n.List[0].Binding) == 1 && n.List[0].Binding[0] == '*') {
		// single "*" (with or without alias) is a re-export of all bindings
		s += " " + n.List[0].String()
	} else if 0 < len(n.List) {
		s += " {"
		for i, item := range n.List {
			if i != 0 {
				s += " ,"
			}
			if item.Binding != nil {
				s += " " + item.String()
			}
		}
		s += " }"
	}
	if n.Module != nil {
		s += " from " + string(n.Module)
	}
	return s + ")"
}

// JS writes JavaScript to writer. Export statements are always written to the
// underlying writer, bypassing any Indenter wrapping.
func (n ExportStmt) JS(w io.Writer) {
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	w.Write([]byte("export"))
	if n.Decl != nil {
		if n.Default {
			w.Write([]byte(" default"))
		}
		w.Write([]byte(" "))
		n.Decl.JS(w)
		w.Write([]byte(";"))
		return
	} else if len(n.List) == 1 && (len(n.List[0].Name) == 1 && n.List[0].Name[0] == '*' || n.List[0].Name == nil && len(n.List[0].Binding) == 1 && n.List[0].Binding[0] == '*') {
		w.Write([]byte(" "))
		n.List[0].JS(w)
	} else if len(n.List) == 0 {
		w.Write([]byte(" {}"))
	} else {
		w.Write([]byte(" {"))
		for j, item := range n.List {
			if j != 0 {
				w.Write([]byte(","))
			}
			if item.Binding != nil {
				w.Write([]byte(" "))
				item.JS(w)
			}
		}
		w.Write([]byte(" }"))
	}
	if n.Module != nil {
		w.Write([]byte(" from "))
		w.Write(n.Module)
	}
	w.Write([]byte(";"))
}
// DirectivePrologueStmt is a string literal at the beginning of a function or module (usually "use strict").
type DirectivePrologueStmt struct {
	Value []byte
}

func (n DirectivePrologueStmt) String() string {
	return "Stmt(" + string(n.Value) + ")"
}

// JS writes JavaScript to writer, bypassing any Indenter wrapping since a
// directive always starts a prologue at the outer level.
func (n DirectivePrologueStmt) JS(w io.Writer) {
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	w.Write(n.Value)
	w.Write([]byte(";"))
}
// The following empty marker methods tag these types as statement nodes so
// they satisfy the statement interface (IStmt).
func (n Comment) stmtNode()               {}
func (n BlockStmt) stmtNode()             {}
func (n EmptyStmt) stmtNode()             {}
func (n ExprStmt) stmtNode()              {}
func (n IfStmt) stmtNode()                {}
func (n DoWhileStmt) stmtNode()           {}
func (n WhileStmt) stmtNode()             {}
func (n ForStmt) stmtNode()               {}
func (n ForInStmt) stmtNode()             {}
func (n ForOfStmt) stmtNode()             {}
func (n SwitchStmt) stmtNode()            {}
func (n BranchStmt) stmtNode()            {}
func (n ReturnStmt) stmtNode()            {}
func (n WithStmt) stmtNode()              {}
func (n LabelledStmt) stmtNode()          {}
func (n ThrowStmt) stmtNode()             {}
func (n TryStmt) stmtNode()               {}
func (n DebuggerStmt) stmtNode()          {}
func (n ImportStmt) stmtNode()            {}
func (n ExportStmt) stmtNode()            {}
func (n DirectivePrologueStmt) stmtNode() {}
////////////////////////////////////////////////////////////////
// PropertyName is a property name for binding properties, method names, and in object literals.
type PropertyName struct {
	Literal  LiteralExpr
	Computed IExpr // can be nil
}

// IsSet returns true if PropertyName is set, i.e. not the zero value.
func (n PropertyName) IsSet() bool {
	return n.IsComputed() || n.Literal.TokenType != ErrorToken
}

// IsComputed returns true if PropertyName is computed.
func (n PropertyName) IsComputed() bool {
	return n.Computed != nil
}

// IsIdent returns true if PropertyName equals the given identifier name.
func (n PropertyName) IsIdent(data []byte) bool {
	return !n.IsComputed() && n.Literal.TokenType == IdentifierToken && bytes.Equal(data, n.Literal.Data)
}

func (n PropertyName) String() string {
	if n.Computed != nil {
		val := n.Computed.String()
		// strip redundant parentheses from a grouped computed expression
		if val[0] == '(' {
			return "[" + val[1:len(val)-1] + "]"
		}
		return "[" + val + "]"
	}
	return string(n.Literal.Data)
}

// JS writes JavaScript to writer; computed names are bracketed, literal names
// bypass any Indenter wrapping.
func (n PropertyName) JS(w io.Writer) {
	if n.Computed != nil {
		w.Write([]byte("["))
		n.Computed.JS(w)
		w.Write([]byte("]"))
		return
	}
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	w.Write(n.Literal.Data)
}
// BindingArray is an array binding pattern.
type BindingArray struct {
	List []BindingElement
	Rest IBinding // can be nil
}

func (n BindingArray) String() string {
	s := "["
	for i, item := range n.List {
		if i != 0 {
			s += ","
		}
		s += " " + item.String()
	}
	if n.Rest != nil {
		if len(n.List) != 0 {
			s += ","
		}
		s += " ...Binding(" + n.Rest.String() + ")"
	} else if 0 < len(n.List) && n.List[len(n.List)-1].Binding == nil {
		// trailing elision needs an explicit trailing comma
		s += ","
	}
	return s + " ]"
}

// JS writes JavaScript to writer. Elided elements (nil bindings) print as
// empty slots; a trailing elision keeps its comma so the arity is preserved.
func (n BindingArray) JS(w io.Writer) {
	w.Write([]byte("["))
	for j, item := range n.List {
		if j != 0 {
			w.Write([]byte(","))
		}
		if item.Binding != nil {
			if j != 0 {
				w.Write([]byte(" "))
			}
			item.JS(w)
		}
	}
	if n.Rest != nil {
		if len(n.List) != 0 {
			w.Write([]byte(", "))
		}
		w.Write([]byte("..."))
		n.Rest.JS(w)
	} else if 0 < len(n.List) && n.List[len(n.List)-1].Binding == nil {
		w.Write([]byte(","))
	}
	w.Write([]byte("]"))
}
// BindingObjectItem is a binding property.
type BindingObjectItem struct {
	Key   *PropertyName // can be nil
	Value BindingElement
}

func (n BindingObjectItem) String() string {
	s := ""
	if n.Key != nil {
		// omit the key for shorthand properties where key == bound variable
		if v, ok := n.Value.Binding.(*Var); !ok || !n.Key.IsIdent(v.Data) {
			s += " " + n.Key.String() + ":"
		}
	}
	return s + " " + n.Value.String()
}

// JS writes JavaScript to writer, using shorthand ({x} instead of {x: x})
// when the key names the bound variable.
func (n BindingObjectItem) JS(w io.Writer) {
	if n.Key != nil {
		if v, ok := n.Value.Binding.(*Var); !ok || !n.Key.IsIdent(v.Data) {
			n.Key.JS(w)
			w.Write([]byte(": "))
		}
	}
	n.Value.JS(w)
}
// BindingObject is an object binding pattern.
type BindingObject struct {
	List []BindingObjectItem
	Rest *Var // can be nil
}

func (n BindingObject) String() string {
	s := "{"
	for i, item := range n.List {
		if i != 0 {
			s += ","
		}
		s += item.String()
	}
	if n.Rest != nil {
		if len(n.List) != 0 {
			s += ","
		}
		s += " ...Binding(" + string(n.Rest.Data) + ")"
	}
	return s + " }"
}

// JS writes JavaScript to writer as {a, b: c, ...rest}.
func (n BindingObject) JS(w io.Writer) {
	w.Write([]byte("{"))
	for j, item := range n.List {
		if j != 0 {
			w.Write([]byte(", "))
		}
		item.JS(w)
	}
	if n.Rest != nil {
		if len(n.List) != 0 {
			w.Write([]byte(", "))
		}
		w.Write([]byte("..."))
		w.Write(n.Rest.Data)
	}
	w.Write([]byte("}"))
}
// BindingElement is a binding element.
type BindingElement struct {
	Binding IBinding // can be nil (in case of ellision)
	Default IExpr    // can be nil
}

// String returns the debug representation of the binding element.
func (n BindingElement) String() string {
	if n.Binding == nil {
		return "Binding()"
	}
	if n.Default == nil {
		return "Binding(" + n.Binding.String() + ")"
	}
	return "Binding(" + n.Binding.String() + " = " + n.Default.String() + ")"
}

// JS writes JavaScript to writer; an elided element writes nothing.
func (n BindingElement) JS(w io.Writer) {
	if n.Binding == nil {
		return
	}
	n.Binding.JS(w)
	if def := n.Default; def != nil {
		io.WriteString(w, " = ")
		def.JS(w)
	}
}
// Marker methods tagging these types as binding targets (IBinding).
func (v *Var) bindingNode()          {}
func (n BindingArray) bindingNode()  {}
func (n BindingObject) bindingNode() {}
////////////////////////////////////////////////////////////////
// VarDecl is a variable statement or lexical declaration.
type VarDecl struct {
	TokenType // var, let, or const keyword token (presumably; not visible here)
	List      []BindingElement
	Scope     *Scope
	// InFor and InForInOf flag declarations appearing in a for-statement
	// header; callers use this to decide on terminators.
	InFor, InForInOf bool
}

func (n VarDecl) String() string {
	s := "Decl(" + n.TokenType.String()
	for _, item := range n.List {
		s += " " + item.String()
	}
	return s + ")"
}

// JS writes JavaScript to writer. No trailing semicolon is written; the
// enclosing statement is responsible for termination.
func (n VarDecl) JS(w io.Writer) {
	w.Write(n.TokenType.Bytes())
	for j, item := range n.List {
		if j != 0 {
			w.Write([]byte(","))
		}
		w.Write([]byte(" "))
		item.JS(w)
	}
}
// Params is a list of parameters for functions, methods, and arrow function.
type Params struct {
	List []BindingElement
	Rest IBinding // can be nil
}

func (n Params) String() string {
	s := "Params("
	for i, item := range n.List {
		if i != 0 {
			s += ", "
		}
		s += item.String()
	}
	if n.Rest != nil {
		if len(n.List) != 0 {
			s += ", "
		}
		s += "...Binding(" + n.Rest.String() + ")"
	}
	return s + ")"
}

// JS writes JavaScript to writer, including the surrounding parentheses.
func (n Params) JS(w io.Writer) {
	w.Write([]byte("("))
	for j, item := range n.List {
		if j != 0 {
			w.Write([]byte(", "))
		}
		item.JS(w)
	}
	if n.Rest != nil {
		if len(n.List) != 0 {
			w.Write([]byte(", "))
		}
		w.Write([]byte("..."))
		n.Rest.JS(w)
	}
	w.Write([]byte(")"))
}
// FuncDecl is an (async) (generator) function declaration or expression.
type FuncDecl struct {
	Async     bool
	Generator bool
	Name      *Var // can be nil
	Params    Params
	Body      BlockStmt
}

func (n FuncDecl) String() string {
	s := "Decl("
	if n.Async {
		s += "async function"
	} else {
		s += "function"
	}
	if n.Generator {
		s += "*"
	}
	if n.Name != nil {
		s += " " + string(n.Name.Data)
	}
	return s + " " + n.Params.String() + " " + n.Body.String() + ")"
}

// JS writes JavaScript to writer; anonymous function expressions simply omit
// the name.
func (n FuncDecl) JS(w io.Writer) {
	if n.Async {
		w.Write([]byte("async function"))
	} else {
		w.Write([]byte("function"))
	}
	if n.Generator {
		w.Write([]byte("*"))
	}
	if n.Name != nil {
		w.Write([]byte(" "))
		w.Write(n.Name.Data)
	}
	n.Params.JS(w)
	w.Write([]byte(" "))
	n.Body.JS(w)
}
// MethodDecl is a method definition in a class declaration.
type MethodDecl struct {
	Static    bool
	Async     bool
	Generator bool
	Get       bool
	Set       bool
	Name      PropertyName
	Params    Params
	Body      BlockStmt
}

// modifiers collects the textual modifiers that precede the method name, in
// output order: static, async, *, get, set.
func (n MethodDecl) modifiers() []string {
	var mods []string
	if n.Static {
		mods = append(mods, "static")
	}
	if n.Async {
		mods = append(mods, "async")
	}
	if n.Generator {
		mods = append(mods, "*")
	}
	if n.Get {
		mods = append(mods, "get")
	}
	if n.Set {
		mods = append(mods, "set")
	}
	return mods
}

// String returns the debug representation of the method.
func (n MethodDecl) String() string {
	parts := append(n.modifiers(), n.Name.String(), n.Params.String(), n.Body.String())
	return "Method(" + strings.Join(parts, " ") + ")"
}

// JS writes JavaScript to writer; modifiers are space-separated and followed
// by a space before the method name.
func (n MethodDecl) JS(w io.Writer) {
	for _, mod := range n.modifiers() {
		io.WriteString(w, mod)
		io.WriteString(w, " ")
	}
	n.Name.JS(w)
	io.WriteString(w, " ")
	n.Params.JS(w)
	io.WriteString(w, " ")
	n.Body.JS(w)
}
// Field is a field definition in a class declaration.
type Field struct {
	Static bool
	Name   PropertyName
	Init   IExpr
}

// String returns the debug representation of the field.
func (n Field) String() string {
	prefix := ""
	if n.Static {
		prefix = "static "
	}
	suffix := ""
	if n.Init != nil {
		suffix = " = " + n.Init.String()
	}
	return "Field(" + prefix + n.Name.String() + suffix + ")"
}

// JS writes JavaScript to writer.
func (n Field) JS(w io.Writer) {
	if n.Static {
		io.WriteString(w, "static ")
	}
	n.Name.JS(w)
	if n.Init == nil {
		return
	}
	io.WriteString(w, " = ")
	n.Init.JS(w)
}
// ClassElement is a class element that is either a static block, a field definition, or a class method
type ClassElement struct {
	StaticBlock *BlockStmt  // can be nil
	Method      *MethodDecl // can be nil
	Field
}

func (n ClassElement) String() string {
	if n.StaticBlock != nil {
		return "Static(" + n.StaticBlock.String() + ")"
	} else if n.Method != nil {
		return n.Method.String()
	}
	return n.Field.String()
}

// JS writes JavaScript to writer. Only field definitions receive a trailing
// semicolon; static blocks and methods end with their closing brace.
func (n ClassElement) JS(w io.Writer) {
	if n.StaticBlock != nil {
		w.Write([]byte("static "))
		n.StaticBlock.JS(w)
		return
	} else if n.Method != nil {
		n.Method.JS(w)
		return
	}
	n.Field.JS(w)
	w.Write([]byte(";"))
}
// ClassDecl is a class declaration.
type ClassDecl struct {
	Name    *Var  // can be nil
	Extends IExpr // can be nil
	List    []ClassElement
}

func (n ClassDecl) String() string {
	s := "Decl(class"
	if n.Name != nil {
		s += " " + string(n.Name.Data)
	}
	if n.Extends != nil {
		s += " extends " + n.Extends.String()
	}
	for _, item := range n.List {
		s += " " + item.String()
	}
	return s + ")"
}

// JS writes JavaScript to writer. An empty class prints as "{}"; otherwise
// each element is written on its own line, indented by four spaces.
func (n ClassDecl) JS(w io.Writer) {
	w.Write([]byte("class"))
	if n.Name != nil {
		w.Write([]byte(" "))
		w.Write(n.Name.Data)
	}
	if n.Extends != nil {
		w.Write([]byte(" extends "))
		n.Extends.JS(w)
	}
	if len(n.List) == 0 {
		w.Write([]byte(" {}"))
		return
	}
	w.Write([]byte(" {"))
	wi := parse.NewIndenter(w, 4)
	for _, item := range n.List {
		wi.Write([]byte("\n"))
		item.JS(wi)
	}
	w.Write([]byte("\n}"))
}
// Declarations double as statements; VarDecl, ClassDecl, and MethodDecl are
// additionally usable in expression positions (see inline notes).
func (n VarDecl) stmtNode()    {}
func (n FuncDecl) stmtNode()   {}
func (n ClassDecl) stmtNode()  {}
func (n VarDecl) exprNode()    {} // not a real IExpr, used for ForInit and ExportDecl
func (n FuncDecl) exprNode()   {}
func (n ClassDecl) exprNode()  {}
func (n MethodDecl) exprNode() {} // not a real IExpr, used for ObjectExpression PropertyName
////////////////////////////////////////////////////////////////
// LiteralExpr can be this, null, boolean, numeric, string, or regular expression literals.
type LiteralExpr struct {
	TokenType
	Data []byte
}

func (n LiteralExpr) String() string {
	return string(n.Data)
}

// JS writes JavaScript to writer, bypassing any Indenter wrapping.
func (n LiteralExpr) JS(w io.Writer) {
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	w.Write(n.Data)
}

// JSON writes JSON to writer. Booleans, null, and numbers pass through
// unchanged; single-quoted strings are rewritten into double-quoted JSON
// strings. Any other literal kind yields an ErrInvalidJSON error.
func (n LiteralExpr) JSON(w io.Writer) error {
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	if n.TokenType == TrueToken || n.TokenType == FalseToken || n.TokenType == NullToken || n.TokenType == DecimalToken || n.TokenType == IntegerToken {
		w.Write(n.Data)
		return nil
	} else if n.TokenType == StringToken {
		data := n.Data
		if n.Data[0] == '\'' {
			// copy before rewriting so the AST's original bytes stay intact
			data = parse.Copy(data)
			data = bytes.ReplaceAll(data, []byte(`\'`), []byte(`'`))
			data = bytes.ReplaceAll(data, []byte(`"`), []byte(`\"`))
			data[0] = '"'
			data[len(data)-1] = '"'
		}
		w.Write(data)
		return nil
	}
	js := &strings.Builder{}
	n.JS(js)
	return fmt.Errorf("%v: literal expression is not valid JSON: %v", ErrInvalidJSON, js.String())
}
// Element is an array literal element.
type Element struct {
	Value  IExpr // can be nil
	Spread bool
}

// String returns the debug representation of the element; an elided element
// is the empty string.
func (n Element) String() string {
	if n.Value == nil {
		return ""
	}
	if n.Spread {
		return "..." + n.Value.String()
	}
	return n.Value.String()
}

// JS writes JavaScript to writer; an elided element writes nothing.
func (n Element) JS(w io.Writer) {
	if n.Value == nil {
		return
	}
	if n.Spread {
		io.WriteString(w, "...")
	}
	n.Value.JS(w)
}
// ArrayExpr is an array literal.
type ArrayExpr struct {
	List []Element
}

func (n ArrayExpr) String() string {
	s := "["
	for i, item := range n.List {
		if i != 0 {
			s += ", "
		}
		if item.Value != nil {
			if item.Spread {
				s += "..."
			}
			s += item.Value.String()
		}
	}
	// a trailing elision needs an explicit trailing comma
	if 0 < len(n.List) && n.List[len(n.List)-1].Value == nil {
		s += ","
	}
	return s + "]"
}

// JS writes JavaScript to writer; elided elements print as empty slots.
func (n ArrayExpr) JS(w io.Writer) {
	w.Write([]byte("["))
	for j, item := range n.List {
		if j != 0 {
			w.Write([]byte(", "))
		}
		if item.Value != nil {
			if item.Spread {
				w.Write([]byte("..."))
			}
			item.Value.JS(w)
		}
	}
	if 0 < len(n.List) && n.List[len(n.List)-1].Value == nil {
		w.Write([]byte(","))
	}
	w.Write([]byte("]"))
}

// JSON writes JSON to writer. Elisions and spread elements have no JSON
// equivalent and produce an ErrInvalidJSON error.
func (n ArrayExpr) JSON(w io.Writer) error {
	w.Write([]byte("["))
	for i, item := range n.List {
		if i != 0 {
			w.Write([]byte(", "))
		}
		if item.Value == nil || item.Spread {
			js := &strings.Builder{}
			n.JS(js)
			return fmt.Errorf("%v: array literal is not valid JSON: %v", ErrInvalidJSON, js.String())
		}
		if val, ok := item.Value.(JSONer); !ok {
			js := &strings.Builder{}
			item.Value.JS(js)
			return fmt.Errorf("%v: value is not valid JSON: %v", ErrInvalidJSON, js.String())
		} else if err := val.JSON(w); err != nil {
			return err
		}
	}
	w.Write([]byte("]"))
	return nil
}
// Property is a property definition in an object literal.
type Property struct {
	// either Name or Spread are set. When Spread is set then Value is AssignmentExpression
	// if Init is set then Value is IdentifierReference, otherwise it can also be MethodDefinition
	Name   *PropertyName // can be nil
	Spread bool
	Value  IExpr
	Init   IExpr // can be nil
}

func (n Property) String() string {
	s := ""
	if n.Name != nil {
		// omit the key for shorthand properties where key == value identifier
		if v, ok := n.Value.(*Var); !ok || !n.Name.IsIdent(v.Data) {
			s += n.Name.String() + ": "
		}
	} else if n.Spread {
		s += "..."
	}
	s += n.Value.String()
	if n.Init != nil {
		s += " = " + n.Init.String()
	}
	return s
}

// JS writes JavaScript to writer, using shorthand notation where possible.
func (n Property) JS(w io.Writer) {
	if n.Name != nil {
		if v, ok := n.Value.(*Var); !ok || !n.Name.IsIdent(v.Data) {
			n.Name.JS(w)
			w.Write([]byte(": "))
		}
	} else if n.Spread {
		w.Write([]byte("..."))
	}
	n.Value.JS(w)
	if n.Init != nil {
		w.Write([]byte(" = "))
		n.Init.JS(w)
	}
}

// JSON writes JSON to writer. Shorthand, spread, and initializer forms are
// invalid; identifier and numeric keys are double-quoted to form valid JSON.
func (n Property) JSON(w io.Writer) error {
	if n.Name == nil || n.Spread || n.Init != nil {
		js := &strings.Builder{}
		n.JS(js)
		return fmt.Errorf("%v: property is not valid JSON: %v", ErrInvalidJSON, js.String())
	} else if n.Name.Literal.TokenType == StringToken {
		_ = n.Name.Literal.JSON(w)
	} else if n.Name.Literal.TokenType == IdentifierToken || n.Name.Literal.TokenType == IntegerToken || n.Name.Literal.TokenType == DecimalToken {
		w.Write([]byte(`"`))
		w.Write(n.Name.Literal.Data)
		w.Write([]byte(`"`))
	} else {
		js := &strings.Builder{}
		n.JS(js)
		return fmt.Errorf("%v: property is not valid JSON: %v", ErrInvalidJSON, js.String())
	}
	w.Write([]byte(": "))
	if val, ok := n.Value.(JSONer); !ok {
		js := &strings.Builder{}
		n.Value.JS(js)
		return fmt.Errorf("%v: value is not valid JSON: %v", ErrInvalidJSON, js.String())
	} else if err := val.JSON(w); err != nil {
		return err
	}
	return nil
}
// ObjectExpr is an object literal.
type ObjectExpr struct {
	List []Property
}

// String returns the debug representation of the object literal.
func (n ObjectExpr) String() string {
	parts := make([]string, 0, len(n.List))
	for _, item := range n.List {
		parts = append(parts, item.String())
	}
	return "{" + strings.Join(parts, ", ") + "}"
}

// JS writes JavaScript to writer.
func (n ObjectExpr) JS(w io.Writer) {
	io.WriteString(w, "{")
	for j, item := range n.List {
		if 0 < j {
			io.WriteString(w, ", ")
		}
		item.JS(w)
	}
	io.WriteString(w, "}")
}

// JSON writes JSON to writer, delegating validity checks to each property.
func (n ObjectExpr) JSON(w io.Writer) error {
	io.WriteString(w, "{")
	for i, item := range n.List {
		if 0 < i {
			io.WriteString(w, ", ")
		}
		if err := item.JSON(w); err != nil {
			return err
		}
	}
	io.WriteString(w, "}")
	return nil
}
// TemplatePart is a template head or middle: the raw text chunk followed by
// its interpolated expression.
type TemplatePart struct {
	Value []byte
	Expr  IExpr
}

// String returns the debug representation of the template part.
func (n TemplatePart) String() string {
	s := string(n.Value)
	return s + n.Expr.String()
}

// JS writes JavaScript to writer.
func (n TemplatePart) JS(w io.Writer) {
	w.Write(n.Value)
	n.Expr.JS(w)
}
// TemplateExpr is a template literal or member/call expression, super property, or optional chain with template literal.
type TemplateExpr struct {
	Tag      IExpr // can be nil
	List     []TemplatePart
	Tail     []byte
	Prec     OpPrec
	Optional bool
}

func (n TemplateExpr) String() string {
	s := ""
	if n.Tag != nil {
		s += n.Tag.String()
		if n.Optional {
			s += "?."
		}
	}
	for _, item := range n.List {
		s += item.String()
	}
	return s + string(n.Tail)
}

// JS writes JavaScript to writer, bypassing any Indenter wrapping so template
// contents are emitted verbatim.
func (n TemplateExpr) JS(w io.Writer) {
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	if n.Tag != nil {
		n.Tag.JS(w)
		if n.Optional {
			w.Write([]byte("?."))
		}
	}
	for _, item := range n.List {
		item.JS(w)
	}
	w.Write(n.Tail)
}

// JSON writes JSON to writer. Only an untagged template without
// interpolations can be converted; its backticks become double quotes.
func (n TemplateExpr) JSON(w io.Writer) error {
	if wi, ok := w.(parse.Indenter); ok {
		w = wi.Writer
	}
	if n.Tag != nil || len(n.List) != 0 {
		js := &strings.Builder{}
		n.JS(js)
		return fmt.Errorf("%v: value is not valid JSON: %v", ErrInvalidJSON, js.String())
	}
	// allow template literal string to be converted to normal string (to allow for minified JS)
	// NOTE(review): assumes Tail includes the surrounding backticks, so
	// len(data) >= 2 — confirm with the lexer before relying on data[0]/[len-1].
	data := parse.Copy(n.Tail)
	data = bytes.ReplaceAll(data, []byte("\n"), []byte("\\n"))
	data = bytes.ReplaceAll(data, []byte("\r"), []byte("\\r"))
	data = bytes.ReplaceAll(data, []byte("\\`"), []byte("`"))
	data = bytes.ReplaceAll(data, []byte("\\$"), []byte("$"))
	data = bytes.ReplaceAll(data, []byte(`"`), []byte(`\"`))
	data[0] = '"'
	data[len(data)-1] = '"'
	w.Write(data)
	return nil
}
// GroupExpr is a parenthesized expression.
type GroupExpr struct {
	X IExpr
}

// String returns the debug representation of the group.
func (n GroupExpr) String() string {
	s := "("
	s += n.X.String()
	return s + ")"
}

// JS writes JavaScript to writer.
func (n GroupExpr) JS(w io.Writer) {
	io.WriteString(w, "(")
	n.X.JS(w)
	io.WriteString(w, ")")
}
// IndexExpr is a member/call expression, super property, or optional chain with an index expression.
type IndexExpr struct {
	X        IExpr
	Y        IExpr
	Prec     OpPrec
	Optional bool
}

// String returns the debug representation of the index expression.
func (n IndexExpr) String() string {
	chain := ""
	if n.Optional {
		chain = "?."
	}
	return "(" + n.X.String() + chain + "[" + n.Y.String() + "])"
}

// JS writes JavaScript to writer.
func (n IndexExpr) JS(w io.Writer) {
	n.X.JS(w)
	open := "["
	if n.Optional {
		open = "?.["
	}
	io.WriteString(w, open)
	n.Y.JS(w)
	io.WriteString(w, "]")
}
// DotExpr is a member/call expression, super property, or optional chain with a dot expression.
type DotExpr struct {
	X        IExpr
	Y        LiteralExpr
	Prec     OpPrec
	Optional bool
}

func (n DotExpr) String() string {
	if n.Optional {
		return "(" + n.X.String() + "?." + n.Y.String() + ")"
	}
	return "(" + n.X.String() + "." + n.Y.String() + ")"
}

// JS writes JavaScript to writer. A plain numeric receiver is parenthesized,
// as in (5).toString(), so the dot cannot be read as a decimal point; the
// optional chain operator "?." needs no such grouping.
func (n DotExpr) JS(w io.Writer) {
	lit, ok := n.X.(*LiteralExpr)
	group := ok && !n.Optional && (lit.TokenType == DecimalToken || lit.TokenType == IntegerToken)
	if group {
		w.Write([]byte("("))
	}
	n.X.JS(w)
	if n.Optional {
		w.Write([]byte("?."))
	} else {
		if group {
			w.Write([]byte(")"))
		}
		w.Write([]byte("."))
	}
	n.Y.JS(w)
}
// NewTargetExpr is a new target meta property.
type NewTargetExpr struct{}

// String returns the debug representation of the meta property.
func (n NewTargetExpr) String() string {
	return "(new.target)"
}

// JS writes JavaScript to writer.
func (n NewTargetExpr) JS(w io.Writer) {
	io.WriteString(w, "new.target")
}
// ImportMetaExpr is a import meta meta property.
type ImportMetaExpr struct{}

// String returns the debug representation of the meta property.
func (n ImportMetaExpr) String() string {
	return "(import.meta)"
}

// JS writes JavaScript to writer.
func (n ImportMetaExpr) JS(w io.Writer) {
	io.WriteString(w, "import.meta")
}
// Arg is a single argument in an argument list, optionally a rest/spread
// argument.
type Arg struct {
	Value IExpr
	Rest  bool
}

// String returns the debug representation of the argument.
func (n Arg) String() string {
	if n.Rest {
		return "..." + n.Value.String()
	}
	return n.Value.String()
}

// JS writes JavaScript to writer.
func (n Arg) JS(w io.Writer) {
	if n.Rest {
		io.WriteString(w, "...")
	}
	n.Value.JS(w)
}
// Args is a list of arguments as used by new and call expressions.
type Args struct {
	List []Arg
}

// String returns the debug representation, including parentheses.
func (n Args) String() string {
	parts := make([]string, 0, len(n.List))
	for _, item := range n.List {
		parts = append(parts, item.String())
	}
	return "(" + strings.Join(parts, ", ") + ")"
}

// JS writes JavaScript to writer. The surrounding parentheses are written by
// the caller (NewExpr/CallExpr), not here.
func (n Args) JS(w io.Writer) {
	for j, item := range n.List {
		if 0 < j {
			io.WriteString(w, ", ")
		}
		item.JS(w)
	}
}
// NewExpr is a new expression or new member expression.
type NewExpr struct {
	X    IExpr
	Args *Args // can be nil
}

// String returns the debug representation of the new expression.
func (n NewExpr) String() string {
	s := "(new " + n.X.String()
	if n.Args != nil {
		s += n.Args.String()
	}
	return s + ")"
}

// JS writes JavaScript to writer; a missing argument list still prints as
// "()" so the output is unambiguous.
func (n NewExpr) JS(w io.Writer) {
	io.WriteString(w, "new ")
	n.X.JS(w)
	io.WriteString(w, "(")
	if n.Args != nil {
		n.Args.JS(w)
	}
	io.WriteString(w, ")")
}
// CallExpr is a call expression.
type CallExpr struct {
	X        IExpr
	Args     Args
	Optional bool
}

// String returns the debug representation of the call expression.
func (n CallExpr) String() string {
	chain := ""
	if n.Optional {
		chain = "?."
	}
	return "(" + n.X.String() + chain + n.Args.String() + ")"
}

// JS writes JavaScript to writer; an optional call is written as "x?.(args)".
func (n CallExpr) JS(w io.Writer) {
	n.X.JS(w)
	if n.Optional {
		io.WriteString(w, "?.")
	}
	io.WriteString(w, "(")
	n.Args.JS(w)
	io.WriteString(w, ")")
}
// UnaryExpr is an update or unary expression.
type UnaryExpr struct {
	Op TokenType
	X  IExpr
}

func (n UnaryExpr) String() string {
	if n.Op == PostIncrToken || n.Op == PostDecrToken {
		return "(" + n.X.String() + n.Op.String() + ")"
	} else if IsIdentifierName(n.Op) {
		return "(" + n.Op.String() + " " + n.X.String() + ")"
	}
	return "(" + n.Op.String() + n.X.String() + ")"
}

// JS writes JavaScript to writer. A space is inserted after the operator for
// word operators (typeof, void, delete, ...) and when emitting the operators
// adjacently would merge them into a different token (e.g. "- -x" must not
// become "--x").
func (n UnaryExpr) JS(w io.Writer) {
	if n.Op == PostIncrToken || n.Op == PostDecrToken {
		n.X.JS(w)
		w.Write(n.Op.Bytes())
		return
	} else if unary, ok := n.X.(*UnaryExpr); ok && (n.Op == PosToken && (unary.Op == PreIncrToken || unary.Op == PosToken) || n.Op == NegToken && (unary.Op == PreDecrToken || unary.Op == NegToken)) || IsIdentifierName(n.Op) {
		w.Write(n.Op.Bytes())
		w.Write([]byte(" "))
		n.X.JS(w)
		return
	}
	w.Write(n.Op.Bytes())
	n.X.JS(w)
}

// JSON writes JSON to writer. Only negated number literals (e.g. -5) and the
// minified boolean forms !0 (true) and !1 (false) are representable; anything
// else returns an ErrInvalidJSON error.
func (n UnaryExpr) JSON(w io.Writer) error {
	if lit, ok := n.X.(*LiteralExpr); ok && n.Op == NegToken && (lit.TokenType == DecimalToken || lit.TokenType == IntegerToken) {
		w.Write([]byte("-"))
		w.Write(lit.Data)
		return nil
	} else if ok && n.Op == NotToken && lit.TokenType == IntegerToken && (lit.Data[0] == '0' || lit.Data[0] == '1') {
		// BUGFIX: the "ok &&" guard is required here. Without it, lit is nil
		// whenever X is not a *LiteralExpr, and a NotToken operand would
		// panic with a nil dereference instead of falling through to the
		// ErrInvalidJSON error below.
		if lit.Data[0] == '0' {
			w.Write([]byte("true"))
		} else {
			w.Write([]byte("false"))
		}
		return nil
	}
	js := &strings.Builder{}
	n.JS(js)
	return fmt.Errorf("%v: unary expression is not valid JSON: %v", ErrInvalidJSON, js.String())
}
// BinaryExpr is a binary expression.
type BinaryExpr struct {
	Op   TokenType
	X, Y IExpr
}

// String returns the debug representation; word operators (in, instanceof)
// are space-separated, symbol operators are not.
func (n BinaryExpr) String() string {
	sep := ""
	if IsIdentifierName(n.Op) {
		sep = " "
	}
	return "(" + n.X.String() + sep + n.Op.String() + sep + n.Y.String() + ")"
}

// JS writes JavaScript to writer with spaces around the operator.
func (n BinaryExpr) JS(w io.Writer) {
	n.X.JS(w)
	io.WriteString(w, " ")
	w.Write(n.Op.Bytes())
	io.WriteString(w, " ")
	n.Y.JS(w)
}
// CondExpr is a conditional expression.
type CondExpr struct {
	Cond, X, Y IExpr
}

// String returns the debug representation of the conditional.
func (n CondExpr) String() string {
	s := "(" + n.Cond.String()
	s += " ? " + n.X.String()
	s += " : " + n.Y.String()
	return s + ")"
}

// JS writes JavaScript to writer.
func (n CondExpr) JS(w io.Writer) {
	n.Cond.JS(w)
	io.WriteString(w, " ? ")
	n.X.JS(w)
	io.WriteString(w, " : ")
	n.Y.JS(w)
}
// YieldExpr is a yield expression.
type YieldExpr struct {
	Generator bool
	X IExpr // can be nil
}

// String returns "(yield)", "(yield x)", or "(yield* x)".
func (n YieldExpr) String() string {
	if n.X == nil {
		return "(yield)"
	}
	star := ""
	if n.Generator {
		star = "*"
	}
	return "(yield" + star + " " + n.X.String() + ")"
}

// JS writes JavaScript to writer: "yield", "yield x", or "yield* x".
func (n YieldExpr) JS(w io.Writer) {
	w.Write([]byte("yield"))
	if n.X == nil {
		return
	}
	if n.Generator {
		w.Write([]byte("*"))
	}
	w.Write([]byte(" "))
	n.X.JS(w)
}
// ArrowFunc is an (async) arrow function.
type ArrowFunc struct {
	Async bool
	Params Params
	Body BlockStmt
}

// String returns the parenthesized string representation, e.g. "((a) => { ... })".
func (n ArrowFunc) String() string {
	prefix := "("
	if n.Async {
		prefix += "async "
	}
	return prefix + n.Params.String() + " => " + n.Body.String() + ")"
}

// JS writes JavaScript to writer, prefixing "async " for async arrow functions.
func (n ArrowFunc) JS(w io.Writer) {
	if n.Async {
		w.Write([]byte("async "))
	}
	n.Params.JS(w)
	w.Write([]byte(" => "))
	n.Body.JS(w)
}
// CommaExpr is a series of comma expressions.
type CommaExpr struct {
	List []IExpr
}

// String returns the parenthesized, comma-joined string representation.
func (n CommaExpr) String() string {
	parts := make([]string, 0, len(n.List))
	for _, item := range n.List {
		parts = append(parts, item.String())
	}
	return "(" + strings.Join(parts, ",") + ")"
}

// JS writes JavaScript to writer, separating the expressions with commas.
func (n CommaExpr) JS(w io.Writer) {
	comma := []byte(",")
	for i, item := range n.List {
		if i != 0 {
			w.Write(comma)
		}
		item.JS(w)
	}
}
// The empty exprNode methods below mark these types as expression nodes,
// satisfying the IExpr interface.
func (v *Var) exprNode() {}
func (n LiteralExpr) exprNode() {}
func (n ArrayExpr) exprNode() {}
func (n ObjectExpr) exprNode() {}
func (n TemplateExpr) exprNode() {}
func (n GroupExpr) exprNode() {}
func (n DotExpr) exprNode() {}
func (n IndexExpr) exprNode() {}
func (n NewTargetExpr) exprNode() {}
func (n ImportMetaExpr) exprNode() {}
func (n NewExpr) exprNode() {}
func (n CallExpr) exprNode() {}
func (n UnaryExpr) exprNode() {}
func (n BinaryExpr) exprNode() {}
func (n CondExpr) exprNode() {}
func (n YieldExpr) exprNode() {}
func (n ArrowFunc) exprNode() {}
func (n CommaExpr) exprNode() {}
// Package js is an ECMAScript5.1 lexer following the specifications at http://www.ecma-international.org/ecma-262/5.1/.
package js
import (
"unicode"
"unicode/utf8"
"github.com/tdewolff/parse/v2"
)
// identifierStart and identifierContinue list the unicode categories allowed
// for the first and the subsequent characters of an identifier (following the
// ID_Start / ID_Continue definitions).
var identifierStart = []*unicode.RangeTable{unicode.Lu, unicode.Ll, unicode.Lt, unicode.Lm, unicode.Lo, unicode.Nl, unicode.Other_ID_Start}
var identifierContinue = []*unicode.RangeTable{unicode.Lu, unicode.Ll, unicode.Lt, unicode.Lm, unicode.Lo, unicode.Nl, unicode.Mn, unicode.Mc, unicode.Nd, unicode.Pc, unicode.Other_ID_Continue}
// IsIdentifierStart returns true if the byte-slice start is the start of an identifier.
func IsIdentifierStart(b []byte) bool {
	r, _ := utf8.DecodeRune(b)
	switch r {
	case '$', '\\', '_':
		return true
	}
	return unicode.IsOneOf(identifierStart, r)
}

// IsIdentifierContinue returns true if the byte-slice start is a continuation of an identifier.
func IsIdentifierContinue(b []byte) bool {
	r, _ := utf8.DecodeRune(b)
	switch r {
	case '$', '\\', '\u200C', '\u200D':
		return true
	}
	return unicode.IsOneOf(identifierContinue, r)
}

// IsIdentifierEnd returns true if the byte-slice end is a start or continuation of an identifier.
func IsIdentifierEnd(b []byte) bool {
	r, _ := utf8.DecodeLastRune(b)
	switch r {
	case '$', '\\', '\u200C', '\u200D':
		return true
	}
	return unicode.IsOneOf(identifierContinue, r)
}
////////////////////////////////////////////////////////////////
// Lexer is the state for the lexer.
type Lexer struct {
	r *parse.Input // buffered input stream
	err error // sticky lexer error, retrieved via Err
	prevLineTerminator bool // previously returned token was a line terminator
	prevNumericLiteral bool // previously returned token was a numeric literal
	level int // current nesting level of ( and {
	templateLevels []int // nesting levels at which template literals were opened
}
// NewLexer returns a new Lexer for a given io.Reader.
func NewLexer(r *parse.Input) *Lexer {
	l := &Lexer{
		r: r,
		prevLineTerminator: true,
		templateLevels: []int{},
	}
	return l
}

// Err returns the error encountered during lexing; this is often io.EOF but
// other errors can be returned as well.
func (l *Lexer) Err() error {
	if err := l.err; err != nil {
		return err
	}
	return l.r.Err()
}
// RegExp reparses the input stream for a regular expression. It is assumed that we just received DivToken or DivEqToken with Next(). This function will go back and read that as a regular expression.
func (l *Lexer) RegExp() (TokenType, []byte) {
	if 0 < l.r.Offset() && l.r.Peek(-1) == '/' {
		// previous token was DivToken: step back onto the /
		l.r.Move(-1)
	} else if 1 < l.r.Offset() && l.r.Peek(-1) == '=' && l.r.Peek(-2) == '/' {
		// previous token was DivEqToken: step back onto the /=
		l.r.Move(-2)
	} else {
		l.err = parse.NewErrorLexer(l.r, "expected / or /=")
		return ErrorToken, nil
	}
	l.r.Skip() // trick to set start = pos
	if l.consumeRegExpToken() {
		return RegExpToken, l.r.Shift()
	}
	l.err = parse.NewErrorLexer(l.r, "unexpected EOF or newline")
	return ErrorToken, nil
}
// Next returns the next Token. It returns ErrorToken when an error was encountered. Using Err() one can retrieve the error message.
func (l *Lexer) Next() (TokenType, []byte) {
	l.err = nil // clear error from previous ErrorToken
	// the flags describe the token BEFORE this call; save and reset them here
	prevLineTerminator := l.prevLineTerminator
	l.prevLineTerminator = false
	prevNumericLiteral := l.prevNumericLiteral
	l.prevNumericLiteral = false
	// study on 50x jQuery shows:
	// spaces: 20k
	// alpha: 16k
	// newlines: 14.4k
	// operators: 4k
	// numbers and dot: 3.6k
	// (): 3.4k
	// {}: 1.8k
	// []: 0.9k
	// "': 1k
	// semicolon: 2.4k
	// colon: 0.8k
	// comma: 2.4k
	// slash: 1.4k
	// `~: almost 0
	c := l.r.Peek(0)
	switch c {
	case ' ', '\t', '\v', '\f':
		l.r.Move(1)
		for l.consumeWhitespace() {
		}
		l.prevLineTerminator = prevLineTerminator // whitespace does not clear the LT flag
		return WhitespaceToken, l.r.Shift()
	case '\n', '\r':
		l.r.Move(1)
		for l.consumeLineTerminator() {
		}
		l.prevLineTerminator = true
		return LineTerminatorToken, l.r.Shift()
	case '>', '=', '!', '+', '*', '%', '&', '|', '^', '~', '?':
		if tt := l.consumeOperatorToken(); tt != ErrorToken {
			return tt, l.r.Shift()
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
		if tt := l.consumeNumericToken(); tt != ErrorToken || l.r.Pos() != 0 {
			l.prevNumericLiteral = true
			return tt, l.r.Shift()
		} else if c == '.' {
			// not a numeric literal: distinguish . from ...
			l.r.Move(1)
			if l.r.Peek(0) == '.' && l.r.Peek(1) == '.' {
				l.r.Move(2)
				return EllipsisToken, l.r.Shift()
			}
			return DotToken, l.r.Shift()
		}
	case ',':
		l.r.Move(1)
		return CommaToken, l.r.Shift()
	case ';':
		l.r.Move(1)
		return SemicolonToken, l.r.Shift()
	case '(':
		l.level++
		l.r.Move(1)
		return OpenParenToken, l.r.Shift()
	case ')':
		l.level--
		l.r.Move(1)
		return CloseParenToken, l.r.Shift()
	case '/':
		if tt := l.consumeCommentToken(); tt != ErrorToken || l.err != nil {
			if l.err != nil {
				return ErrorToken, nil
			}
			return tt, l.r.Shift()
		} else if tt := l.consumeOperatorToken(); tt != ErrorToken {
			return tt, l.r.Shift()
		}
	case '{':
		l.level++
		l.r.Move(1)
		return OpenBraceToken, l.r.Shift()
	case '}':
		l.level--
		if len(l.templateLevels) != 0 && l.level == l.templateLevels[len(l.templateLevels)-1] {
			// this } resumes a template literal interrupted by ${ ... }
			return l.consumeTemplateToken(), l.r.Shift()
		}
		l.r.Move(1)
		return CloseBraceToken, l.r.Shift()
	case ':':
		l.r.Move(1)
		return ColonToken, l.r.Shift()
	case '\'', '"':
		return l.consumeStringToken(), l.r.Shift()
	case ']':
		l.r.Move(1)
		return CloseBracketToken, l.r.Shift()
	case '[':
		l.r.Move(1)
		return OpenBracketToken, l.r.Shift()
	case '<', '-':
		if l.consumeHTMLLikeCommentToken(prevLineTerminator) {
			return CommentToken, l.r.Shift()
		} else if tt := l.consumeOperatorToken(); tt != ErrorToken {
			return tt, l.r.Shift()
		}
	case '`':
		l.templateLevels = append(l.templateLevels, l.level)
		return l.consumeTemplateToken(), l.r.Shift()
	case '#':
		l.r.Move(1)
		if l.consumeIdentifierToken() {
			return PrivateIdentifierToken, l.r.Shift()
		}
	default:
		if l.consumeIdentifierToken() {
			if prevNumericLiteral {
				// e.g. "5in" is a syntax error
				l.err = parse.NewErrorLexer(l.r, "unexpected identifier after number")
				return ErrorToken, nil
			} else if keyword, ok := Keywords[string(l.r.Lexeme())]; ok {
				return keyword, l.r.Shift()
			}
			return IdentifierToken, l.r.Shift()
		}
		if 0xC0 <= c {
			// non-ASCII lead byte: may still be unicode whitespace or a line terminator
			if l.consumeWhitespace() {
				for l.consumeWhitespace() {
				}
				l.prevLineTerminator = prevLineTerminator
				return WhitespaceToken, l.r.Shift()
			} else if l.consumeLineTerminator() {
				for l.consumeLineTerminator() {
				}
				l.prevLineTerminator = true
				return LineTerminatorToken, l.r.Shift()
			}
		} else if c == 0 && l.r.Err() != nil {
			return ErrorToken, nil
		}
	}
	// nothing matched: report the offending rune and skip it
	r, _ := l.r.PeekRune(0)
	l.err = parse.NewErrorLexer(l.r, "unexpected %s", parse.Printable(r))
	l.r.MoveRune() // allow to continue after error
	return ErrorToken, l.r.Shift()
}
////////////////////////////////////////////////////////////////

/*
The following functions follow the specifications at http://www.ecma-international.org/ecma-262/5.1/
*/

// consumeWhitespace advances over a single whitespace character (ASCII
// space/tab/VT/FF or a unicode space) and reports whether one was consumed.
func (l *Lexer) consumeWhitespace() bool {
	switch c := l.r.Peek(0); c {
	case ' ', '\t', '\v', '\f':
		l.r.Move(1)
		return true
	default:
		if c < 0xC0 {
			return false
		}
		if r, n := l.r.PeekRune(0); r == '\u00A0' || r == '\uFEFF' || unicode.Is(unicode.Zs, r) {
			l.r.Move(n)
			return true
		}
		return false
	}
}

// isLineTerminator reports whether the reader is positioned on a line
// terminator (LF, CR, U+2028, or U+2029) without consuming it.
func (l *Lexer) isLineTerminator() bool {
	switch l.r.Peek(0) {
	case '\n', '\r':
		return true
	case 0xE2: // possible UTF-8 lead byte of U+2028 / U+2029
		return l.r.Peek(1) == 0x80 && (l.r.Peek(2) == 0xA8 || l.r.Peek(2) == 0xA9)
	}
	return false
}

// consumeLineTerminator advances over one line terminator (treating CRLF as a
// single terminator) and reports whether one was consumed.
func (l *Lexer) consumeLineTerminator() bool {
	switch l.r.Peek(0) {
	case '\n':
		l.r.Move(1)
		return true
	case '\r':
		if l.r.Peek(1) == '\n' {
			l.r.Move(2) // CRLF counts as one terminator
		} else {
			l.r.Move(1)
		}
		return true
	case 0xE2:
		if l.r.Peek(1) == 0x80 && (l.r.Peek(2) == 0xA8 || l.r.Peek(2) == 0xA9) {
			l.r.Move(3) // U+2028 or U+2029
			return true
		}
	}
	return false
}
// consumeDigit advances over a single decimal digit and reports success.
func (l *Lexer) consumeDigit() bool {
	c := l.r.Peek(0)
	if '0' <= c && c <= '9' {
		l.r.Move(1)
		return true
	}
	return false
}

// consumeHexDigit advances over a single hexadecimal digit and reports success.
func (l *Lexer) consumeHexDigit() bool {
	c := l.r.Peek(0)
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		l.r.Move(1)
		return true
	}
	return false
}

// consumeBinaryDigit advances over a single binary digit and reports success.
func (l *Lexer) consumeBinaryDigit() bool {
	switch l.r.Peek(0) {
	case '0', '1':
		l.r.Move(1)
		return true
	}
	return false
}

// consumeOctalDigit advances over a single octal digit and reports success.
func (l *Lexer) consumeOctalDigit() bool {
	c := l.r.Peek(0)
	if '0' <= c && c <= '7' {
		l.r.Move(1)
		return true
	}
	return false
}
// consumeUnicodeEscape advances over a unicode escape sequence, either
// \u{XXXX...} or \uXXXX, and reports whether one was consumed. On a malformed
// escape the reader is rewound to where it started and false is returned.
func (l *Lexer) consumeUnicodeEscape() bool {
	if l.r.Peek(0) != '\\' || l.r.Peek(1) != 'u' {
		return false
	}
	mark := l.r.Pos() // remember position so we can back out of a bad escape
	l.r.Move(2)
	if c := l.r.Peek(0); c == '{' {
		// braced form: \u{1...} with one or more hex digits
		l.r.Move(1)
		if l.consumeHexDigit() {
			for l.consumeHexDigit() {
			}
			if c := l.r.Peek(0); c == '}' {
				l.r.Move(1)
				return true
			}
		}
		l.r.Rewind(mark)
		return false
	} else if !l.consumeHexDigit() || !l.consumeHexDigit() || !l.consumeHexDigit() || !l.consumeHexDigit() {
		// plain form requires exactly four hex digits
		l.r.Rewind(mark)
		return false
	}
	return true
}
// consumeSingleLineComment advances the reader up to (but not over) the next
// line terminator (LF, CR, U+2028, U+2029) or the end of input.
func (l *Lexer) consumeSingleLineComment() {
	for {
		c := l.r.Peek(0)
		if c == '\n' || c == '\r' || c == 0 && l.r.Err() != nil {
			return
		}
		if 0xC0 <= c {
			if r, _ := l.r.PeekRune(0); r == '\u2028' || r == '\u2029' {
				return
			}
		}
		l.r.Move(1)
	}
}
////////////////////////////////////////////////////////////////

// consumeHTMLLikeCommentToken advances over an HTML-like comment and reports
// whether one was consumed. "<!--" opens a single-line comment anywhere;
// "-->" closes one only when the line held no meaningful tokens so far.
func (l *Lexer) consumeHTMLLikeCommentToken(prevLineTerminator bool) bool {
	if l.r.Peek(0) == '<' && l.r.Peek(1) == '!' && l.r.Peek(2) == '-' && l.r.Peek(3) == '-' {
		// opening HTML-style single line comment
		l.r.Move(4)
		l.consumeSingleLineComment()
		return true
	}
	if prevLineTerminator && l.r.Peek(0) == '-' && l.r.Peek(1) == '-' && l.r.Peek(2) == '>' {
		// closing HTML-style single line comment, only at line start
		l.r.Move(3)
		l.consumeSingleLineComment()
		return true
	}
	return false
}
// consumeCommentToken advances over a // or /* */ comment, assuming the reader
// is on the first '/'. It returns CommentToken, CommentLineTerminatorToken
// when a multiline comment contains a line terminator, or ErrorToken when the
// input is not a comment or a multiline comment is unterminated.
func (l *Lexer) consumeCommentToken() TokenType {
	c := l.r.Peek(1)
	if c == '/' {
		// single line comment
		l.r.Move(2)
		l.consumeSingleLineComment()
		return CommentToken
	} else if c == '*' {
		l.r.Move(2)
		tt := CommentToken
		for {
			c := l.r.Peek(0)
			if c == '*' && l.r.Peek(1) == '/' {
				l.r.Move(2)
				break
			} else if c == 0 && l.r.Err() != nil {
				l.err = parse.NewErrorLexer(l.r, "unexpected EOF in comment")
				return ErrorToken
			} else if l.consumeLineTerminator() {
				// a newline inside /* */ matters for automatic semicolon insertion
				l.prevLineTerminator = true
				tt = CommentLineTerminatorToken
			} else {
				l.r.Move(1)
			}
		}
		return tt
	}
	return ErrorToken
}
// opTokens maps a single operator byte to its token type (e.g. '=' -> EqToken).
var opTokens = map[byte]TokenType{
	'=': EqToken,
	'!': NotToken,
	'<': LtToken,
	'>': GtToken,
	'+': AddToken,
	'-': SubToken,
	'*': MulToken,
	'/': DivToken,
	'%': ModToken,
	'&': BitAndToken,
	'|': BitOrToken,
	'^': BitXorToken,
	'~': BitNotToken,
	'?': QuestionToken,
}

// opEqTokens maps an operator byte followed by '=' to its token type (e.g. '+' -> AddEqToken for "+=").
var opEqTokens = map[byte]TokenType{
	'=': EqEqToken,
	'!': NotEqToken,
	'<': LtEqToken,
	'>': GtEqToken,
	'+': AddEqToken,
	'-': SubEqToken,
	'*': MulEqToken,
	'/': DivEqToken,
	'%': ModEqToken,
	'&': BitAndEqToken,
	'|': BitOrEqToken,
	'^': BitXorEqToken,
}

// opOpTokens maps a doubled operator byte to its token type (e.g. '+' -> IncrToken for "++").
var opOpTokens = map[byte]TokenType{
	'<': LtLtToken,
	'+': IncrToken,
	'-': DecrToken,
	'*': ExpToken,
	'&': AndToken,
	'|': OrToken,
	'?': NullishToken,
}

// opOpEqTokens maps a doubled operator byte followed by '=' to its token type (e.g. '*' -> ExpEqToken for "**=").
var opOpEqTokens = map[byte]TokenType{
	'<': LtLtEqToken,
	'*': ExpEqToken,
	'&': AndEqToken,
	'|': OrEqToken,
	'?': NullishEqToken,
}
// consumeOperatorToken advances over a (possibly multi-character) operator
// starting at the current byte and returns its token type, using the op*
// lookup tables for the regular cases and handling ===, !==, ?., =>, and the
// >> family explicitly.
func (l *Lexer) consumeOperatorToken() TokenType {
	c := l.r.Peek(0)
	l.r.Move(1)
	if l.r.Peek(0) == '=' {
		l.r.Move(1)
		if l.r.Peek(0) == '=' && (c == '!' || c == '=') {
			// === and !==
			l.r.Move(1)
			if c == '!' {
				return NotEqEqToken
			}
			return EqEqEqToken
		}
		return opEqTokens[c]
	} else if l.r.Peek(0) == c && (c == '+' || c == '-' || c == '*' || c == '&' || c == '|' || c == '?' || c == '<') {
		// doubled operator: ++ -- ** && || ?? <<
		l.r.Move(1)
		if l.r.Peek(0) == '=' && c != '+' && c != '-' {
			// **= &&= ||= ??= <<=
			l.r.Move(1)
			return opOpEqTokens[c]
		}
		return opOpTokens[c]
	} else if c == '?' && l.r.Peek(0) == '.' && (l.r.Peek(1) < '0' || l.r.Peek(1) > '9') {
		// ?. optional chaining, but not when followed by a digit (e.g. a?.5:b)
		l.r.Move(1)
		return OptChainToken
	} else if c == '=' && l.r.Peek(0) == '>' {
		l.r.Move(1)
		return ArrowToken
	} else if c == '>' && l.r.Peek(0) == '>' {
		// >> >>> >>= >>>=
		l.r.Move(1)
		if l.r.Peek(0) == '>' {
			l.r.Move(1)
			if l.r.Peek(0) == '=' {
				l.r.Move(1)
				return GtGtGtEqToken
			}
			return GtGtGtToken
		} else if l.r.Peek(0) == '=' {
			l.r.Move(1)
			return GtGtEqToken
		}
		return GtGtToken
	}
	return opTokens[c]
}
// consumeIdentifierToken advances over an identifier (ASCII fast path via the
// lookup tables, unicode categories, and \u escape sequences) and reports
// whether an identifier was consumed.
func (l *Lexer) consumeIdentifierToken() bool {
	c := l.r.Peek(0)
	if identifierStartTable[c] {
		l.r.Move(1)
	} else if 0xC0 <= c {
		// non-ASCII lead byte: check unicode identifier-start categories
		if r, n := l.r.PeekRune(0); unicode.IsOneOf(identifierStart, r) {
			l.r.Move(n)
		} else {
			return false
		}
	} else if !l.consumeUnicodeEscape() {
		return false
	}
	// consume identifier continuation characters
	for {
		c := l.r.Peek(0)
		if identifierTable[c] {
			l.r.Move(1)
		} else if 0xC0 <= c {
			if r, n := l.r.PeekRune(0); r == '\u200C' || r == '\u200D' || unicode.IsOneOf(identifierContinue, r) {
				l.r.Move(n)
			} else {
				break
			}
		} else if !l.consumeUnicodeEscape() {
			break
		}
	}
	return true
}
// consumeNumericSeparator advances over a numeric separator '_' when it is
// followed by a digit accepted by f, and reports whether it did. A trailing
// '_' with no digit after it is backed out and rejected.
func (l *Lexer) consumeNumericSeparator(f func() bool) bool {
	if l.r.Peek(0) != '_' {
		return false
	}
	l.r.Move(1)
	if f() {
		return true
	}
	l.r.Move(-1) // separator must be followed by a digit; back out
	return false
}
// consumeNumericToken advances over a numeric literal and returns its token
// type: HexadecimalToken, BinaryToken, OctalToken, IntegerToken (including
// BigInt with an 'n' suffix), or DecimalToken for fractions/exponents.
// It returns ErrorToken for legacy octals, a lone leading '.', or a malformed
// exponent.
func (l *Lexer) consumeNumericToken() TokenType {
	// assume to be on 0 1 2 3 4 5 6 7 8 9 .
	first := l.r.Peek(0)
	if first == '0' {
		l.r.Move(1)
		if l.r.Peek(0) == 'x' || l.r.Peek(0) == 'X' {
			// hexadecimal literal, e.g. 0xFF
			l.r.Move(1)
			if l.consumeHexDigit() {
				for l.consumeHexDigit() || l.consumeNumericSeparator(l.consumeHexDigit) {
				}
				if l.r.Peek(0) == 'n' {
					l.r.Move(1) // BigInt suffix
				}
				return HexadecimalToken
			}
			l.r.Move(-1) // 0x without digits: the 0 alone is the number
			return IntegerToken
		} else if l.r.Peek(0) == 'b' || l.r.Peek(0) == 'B' {
			// binary literal, e.g. 0b101
			l.r.Move(1)
			if l.consumeBinaryDigit() {
				for l.consumeBinaryDigit() || l.consumeNumericSeparator(l.consumeBinaryDigit) {
				}
				if l.r.Peek(0) == 'n' {
					l.r.Move(1)
				}
				return BinaryToken
			}
			l.r.Move(-1)
			return IntegerToken
		} else if l.r.Peek(0) == 'o' || l.r.Peek(0) == 'O' {
			// octal literal, e.g. 0o777
			l.r.Move(1)
			if l.consumeOctalDigit() {
				for l.consumeOctalDigit() || l.consumeNumericSeparator(l.consumeOctalDigit) {
				}
				if l.r.Peek(0) == 'n' {
					l.r.Move(1)
				}
				return OctalToken
			}
			l.r.Move(-1)
			return IntegerToken
		} else if l.r.Peek(0) == 'n' {
			l.r.Move(1) // 0n BigInt
			return IntegerToken
		} else if '0' <= l.r.Peek(0) && l.r.Peek(0) <= '9' {
			// 0 followed by a digit is a legacy octal (e.g. 0755), rejected
			l.err = parse.NewErrorLexer(l.r, "legacy octal numbers are not supported")
			return ErrorToken
		}
	} else if first != '.' {
		for l.consumeDigit() || l.consumeNumericSeparator(l.consumeDigit) {
		}
	}
	// we have parsed a 0 or an integer number
	c := l.r.Peek(0)
	if c == '.' {
		l.r.Move(1)
		if l.consumeDigit() {
			for l.consumeDigit() || l.consumeNumericSeparator(l.consumeDigit) {
			}
			c = l.r.Peek(0)
		} else if first == '.' {
			// number starts with a dot and must be followed by digits
			l.r.Move(-1)
			return ErrorToken // may be dot or ellipsis
		} else {
			c = l.r.Peek(0)
		}
	} else if c == 'n' {
		l.r.Move(1) // BigInt suffix
		return IntegerToken
	} else if c != 'e' && c != 'E' {
		return IntegerToken
	}
	if c == 'e' || c == 'E' {
		// exponent part with optional sign, e.g. 1e-10
		l.r.Move(1)
		c = l.r.Peek(0)
		if c == '+' || c == '-' {
			l.r.Move(1)
		}
		if !l.consumeDigit() {
			l.err = parse.NewErrorLexer(l.r, "invalid number")
			return ErrorToken
		}
		for l.consumeDigit() || l.consumeNumericSeparator(l.consumeDigit) {
		}
	}
	return DecimalToken
}
// consumeStringToken advances over a single- or double-quoted string literal,
// assuming the reader is on the opening quote. Escaped quotes, backslashes,
// and line continuations (backslash-newline) are handled; an unescaped
// newline or EOF before the closing quote yields ErrorToken.
func (l *Lexer) consumeStringToken() TokenType {
	// assume to be on ' or "
	delim := l.r.Peek(0)
	l.r.Move(1)
	for {
		c := l.r.Peek(0)
		if c == delim {
			l.r.Move(1)
			break
		} else if c == '\\' {
			l.r.Move(1)
			if !l.consumeLineTerminator() {
				// consume the escaped delimiter or backslash; any other
				// escaped character is handled on the next iteration
				if c := l.r.Peek(0); c == delim || c == '\\' {
					l.r.Move(1)
				}
			}
			continue
		} else if c == '\n' || c == '\r' || c == 0 && l.r.Err() != nil {
			l.err = parse.NewErrorLexer(l.r, "unterminated string literal")
			return ErrorToken
		}
		l.r.Move(1)
	}
	return StringToken
}
// consumeRegExpToken advances over a regular expression literal including its
// flags, assuming the reader is on the opening '/'. It reports false when the
// literal is cut short by a line terminator or EOF. A '/' inside a character
// class [...] does not close the expression.
func (l *Lexer) consumeRegExpToken() bool {
	// assume to be on /
	l.r.Move(1)
	inClass := false
	for {
		c := l.r.Peek(0)
		if !inClass && c == '/' {
			l.r.Move(1)
			break
		} else if c == '[' {
			inClass = true
		} else if c == ']' {
			inClass = false
		} else if c == '\\' {
			l.r.Move(1)
			if l.isLineTerminator() || l.r.Peek(0) == 0 && l.r.Err() != nil {
				return false
			}
		} else if l.isLineTerminator() || c == 0 && l.r.Err() != nil {
			return false
		}
		l.r.Move(1)
	}
	// flags: any identifier-continue characters after the closing /
	for {
		c := l.r.Peek(0)
		if identifierTable[c] {
			l.r.Move(1)
		} else if 0xC0 <= c {
			if r, n := l.r.PeekRune(0); r == '\u200C' || r == '\u200D' || unicode.IsOneOf(identifierContinue, r) {
				l.r.Move(n)
			} else {
				break
			}
		} else {
			break
		}
	}
	return true
}
// consumeTemplateToken advances over (part of) a template literal and returns
// TemplateToken or TemplateStartToken when starting at '`', and
// TemplateEndToken or TemplateMiddleToken when continuing at '}' after an
// interpolation; the *Start/*Middle variants stop at a "${" interpolation.
// Returns ErrorToken on EOF before the literal is closed.
func (l *Lexer) consumeTemplateToken() TokenType {
	// assume to be on ` or } when already within template
	continuation := l.r.Peek(0) == '}'
	l.r.Move(1)
	for {
		c := l.r.Peek(0)
		if c == '`' {
			// template closed: pop the nesting level recorded at its `
			l.templateLevels = l.templateLevels[:len(l.templateLevels)-1]
			l.r.Move(1)
			if continuation {
				return TemplateEndToken
			}
			return TemplateToken
		} else if c == '$' && l.r.Peek(1) == '{' {
			// interpolation begins; lexing resumes at the matching }
			l.level++
			l.r.Move(2)
			if continuation {
				return TemplateMiddleToken
			}
			return TemplateStartToken
		} else if c == '\\' {
			// skip the escaped character so \` and \${ do not terminate
			l.r.Move(1)
			if c := l.r.Peek(0); c != 0 {
				l.r.Move(1)
			}
			continue
		} else if c == 0 && l.r.Err() != nil {
			l.err = parse.NewErrorLexer(l.r, "unterminated template literal")
			return ErrorToken
		}
		l.r.Move(1)
	}
}
// identifierStartTable is true for the ASCII bytes that may start an
// identifier: '$', '_', and the letters A-Z and a-z. All non-ASCII bytes are
// false; they are checked separately against the unicode range tables.
var identifierStartTable = func() (t [256]bool) {
	t['$'] = true
	t['_'] = true
	for c := 'A'; c <= 'Z'; c++ {
		t[c] = true
		t[c+'a'-'A'] = true
	}
	return t
}()
// identifierTable is true for the ASCII bytes that may continue an
// identifier: '$', '_', the digits 0-9, and the letters A-Z and a-z. All
// non-ASCII bytes are false; they are checked against unicode range tables.
var identifierTable = func() (t [256]bool) {
	t['$'] = true
	t['_'] = true
	for c := '0'; c <= '9'; c++ {
		t[c] = true
	}
	for c := 'A'; c <= 'Z'; c++ {
		t[c] = true
		t[c+'a'-'A'] = true
	}
	return t
}()
package js
import (
"bytes"
"errors"
"fmt"
"io"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/buffer"
)
// Options are the parser options.
type Options struct {
	WhileToFor bool // rewrite while statements as for statements
	Inline bool // parse a statement list directly instead of a full module (allows return, skips import/export handling)
}
// Parser is the state for the parser.
type Parser struct {
	l *Lexer // token source
	o Options
	err error // sticky parse error
	data []byte // data of the current token
	tt TokenType // type of the current token
	prevLT bool // a line terminator preceded the current token
	in, await, yield, deflt, retrn bool // grammar context flags (In, Await, Yield, Default, Return)
	assumeArrowFunc bool
	allowDirectivePrologue bool // a directive prologue ("use strict" etc.) may appear next
	comments []IStmt // collected //! and /*! comments, prepended to the module
	stmtLevel int // statement nesting depth, guarded against runaway recursion
	exprLevel int // expression nesting depth
	scope *Scope // current lexical scope
}
// Parse returns a JS AST tree of the parsed input. In inline mode the input
// is parsed as a statement list; otherwise it is parsed as a module, with an
// optional shebang line preserved as a leading comment.
func Parse(r *parse.Input, o Options) (*AST, error) {
	ast := &AST{}
	p := &Parser{
		l: NewLexer(r),
		o: o,
		tt: WhitespaceToken, // trick so that next() works
		in: true,
		await: true,
	}
	if o.Inline {
		p.next()
		p.retrn = true // return is allowed at the top level in inline mode
		p.allowDirectivePrologue = true
		p.enterScope(&ast.BlockStmt.Scope, true)
		for {
			if p.tt == ErrorToken {
				break
			}
			ast.BlockStmt.List = append(ast.BlockStmt.List, p.parseStmt(true))
		}
	} else {
		// catch shebang in first line
		var shebang []byte
		if r.Peek(0) == '#' && r.Peek(1) == '!' {
			r.Move(2)
			p.l.consumeSingleLineComment() // consume till end-of-line
			shebang = r.Shift()
		}
		// parse JS module
		p.next()
		ast.BlockStmt = p.parseModule()
		if 0 < len(shebang) {
			// keep the shebang as the first statement of the module
			ast.BlockStmt.List = append([]IStmt{&Comment{shebang}}, ast.BlockStmt.List...)
		}
	}
	if p.err != nil {
		offset := p.l.r.Offset() - len(p.data)
		return nil, parse.NewError(buffer.NewReader(p.l.r.Bytes()), offset, p.err.Error())
	} else if p.l.Err() != nil && p.l.Err() != io.EOF {
		return nil, p.l.Err()
	}
	return ast, nil
}
////////////////////////////////////////////////////////////////

// next advances to the next significant token, skipping whitespace, line
// terminators, and comments. prevLT is set when a line terminator (or a
// comment containing one) was skipped, and //! and /*! comments are collected.
func (p *Parser) next() {
	p.prevLT = false
	for {
		p.tt, p.data = p.l.Next()
		switch p.tt {
		case WhitespaceToken:
			// skip
		case LineTerminatorToken:
			p.prevLT = true
		case CommentToken, CommentLineTerminatorToken:
			if 2 < len(p.data) && p.data[2] == '!' {
				// keep bang comments such as /*! license */
				p.comments = append(p.comments, &Comment{p.data})
			}
			if p.tt == CommentLineTerminatorToken {
				p.prevLT = true
			}
		default:
			return
		}
	}
}
// failMessage records a formatted parse error unless one is already set, and
// forces the current token to ErrorToken so parsing stops.
func (p *Parser) failMessage(msg string, args ...interface{}) {
	if p.err != nil {
		return
	}
	p.err = fmt.Errorf(msg, args...)
	p.tt = ErrorToken
}
// fail records a parse error for the current token unless an error is already
// set, and forces the current token to ErrorToken. in names the construct
// being parsed; expected lists acceptable token types, both woven into the
// message (e.g. "expected x, y, or z instead of w in for statement").
// Cleanup: removed the two no-op `+ ""` string concatenations of the original.
func (p *Parser) fail(in string, expected ...TokenType) {
	if p.err == nil {
		msg := "unexpected"
		if 0 < len(expected) {
			msg = "expected"
			for i, tt := range expected[:len(expected)-1] {
				if 0 < i {
					msg += ","
				}
				msg += " " + tt.String()
			}
			// Oxford comma for three or more alternatives
			if 2 < len(expected) {
				msg += ", or"
			} else if 1 < len(expected) {
				msg += " or"
			}
			msg += " " + expected[len(expected)-1].String() + " instead of"
		}
		if p.tt == ErrorToken {
			if p.l.Err() == io.EOF {
				msg += " EOF"
			} else if lexerErr, ok := p.l.Err().(*parse.Error); ok {
				// prefer the lexer's own message over ours
				msg = lexerErr.Message
			} else {
				// does not happen
			}
		} else {
			msg += " " + string(p.data)
		}
		if in != "" {
			msg += " in " + in
		}
		p.err = errors.New(msg)
		p.tt = ErrorToken
	}
}
// consume checks that the current token is tt and advances past it, returning
// true; on mismatch it records a parse error mentioning in and returns false.
func (p *Parser) consume(in string, tt TokenType) bool {
	if p.tt == tt {
		p.next()
		return true
	}
	p.fail(in, tt)
	return false
}
// enterScope installs scope as the current scope, linked to its parent, and
// returns the previous scope so the caller can restore it with exitScope.
// When isFunc is set the scope is its own function scope; otherwise it
// inherits the function scope from its parent.
func (p *Parser) enterScope(scope *Scope, isFunc bool) *Scope {
	parent := p.scope
	*scope = Scope{Parent: parent}
	p.scope = scope
	if isFunc {
		scope.Func = scope
	} else if parent != nil {
		scope.Func = parent.Func
	}
	return parent
}

// exitScope hoists the current scope's undeclared variables and restores
// parent as the current scope.
func (p *Parser) exitScope(parent *Scope) {
	p.scope.HoistUndeclared()
	p.scope = parent
}
// parseModule parses a full module: import and export statements at the top
// level plus regular statements, until the lexer runs out of tokens. Any
// collected bang comments are prepended to the statement list on return.
func (p *Parser) parseModule() (module BlockStmt) {
	p.enterScope(&module.Scope, true)
	p.allowDirectivePrologue = true
	for {
		switch p.tt {
		case ErrorToken:
			// end of input (or error): flush collected comments to the front
			if 0 < len(p.comments) {
				module.List = append(p.comments, module.List...)
				p.comments = p.comments[:0]
			}
			return
		case ImportToken:
			p.next()
			if p.tt == OpenParenToken {
				// could be an import call expression
				left := &LiteralExpr{ImportToken, []byte("import")}
				p.exprLevel++
				suffix := p.parseExpressionSuffix(left, OpExpr, OpCall)
				p.exprLevel--
				module.List = append(module.List, &ExprStmt{suffix})
				if !p.prevLT && p.tt == SemicolonToken {
					p.next()
				}
			} else {
				importStmt := p.parseImportStmt()
				module.List = append(module.List, &importStmt)
			}
		case ExportToken:
			exportStmt := p.parseExportStmt()
			module.List = append(module.List, &exportStmt)
		default:
			module.List = append(module.List, p.parseStmt(true))
		}
	}
}
func (p *Parser) parseStmt(allowDeclaration bool) (stmt IStmt) {
p.stmtLevel++
if 1000 < p.stmtLevel {
p.failMessage("too many nested statements")
return nil
}
allowDirectivePrologue := p.allowDirectivePrologue
p.allowDirectivePrologue = false
switch tt := p.tt; tt {
case OpenBraceToken:
stmt = p.parseBlockStmt("block statement")
case ConstToken, VarToken:
if !allowDeclaration && tt == ConstToken {
p.fail("statement")
return
}
p.next()
varDecl := p.parseVarDecl(tt, true)
stmt = varDecl
if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
if tt == ConstToken {
p.fail("const declaration")
} else {
p.fail("var statement")
}
return
}
case LetToken:
let := p.data
p.next()
if allowDeclaration && (IsIdentifier(p.tt) || p.tt == YieldToken || p.tt == AwaitToken || p.tt == OpenBracketToken || p.tt == OpenBraceToken) {
stmt = p.parseVarDecl(tt, false)
if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
p.fail("let declaration")
return
}
} else if p.tt == OpenBracketToken {
p.failMessage("unexpected let [ in single-statement context")
return
} else {
// expression
stmt = &ExprStmt{p.parseIdentifierExpression(OpExpr, let)}
if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
p.fail("expression")
return
}
}
case IfToken:
p.next()
if !p.consume("if statement", OpenParenToken) {
return
}
cond := p.parseExpression(OpExpr)
if !p.consume("if statement", CloseParenToken) {
return
}
body := p.parseStmt(false)
var elseBody IStmt
if p.tt == ElseToken {
p.next()
elseBody = p.parseStmt(false)
}
stmt = &IfStmt{cond, body, elseBody}
case ContinueToken, BreakToken:
tt := p.tt
p.next()
var label []byte
if !p.prevLT && p.isIdentifierReference(p.tt) {
label = p.data
p.next()
}
stmt = &BranchStmt{tt, label}
case WithToken:
p.next()
if !p.consume("with statement", OpenParenToken) {
return
}
cond := p.parseExpression(OpExpr)
if !p.consume("with statement", CloseParenToken) {
return
}
p.scope.Func.HasWith = true
stmt = &WithStmt{cond, p.parseStmt(false)}
case DoToken:
stmt = &DoWhileStmt{}
p.next()
body := p.parseStmt(false)
if !p.consume("do-while statement", WhileToken) {
return
}
if !p.consume("do-while statement", OpenParenToken) {
return
}
stmt = &DoWhileStmt{p.parseExpression(OpExpr), body}
if !p.consume("do-while statement", CloseParenToken) {
return
}
case WhileToken:
p.next()
if !p.consume("while statement", OpenParenToken) {
return
}
cond := p.parseExpression(OpExpr)
if !p.consume("while statement", CloseParenToken) {
return
}
body := p.parseStmt(false)
if p.o.WhileToFor {
varDecl := &VarDecl{TokenType: VarToken, Scope: p.scope, InFor: true}
p.scope.Func.VarDecls = append(p.scope.Func.VarDecls, varDecl)
block, ok := body.(*BlockStmt)
if !ok {
block = &BlockStmt{List: []IStmt{body}}
}
stmt = &ForStmt{varDecl, cond, nil, block}
} else {
stmt = &WhileStmt{cond, body}
}
case ForToken:
p.next()
await := p.await && p.tt == AwaitToken
if await {
p.next()
}
if !p.consume("for statement", OpenParenToken) {
return
}
body := &BlockStmt{}
parent := p.enterScope(&body.Scope, false)
var init IExpr
p.in = false
if p.tt == VarToken || p.tt == LetToken || p.tt == ConstToken {
tt := p.tt
p.next()
varDecl := p.parseVarDecl(tt, true)
if p.err != nil {
return
} else if p.tt != SemicolonToken && (1 < len(varDecl.List) || varDecl.List[0].Default != nil) {
p.fail("for statement")
return
} else if p.tt == SemicolonToken && varDecl.List[0].Default == nil {
// all but the first item were already verified
if _, ok := varDecl.List[0].Binding.(*Var); !ok {
p.fail("for statement")
return
}
}
init = varDecl
} else if await {
init = p.parseExpression(OpLHS)
} else if p.tt != SemicolonToken {
init = p.parseExpression(OpExpr)
}
p.in = true
isLHSExpr := isLHSExpr(init)
if isLHSExpr && p.tt == InToken {
if await {
p.fail("for statement", OfToken)
return
}
p.next()
value := p.parseExpression(OpExpr)
if !p.consume("for statement", CloseParenToken) {
return
}
p.scope.MarkForStmt()
if p.tt == OpenBraceToken {
body.List = p.parseStmtList("")
} else if p.tt != SemicolonToken {
body.List = []IStmt{p.parseStmt(false)}
} else {
p.next()
}
if varDecl, ok := init.(*VarDecl); ok {
varDecl.InForInOf = true
}
stmt = &ForInStmt{init, value, body}
} else if isLHSExpr && p.tt == OfToken {
p.next()
value := p.parseExpression(OpAssign)
if !p.consume("for statement", CloseParenToken) {
return
}
p.scope.MarkForStmt()
if p.tt == OpenBraceToken {
body.List = p.parseStmtList("")
} else if p.tt != SemicolonToken {
body.List = []IStmt{p.parseStmt(false)}
} else {
p.next()
}
if varDecl, ok := init.(*VarDecl); ok {
varDecl.InForInOf = true
}
stmt = &ForOfStmt{await, init, value, body}
} else if p.tt == SemicolonToken {
var cond, post IExpr
if await {
p.fail("for statement", OfToken)
return
}
p.next()
if p.tt != SemicolonToken {
cond = p.parseExpression(OpExpr)
}
if !p.consume("for statement", SemicolonToken) {
return
}
if p.tt != CloseParenToken {
post = p.parseExpression(OpExpr)
}
if !p.consume("for statement", CloseParenToken) {
return
}
p.scope.MarkForStmt()
if p.tt == OpenBraceToken {
body.List = p.parseStmtList("")
} else if p.tt != SemicolonToken {
body.List = []IStmt{p.parseStmt(false)}
} else {
p.next()
}
if init == nil {
varDecl := &VarDecl{TokenType: VarToken, Scope: p.scope, InFor: true}
p.scope.Func.VarDecls = append(p.scope.Func.VarDecls, varDecl)
init = varDecl
} else if varDecl, ok := init.(*VarDecl); ok {
varDecl.InFor = true
}
stmt = &ForStmt{init, cond, post, body}
} else if isLHSExpr {
p.fail("for statement", InToken, OfToken, SemicolonToken)
return
} else {
p.fail("for statement", SemicolonToken)
return
}
p.exitScope(parent)
case SwitchToken:
p.next()
if !p.consume("switch statement", OpenParenToken) {
return
}
init := p.parseExpression(OpExpr)
if !p.consume("switch statement", CloseParenToken) {
return
}
// case block
if !p.consume("switch statement", OpenBraceToken) {
return
}
switchStmt := &SwitchStmt{Init: init}
parent := p.enterScope(&switchStmt.Scope, false)
for {
if p.tt == ErrorToken {
p.fail("switch statement")
return
} else if p.tt == CloseBraceToken {
p.next()
break
}
clause := p.tt
var list IExpr
if p.tt == CaseToken {
p.next()
list = p.parseExpression(OpExpr)
} else if p.tt == DefaultToken {
p.next()
} else {
p.fail("switch statement", CaseToken, DefaultToken)
return
}
if !p.consume("switch statement", ColonToken) {
return
}
var stmts []IStmt
for p.tt != CaseToken && p.tt != DefaultToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
stmts = append(stmts, p.parseStmt(true))
}
switchStmt.List = append(switchStmt.List, CaseClause{clause, list, stmts})
}
p.exitScope(parent)
stmt = switchStmt
case FunctionToken:
if !allowDeclaration {
p.fail("statement")
return
}
stmt = p.parseFuncDecl()
case AsyncToken: // async function
async := p.data
p.next()
if p.tt == FunctionToken && !p.prevLT {
if !allowDeclaration {
p.fail("statement")
return
}
stmt = p.parseAsyncFuncDecl()
} else {
// expression
stmt = &ExprStmt{p.parseAsyncExpression(OpExpr, async)}
if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
p.fail("expression")
return
}
}
case ClassToken:
if !allowDeclaration {
p.fail("statement")
return
}
stmt = p.parseClassDecl()
case ThrowToken:
p.next()
if p.prevLT {
p.failMessage("unexpected newline in throw statement")
return
}
stmt = &ThrowStmt{p.parseExpression(OpExpr)}
case TryToken:
p.next()
body := p.parseBlockStmt("try statement")
var binding IBinding
var catch, finally *BlockStmt
if p.tt == CatchToken {
p.next()
catch = &BlockStmt{}
parent := p.enterScope(&catch.Scope, false)
if p.tt == OpenParenToken {
p.next()
binding = p.parseBinding(CatchDecl) // local to block scope of catch
if !p.consume("try-catch statement", CloseParenToken) {
return
}
}
catch.List = p.parseStmtList("try-catch statement")
p.exitScope(parent)
} else if p.tt != FinallyToken {
p.fail("try statement", CatchToken, FinallyToken)
return
}
if p.tt == FinallyToken {
p.next()
finally = p.parseBlockStmt("try-finally statement")
}
stmt = &TryStmt{body, binding, catch, finally}
case DebuggerToken:
stmt = &DebuggerStmt{}
p.next()
case SemicolonToken:
stmt = &EmptyStmt{}
p.next()
case ErrorToken:
stmt = &EmptyStmt{}
return
default:
if p.retrn && p.tt == ReturnToken {
p.next()
var value IExpr
if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
value = p.parseExpression(OpExpr)
}
stmt = &ReturnStmt{value}
} else if p.isIdentifierReference(p.tt) {
// LabelledStatement, Expression
label := p.data
p.next()
if p.tt == ColonToken {
p.next()
prevDeflt := p.deflt
if p.tt == FunctionToken {
p.deflt = false
}
stmt = &LabelledStmt{label, p.parseStmt(true)} // allows illegal async function, generator function, let, const, or class declarations
p.deflt = prevDeflt
} else {
// expression
stmt = &ExprStmt{p.parseIdentifierExpression(OpExpr, label)}
if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
p.fail("expression")
return
}
}
} else {
// expression
stmt = &ExprStmt{p.parseExpression(OpExpr)}
if !p.prevLT && p.tt != SemicolonToken && p.tt != CloseBraceToken && p.tt != ErrorToken {
p.fail("expression")
return
} else if lit, ok := stmt.(*ExprStmt).Value.(*LiteralExpr); ok && allowDirectivePrologue && lit.TokenType == StringToken && len(lit.Data) == 12 && bytes.Equal(lit.Data[1:11], []byte("use strict")) {
stmt = &DirectivePrologueStmt{lit.Data}
p.allowDirectivePrologue = true
}
}
}
if !p.prevLT && p.tt == SemicolonToken {
p.next()
}
p.stmtLevel--
return
}
// parseStmtList parses a brace-enclosed statement list. Comments collected by
// the lexer while parsing the list are moved to the front of the returned list.
func (p *Parser) parseStmtList(in string) (list []IStmt) {
	nComments := len(p.comments)
	if !p.consume(in, OpenBraceToken) {
		return
	}
	for p.tt != CloseBraceToken {
		if p.tt == ErrorToken {
			p.fail("")
			return
		}
		list = append(list, p.parseStmt(true))
	}
	p.next() // consume CloseBraceToken
	if nComments < len(p.comments) {
		// prepend newly gathered comment statements and hand them back
		merged := make([]IStmt, 0, len(p.comments)-nComments+len(list))
		merged = append(merged, p.comments[nComments:]...)
		merged = append(merged, list...)
		list = merged
		p.comments = p.comments[:nComments]
	}
	return
}
// parseBlockStmt parses a block statement, opening a fresh (non-function)
// scope around its statement list.
func (p *Parser) parseBlockStmt(in string) (blockStmt *BlockStmt) {
	blockStmt = &BlockStmt{}
	parentScope := p.enterScope(&blockStmt.Scope, false)
	blockStmt.List = p.parseStmtList(in)
	p.exitScope(parentScope)
	return blockStmt
}
// parseImportStmt parses an import statement. It assumes the import keyword has
// already been consumed. Supported forms: a bare module string, a default
// import, a namespace import (* as name), a named import list ({a, b as c}),
// and a default import combined with one of the latter two.
func (p *Parser) parseImportStmt() (importStmt ImportStmt) {
	// assume we're passed import
	if p.tt == StringToken {
		// import "module" — side-effect-only import, no bindings
		importStmt.Module = p.data
		p.next()
	} else {
		expectClause := true
		if IsIdentifier(p.tt) || p.tt == YieldToken {
			// default import; a following comma means a * or {...} clause follows
			importStmt.Default = p.data
			p.next()
			expectClause = p.tt == CommaToken
			if expectClause {
				p.next()
			}
		}
		if expectClause && p.tt == MulToken {
			// namespace import: * as name
			star := p.data
			p.next()
			if !p.consume("import statement", AsToken) {
				return
			}
			if !IsIdentifier(p.tt) && p.tt != YieldToken {
				p.fail("import statement", IdentifierToken)
				return
			}
			importStmt.List = []Alias{Alias{star, p.data}}
			p.next()
		} else if expectClause && p.tt == OpenBraceToken {
			// named imports: { name, name as binding, "string" as binding, ... }
			p.next()
			importStmt.List = []Alias{}
			for IsIdentifierName(p.tt) || p.tt == StringToken {
				tt := p.tt
				var name, binding []byte = nil, p.data
				p.next()
				if p.tt == AsToken {
					p.next()
					if !IsIdentifier(p.tt) && p.tt != YieldToken {
						p.fail("import statement", IdentifierToken)
						return
					}
					name = binding
					binding = p.data
					p.next()
				} else if !IsIdentifier(tt) && tt != YieldToken || tt == StringToken {
					// without "as", only a plain identifier may be imported
					p.fail("import statement", IdentifierToken, StringToken)
					return
				}
				importStmt.List = append(importStmt.List, Alias{name, binding})
				if p.tt == CommaToken {
					p.next()
					if p.tt == CloseBraceToken {
						// empty Alias records the trailing comma
						importStmt.List = append(importStmt.List, Alias{})
						break
					}
				}
			}
			if !p.consume("import statement", CloseBraceToken) {
				return
			}
		} else if expectClause && importStmt.Default != nil {
			// "import name," must be followed by * or {
			p.fail("import statement", MulToken, OpenBraceToken)
			return
		} else if importStmt.Default == nil {
			// no module string, default, namespace, or named list was found
			p.fail("import statement", StringToken, IdentifierToken, MulToken, OpenBraceToken)
			return
		}
		if !p.consume("import statement", FromToken) {
			return
		}
		if p.tt != StringToken {
			p.fail("import statement", StringToken)
			return
		}
		importStmt.Module = p.data
		p.next()
	}
	if p.tt == SemicolonToken {
		p.next()
	}
	return
}
// parseExportStmt parses an export statement: export * [as name] from "mod",
// export { ... } [from "mod"], export var/let/const/function/async
// function/class declarations, and export default declarations/expressions.
// It assumes we're at the export keyword.
func (p *Parser) parseExportStmt() (exportStmt ExportStmt) {
	// assume we're at export
	p.next()
	// export bodies are parsed as module-level code: await allowed, yield not,
	// and default exports may omit declaration names
	prevYield, prevAwait, prevDeflt := p.yield, p.await, p.deflt
	p.yield, p.await, p.deflt = false, true, true
	if p.tt == MulToken || p.tt == OpenBraceToken {
		if p.tt == MulToken {
			// export * [as name] from "module"
			star := p.data
			p.next()
			if p.tt == AsToken {
				p.next()
				if !IsIdentifierName(p.tt) && p.tt != StringToken {
					p.fail("export statement", IdentifierToken, StringToken)
					return
				}
				exportStmt.List = []Alias{Alias{star, p.data}}
				p.next()
			} else {
				exportStmt.List = []Alias{Alias{nil, star}}
			}
			if p.tt != FromToken {
				p.fail("export statement", FromToken)
				return
			}
		} else {
			// export { name, name as binding, ... }
			p.next()
			for IsIdentifierName(p.tt) || p.tt == StringToken {
				var name, binding []byte = nil, p.data
				p.next()
				if p.tt == AsToken {
					p.next()
					if !IsIdentifierName(p.tt) && p.tt != StringToken {
						p.fail("export statement", IdentifierToken, StringToken)
						return
					}
					name = binding
					binding = p.data
					p.next()
				}
				exportStmt.List = append(exportStmt.List, Alias{name, binding})
				if p.tt == CommaToken {
					p.next()
					if p.tt == CloseBraceToken {
						// empty Alias records the trailing comma
						exportStmt.List = append(exportStmt.List, Alias{})
						break
					}
				}
			}
			if !p.consume("export statement", CloseBraceToken) {
				return
			}
		}
		if p.tt == FromToken {
			// re-export from another module
			p.next()
			if p.tt != StringToken {
				p.fail("export statement", StringToken)
				return
			}
			exportStmt.Module = p.data
			p.next()
		}
	} else if p.tt == VarToken || p.tt == ConstToken || p.tt == LetToken {
		tt := p.tt
		p.next()
		exportStmt.Decl = p.parseVarDecl(tt, false)
	} else if p.tt == FunctionToken {
		exportStmt.Decl = p.parseFuncDecl()
	} else if p.tt == AsyncToken { // async function
		p.next()
		// "async" must be directly followed by "function" on the same line
		if p.tt != FunctionToken || p.prevLT {
			p.fail("export statement", FunctionToken)
			return
		}
		exportStmt.Decl = p.parseAsyncFuncDecl()
	} else if p.tt == ClassToken {
		exportStmt.Decl = p.parseClassDecl()
	} else if p.tt == DefaultToken {
		exportStmt.Default = true
		p.next()
		if p.tt == FunctionToken {
			// hoistable declaration
			exportStmt.Decl = p.parseFuncDecl()
		} else if p.tt == AsyncToken { // async function or async arrow function
			async := p.data
			p.next()
			if p.tt == FunctionToken && !p.prevLT {
				// hoistable declaration
				exportStmt.Decl = p.parseAsyncFuncDecl()
			} else {
				// expression
				exportStmt.Decl = p.parseAsyncExpression(OpAssign, async)
			}
		} else if p.tt == ClassToken {
			exportStmt.Decl = p.parseClassDecl()
		} else {
			exportStmt.Decl = p.parseExpression(OpAssign)
		}
	} else {
		p.fail("export statement", MulToken, OpenBraceToken, VarToken, LetToken, ConstToken, FunctionToken, AsyncToken, ClassToken, DefaultToken)
		return
	}
	if p.tt == SemicolonToken {
		p.next()
	}
	p.yield, p.await, p.deflt = prevYield, prevAwait, prevDeflt
	return
}
// parseVarDecl parses the binding list of a var, let, or const declaration.
// tt is the declaration keyword's token type (already consumed). canBeHoisted
// registers a var declaration with the enclosing function for hoisting.
func (p *Parser) parseVarDecl(tt TokenType, canBeHoisted bool) (varDecl *VarDecl) {
	// assume we're past var, let or const
	varDecl = &VarDecl{
		TokenType: tt,
		Scope:     p.scope,
	}
	declType := LexicalDecl
	if tt == VarToken {
		declType = VariableDecl
		if canBeHoisted {
			p.scope.Func.VarDecls = append(p.scope.Func.VarDecls, varDecl)
		}
	}
	for {
		// binding element, var declaration in for-in or for-of can never have a default
		var bindingElement BindingElement
		bindingElement.Binding = p.parseBinding(declType)
		if p.tt == EqToken {
			p.next()
			bindingElement.Default = p.parseExpression(OpAssign)
		} else if _, ok := bindingElement.Binding.(*Var); !ok && (p.in || 0 < len(varDecl.List)) {
			// a destructuring pattern requires an initializer, except as the sole
			// binding of a for-in/for-of head (where p.in is false)
			p.fail("var statement", EqToken)
			return
		} else if tt == ConstToken && (p.in || !p.in && p.tt != OfToken && p.tt != InToken) {
			// const requires an initializer except in for-in/for-of heads; bail out
			// like the pattern branch above rather than appending a bad element
			p.fail("const statement", EqToken)
			return
		}
		varDecl.List = append(varDecl.List, bindingElement)
		if p.tt == CommaToken {
			p.next()
		} else {
			break
		}
	}
	return
}
// parseFuncParams parses a parenthesized formal parameter list, including an
// optional trailing binding rest element (...binding).
func (p *Parser) parseFuncParams(in string) (params Params) {
	// FormalParameters
	if !p.consume(in, OpenParenToken) {
		return
	}
	done := false
	for !done && p.tt != CloseParenToken && p.tt != ErrorToken {
		if p.tt == EllipsisToken {
			// binding rest element; it must be the last parameter
			p.next()
			params.Rest = p.parseBinding(ArgumentDecl)
			p.consume(in, CloseParenToken)
			return
		}
		params.List = append(params.List, p.parseBindingElement(ArgumentDecl))
		if p.tt == CommaToken {
			p.next()
		} else {
			done = true
		}
	}
	if p.tt != CloseParenToken {
		p.fail(in)
		return
	}
	p.next()
	// mark undeclared vars as arguments in `function f(a=b){var b}` where the b's are different vars
	p.scope.MarkFuncArgs()
	return
}
// parseFuncDecl parses a function declaration.
func (p *Parser) parseFuncDecl() (funcDecl *FuncDecl) {
	return p.parseFunc(false, false)
}

// parseAsyncFuncDecl parses an async function declaration.
func (p *Parser) parseAsyncFuncDecl() (funcDecl *FuncDecl) {
	return p.parseFunc(true, false)
}

// parseFuncExpr parses a function expression.
func (p *Parser) parseFuncExpr() (funcDecl *FuncDecl) {
	return p.parseFunc(false, true)
}

// parseAsyncFuncExpr parses an async function expression.
func (p *Parser) parseAsyncFuncExpr() (funcDecl *FuncDecl) {
	return p.parseFunc(true, true)
}
// parseFunc parses a function declaration (expr == false) or function
// expression (expr == true), optionally async and/or generator. It assumes
// we're at the function keyword.
func (p *Parser) parseFunc(async, expr bool) (funcDecl *FuncDecl) {
	// assume we're at function
	p.next()
	funcDecl = &FuncDecl{}
	funcDecl.Async = async
	funcDecl.Generator = p.tt == MulToken
	if funcDecl.Generator {
		p.next()
	}
	var ok bool
	var name []byte
	if expr && (IsIdentifier(p.tt) || p.tt == YieldToken || p.tt == AwaitToken) || !expr && p.isIdentifierReference(p.tt) {
		name = p.data
		if !expr {
			// a declaration's name is declared in the surrounding scope
			funcDecl.Name, ok = p.scope.Declare(FunctionDecl, p.data)
			if !ok {
				p.failMessage("identifier %s has already been declared", string(p.data))
				return
			}
		}
		p.next()
	} else if !expr && !p.deflt {
		// only "export default function" may omit the name
		p.fail("function declaration", IdentifierToken)
		return
	} else if p.tt != OpenParenToken {
		p.fail("function declaration", IdentifierToken, OpenParenToken)
		return
	}
	parent := p.enterScope(&funcDecl.Body.Scope, true)
	prevAwait, prevYield, prevRetrn := p.await, p.yield, p.retrn
	p.await, p.yield, p.retrn = funcDecl.Async, funcDecl.Generator, true
	if expr && name != nil {
		// an expression's name is only visible inside its own body
		funcDecl.Name, _ = p.scope.Declare(ExprDecl, name) // cannot fail
	}
	funcDecl.Params = p.parseFuncParams("function declaration")
	prevAllowDirectivePrologue, prevExprLevel := p.allowDirectivePrologue, p.exprLevel
	p.allowDirectivePrologue, p.exprLevel = true, 0
	funcDecl.Body.List = p.parseStmtList("function declaration")
	p.allowDirectivePrologue, p.exprLevel = prevAllowDirectivePrologue, prevExprLevel
	p.await, p.yield, p.retrn = prevAwait, prevYield, prevRetrn
	p.exitScope(parent)
	return
}
// parseClassDecl parses a class declaration.
func (p *Parser) parseClassDecl() (classDecl *ClassDecl) {
	return p.parseAnyClass(false)
}

// parseClassExpr parses a class expression.
func (p *Parser) parseClassExpr() (classDecl *ClassDecl) {
	return p.parseAnyClass(true)
}
// parseAnyClass parses a class declaration (expr == false) or class expression
// (expr == true): optional name, optional extends clause, and the class body.
// It assumes we're at the class keyword.
func (p *Parser) parseAnyClass(expr bool) (classDecl *ClassDecl) {
	// assume we're at class
	p.next()
	classDecl = &ClassDecl{}
	if IsIdentifier(p.tt) || p.tt == YieldToken || p.tt == AwaitToken {
		if !expr {
			// a declaration's name is declared in the surrounding scope
			var ok bool
			classDecl.Name, ok = p.scope.Declare(LexicalDecl, p.data)
			if !ok {
				p.failMessage("identifier %s has already been declared", string(p.data))
				return
			}
		} else {
			//classDecl.Name, ok = p.scope.Declare(ExprDecl, p.data) // classes do not register vars
			classDecl.Name = &Var{p.data, nil, 1, ExprDecl}
		}
		p.next()
	} else if !expr && !p.deflt {
		// only "export default class" may omit the name
		p.fail("class declaration", IdentifierToken)
		return
	}
	if p.tt == ExtendsToken {
		p.next()
		classDecl.Extends = p.parseExpression(OpLHS)
	}
	if !p.consume("class declaration", OpenBraceToken) {
		return
	}
	for {
		if p.tt == ErrorToken {
			p.fail("class declaration")
			return
		} else if p.tt == SemicolonToken {
			// stray semicolons between class elements are allowed
			p.next()
			continue
		} else if p.tt == CloseBraceToken {
			p.next()
			break
		}
		classDecl.List = append(classDecl.List, p.parseClassElement())
	}
	return
}
// parseClassElement parses one class body element: a static block, a method
// (possibly static/async/generator/getter/setter), or a field definition.
// The contextual keywords static/async/get/set may themselves be used as
// method or field names, which is disambiguated by the token that follows.
func (p *Parser) parseClassElement() ClassElement {
	method := &MethodDecl{}
	var data []byte // either static, async, get, or set
	if p.tt == StaticToken {
		method.Static = true
		data = p.data
		p.next()
		if p.tt == OpenBraceToken {
			// static initialization block; behaves like a method body without return
			prevYield, prevAwait, prevRetrn := p.yield, p.await, p.retrn
			p.yield, p.await, p.retrn = false, true, false
			elem := ClassElement{StaticBlock: p.parseBlockStmt("class static block")}
			p.yield, p.await, p.retrn = prevYield, prevAwait, prevRetrn
			return elem
		}
	}
	if p.tt == MulToken {
		method.Generator = true
		p.next()
	} else if p.tt == AsyncToken {
		data = p.data
		p.next()
		// a line terminator after "async" cancels the async modifier
		if !p.prevLT {
			method.Async = true
			if p.tt == MulToken {
				method.Generator = true
				data = nil
				p.next()
			}
		}
	} else if p.tt == GetToken {
		method.Get = true
		data = p.data
		p.next()
	} else if p.tt == SetToken {
		method.Set = true
		data = p.data
		p.next()
	}
	isField := false
	if data != nil && p.tt == OpenParenToken {
		// (static) method name is: static, async, get, or set
		method.Name.Literal = LiteralExpr{IdentifierToken, data}
		if method.Async || method.Get || method.Set {
			method.Async = false
			method.Get = false
			method.Set = false
		} else {
			method.Static = false
		}
	} else if data != nil && (p.tt == EqToken || p.tt == SemicolonToken || p.tt == CloseBraceToken) {
		// (static) field name is: static, async, get, or set
		method.Name.Literal = LiteralExpr{IdentifierToken, data}
		if !method.Async && !method.Get && !method.Set {
			method.Static = false
		}
		isField = true
	} else {
		if p.tt == PrivateIdentifierToken {
			method.Name.Literal = LiteralExpr{p.tt, p.data}
			p.next()
		} else {
			method.Name = p.parsePropertyName("method or field definition")
		}
		if (data == nil || method.Static) && p.tt != OpenParenToken {
			// a name not followed by ( is a field definition
			isField = true
		}
	}
	if isField {
		var init IExpr
		if p.tt == EqToken {
			p.next()
			init = p.parseExpression(OpAssign)
		}
		return ClassElement{Field: Field{Static: method.Static, Name: method.Name, Init: init}}
	}
	// method definition: parse parameters and body in a fresh function scope
	parent := p.enterScope(&method.Body.Scope, true)
	prevAwait, prevYield, prevRetrn := p.await, p.yield, p.retrn
	p.await, p.yield, p.retrn = method.Async, method.Generator, true
	method.Params = p.parseFuncParams("method definition")
	prevAllowDirectivePrologue, prevExprLevel := p.allowDirectivePrologue, p.exprLevel
	p.allowDirectivePrologue, p.exprLevel = true, 0
	method.Body.List = p.parseStmtList("method function")
	p.allowDirectivePrologue, p.exprLevel = prevAllowDirectivePrologue, prevExprLevel
	p.await, p.yield, p.retrn = prevAwait, prevYield, prevRetrn
	p.exitScope(parent)
	return ClassElement{Method: method}
}
// parsePropertyName parses a property name: an identifier name, a string
// literal (reinterpreted as an identifier or decimal number when possible),
// a numeric literal, or a computed name in brackets.
func (p *Parser) parsePropertyName(in string) (propertyName PropertyName) {
	switch {
	case IsIdentifierName(p.tt):
		propertyName.Literal = LiteralExpr{IdentifierToken, p.data}
		p.next()
	case p.tt == StringToken:
		// reinterpret string as identifier or number if we can, except for empty strings
		if content := p.data[1 : len(p.data)-1]; AsIdentifierName(content) {
			propertyName.Literal = LiteralExpr{IdentifierToken, content}
		} else if AsDecimalLiteral(content) {
			propertyName.Literal = LiteralExpr{DecimalToken, content}
		} else {
			propertyName.Literal = LiteralExpr{p.tt, p.data}
		}
		p.next()
	case IsNumeric(p.tt):
		propertyName.Literal = LiteralExpr{p.tt, p.data}
		p.next()
	case p.tt == OpenBracketToken:
		// computed property name: [ AssignmentExpression ]
		p.next()
		propertyName.Computed = p.parseExpression(OpAssign)
		if !p.consume(in, CloseBracketToken) {
			return
		}
	default:
		p.fail(in, IdentifierToken, StringToken, NumericToken, OpenBracketToken)
		return
	}
	return
}
// parseBindingElement parses a binding (identifier or destructuring pattern)
// with an optional "= default" initializer.
func (p *Parser) parseBindingElement(decl DeclType) (bindingElement BindingElement) {
	// BindingElement
	bindingElement.Binding = p.parseBinding(decl)
	if p.tt != EqToken {
		return
	}
	p.next()
	bindingElement.Default = p.parseExpression(OpAssign)
	return
}
// parseBinding parses a binding target: a binding identifier, an array
// binding pattern, or an object binding pattern. Identifiers are declared in
// the current scope with the given declaration type.
func (p *Parser) parseBinding(decl DeclType) (binding IBinding) {
	// BindingIdentifier, BindingPattern
	if p.isIdentifierReference(p.tt) {
		var ok bool
		binding, ok = p.scope.Declare(decl, p.data)
		if !ok {
			p.failMessage("identifier %s has already been declared", string(p.data))
			return
		}
		p.next()
	} else if p.tt == OpenBracketToken {
		// array binding pattern: [a, , b, ...rest]
		p.next()
		array := BindingArray{}
		if p.tt == CommaToken {
			// leading elision
			array.List = append(array.List, BindingElement{})
		}
		last := 0 // index past the last non-elided element, to trim trailing elisions
		for p.tt != CloseBracketToken {
			// elision
			for p.tt == CommaToken {
				p.next()
				if p.tt == CommaToken {
					array.List = append(array.List, BindingElement{})
				}
			}
			// binding rest element
			if p.tt == EllipsisToken {
				p.next()
				array.Rest = p.parseBinding(decl)
				if p.tt != CloseBracketToken {
					p.fail("array binding pattern", CloseBracketToken)
					return
				}
				break
			} else if p.tt == CloseBracketToken {
				// drop trailing elisions
				array.List = array.List[:last]
				break
			}
			array.List = append(array.List, p.parseBindingElement(decl))
			last = len(array.List)
			if p.tt != CommaToken && p.tt != CloseBracketToken {
				p.fail("array binding pattern", CommaToken, CloseBracketToken)
				return
			}
		}
		p.next() // always CloseBracketToken
		binding = &array
	} else if p.tt == OpenBraceToken {
		// object binding pattern: {a, b: c, "d": e, ...rest}
		p.next()
		object := BindingObject{}
		for p.tt != CloseBraceToken {
			// binding rest property; must be a plain identifier and the last item
			if p.tt == EllipsisToken {
				p.next()
				if !p.isIdentifierReference(p.tt) {
					p.fail("object binding pattern", IdentifierToken)
					return
				}
				var ok bool
				object.Rest, ok = p.scope.Declare(decl, p.data)
				if !ok {
					p.failMessage("identifier %s has already been declared", string(p.data))
					return
				}
				p.next()
				if p.tt != CloseBraceToken {
					p.fail("object binding pattern", CloseBraceToken)
					return
				}
				break
			}
			item := BindingObjectItem{}
			if p.isIdentifierReference(p.tt) {
				name := p.data
				item.Key = &PropertyName{LiteralExpr{IdentifierToken, p.data}, nil}
				p.next()
				if p.tt == ColonToken {
					// property name + : + binding element
					p.next()
					item.Value = p.parseBindingElement(decl)
				} else {
					// single name binding: key doubles as the bound identifier
					var ok bool
					item.Key.Literal.Data = parse.Copy(item.Key.Literal.Data) // copy so that renaming doesn't rename the key
					item.Value.Binding, ok = p.scope.Declare(decl, name)
					if !ok {
						p.failMessage("identifier %s has already been declared", string(name))
						return
					}
					if p.tt == EqToken {
						p.next()
						item.Value.Default = p.parseExpression(OpAssign)
					}
				}
			} else {
				// computed, string, or numeric property name, then : binding element
				propertyName := p.parsePropertyName("object binding pattern")
				item.Key = &propertyName
				if !p.consume("object binding pattern", ColonToken) {
					return
				}
				item.Value = p.parseBindingElement(decl)
			}
			object.List = append(object.List, item)
			if p.tt == CommaToken {
				p.next()
			} else if p.tt != CloseBraceToken {
				p.fail("object binding pattern", CommaToken, CloseBraceToken)
				return
			}
		}
		p.next() // always CloseBraceToken
		binding = &object
	} else {
		p.fail("binding")
		return
	}
	return
}
// parseArrayLiteral parses an array literal, including elisions (holes) and
// spread elements. It assumes we're on the opening bracket.
func (p *Parser) parseArrayLiteral() (array ArrayExpr) {
	// assume we're on [
	p.next()
	prevComma := true // true when the previous token left a hole before this position
	for {
		if p.tt == ErrorToken {
			p.fail("expression")
			return
		} else if p.tt == CloseBracketToken {
			p.next()
			break
		} else if p.tt == CommaToken {
			if prevComma {
				// two commas in a row (or a leading comma) is an elision
				array.List = append(array.List, Element{})
			}
			prevComma = true
			p.next()
		} else {
			spread := p.tt == EllipsisToken
			if spread {
				p.next()
			}
			array.List = append(array.List, Element{p.parseAssignExprOrParam(), spread})
			prevComma = false
			if spread && p.tt != CloseBracketToken {
				// a spread not in final position cannot be an arrow-function rest parameter
				p.assumeArrowFunc = false
			}
		}
	}
	return
}
// parseObjectLiteral parses an object literal: spread properties, method
// definitions (incl. async/generator/get/set), key:value properties, and
// shorthand properties with optional initializers. The contextual keywords
// async/get/set may themselves be property names; the following token
// disambiguates. While p.assumeArrowFunc is set, shorthand names are
// speculatively declared as arguments in case this turns out to be an arrow
// function's destructured parameter.
func (p *Parser) parseObjectLiteral() (object ObjectExpr) {
	// assume we're on {
	p.next()
	for {
		if p.tt == ErrorToken {
			p.fail("object literal", CloseBraceToken)
			return
		} else if p.tt == CloseBraceToken {
			p.next()
			break
		}
		property := Property{}
		if p.tt == EllipsisToken {
			p.next()
			property.Spread = true
			property.Value = p.parseAssignExprOrParam()
			// only a trailing ...identifier can still be an arrow-function rest parameter
			if _, isIdent := property.Value.(*Var); !isIdent || p.tt != CloseBraceToken {
				p.assumeArrowFunc = false
			}
		} else {
			// try to parse as MethodDefinition, otherwise fall back to PropertyName:AssignExpr or IdentifierReference
			var data []byte
			method := MethodDecl{}
			if p.tt == MulToken {
				p.next()
				method.Generator = true
			} else if p.tt == AsyncToken {
				data = p.data
				p.next()
				// a line terminator after "async" cancels the async modifier
				if !p.prevLT {
					method.Async = true
					if p.tt == MulToken {
						p.next()
						method.Generator = true
						data = nil
					}
				} else {
					method.Name.Literal = LiteralExpr{IdentifierToken, data}
					data = nil
				}
			} else if p.tt == GetToken {
				data = p.data
				p.next()
				method.Get = true
			} else if p.tt == SetToken {
				data = p.data
				p.next()
				method.Set = true
			}
			// PropertyName
			if data != nil && !method.Generator && (p.tt == EqToken || p.tt == CommaToken || p.tt == CloseBraceToken || p.tt == ColonToken || p.tt == OpenParenToken) {
				// async/get/set was the property name itself, not a modifier
				method.Name.Literal = LiteralExpr{IdentifierToken, data}
				method.Async = false
				method.Get = false
				method.Set = false
			} else if !method.Name.IsSet() { // did not parse async [LT]
				method.Name = p.parsePropertyName("object literal")
				if !method.Name.IsSet() {
					return
				}
			}
			if p.tt == OpenParenToken {
				// MethodDefinition
				parent := p.enterScope(&method.Body.Scope, true)
				prevAwait, prevYield, prevRetrn := p.await, p.yield, p.retrn
				p.await, p.yield, p.retrn = method.Async, method.Generator, true
				method.Params = p.parseFuncParams("method definition")
				method.Body.List = p.parseStmtList("method definition")
				p.await, p.yield, p.retrn = prevAwait, prevYield, prevRetrn
				p.exitScope(parent)
				property.Value = &method
				p.assumeArrowFunc = false
			} else if p.tt == ColonToken {
				// PropertyName : AssignmentExpression
				p.next()
				property.Name = &method.Name
				property.Value = p.parseAssignExprOrParam()
			} else if method.Name.IsComputed() || !p.isIdentifierReference(method.Name.Literal.TokenType) {
				p.fail("object literal", ColonToken, OpenParenToken)
				return
			} else {
				// IdentifierReference (= AssignmentExpression)?
				name := method.Name.Literal.Data
				method.Name.Literal.Data = parse.Copy(method.Name.Literal.Data) // copy so that renaming doesn't rename the key
				property.Name = &method.Name // set key explicitly so after renaming the original is still known
				if p.assumeArrowFunc {
					// speculatively declare as an arrow-function argument
					var ok bool
					property.Value, ok = p.scope.Declare(ArgumentDecl, name)
					if !ok {
						property.Value = p.scope.Use(name)
						p.assumeArrowFunc = false
					}
				} else {
					property.Value = p.scope.Use(name)
				}
				if p.tt == EqToken {
					// shorthand with initializer, e.g. {a = 1}; only valid as a
					// destructuring target, the initializer itself is not one
					p.next()
					prevAssumeArrowFunc := p.assumeArrowFunc
					p.assumeArrowFunc = false
					property.Init = p.parseExpression(OpAssign)
					p.assumeArrowFunc = prevAssumeArrowFunc
				}
			}
		}
		object.List = append(object.List, property)
		if p.tt == CommaToken {
			p.next()
		} else if p.tt != CloseBraceToken {
			p.fail("object literal")
			return
		}
	}
	return
}
// parseTemplateLiteral parses a (possibly tagged) template literal. precLeft
// is the precedence of the expression to the left, used to set the template's
// own precedence. Assumes we're on a Template or TemplateStart token.
func (p *Parser) parseTemplateLiteral(precLeft OpPrec) (template TemplateExpr) {
	if precLeft < OpMember {
		template.Prec = OpCall
	} else {
		template.Prec = OpMember
	}
	// each start/middle chunk is followed by an interpolated expression
	for p.tt == TemplateStartToken || p.tt == TemplateMiddleToken {
		chunk := p.data
		p.next()
		template.List = append(template.List, TemplatePart{chunk, p.parseExpression(OpExpr)})
	}
	if p.tt != TemplateToken && p.tt != TemplateEndToken {
		p.fail("template literal", TemplateToken)
		return
	}
	template.Tail = p.data
	p.next() // TemplateEndToken
	return
}
// parseArguments parses a call's argument list, including spread arguments.
// Assumes we're on the opening parenthesis.
func (p *Parser) parseArguments() (args Args) {
	p.next()
	args.List = make([]Arg, 0, 4)
	for p.tt != CloseParenToken && p.tt != ErrorToken {
		spread := p.tt == EllipsisToken
		if spread {
			p.next()
		}
		args.List = append(args.List, Arg{
			Value: p.parseExpression(OpAssign),
			Rest:  spread,
		})
		if p.tt == CloseParenToken {
			break
		}
		if p.tt != CommaToken {
			p.fail("arguments", CommaToken, CloseParenToken)
			return
		}
		p.next() // CommaToken
	}
	p.consume("arguments", CloseParenToken)
	return
}
// parseAsyncArrowFunc parses an async arrow function whose "async" keyword has
// already been consumed: either a single identifier parameter or a
// parenthesized parameter list, then the arrow body.
func (p *Parser) parseAsyncArrowFunc() (arrowFunc *ArrowFunc) {
	// expect we're at Identifier or Yield or (
	arrowFunc = &ArrowFunc{}
	parent := p.enterScope(&arrowFunc.Body.Scope, true)
	prevAwait, prevYield := p.await, p.yield
	p.await, p.yield = true, false
	if IsIdentifier(p.tt) || !prevYield && p.tt == YieldToken {
		// single-identifier parameter without parentheses
		ref, _ := p.scope.Declare(ArgumentDecl, p.data) // cannot fail
		p.next()
		arrowFunc.Params.List = []BindingElement{{Binding: ref}}
	} else {
		arrowFunc.Params = p.parseFuncParams("arrow function")
		// CallExpression of 'async(params)' already handled
	}
	arrowFunc.Async = true
	arrowFunc.Body.List = p.parseArrowFuncBody()
	p.await, p.yield = prevAwait, prevYield
	p.exitScope(parent)
	return
}
// parseIdentifierArrowFunc parses an arrow function whose single unbracketed
// parameter was already consumed as the identifier reference v (e.g. "x => ...").
// The earlier speculative use of v is converted into an argument declaration
// of the arrow function's scope.
func (p *Parser) parseIdentifierArrowFunc(v *Var) (arrowFunc *ArrowFunc) {
	// expect we're at =>
	arrowFunc = &ArrowFunc{}
	parent := p.enterScope(&arrowFunc.Body.Scope, true)
	prevAwait, prevYield := p.await, p.yield
	p.await, p.yield = false, false
	if 1 < v.Uses {
		// the identifier is also used elsewhere: leave those uses intact and
		// declare a fresh argument variable with the same name
		v.Uses--
		v, _ = p.scope.Declare(ArgumentDecl, parse.Copy(v.Data)) // cannot fail
	} else {
		// if v.Uses==1 it must be undeclared and be the last added
		p.scope.Parent.Undeclared = p.scope.Parent.Undeclared[:len(p.scope.Parent.Undeclared)-1]
		v.Decl = ArgumentDecl
		p.scope.Declared = append(p.scope.Declared, v)
	}
	arrowFunc.Params.List = []BindingElement{{v, nil}}
	arrowFunc.Body.List = p.parseArrowFuncBody()
	p.await, p.yield = prevAwait, prevYield
	p.exitScope(parent)
	return
}
// parseArrowFuncBody parses the => token and the arrow function's body:
// either a braced statement list or a single expression wrapped in an
// implicit return. A line terminator before => is a syntax error.
func (p *Parser) parseArrowFuncBody() (list []IStmt) {
	// expect we're at arrow
	if p.tt != ArrowToken {
		p.fail("arrow function", ArrowToken)
		return
	} else if p.prevLT {
		p.fail("expression")
		return
	}
	p.next()
	// mark undeclared vars as arguments in `function f(a=b){var b}` where the b's are different vars
	p.scope.MarkFuncArgs()
	if p.tt == OpenBraceToken {
		prevIn, prevRetrn := p.in, p.retrn
		p.in, p.retrn = true, true
		prevAllowDirectivePrologue, prevExprLevel := p.allowDirectivePrologue, p.exprLevel
		p.allowDirectivePrologue, p.exprLevel = true, 0
		list = p.parseStmtList("arrow function")
		p.allowDirectivePrologue, p.exprLevel = prevAllowDirectivePrologue, prevExprLevel
		p.in, p.retrn = prevIn, prevRetrn
	} else {
		// concise body: a single expression becomes an implicit return
		list = []IStmt{&ReturnStmt{p.parseExpression(OpAssign)}}
	}
	return
}
// parseIdentifierExpression parses an expression that begins with the already
// consumed identifier ident, registering a use in the current scope.
func (p *Parser) parseIdentifierExpression(prec OpPrec, ident []byte) IExpr {
	return p.parseExpressionSuffix(p.scope.Use(ident), prec, OpPrimary)
}
// parseAsyncExpression parses an expression beginning with the already
// consumed contextual keyword "async" (its bytes passed in async): an async
// function expression, an async arrow function, an async(...) call, or a
// plain identifier reference named async.
func (p *Parser) parseAsyncExpression(prec OpPrec, async []byte) IExpr {
	// IdentifierReference, AsyncFunctionExpression, AsyncGeneratorExpression
	// CoverCallExpressionAndAsyncArrowHead, AsyncArrowFunction
	// assume we're at a token after async
	var left IExpr
	precLeft := OpPrimary
	if !p.prevLT && p.tt == FunctionToken {
		// primary expression
		left = p.parseAsyncFuncExpr()
	} else if !p.prevLT && prec <= OpAssign && (p.tt == OpenParenToken || IsIdentifier(p.tt) || p.tt == YieldToken || p.tt == AwaitToken) {
		// async arrow function expression or call expression
		if p.tt == AwaitToken || p.yield && p.tt == YieldToken {
			// await/yield cannot name an async arrow function's parameter here
			p.fail("arrow function")
			return nil
		} else if p.tt == OpenParenToken {
			// ambiguous between async(args) call and async (params) => ...; resolved later
			return p.parseParenthesizedExpression(prec, async)
		}
		left = p.parseAsyncArrowFunc()
		precLeft = OpAssign
	} else {
		// plain identifier named "async"
		left = p.scope.Use(async)
	}
	// can be async(args), async => ..., or e.g. async + ...
	return p.parseExpressionSuffix(left, prec, precLeft)
}
// parseExpression parses an expression that has a precedence of prec or higher.
// It dispatches on the leading token to build the primary/unary expression,
// then hands off to parseExpressionSuffix for binary/postfix continuation.
// Nesting is bounded to avoid stack exhaustion on adversarial input.
func (p *Parser) parseExpression(prec OpPrec) IExpr {
	p.exprLevel++
	if 1000 < p.exprLevel {
		p.failMessage("too many nested expressions")
		return nil
	}

	// reparse input if we have / or /= as the beginning of a new expression, this should be a regular expression!
	if p.tt == DivToken || p.tt == DivEqToken {
		p.tt, p.data = p.l.RegExp()
		if p.tt == ErrorToken {
			p.fail("regular expression")
			return nil
		}
	}

	var left IExpr
	precLeft := OpPrimary

	// fast paths for the most common leading tokens
	if IsIdentifier(p.tt) && p.tt != AsyncToken {
		left = p.scope.Use(p.data)
		p.next()
		suffix := p.parseExpressionSuffix(left, prec, precLeft)
		p.exprLevel--
		return suffix
	} else if IsNumeric(p.tt) {
		left = &LiteralExpr{p.tt, p.data}
		p.next()
		suffix := p.parseExpressionSuffix(left, prec, precLeft)
		p.exprLevel--
		return suffix
	}

	switch tt := p.tt; tt {
	case StringToken, ThisToken, NullToken, TrueToken, FalseToken, RegExpToken:
		left = &LiteralExpr{p.tt, p.data}
		p.next()
	case OpenBracketToken:
		// array literal; the "in" operator is always allowed inside brackets
		prevIn := p.in
		p.in = true
		array := p.parseArrayLiteral()
		p.in = prevIn
		left = &array
	case OpenBraceToken:
		prevIn := p.in
		p.in = true
		object := p.parseObjectLiteral()
		p.in = prevIn
		left = &object
	case OpenParenToken:
		// parenthesized expression or arrow parameter list
		if OpAssign < prec {
			// must be a parenthesized expression
			p.next()
			prevIn := p.in
			p.in = true
			left = &GroupExpr{p.parseExpression(OpExpr)}
			p.in = prevIn
			if !p.consume("expression", CloseParenToken) {
				return nil
			}
			break
		}
		suffix := p.parseParenthesizedExpression(prec, nil)
		p.exprLevel--
		return suffix
	case NotToken, BitNotToken, TypeofToken, VoidToken, DeleteToken:
		if OpUnary < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{tt, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case AddToken:
		if OpUnary < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		// unary + is distinguished from binary + by a dedicated token
		left = &UnaryExpr{PosToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case SubToken:
		if OpUnary < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{NegToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case IncrToken:
		if OpUpdate < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{PreIncrToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case DecrToken:
		if OpUpdate < prec {
			p.fail("expression")
			return nil
		}
		p.next()
		left = &UnaryExpr{PreDecrToken, p.parseExpression(OpUnary)}
		precLeft = OpUnary
	case AwaitToken:
		// either accepted as IdentifierReference or as AwaitExpression
		if p.await && prec <= OpUnary {
			p.next()
			left = &UnaryExpr{tt, p.parseExpression(OpUnary)}
			precLeft = OpUnary
		} else if p.await {
			p.fail("expression")
			return nil
		} else {
			left = p.scope.Use(p.data)
			p.next()
		}
	case NewToken:
		p.next()
		if p.tt == DotToken {
			// new.target
			p.next()
			if !p.consume("new.target expression", TargetToken) {
				return nil
			}
			left = &NewTargetExpr{}
			precLeft = OpMember
		} else {
			newExpr := &NewExpr{p.parseExpression(OpNew), nil}
			if p.tt == OpenParenToken {
				args := p.parseArguments()
				if len(args.List) != 0 {
					newExpr.Args = &args
				}
				precLeft = OpMember
			} else {
				precLeft = OpNew
			}
			left = newExpr
		}
	case ImportToken:
		// OpMember < prec does never happen
		left = &LiteralExpr{p.tt, p.data}
		p.next()
		if p.tt == DotToken {
			// import.meta
			p.next()
			if !p.consume("import.meta expression", MetaToken) {
				return nil
			}
			left = &ImportMetaExpr{}
			precLeft = OpMember
		} else if p.tt != OpenParenToken {
			// bare "import" is only valid as import(...) or import.meta
			p.fail("import expression", OpenParenToken)
			return nil
		} else if OpCall < prec {
			p.fail("expression")
			return nil
		} else {
			precLeft = OpCall
		}
	case SuperToken:
		// OpMember < prec does never happen
		left = &LiteralExpr{p.tt, p.data}
		p.next()
		// super must be followed by a property access or (when calls are allowed) a call
		if OpCall < prec && p.tt != DotToken && p.tt != OpenBracketToken {
			p.fail("super expression", OpenBracketToken, DotToken)
			return nil
		} else if p.tt != DotToken && p.tt != OpenBracketToken && p.tt != OpenParenToken {
			p.fail("super expression", OpenBracketToken, OpenParenToken, DotToken)
			return nil
		}
		if OpCall < prec {
			precLeft = OpMember
		} else {
			precLeft = OpCall
		}
	case YieldToken:
		// either accepted as IdentifierReference or as YieldExpression
		if p.yield && prec <= OpAssign {
			// YieldExpression
			p.next()
			yieldExpr := YieldExpr{}
			if !p.prevLT {
				// no operand follows after a line terminator
				yieldExpr.Generator = p.tt == MulToken
				if yieldExpr.Generator {
					p.next()
					yieldExpr.X = p.parseExpression(OpAssign)
				} else if p.tt != CloseBraceToken && p.tt != CloseBracketToken && p.tt != CloseParenToken && p.tt != ColonToken && p.tt != CommaToken && p.tt != SemicolonToken {
					yieldExpr.X = p.parseExpression(OpAssign)
				}
			}
			left = &yieldExpr
			precLeft = OpAssign
		} else if p.yield {
			p.fail("expression")
			return nil
		} else {
			left = p.scope.Use(p.data)
			p.next()
		}
	case AsyncToken:
		async := p.data
		p.next()
		prevIn := p.in
		p.in = true
		left = p.parseAsyncExpression(prec, async)
		p.in = prevIn
	case ClassToken:
		prevIn := p.in
		p.in = true
		left = p.parseClassExpr()
		p.in = prevIn
	case FunctionToken:
		prevIn := p.in
		p.in = true
		left = p.parseFuncExpr()
		p.in = prevIn
	case TemplateToken, TemplateStartToken:
		prevIn := p.in
		p.in = true
		template := p.parseTemplateLiteral(precLeft)
		left = &template
		p.in = prevIn
	case PrivateIdentifierToken:
		// only valid as the left operand of "in": #priv in obj
		if OpCompare < prec || !p.in {
			p.fail("expression")
			return nil
		}
		left = &LiteralExpr{p.tt, p.data}
		p.next()
		if p.tt != InToken {
			p.fail("relational expression", InToken)
			return nil
		}
	default:
		p.fail("expression")
		return nil
	}
	suffix := p.parseExpressionSuffix(left, prec, precLeft)
	p.exprLevel--
	return suffix
}
// parseExpressionSuffix parses the left-recursive continuation of an already
// parsed operand: binary and conditional operators, member access, calls,
// tagged templates, optional chaining, postfix updates, comma sequences, and
// `identifier => ...` arrow functions. left is the operand parsed so far,
// prec is the minimum precedence the caller accepts, and precLeft is the
// precedence class of left. It returns left unchanged as soon as the next
// token binds less tightly than prec, or nil after reporting a parse error.
func (p *Parser) parseExpressionSuffix(left IExpr, prec, precLeft OpPrec) IExpr {
	for i := 0; ; i++ {
		// guard against pathologically nested input blowing the stack;
		// i counts suffixes consumed in this call on top of p.exprLevel
		if 1000 < p.exprLevel+i {
			p.failMessage("too many nested expressions")
			return nil
		}
		switch tt := p.tt; tt {
		case EqToken, MulEqToken, DivEqToken, ModEqToken, ExpEqToken, AddEqToken, SubEqToken, LtLtEqToken, GtGtEqToken, GtGtGtEqToken, BitAndEqToken, BitXorEqToken, BitOrEqToken, AndEqToken, OrEqToken, NullishEqToken:
			// assignment operators: right side parsed at OpAssign (right-associative)
			if OpAssign < prec {
				return left
			} else if precLeft < OpLHS {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpAssign)}
			precLeft = OpAssign
		case LtToken, LtEqToken, GtToken, GtEqToken, InToken, InstanceofToken:
			// `in` is only allowed when p.in is set (e.g. not in a for-init)
			if OpCompare < prec || !p.in && tt == InToken {
				return left
			} else if precLeft < OpCompare {
				// can only fail after a yield or arrow function expression
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpShift)}
			precLeft = OpCompare
		case EqEqToken, NotEqToken, EqEqEqToken, NotEqEqToken:
			if OpEquals < prec {
				return left
			} else if precLeft < OpEquals {
				// can only fail after a yield or arrow function expression
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpCompare)}
			precLeft = OpEquals
		case AndToken:
			if OpAnd < prec {
				return left
			} else if precLeft < OpAnd {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitOr)}
			precLeft = OpAnd
		case OrToken:
			if OpOr < prec {
				return left
			} else if precLeft < OpOr {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpAnd)}
			precLeft = OpOr
		case NullishToken:
			// `??` may not be mixed unparenthesized with && or || (hence the precLeft check)
			if OpCoalesce < prec {
				return left
			} else if precLeft < OpBitOr && precLeft != OpCoalesce {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitOr)}
			precLeft = OpCoalesce
		case DotToken:
			// OpMember < prec does never happen
			if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			p.next()
			if !IsIdentifierName(p.tt) && p.tt != PrivateIdentifierToken {
				p.fail("dot expression", IdentifierToken)
				return nil
			}
			// the resulting member expression keeps OpCall precedence if a
			// call appeared anywhere to its left
			exprPrec := OpMember
			if precLeft < OpMember {
				exprPrec = OpCall
			}
			if p.tt != PrivateIdentifierToken {
				p.tt = IdentifierToken // any identifier name (incl. keywords) is valid after a dot
			}
			left = &DotExpr{left, LiteralExpr{p.tt, p.data}, exprPrec, false}
			p.next()
			if precLeft < OpMember {
				precLeft = OpCall
			} else {
				precLeft = OpMember
			}
		case OpenBracketToken:
			// OpMember < prec does never happen
			if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			p.next()
			exprPrec := OpMember
			if precLeft < OpMember {
				exprPrec = OpCall
			}
			prevIn := p.in
			p.in = true // `in` is always allowed inside brackets
			left = &IndexExpr{left, p.parseExpression(OpExpr), exprPrec, false}
			p.in = prevIn
			if !p.consume("index expression", CloseBracketToken) {
				return nil
			}
			if precLeft < OpMember {
				precLeft = OpCall
			} else {
				precLeft = OpMember
			}
		case OpenParenToken:
			if OpCall < prec {
				return left
			} else if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			prevIn := p.in
			p.in = true // `in` is always allowed inside arguments
			left = &CallExpr{left, p.parseArguments(), false}
			precLeft = OpCall
			p.in = prevIn
		case TemplateToken, TemplateStartToken:
			// tagged template literal; OpMember < prec does never happen
			if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			prevIn := p.in
			p.in = true
			template := p.parseTemplateLiteral(precLeft)
			template.Tag = left
			left = &template
			if precLeft < OpMember {
				precLeft = OpCall
			} else {
				precLeft = OpMember
			}
			p.in = prevIn
		case OptChainToken:
			// `?.` followed by a call, index, template, or property name
			if OpCall < prec {
				return left
			} else if precLeft < OpCall {
				p.fail("expression")
				return nil
			}
			p.next()
			if p.tt == OpenParenToken {
				left = &CallExpr{left, p.parseArguments(), true}
			} else if p.tt == OpenBracketToken {
				p.next()
				left = &IndexExpr{left, p.parseExpression(OpExpr), OpCall, true}
				if !p.consume("optional chaining expression", CloseBracketToken) {
					return nil
				}
			} else if p.tt == TemplateToken || p.tt == TemplateStartToken {
				template := p.parseTemplateLiteral(precLeft)
				template.Prec = OpCall
				template.Tag = left
				template.Optional = true
				left = &template
			} else if IsIdentifierName(p.tt) {
				left = &DotExpr{left, LiteralExpr{IdentifierToken, p.data}, OpCall, true}
				p.next()
			} else if p.tt == PrivateIdentifierToken {
				left = &DotExpr{left, LiteralExpr{p.tt, p.data}, OpCall, true}
				p.next()
			} else {
				p.fail("optional chaining expression", IdentifierToken, OpenParenToken, OpenBracketToken, TemplateToken)
				return nil
			}
			precLeft = OpCall
		case IncrToken:
			// postfix ++; a preceding line terminator ends the expression instead
			if p.prevLT || OpUpdate < prec {
				return left
			} else if precLeft < OpLHS {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &UnaryExpr{PostIncrToken, left}
			precLeft = OpUpdate
		case DecrToken:
			// postfix --; a preceding line terminator ends the expression instead
			if p.prevLT || OpUpdate < prec {
				return left
			} else if precLeft < OpLHS {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &UnaryExpr{PostDecrToken, left}
			precLeft = OpUpdate
		case ExpToken:
			// ** is right-associative: right side parsed at OpExp again
			if OpExp < prec {
				return left
			} else if precLeft < OpUpdate {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpExp)}
			precLeft = OpExp
		case MulToken, DivToken, ModToken:
			if OpMul < prec {
				return left
			} else if precLeft < OpMul {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpExp)}
			precLeft = OpMul
		case AddToken, SubToken:
			if OpAdd < prec {
				return left
			} else if precLeft < OpAdd {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpMul)}
			precLeft = OpAdd
		case LtLtToken, GtGtToken, GtGtGtToken:
			if OpShift < prec {
				return left
			} else if precLeft < OpShift {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpAdd)}
			precLeft = OpShift
		case BitAndToken:
			if OpBitAnd < prec {
				return left
			} else if precLeft < OpBitAnd {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpEquals)}
			precLeft = OpBitAnd
		case BitXorToken:
			if OpBitXor < prec {
				return left
			} else if precLeft < OpBitXor {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitAnd)}
			precLeft = OpBitXor
		case BitOrToken:
			if OpBitOr < prec {
				return left
			} else if precLeft < OpBitOr {
				p.fail("expression")
				return nil
			}
			p.next()
			left = &BinaryExpr{tt, left, p.parseExpression(OpBitXor)}
			precLeft = OpBitOr
		case QuestionToken:
			// conditional expression `cond ? a : b`
			if OpAssign < prec {
				return left
			} else if precLeft < OpCoalesce {
				p.fail("expression")
				return nil
			}
			p.next()
			prevIn := p.in
			p.in = true // `in` is always allowed in the consequent
			ifExpr := p.parseExpression(OpAssign)
			p.in = prevIn
			if !p.consume("conditional expression", ColonToken) {
				return nil
			}
			elseExpr := p.parseExpression(OpAssign)
			left = &CondExpr{left, ifExpr, elseExpr}
			precLeft = OpAssign
		case CommaToken:
			if OpExpr < prec {
				return left
			}
			p.next()
			if commaExpr, ok := left.(*CommaExpr); ok {
				// flatten into the existing comma list instead of nesting
				commaExpr.List = append(commaExpr.List, p.parseExpression(OpAssign))
				i-- // adjust expression nesting limit
			} else {
				left = &CommaExpr{[]IExpr{left, p.parseExpression(OpAssign)}}
			}
			precLeft = OpExpr
		case ArrowToken:
			// handle identifier => ..., where identifier could also be yield or await
			if OpAssign < prec {
				return left
			} else if precLeft < OpPrimary || p.prevLT {
				p.fail("expression")
				return nil
			}
			v, ok := left.(*Var)
			if !ok {
				p.fail("expression")
				return nil
			}
			left = p.parseIdentifierArrowFunc(v)
			precLeft = OpAssign
		default:
			return left
		}
	}
}
// parseAssignExprOrParam parses either a BindingElement or an
// AssignmentExpression while inside a possible arrow function parameter list.
// A BindingIdentifier with an optional Initializer is handled here;
// BindingPattern is handled by parseArrayLiteral or parseObjectLiteral.
func (p *Parser) parseAssignExprOrParam() IExpr {
	if !p.assumeArrowFunc || !p.isIdentifierReference(p.tt) {
		// anything but an identifier, array, or object literal rules out an
		// arrow function parameter list
		if p.tt != OpenBracketToken && p.tt != OpenBraceToken {
			p.assumeArrowFunc = false
		}
		return p.parseExpression(OpAssign)
	}

	startTT, name := p.tt, p.data
	p.next()
	switch p.tt {
	case EqToken, CommaToken, CloseParenToken, CloseBraceToken, CloseBracketToken:
		// identifier followed by a delimiter or initializer: declare it as a
		// tentative argument; if it was already declared, fall through below
		if ref, ok := p.scope.Declare(ArgumentDecl, name); ok {
			p.assumeArrowFunc = false
			expr := p.parseExpressionSuffix(ref, OpAssign, OpPrimary)
			p.assumeArrowFunc = true
			return expr
		}
	}

	// not a parameter after all; continue as a regular expression
	p.assumeArrowFunc = false
	if startTT == AsyncToken {
		return p.parseAsyncExpression(OpAssign, name)
	}
	return p.parseIdentifierExpression(OpAssign, name)
}
// parseParenthesizedExpression parses one of: ArrowFunc, AsyncArrowFunc,
// AsyncCallExpr, or a ParenthesizedExpr. The decision is only known after the
// closing parenthesis (whether `=>` follows), so the contents are parsed as an
// Arguments expression first and converted afterwards.
func (p *Parser) parseParenthesizedExpression(prec OpPrec, async []byte) IExpr {
	// parse ArrowFunc, AsyncArrowFunc, AsyncCallExpr, ParenthesizedExpr
	var left IExpr
	precLeft := OpPrimary

	// expect to be at (
	p.next()

	isAsync := async != nil // prevLT is false before open parenthesis

	arrowFunc := &ArrowFunc{}
	parent := p.enterScope(&arrowFunc.Body.Scope, true)
	prevAssumeArrowFunc, prevIn := p.assumeArrowFunc, p.in
	p.assumeArrowFunc, p.in = true, true

	// parse an Arguments expression but assume we might be parsing an (async) arrow function or ParenthesizedExpression. If this is really an arrow function, parsing as an Arguments expression cannot fail as AssignmentExpression, ArrayLiteral, and ObjectLiteral are supersets of SingleNameBinding, ArrayBindingPattern, and ObjectBindingPattern respectively. Any identifier that would be a BindingIdentifier in case of an arrow function, will be added as such to the scope. If finally this is not an arrow function, we will demote those variables as undeclared and merge them with the parent scope.
	rests := 0
	var args Args
	for p.tt != CloseParenToken && p.tt != ErrorToken {
		if 0 < len(args.List) && args.List[len(args.List)-1].Rest {
			// only last parameter can have ellipsis
			p.assumeArrowFunc = false
			if !isAsync {
				p.fail("arrow function", CloseParenToken)
			}
		}

		rest := p.tt == EllipsisToken
		if rest {
			p.next()
			rests++
		}

		args.List = append(args.List, Arg{p.parseAssignExprOrParam(), rest})
		if p.tt != CommaToken {
			break
		}
		p.next()
	}
	if p.tt != CloseParenToken {
		p.fail("expression")
		return nil
	}
	p.next()
	// only now can we decide: `=>` on the same line makes this an arrow function
	isArrowFunc := !p.prevLT && p.tt == ArrowToken && p.assumeArrowFunc
	hasLastRest := 0 < rests && p.assumeArrowFunc
	p.assumeArrowFunc, p.in = prevAssumeArrowFunc, prevIn

	if isArrowFunc {
		prevAwait, prevYield := p.await, p.yield
		p.await, p.yield = isAsync, false

		// arrow function: convert the parsed arguments into formal parameters
		arrowFunc.Async = isAsync
		arrowFunc.Params = Params{List: make([]BindingElement, 0, len(args.List)-rests)}
		for _, arg := range args.List {
			if arg.Rest {
				arrowFunc.Params.Rest = p.exprToBinding(arg.Value)
			} else {
				arrowFunc.Params.List = append(arrowFunc.Params.List, p.exprToBindingElement(arg.Value)) // can not fail when assumeArrowFunc is set
			}
		}
		arrowFunc.Body.List = p.parseArrowFuncBody()

		p.await, p.yield = prevAwait, prevYield
		p.exitScope(parent)

		left = arrowFunc
		precLeft = OpAssign
	} else if !isAsync && (len(args.List) == 0 || hasLastRest) {
		// a non-async parenthesized expression may not be empty or end in a rest element
		p.fail("arrow function", ArrowToken)
		return nil
	} else if isAsync && OpCall < prec || !isAsync && 0 < rests {
		p.fail("expression")
		return nil
	} else {
		// for any nested FuncExpr/ArrowFunc scope, Parent will point to the temporary scope created in case this was an arrow function instead of a parenthesized expression. This is not a problem as Parent is only used for defining new variables, and we already parsed all the nested scopes so that Parent (not Func) are not relevant anymore. Anyways, the Parent will just point to an empty scope, whose Parent/Func will point to valid scopes. This should not be a big deal.
		// Here we move all declared ArgumentDecls (in case of an arrow function) to its parent scope as undeclared variables (identifiers used in a parenthesized expression).
		p.exitScope(parent)
		arrowFunc.Body.Scope.UndeclareScope()

		if isAsync {
			// call expression
			left = p.scope.Use(async)
			left = &CallExpr{left, args, false}
			precLeft = OpCall
		} else {
			// parenthesized expression
			if 1 < len(args.List) {
				commaExpr := &CommaExpr{}
				for _, arg := range args.List {
					commaExpr.List = append(commaExpr.List, arg.Value)
				}
				left = &GroupExpr{commaExpr}
			} else {
				left = &GroupExpr{args.List[0].Value}
			}
		}
	}
	return p.parseExpressionSuffix(left, prec, precLeft)
}
// exprToBindingElement and exprToBinding convert a CoverParenthesizedExpressionAndArrowParameterList into FormalParameters.
// Any unbound variables of the parameters (Initializer, ComputedPropertyName) are kept in the parent scope
func (p *Parser) exprToBindingElement(expr IExpr) (bindingElement BindingElement) {
	// an assignment `a = b` becomes a binding for `a` with default value `b`
	if assign, isAssign := expr.(*BinaryExpr); isAssign && assign.Op == EqToken {
		bindingElement.Binding = p.exprToBinding(assign.X)
		bindingElement.Default = assign.Y
		return
	}
	bindingElement.Binding = p.exprToBinding(expr)
	return
}
// exprToBinding converts an expression from the cover grammar into an
// IBinding: a Var stays a binding identifier, an ArrayExpr becomes a
// BindingArray, and an ObjectExpr becomes a BindingObject. A nil expression
// yields a nil binding (an elided element); any other expression is invalid
// as an arrow function parameter and raises a parse error.
func (p *Parser) exprToBinding(expr IExpr) (binding IBinding) {
	if expr == nil {
		// no-op
	} else if v, ok := expr.(*Var); ok {
		binding = v
	} else if array, ok := expr.(*ArrayExpr); ok {
		bindingArray := BindingArray{}
		for _, item := range array.List {
			if item.Spread {
				// can only BindingIdentifier or BindingPattern
				bindingArray.Rest = p.exprToBinding(item.Value)
				break // rest element is necessarily the last one
			}
			bindingArray.List = append(bindingArray.List, p.exprToBindingElement(item.Value))
		}
		binding = &bindingArray
	} else if object, ok := expr.(*ObjectExpr); ok {
		bindingObject := BindingObject{}
		for _, item := range object.List {
			if item.Spread {
				// can only be BindingIdentifier
				bindingObject.Rest = item.Value.(*Var)
				break // rest element is necessarily the last one
			}
			bindingElement := p.exprToBindingElement(item.Value)
			if v, ok := item.Value.(*Var); item.Name == nil || (ok && item.Name.IsIdent(v.Data)) {
				// IdentifierReference : Initializer
				bindingElement.Default = item.Init
			}
			bindingObject.List = append(bindingObject.List, BindingObjectItem{Key: item.Name, Value: bindingElement})
		}
		binding = &bindingObject
	} else {
		p.failMessage("invalid parameters in arrow function")
	}
	return
}
// isIdentifierReference reports whether tt may be used as an identifier
// reference here: yield and await only qualify outside generator and async
// contexts respectively.
func (p *Parser) isIdentifierReference(tt TokenType) bool {
	if IsIdentifier(tt) {
		return true
	}
	return tt == YieldToken && !p.yield || tt == AwaitToken && !p.await
}
package js
import "strconv"
// OpPrec is the operator precedence
type OpPrec int

// OpPrec values.
const (
	OpExpr     OpPrec = iota // a,b
	OpAssign                 // a?b:c, yield x, ()=>x, async ()=>x, a=b, a+=b, ...
	OpCoalesce               // a??b
	OpOr                     // a||b
	OpAnd                    // a&&b
	OpBitOr                  // a|b
	OpBitXor                 // a^b
	OpBitAnd                 // a&b
	OpEquals                 // a==b, a!=b, a===b, a!==b
	OpCompare                // a<b, a>b, a<=b, a>=b, a instanceof b, a in b
	OpShift                  // a<<b, a>>b, a>>>b
	OpAdd                    // a+b, a-b
	OpMul                    // a*b, a/b, a%b
	OpExp                    // a**b
	OpUnary                  // ++x, --x, delete x, void x, typeof x, +x, -x, ~x, !x, await x
	OpUpdate                 // x++, x--
	OpLHS                    // CallExpr/OptChainExpr or NewExpr
	OpCall                   // a?.b, a(b), super(a), import(a)
	OpNew                    // new a
	OpMember                 // a[b], a.b, a`b`, super[x], super.x, new.target, import.meta, new a(b)
	OpPrimary                // literal, function, class, parenthesized
)

// String returns the name of the precedence level, or Invalid(n) for an
// unknown value.
func (prec OpPrec) String() string {
	switch prec {
	case OpExpr:
		return "OpExpr"
	case OpAssign:
		return "OpAssign"
	case OpCoalesce:
		return "OpCoalesce"
	case OpOr:
		return "OpOr"
	case OpAnd:
		return "OpAnd"
	case OpBitOr:
		return "OpBitOr"
	case OpBitXor:
		return "OpBitXor"
	case OpBitAnd:
		return "OpBitAnd"
	case OpEquals:
		return "OpEquals"
	case OpCompare:
		return "OpCompare"
	case OpShift:
		return "OpShift"
	case OpAdd:
		// fixed: previously returned the typo "OAdd", inconsistent with
		// every other case ("Op" + name)
		return "OpAdd"
	case OpMul:
		return "OpMul"
	case OpExp:
		return "OpExp"
	case OpUnary:
		return "OpUnary"
	case OpUpdate:
		return "OpUpdate"
	case OpLHS:
		return "OpLHS"
	case OpCall:
		return "OpCall"
	case OpNew:
		return "OpNew"
	case OpMember:
		return "OpMember"
	case OpPrimary:
		return "OpPrimary"
	}
	return "Invalid(" + strconv.Itoa(int(prec)) + ")"
}
// Keywords is a map of reserved, strict, and other keywords, mapping the
// literal source text of a word to its TokenType. It covers reserved words,
// strict-mode reserved words, and contextual ("extra") identifiers.
var Keywords = map[string]TokenType{
	// reserved
	"await":      AwaitToken,
	"break":      BreakToken,
	"case":       CaseToken,
	"catch":      CatchToken,
	"class":      ClassToken,
	"const":      ConstToken,
	"continue":   ContinueToken,
	"debugger":   DebuggerToken,
	"default":    DefaultToken,
	"delete":     DeleteToken,
	"do":         DoToken,
	"else":       ElseToken,
	"enum":       EnumToken,
	"export":     ExportToken,
	"extends":    ExtendsToken,
	"false":      FalseToken,
	"finally":    FinallyToken,
	"for":        ForToken,
	"function":   FunctionToken,
	"if":         IfToken,
	"import":     ImportToken,
	"in":         InToken,
	"instanceof": InstanceofToken,
	"new":        NewToken,
	"null":       NullToken,
	"return":     ReturnToken,
	"super":      SuperToken,
	"switch":     SwitchToken,
	"this":       ThisToken,
	"throw":      ThrowToken,
	"true":       TrueToken,
	"try":        TryToken,
	"typeof":     TypeofToken,
	"var":        VarToken,
	"void":       VoidToken,
	"while":      WhileToken,
	"with":       WithToken,
	"yield":      YieldToken,

	// strict mode
	"let":        LetToken,
	"static":     StaticToken,
	"implements": ImplementsToken,
	"interface":  InterfaceToken,
	"package":    PackageToken,
	"private":    PrivateToken,
	"protected":  ProtectedToken,
	"public":     PublicToken,

	// extra
	"as":     AsToken,
	"async":  AsyncToken,
	"from":   FromToken,
	"get":    GetToken,
	"meta":   MetaToken,
	"of":     OfToken,
	"set":    SetToken,
	"target": TargetToken,
}
package js
import "strconv"
// TokenType determines the type of token, eg. a number or a semicolon.
type TokenType uint16 // from LSB to MSB: 8 bits for tokens per category, 1 bit for numeric, 1 bit for punctuator, 1 bit for operator, 1 bit for identifier, 4 bits unused

// TokenType values.
// These base tokens have no category bit set.
const (
	ErrorToken TokenType = iota // extra token when errors occur
	WhitespaceToken
	LineTerminatorToken // \r \n \r\n
	CommentToken
	CommentLineTerminatorToken
	StringToken
	TemplateToken
	TemplateStartToken
	TemplateMiddleToken
	TemplateEndToken
	RegExpToken
	PrivateIdentifierToken
)

// Numeric token values.
// 0x0100 is the numeric category bit tested by IsNumeric.
const (
	NumericToken TokenType = 0x0100 + iota
	DecimalToken
	BinaryToken
	OctalToken
	HexadecimalToken
	IntegerToken
)

// Punctuator token values.
// 0x0200 is the punctuator category bit tested by IsPunctuator.
const (
	PunctuatorToken   TokenType = 0x0200 + iota
	OpenBraceToken              // {
	CloseBraceToken             // }
	OpenParenToken              // (
	CloseParenToken             // )
	OpenBracketToken            // [
	CloseBracketToken           // ]
	DotToken                    // .
	SemicolonToken              // ;
	CommaToken                  // ,
	QuestionToken               // ?
	ColonToken                  // :
	ArrowToken                  // =>
	EllipsisToken               // ...
)

// Operator token values.
// 0x0600 sets both the operator (0x0400) and punctuator (0x0200) category bits.
// The order must match operatorBytes below.
const (
	OperatorToken  TokenType = 0x0600 + iota
	EqToken                  // =
	EqEqToken                // ==
	EqEqEqToken              // ===
	NotToken                 // !
	NotEqToken               // !=
	NotEqEqToken             // !==
	LtToken                  // <
	LtEqToken                // <=
	LtLtToken                // <<
	LtLtEqToken              // <<=
	GtToken                  // >
	GtEqToken                // >=
	GtGtToken                // >>
	GtGtEqToken              // >>=
	GtGtGtToken              // >>>
	GtGtGtEqToken            // >>>=
	AddToken                 // +
	AddEqToken               // +=
	IncrToken                // ++
	SubToken                 // -
	SubEqToken               // -=
	DecrToken                // --
	MulToken                 // *
	MulEqToken               // *=
	ExpToken                 // **
	ExpEqToken               // **=
	DivToken                 // /
	DivEqToken               // /=
	ModToken                 // %
	ModEqToken               // %=
	BitAndToken              // &
	BitOrToken               // |
	BitXorToken              // ^
	BitNotToken              // ~
	BitAndEqToken            // &=
	BitOrEqToken             // |=
	BitXorEqToken            // ^=
	AndToken                 // &&
	OrToken                  // ||
	NullishToken             // ??
	AndEqToken               // &&=
	OrEqToken                // ||=
	NullishEqToken           // ??=
	OptChainToken            // ?.

	// unused in lexer
	PosToken      // +a
	NegToken      // -a
	PreIncrToken  // ++a
	PreDecrToken  // --a
	PostIncrToken // a++
	PostDecrToken // a--
)

// Reserved token values.
// 0x0800 is the reserved-word category bit tested by IsReservedWord.
// The order must match reservedWordBytes below.
const (
	ReservedToken TokenType = 0x0800 + iota
	AwaitToken
	BreakToken
	CaseToken
	CatchToken
	ClassToken
	ConstToken
	ContinueToken
	DebuggerToken
	DefaultToken
	DeleteToken
	DoToken
	ElseToken
	EnumToken
	ExportToken
	ExtendsToken
	FalseToken
	FinallyToken
	ForToken
	FunctionToken
	IfToken
	ImportToken
	InToken
	InstanceofToken
	NewToken
	NullToken
	ReturnToken
	SuperToken
	SwitchToken
	ThisToken
	ThrowToken
	TrueToken
	TryToken
	TypeofToken
	YieldToken
	VarToken
	VoidToken
	WhileToken
	WithToken
)

// Identifier token values.
// 0x1000 is the identifier category bit tested by IsIdentifier.
// The order must match identifierBytes below.
const (
	IdentifierToken TokenType = 0x1000 + iota
	AsToken
	AsyncToken
	FromToken
	GetToken
	ImplementsToken
	InterfaceToken
	LetToken
	MetaToken
	OfToken
	PackageToken
	PrivateToken
	ProtectedToken
	PublicToken
	SetToken
	StaticToken
	TargetToken
)
// IsNumeric returns true if token is numeric (the 0x0100 category bit is set).
func IsNumeric(tt TokenType) bool {
	return tt&0x0100 != 0
}

// IsPunctuator returns true if token is a punctuator (the 0x0200 category bit
// is set); operator tokens also carry this bit.
func IsPunctuator(tt TokenType) bool {
	return tt&0x0200 != 0
}

// IsOperator returns true if token is an operator (the 0x0400 category bit is set).
func IsOperator(tt TokenType) bool {
	return tt&0x0400 != 0
}

// IsIdentifierName matches IdentifierName, i.e. any identifier: either the
// identifier (0x1000) or reserved-word (0x0800) category bit is set.
func IsIdentifierName(tt TokenType) bool {
	return tt&0x1800 != 0
}

// IsReservedWord matches ReservedWord (the 0x0800 category bit is set).
func IsReservedWord(tt TokenType) bool {
	return tt&0x0800 != 0
}

// IsIdentifier matches Identifier, i.e. IdentifierName but not ReservedWord. Does not match yield or await.
func IsIdentifier(tt TokenType) bool {
	return tt&0x1000 != 0
}
// String returns the string representation of the token type, or Invalid(n)
// when the value is not a known token.
func (tt TokenType) String() string {
	if s := tt.Bytes(); s != nil {
		return string(s)
	}
	return "Invalid(" + strconv.Itoa(int(tt)) + ")"
}
// operatorBytes holds the textual form of each operator token, indexed by
// tt - OperatorToken; the order must match the operator const block exactly.
var operatorBytes = [][]byte{
	[]byte("Operator"),
	[]byte("="),
	[]byte("=="),
	[]byte("==="),
	[]byte("!"),
	[]byte("!="),
	[]byte("!=="),
	[]byte("<"),
	[]byte("<="),
	[]byte("<<"),
	[]byte("<<="),
	[]byte(">"),
	[]byte(">="),
	[]byte(">>"),
	[]byte(">>="),
	[]byte(">>>"),
	[]byte(">>>="),
	[]byte("+"),
	[]byte("+="),
	[]byte("++"),
	[]byte("-"),
	[]byte("-="),
	[]byte("--"),
	[]byte("*"),
	[]byte("*="),
	[]byte("**"),
	[]byte("**="),
	[]byte("/"),
	[]byte("/="),
	[]byte("%"),
	[]byte("%="),
	[]byte("&"),
	[]byte("|"),
	[]byte("^"),
	[]byte("~"),
	[]byte("&="),
	[]byte("|="),
	[]byte("^="),
	[]byte("&&"),
	[]byte("||"),
	[]byte("??"),
	[]byte("&&="),
	[]byte("||="),
	[]byte("??="),
	[]byte("?."),
	// synthetic unary/update forms (unused in lexer) share the lexeme of
	// their binary/lexer counterparts
	[]byte("+"),
	[]byte("-"),
	[]byte("++"),
	[]byte("--"),
	[]byte("++"),
	[]byte("--"),
}
// reservedWordBytes holds the textual form of each reserved-word token,
// indexed by tt - ReservedToken; the order must match the reserved const
// block exactly.
var reservedWordBytes = [][]byte{
	[]byte("Reserved"),
	[]byte("await"),
	[]byte("break"),
	[]byte("case"),
	[]byte("catch"),
	[]byte("class"),
	[]byte("const"),
	[]byte("continue"),
	[]byte("debugger"),
	[]byte("default"),
	[]byte("delete"),
	[]byte("do"),
	[]byte("else"),
	[]byte("enum"),
	[]byte("export"),
	[]byte("extends"),
	[]byte("false"),
	[]byte("finally"),
	[]byte("for"),
	[]byte("function"),
	[]byte("if"),
	[]byte("import"),
	[]byte("in"),
	[]byte("instanceof"),
	[]byte("new"),
	[]byte("null"),
	[]byte("return"),
	[]byte("super"),
	[]byte("switch"),
	[]byte("this"),
	[]byte("throw"),
	[]byte("true"),
	[]byte("try"),
	[]byte("typeof"),
	[]byte("yield"),
	[]byte("var"),
	[]byte("void"),
	[]byte("while"),
	[]byte("with"),
}
// identifierBytes holds the textual form of each contextual identifier token,
// indexed by tt - IdentifierToken; the order must match the identifier const
// block exactly.
var identifierBytes = [][]byte{
	[]byte("Identifier"),
	[]byte("as"),
	[]byte("async"),
	[]byte("from"),
	[]byte("get"),
	[]byte("implements"),
	[]byte("interface"),
	[]byte("let"),
	[]byte("meta"),
	[]byte("of"),
	[]byte("package"),
	[]byte("private"),
	[]byte("protected"),
	[]byte("public"),
	[]byte("set"),
	[]byte("static"),
	[]byte("target"),
}
// Bytes returns the string representation of a TokenType.
// Categorized tokens are looked up in their respective byte tables; the
// remaining base tokens are handled by the switch. Returns nil for unknown
// values (String() turns that into Invalid(n)).
func (tt TokenType) Bytes() []byte {
	if IsOperator(tt) && int(tt-OperatorToken) < len(operatorBytes) {
		return operatorBytes[tt-OperatorToken]
	} else if IsReservedWord(tt) && int(tt-ReservedToken) < len(reservedWordBytes) {
		return reservedWordBytes[tt-ReservedToken]
	} else if IsIdentifier(tt) && int(tt-IdentifierToken) < len(identifierBytes) {
		return identifierBytes[tt-IdentifierToken]
	}

	switch tt {
	case ErrorToken:
		return []byte("Error")
	case WhitespaceToken:
		return []byte("Whitespace")
	case LineTerminatorToken:
		return []byte("LineTerminator")
	case CommentToken:
		return []byte("Comment")
	case CommentLineTerminatorToken:
		return []byte("CommentLineTerminator")
	case StringToken:
		return []byte("String")
	case TemplateToken:
		return []byte("Template")
	case TemplateStartToken:
		return []byte("TemplateStart")
	case TemplateMiddleToken:
		return []byte("TemplateMiddle")
	case TemplateEndToken:
		return []byte("TemplateEnd")
	case RegExpToken:
		return []byte("RegExp")
	case PrivateIdentifierToken:
		return []byte("PrivateIdentifier")
	case NumericToken:
		return []byte("Numeric")
	case DecimalToken:
		return []byte("Decimal")
	case BinaryToken:
		return []byte("Binary")
	case OctalToken:
		return []byte("Octal")
	case HexadecimalToken:
		return []byte("Hexadecimal")
	case IntegerToken:
		return []byte("Integer")
	case PunctuatorToken:
		return []byte("Punctuator")
	case OpenBraceToken:
		return []byte("{")
	case CloseBraceToken:
		return []byte("}")
	case OpenParenToken:
		return []byte("(")
	case CloseParenToken:
		return []byte(")")
	case OpenBracketToken:
		return []byte("[")
	case CloseBracketToken:
		return []byte("]")
	case DotToken:
		return []byte(".")
	case SemicolonToken:
		return []byte(";")
	case CommaToken:
		return []byte(",")
	case QuestionToken:
		return []byte("?")
	case ColonToken:
		return []byte(":")
	case ArrowToken:
		return []byte("=>")
	case EllipsisToken:
		return []byte("...")
	}
	return nil
}
package js
// isLHSExpr reports whether i may appear as a left-hand-side expression;
// comma, conditional, yield, arrow function, binary, and unary expressions
// may not.
func isLHSExpr(i IExpr) bool {
	switch i.(type) {
	case *ArrowFunc, *BinaryExpr, *CommaExpr, *CondExpr, *UnaryExpr, *YieldExpr:
		return false
	default:
		return true
	}
}
// AsIdentifierName returns true if a valid identifier name is given: a valid
// start byte followed only by valid continuation bytes.
func AsIdentifierName(b []byte) bool {
	if len(b) == 0 || !identifierStartTable[b[0]] {
		return false
	}
	for _, c := range b[1:] {
		if !identifierTable[c] {
			return false
		}
	}
	return true
}
// AsDecimalLiteral returns true if a valid decimal literal is given: an
// integer part (no leading zeros; "0" alone is allowed) with an optional
// fraction, or a bare fraction such as ".5".
func AsDecimalLiteral(b []byte) bool {
	isDigit := func(c byte) bool { return '0' <= c && c <= '9' }
	if len(b) == 0 || !isDigit(b[0]) && (b[0] != '.' || len(b) == 1) {
		return false
	}
	if b[0] == '0' {
		// a leading zero may not be followed by anything
		return len(b) == 1
	}
	pos := 1
	for pos < len(b) && isDigit(b[pos]) {
		pos++
	}
	if pos < len(b) && b[pos] == '.' && b[0] != '.' {
		// fractional part; only one dot allowed, and none if we started with one
		pos++
		for pos < len(b) && isDigit(b[pos]) {
			pos++
		}
	}
	return pos == len(b)
}
package js
// IVisitor represents the AST Visitor.
// Each INode encountered by `Walk` is passed to `Enter`; children nodes are
// skipped if the returned IVisitor is nil.
// `Exit` is called upon the exit of a node.
type IVisitor interface {
	Enter(n INode) IVisitor
	Exit(n INode)
}
// Walk traverses an AST in depth-first order. Enter is called before the
// children and Exit (deferred) after them; a nil return from Enter prunes the
// subtree. Note that children are visited in the order coded below, which for
// several statements (e.g. IfStmt, WhileStmt) visits the body before the
// condition. Leaf nodes return immediately; unknown node types are ignored.
func Walk(v IVisitor, n INode) {
	if n == nil {
		return
	}

	if v = v.Enter(n); v == nil {
		return
	}

	defer v.Exit(n)

	switch n := n.(type) {
	case *AST:
		Walk(v, &n.BlockStmt)
	case *Var:
		return
	case *BlockStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, n.List[i])
			}
		}
	case *EmptyStmt:
		return
	case *ExprStmt:
		Walk(v, n.Value)
	case *IfStmt:
		Walk(v, n.Body)
		Walk(v, n.Else)
		Walk(v, n.Cond)
	case *DoWhileStmt:
		Walk(v, n.Body)
		Walk(v, n.Cond)
	case *WhileStmt:
		Walk(v, n.Body)
		Walk(v, n.Cond)
	case *ForStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}
		Walk(v, n.Init)
		Walk(v, n.Cond)
		Walk(v, n.Post)
	case *ForInStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}
		Walk(v, n.Init)
		Walk(v, n.Value)
	case *ForOfStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}
		Walk(v, n.Init)
		Walk(v, n.Value)
	case *CaseClause:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, n.List[i])
			}
		}
		Walk(v, n.Cond)
	case *SwitchStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
		Walk(v, n.Init)
	case *BranchStmt:
		return
	case *ReturnStmt:
		Walk(v, n.Value)
	case *WithStmt:
		Walk(v, n.Body)
		Walk(v, n.Cond)
	case *LabelledStmt:
		Walk(v, n.Value)
	case *ThrowStmt:
		Walk(v, n.Value)
	case *TryStmt:
		if n.Body != nil {
			Walk(v, n.Body)
		}
		if n.Catch != nil {
			Walk(v, n.Catch)
		}
		if n.Finally != nil {
			Walk(v, n.Finally)
		}
		Walk(v, n.Binding)
	case *DebuggerStmt:
		return
	case *Alias:
		return
	case *ImportStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *ExportStmt:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
		Walk(v, n.Decl)
	case *DirectivePrologueStmt:
		return
	case *PropertyName:
		Walk(v, &n.Literal)
		Walk(v, n.Computed)
	case *BindingArray:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
		Walk(v, n.Rest)
	case *BindingObjectItem:
		if n.Key != nil {
			Walk(v, n.Key)
		}
		Walk(v, &n.Value)
	case *BindingObject:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
		if n.Rest != nil {
			Walk(v, n.Rest)
		}
	case *BindingElement:
		Walk(v, n.Binding)
		Walk(v, n.Default)
	case *VarDecl:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *Params:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
		Walk(v, n.Rest)
	case *FuncDecl:
		Walk(v, &n.Body)
		Walk(v, &n.Params)
		if n.Name != nil {
			Walk(v, n.Name)
		}
	case *MethodDecl:
		Walk(v, &n.Body)
		Walk(v, &n.Params)
		Walk(v, &n.Name)
	case *Field:
		Walk(v, &n.Name)
		Walk(v, n.Init)
	case *ClassDecl:
		if n.Name != nil {
			Walk(v, n.Name)
		}
		Walk(v, n.Extends)
		// each class element is exactly one of: static block, method, or field
		for _, item := range n.List {
			if item.StaticBlock != nil {
				Walk(v, item.StaticBlock)
			} else if item.Method != nil {
				Walk(v, item.Method)
			} else {
				Walk(v, &item.Field)
			}
		}
	case *LiteralExpr:
		return
	case *Element:
		Walk(v, n.Value)
	case *ArrayExpr:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *Property:
		if n.Name != nil {
			Walk(v, n.Name)
		}
		Walk(v, n.Value)
		Walk(v, n.Init)
	case *ObjectExpr:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *TemplatePart:
		Walk(v, n.Expr)
	case *TemplateExpr:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
		Walk(v, n.Tag)
	case *GroupExpr:
		Walk(v, n.X)
	case *IndexExpr:
		Walk(v, n.X)
		Walk(v, n.Y)
	case *DotExpr:
		Walk(v, n.X)
		Walk(v, &n.Y)
	case *NewTargetExpr:
		return
	case *ImportMetaExpr:
		return
	case *Arg:
		Walk(v, n.Value)
	case *Args:
		if n.List != nil {
			for i := 0; i < len(n.List); i++ {
				Walk(v, &n.List[i])
			}
		}
	case *NewExpr:
		if n.Args != nil {
			Walk(v, n.Args)
		}
		Walk(v, n.X)
	case *CallExpr:
		Walk(v, &n.Args)
		Walk(v, n.X)
	case *UnaryExpr:
		Walk(v, n.X)
	case *BinaryExpr:
		Walk(v, n.X)
		Walk(v, n.Y)
	case *CondExpr:
		Walk(v, n.Cond)
		Walk(v, n.X)
		Walk(v, n.Y)
	case *YieldExpr:
		Walk(v, n.X)
	case *ArrowFunc:
		Walk(v, &n.Body)
		Walk(v, &n.Params)
	case *CommaExpr:
		for _, item := range n.List {
			Walk(v, item)
		}
	default:
		return
	}
}
package parse
import (
"fmt"
"io"
"strings"
"unicode"
)
// Position returns the line and column number for a certain position in a file. It is useful for recovering the position in a file that caused an error.
// It only treates \n, \r, and \r\n as newlines, which might be different from some languages also recognizing \f, \u2028, and \u2029 to be newlines.
func Position(r io.Reader, offset int) (line, col int, context string) {
	l := NewInput(r)
	line = 1
	// scan forward until offset is reached; offset is rebased to be relative
	// to the start of the current line every time a newline is consumed
	for l.Pos() < offset {
		c := l.Peek(0)

		n := 1
		newline := false
		if c == '\n' {
			newline = true
		} else if c == '\r' {
			if l.Peek(1) == '\n' {
				// \r\n counts as a single newline
				newline = true
				n = 2
			} else {
				newline = true
			}
		} else if c >= 0xC0 {
			// multi-byte UTF-8 sequence; \u2028 and \u2029 also terminate a line
			var r rune
			if r, n = l.PeekRune(0); r == '\u2028' || r == '\u2029' {
				newline = true
			}
		} else if c == 0 && l.Err() != nil {
			break // end of input
		}

		// don't stop in the middle of a multi-byte sequence or \r\n
		if 1 < n && offset < l.Pos()+n {
			break
		}
		l.Move(n)

		if newline {
			line++
			offset -= l.Pos() // rebase offset relative to the new line start
			l.Skip()
		}
	}

	// the consumed lexeme of the current line gives the column (rune count)
	col = len([]rune(string(l.Lexeme()))) + 1
	context = positionContext(l, line, col)
	return
}
// positionContext renders the remainder of the current line together with a
// caret marker under the given column, truncating long lines to roughly 60
// characters with ellipses on the cut side(s).
func positionContext(l *Input, line, col int) (context string) {
	// consume the rest of the line so the full line is in the lexeme
	for {
		c := l.Peek(0)
		if c == 0 && l.Err() != nil || c == '\n' || c == '\r' {
			break
		}
		l.Move(1)
	}
	rs := []rune(string(l.Lexeme()))

	// cut off front or rear of context to stay between 60 characters
	limit := 60
	offset := 20
	ellipsisFront := ""
	ellipsisRear := ""
	if limit < len(rs) {
		if col <= limit-offset {
			// caret near the start: cut the rear only
			ellipsisRear = "..."
			rs = rs[:limit-3]
		} else if col >= len(rs)-offset-3 {
			// caret near the end: cut the front and shift the caret column
			ellipsisFront = "..."
			col -= len(rs) - offset - offset - 7
			rs = rs[len(rs)-offset-offset-4:]
		} else {
			// caret in the middle: cut both sides around the caret
			ellipsisFront = "..."
			ellipsisRear = "..."
			rs = rs[col-offset-1 : col+offset]
			col = offset + 4
		}
	}

	// replace unprintable characters by a space
	for i, r := range rs {
		if !unicode.IsGraphic(r) {
			rs[i] = 'ยท'
		}
	}

	context += fmt.Sprintf("%5d: %s%s%s\n", line, ellipsisFront, string(rs), ellipsisRear)
	context += fmt.Sprintf("%s^", strings.Repeat(" ", 6+col))
	return
}
}
// +build gofuzz
package fuzz
import (
"github.com/tdewolff/parse/v2/css"
)
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
// The result is irrelevant; we only care that IsIdent does not crash.
css.IsIdent(data)
return 1
}
// +build gofuzz
package fuzz
import "github.com/tdewolff/parse/v2"
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
// Work on a private copy so the fuzzer-owned input buffer is never
// mutated (DataURI may write into its argument).
buf := parse.Copy(data)
parse.DataURI(buf)
return 1
}
// +build gofuzz
package fuzz
import "github.com/tdewolff/parse/v2"
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
// Results are discarded; the harness only checks for panics.
parse.Dimension(data)
return 1
}
// +build gofuzz
package fuzz
import (
"fmt"
"strings"
"unicode/utf8"
"github.com/tdewolff/parse/v2"
"github.com/tdewolff/parse/v2/js"
)
// Fuzz is a fuzz test: parse the input as JavaScript, serialize it, reparse
// the serialization, and verify the round trip is stable.
func Fuzz(data []byte) int {
if !utf8.Valid(data) {
return 0
}
opts := js.Options{}
ast, err := js.Parse(parse.NewInputBytes(data), opts)
if err != nil {
return 0
}
first := ast.JSString()
ast2, err := js.Parse(parse.NewInputString(first), opts)
if err != nil {
// Reparsing our own output may legitimately exceed the nesting
// limit; any other error is a bug.
if !strings.HasPrefix(err.Error(), "too many nested") {
panic(err)
}
return 1
}
if second := ast2.JSString(); first != second {
fmt.Println("JS1:", first)
fmt.Println("JS2:", second)
panic("ASTs not equal")
}
return 1
}
// +build gofuzz
package fuzz
import "github.com/tdewolff/parse/v2"
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
// Both return values are discarded; we only exercise the parser.
parse.Mediatype(data)
return 1
}
// +build gofuzz
package fuzz
import "github.com/tdewolff/parse/v2"
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
// The parsed length is discarded; we only exercise the parser.
parse.Number(data)
return 1
}
// +build gofuzz
package fuzz
import "github.com/tdewolff/parse/v2"
// Fuzz is a fuzz test.
func Fuzz(data []byte) int {
data = parse.Copy(data) // ignore const-input error for OSS-Fuzz
newData := parse.ReplaceEntities(data, map[string][]byte{
"test": []byte("&t;"),
"test3": []byte("&test;"),
"test5": []byte(""),
"quot": []byte("\""),
"apos": []byte("'"),
}, map[byte][]byte{
'\'': []byte("""),
'"': []byte("'"),
})
if len(newData) > len(data) {
panic("output longer than input")
}
return 1
}
package parse
import (
"fmt"
"io"
"unicode"
)
// Copy returns a copy of the given byte slice.
func Copy(src []byte) (dst []byte) {
// Allocate exactly len(src) bytes up front; the result is always non-nil,
// even for a nil input.
dst = append(make([]byte, 0, len(src)), src...)
return
}
// ToLower converts all characters in the byte slice from A-Z to a-z,
// modifying the slice in place, and returns the same slice.
func ToLower(src []byte) []byte {
for i := 0; i < len(src); i++ {
if 'A' <= src[i] && src[i] <= 'Z' {
src[i] += 'a' - 'A'
}
}
return src
}
// EqualFold returns true when s matches case-insensitively the targetLower
// (which must be lowercase). Only ASCII A-Z in s is folded; all other bytes
// must match exactly.
func EqualFold(s, targetLower []byte) bool {
if len(s) != len(targetLower) {
return false
}
for i := 0; i < len(targetLower); i++ {
c, d := targetLower[i], s[i]
if d == c {
continue
}
// Allow an uppercase ASCII letter in s to match its lowercase form.
if 'A' <= d && d <= 'Z' && d+('a'-'A') == c {
continue
}
return false
}
return true
}
// Printable returns a printable string for the given rune: the rune itself if
// it is graphic, a 0xNN hex form for non-graphic ASCII, and a U+NNNN form for
// anything else.
func Printable(r rune) string {
switch {
case unicode.IsGraphic(r):
return fmt.Sprintf("%c", r)
case r < 128:
return fmt.Sprintf("0x%02X", r)
default:
return fmt.Sprintf("%U", r)
}
}
// whitespaceTable is a lookup table over all byte values that is true only
// for tab, new line, form feed, carriage return, and space. Vertical tab and
// all non-ASCII bytes are not whitespace.
var whitespaceTable = [256]bool{
'\t': true,
'\n': true,
'\f': true,
'\r': true,
' ':  true,
}

// IsWhitespace returns true for space, \n, \r, \t, \f.
func IsWhitespace(c byte) bool {
return whitespaceTable[c]
}
// newlineTable is a lookup table over all byte values that is true only for
// new line and carriage return.
var newlineTable = [256]bool{
'\n': true,
'\r': true,
}

// IsNewline returns true for \n, \r.
func IsNewline(c byte) bool {
return newlineTable[c]
}
// IsAllWhitespace returns true when the entire byte slice consists of space,
// \n, \r, \t, \f. An empty slice is considered all-whitespace.
func IsAllWhitespace(b []byte) bool {
for i := 0; i < len(b); i++ {
if !IsWhitespace(b[i]) {
return false
}
}
return true
}
// TrimWhitespace removes any leading and trailing whitespace characters and
// returns the resulting subslice of b (no copy is made).
func TrimWhitespace(b []byte) []byte {
lo, hi := 0, len(b)
// Advance lo past leading whitespace, then pull hi back past trailing
// whitespace; lo == hi yields an empty slice for all-whitespace input.
for lo < hi && IsWhitespace(b[lo]) {
lo++
}
for lo < hi && IsWhitespace(b[hi-1]) {
hi--
}
return b[lo:hi]
}
type Indenter struct {
io.Writer
b []byte
}
func NewIndenter(w io.Writer, n int) Indenter {
if wi, ok := w.(Indenter); ok {
w = wi.Writer
n += len(wi.b)
}
b := make([]byte, n)
for i := range b {
b[i] = ' '
}
return Indenter{
Writer: w,
b: b,
}
}
func (in Indenter) Indent() int {
return len(in.b)
}
func (in Indenter) Write(b []byte) (int, error) {
n, j := 0, 0
for i, c := range b {
if c == '\n' {
m, _ := in.Writer.Write(b[j : i+1])
n += m
m, _ = in.Writer.Write(in.b)
n += m
j = i + 1
}
}
m, err := in.Writer.Write(b[j:])
return n + m, err
}