summaryrefslogtreecommitdiff
path: root/vendor/github.com/yuin/goldmark/parser
diff options
context:
space:
mode:
authorLibravatar Autumn! <86073772+autumnull@users.noreply.github.com>2022-12-16 11:20:22 +0000
committerLibravatar GitHub <noreply@github.com>2022-12-16 12:20:22 +0100
commiteb08529f35ce33ed98c34fb48013f0f4a5fc9635 (patch)
tree394fd774a943f5c33ce793c67b5865f2570b46c5 /vendor/github.com/yuin/goldmark/parser
parent[bugfix] use match-sorter for filtering domain blocks (#1270) (diff)
downloadgotosocial-eb08529f35ce33ed98c34fb48013f0f4a5fc9635.tar.xz
[chore/bugfix] Switch markdown from blackfriday to goldmark (#1267)
Co-authored-by: Autumn! <autumnull@posteo.net>
Diffstat (limited to 'vendor/github.com/yuin/goldmark/parser')
-rw-r--r--vendor/github.com/yuin/goldmark/parser/attribute.go328
-rw-r--r--vendor/github.com/yuin/goldmark/parser/atx_heading.go246
-rw-r--r--vendor/github.com/yuin/goldmark/parser/auto_link.go42
-rw-r--r--vendor/github.com/yuin/goldmark/parser/blockquote.go69
-rw-r--r--vendor/github.com/yuin/goldmark/parser/code_block.go100
-rw-r--r--vendor/github.com/yuin/goldmark/parser/code_span.go84
-rw-r--r--vendor/github.com/yuin/goldmark/parser/delimiter.go238
-rw-r--r--vendor/github.com/yuin/goldmark/parser/emphasis.go50
-rw-r--r--vendor/github.com/yuin/goldmark/parser/fcode_block.go121
-rw-r--r--vendor/github.com/yuin/goldmark/parser/html_block.go228
-rw-r--r--vendor/github.com/yuin/goldmark/parser/link.go409
-rw-r--r--vendor/github.com/yuin/goldmark/parser/link_ref.go152
-rw-r--r--vendor/github.com/yuin/goldmark/parser/list.go287
-rw-r--r--vendor/github.com/yuin/goldmark/parser/list_item.go90
-rw-r--r--vendor/github.com/yuin/goldmark/parser/paragraph.go72
-rw-r--r--vendor/github.com/yuin/goldmark/parser/parser.go1253
-rw-r--r--vendor/github.com/yuin/goldmark/parser/raw_html.go163
-rw-r--r--vendor/github.com/yuin/goldmark/parser/setext_headings.go126
-rw-r--r--vendor/github.com/yuin/goldmark/parser/thematic_break.go75
19 files changed, 4133 insertions, 0 deletions
diff --git a/vendor/github.com/yuin/goldmark/parser/attribute.go b/vendor/github.com/yuin/goldmark/parser/attribute.go
new file mode 100644
index 000000000..f86c83610
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/attribute.go
@@ -0,0 +1,328 @@
+package parser
+
+import (
+ "bytes"
+ "io"
+ "strconv"
+
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var attrNameID = []byte("id")
+var attrNameClass = []byte("class")
+
// An Attribute is a single name/value pair attached to a markdown element.
type Attribute struct {
	Name  []byte
	Value interface{}
}

// An Attributes is a collection of attributes.
type Attributes []Attribute

// Find looks up an attribute by name. It returns the attribute's value and
// true when a matching name exists, otherwise nil and false.
func (as Attributes) Find(name []byte) (interface{}, bool) {
	for _, attr := range as {
		if bytes.Equal(attr.Name, name) {
			return attr.Value, true
		}
	}
	return nil, false
}

// findUpdate replaces the value of the attribute named name with the result
// of cb applied to its current value. It reports whether such an attribute
// was found.
func (as Attributes) findUpdate(name []byte, cb func(v interface{}) interface{}) bool {
	for idx := range as {
		if !bytes.Equal(as[idx].Name, name) {
			continue
		}
		as[idx].Value = cb(as[idx].Value)
		return true
	}
	return false
}
+
// ParseAttributes parses a '{...}' attribute list starting at (or after
// whitespace from) the current reader position. On success it returns the
// parsed attributes and true; otherwise it restores the reader to its saved
// position and returns nil and false.
func ParseAttributes(reader text.Reader) (Attributes, bool) {
	savedLine, savedPosition := reader.Position()
	reader.SkipSpaces()
	if reader.Peek() != '{' {
		reader.SetPosition(savedLine, savedPosition)
		return nil, false
	}
	reader.Advance(1)
	attrs := Attributes{}
	for {
		if reader.Peek() == '}' {
			reader.Advance(1)
			return attrs, true
		}
		attr, ok := parseAttribute(reader)
		if !ok {
			// Any malformed attribute aborts the whole list.
			reader.SetPosition(savedLine, savedPosition)
			return nil, false
		}
		if bytes.Equal(attr.Name, attrNameClass) {
			// Repeated 'class' attributes are merged into one
			// space-separated value rather than stored twice.
			if !attrs.findUpdate(attrNameClass, func(v interface{}) interface{} {
				ret := make([]byte, 0, len(v.([]byte))+1+len(attr.Value.([]byte)))
				ret = append(ret, v.([]byte)...)
				return append(append(ret, ' '), attr.Value.([]byte)...)
			}) {
				attrs = append(attrs, attr)
			}
		} else {
			attrs = append(attrs, attr)
		}
		reader.SkipSpaces()
		// A comma between attributes is consumed when present, but optional.
		if reader.Peek() == ',' {
			reader.Advance(1)
			reader.SkipSpaces()
		}
	}
}
+
// parseAttribute parses a single attribute at the reader position.
// Three forms are accepted:
//
//	#foo       -> {Name: "id",    Value: "foo"}
//	.foo       -> {Name: "class", Value: "foo"}
//	name=value -> {Name: name,    Value: parsed value}
//
// It returns the attribute and true on success, otherwise a zero Attribute
// and false.
func parseAttribute(reader text.Reader) (Attribute, bool) {
	reader.SkipSpaces()
	c := reader.Peek()
	if c == '#' || c == '.' {
		reader.Advance(1)
		line, _ := reader.PeekLine()
		i := 0
		// HTML5 allows any kind of characters as id, but XHTML restricts characters for id.
		// CommonMark is basically defined for XHTML(even though it is legacy).
		// So we restrict id characters.
		for ; i < len(line) && !util.IsSpace(line[i]) &&
			(!util.IsPunct(line[i]) || line[i] == '_' || line[i] == '-' || line[i] == ':' || line[i] == '.'); i++ {
		}
		name := attrNameClass
		if c == '#' {
			name = attrNameID
		}
		reader.Advance(i)
		// NOTE(review): when i == 0 this still returns ok with an empty
		// value — presumably filtered out by callers; confirm upstream.
		return Attribute{Name: name, Value: line[0:i]}, true
	}
	line, _ := reader.PeekLine()
	if len(line) == 0 {
		return Attribute{}, false
	}
	// An attribute name must start with a letter, '_' or ':' ...
	c = line[0]
	if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
		c == '_' || c == ':') {
		return Attribute{}, false
	}
	i := 0
	// ... and may continue with letters, digits, '_', ':', '.' or '-'.
	for ; i < len(line); i++ {
		c = line[i]
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9') ||
			c == '_' || c == ':' || c == '.' || c == '-') {
			break
		}
	}
	name := line[:i]
	reader.Advance(i)
	reader.SkipSpaces()
	c = reader.Peek()
	if c != '=' {
		return Attribute{}, false
	}
	reader.Advance(1)
	reader.SkipSpaces()
	value, ok := parseAttributeValue(reader)
	if !ok {
		return Attribute{}, false
	}
	// A 'class' value must be a plain byte-string so ParseAttributes can
	// merge repeated class attributes.
	if bytes.Equal(name, attrNameClass) {
		if _, ok = value.([]byte); !ok {
			return Attribute{}, false
		}
	}
	return Attribute{Name: name, Value: value}, true
}
+
+func parseAttributeValue(reader text.Reader) (interface{}, bool) {
+ reader.SkipSpaces()
+ c := reader.Peek()
+ var value interface{}
+ ok := false
+ switch c {
+ case text.EOF:
+ return Attribute{}, false
+ case '{':
+ value, ok = ParseAttributes(reader)
+ case '[':
+ value, ok = parseAttributeArray(reader)
+ case '"':
+ value, ok = parseAttributeString(reader)
+ default:
+ if c == '-' || c == '+' || util.IsNumeric(c) {
+ value, ok = parseAttributeNumber(reader)
+ } else {
+ value, ok = parseAttributeOthers(reader)
+ }
+ }
+ if !ok {
+ return nil, false
+ }
+ return value, true
+}
+
// parseAttributeArray parses a '[...]' array of attribute values.
// Commas between elements are consumed when present but optional; a
// trailing comma directly before ']' is rejected.
func parseAttributeArray(reader text.Reader) ([]interface{}, bool) {
	reader.Advance(1) // skip [
	ret := []interface{}{}
	for i := 0; ; i++ {
		c := reader.Peek()
		comma := false
		// Consume an optional separator before every element but the first.
		if i != 0 && c == ',' {
			reader.Advance(1)
			comma = true
		}
		if c == ']' {
			if !comma {
				reader.Advance(1)
				return ret, true
			}
			// comma is only set when c == ',', so this branch cannot fire;
			// kept for safety.
			return nil, false
		}
		reader.SkipSpaces()
		value, ok := parseAttributeValue(reader)
		if !ok {
			return nil, false
		}
		ret = append(ret, value)
		reader.SkipSpaces()
	}
}
+
// parseAttributeString parses a double-quoted string value. JSON-style
// escape sequences (\" \/ \\ \b \f \n \r \t) are decoded; any other
// backslash sequence keeps the backslash verbatim. The closing quote must
// appear on the same line, otherwise nil and false are returned.
func parseAttributeString(reader text.Reader) ([]byte, bool) {
	reader.Advance(1) // skip "
	line, _ := reader.PeekLine()
	i := 0
	l := len(line)
	var buf bytes.Buffer
	for i < l {
		c := line[i]
		if c == '\\' && i != l-1 {
			n := line[i+1]
			switch n {
			case '"', '/', '\\':
				buf.WriteByte(n)
				i += 2
			case 'b':
				buf.WriteString("\b")
				i += 2
			case 'f':
				buf.WriteString("\f")
				i += 2
			case 'n':
				buf.WriteString("\n")
				i += 2
			case 'r':
				buf.WriteString("\r")
				i += 2
			case 't':
				buf.WriteString("\t")
				i += 2
			default:
				// Unknown escape: keep the backslash and let the next byte
				// be processed normally on the following iteration.
				buf.WriteByte('\\')
				i++
			}
			continue
		}
		if c == '"' {
			reader.Advance(i + 1)
			return buf.Bytes(), true
		}
		buf.WriteByte(c)
		i++
	}
	// No closing quote on this line.
	return nil, false
}
+
+func scanAttributeDecimal(reader text.Reader, w io.ByteWriter) {
+ for {
+ c := reader.Peek()
+ if util.IsNumeric(c) {
+ w.WriteByte(c)
+ } else {
+ return
+ }
+ reader.Advance(1)
+ }
+}
+
+func parseAttributeNumber(reader text.Reader) (float64, bool) {
+ sign := 1
+ c := reader.Peek()
+ if c == '-' {
+ sign = -1
+ reader.Advance(1)
+ } else if c == '+' {
+ reader.Advance(1)
+ }
+ var buf bytes.Buffer
+ if !util.IsNumeric(reader.Peek()) {
+ return 0, false
+ }
+ scanAttributeDecimal(reader, &buf)
+ if buf.Len() == 0 {
+ return 0, false
+ }
+ c = reader.Peek()
+ if c == '.' {
+ buf.WriteByte(c)
+ reader.Advance(1)
+ scanAttributeDecimal(reader, &buf)
+ }
+ c = reader.Peek()
+ if c == 'e' || c == 'E' {
+ buf.WriteByte(c)
+ reader.Advance(1)
+ c = reader.Peek()
+ if c == '-' || c == '+' {
+ buf.WriteByte(c)
+ reader.Advance(1)
+ }
+ scanAttributeDecimal(reader, &buf)
+ }
+ f, err := strconv.ParseFloat(buf.String(), 10)
+ if err != nil {
+ return 0, false
+ }
+ return float64(sign) * f, true
+}
+
// Bare-word literals recognized by parseAttributeOthers.
var bytesTrue = []byte("true")
var bytesFalse = []byte("false")
var bytesNull = []byte("null")

// parseAttributeOthers parses an unquoted value. The words true, false and
// null become their Go equivalents (bool / nil); any other identifier-like
// token is returned as a byte slice.
func parseAttributeOthers(reader text.Reader) (interface{}, bool) {
	line, _ := reader.PeekLine()
	// Same name grammar as attribute names: starts with letter/'_'/':' ...
	c := line[0]
	if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
		c == '_' || c == ':') {
		return nil, false
	}
	i := 0
	// ... continues with letters, digits, '_', ':', '.' or '-'.
	for ; i < len(line); i++ {
		c := line[i]
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9') ||
			c == '_' || c == ':' || c == '.' || c == '-') {
			break
		}
	}
	value := line[:i]
	reader.Advance(i)
	if bytes.Equal(value, bytesTrue) {
		return true, true
	}
	if bytes.Equal(value, bytesFalse) {
		return false, true
	}
	if bytes.Equal(value, bytesNull) {
		return nil, true
	}
	return value, true
}
diff --git a/vendor/github.com/yuin/goldmark/parser/atx_heading.go b/vendor/github.com/yuin/goldmark/parser/atx_heading.go
new file mode 100644
index 000000000..13a198b52
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/atx_heading.go
@@ -0,0 +1,246 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A HeadingConfig struct is a data structure that holds configuration of the renderers related to headings.
+type HeadingConfig struct {
+ AutoHeadingID bool
+ Attribute bool
+}
+
+// SetOption implements SetOptioner.
+func (b *HeadingConfig) SetOption(name OptionName, value interface{}) {
+ switch name {
+ case optAutoHeadingID:
+ b.AutoHeadingID = true
+ case optAttribute:
+ b.Attribute = true
+ }
+}
+
+// A HeadingOption interface sets options for heading parsers.
+type HeadingOption interface {
+ Option
+ SetHeadingOption(*HeadingConfig)
+}
+
+// AutoHeadingID is an option name that enables auto IDs for headings.
+const optAutoHeadingID OptionName = "AutoHeadingID"
+
+type withAutoHeadingID struct {
+}
+
+func (o *withAutoHeadingID) SetParserOption(c *Config) {
+ c.Options[optAutoHeadingID] = true
+}
+
+func (o *withAutoHeadingID) SetHeadingOption(p *HeadingConfig) {
+ p.AutoHeadingID = true
+}
+
+// WithAutoHeadingID is a functional option that enables custom heading ids and
+// auto generated heading ids.
+func WithAutoHeadingID() HeadingOption {
+ return &withAutoHeadingID{}
+}
+
+type withHeadingAttribute struct {
+ Option
+}
+
+func (o *withHeadingAttribute) SetHeadingOption(p *HeadingConfig) {
+ p.Attribute = true
+}
+
+// WithHeadingAttribute is a functional option that enables custom heading attributes.
+func WithHeadingAttribute() HeadingOption {
+ return &withHeadingAttribute{WithAttribute()}
+}
+
+type atxHeadingParser struct {
+ HeadingConfig
+}
+
+// NewATXHeadingParser return a new BlockParser that can parse ATX headings.
+func NewATXHeadingParser(opts ...HeadingOption) BlockParser {
+ p := &atxHeadingParser{}
+ for _, o := range opts {
+ o.SetHeadingOption(&p.HeadingConfig)
+ }
+ return p
+}
+
+func (b *atxHeadingParser) Trigger() []byte {
+ return []byte{'#'}
+}
+
// Open parses the start of an ATX heading: 1-6 '#' characters followed by
// a space and the heading text, optionally closed by a trailing '#' run and
// (when the Attribute option is on) a '{...}' attribute list.
func (b *atxHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	line, segment := reader.PeekLine()
	pos := pc.BlockOffset()
	if pos < 0 {
		return nil, NoChildren
	}
	// Count the leading '#' run; its length is the heading level.
	i := pos
	for ; i < len(line) && line[i] == '#'; i++ {
	}
	level := i - pos
	if i == pos || level > 6 {
		return nil, NoChildren
	}
	if i == len(line) { // alone '#' (without a new line character)
		return ast.NewHeading(level), NoChildren
	}
	// The '#' run must be followed by at least one space.
	l := util.TrimLeftSpaceLength(line[i:])
	if l == 0 {
		return nil, NoChildren
	}
	start := i + l
	if start >= len(line) {
		start = len(line) - 1
	}
	origstart := start
	stop := len(line) - util.TrimRightSpaceLength(line)

	node := ast.NewHeading(level)
	parsed := false
	if b.Attribute { // handles special case like ### heading ### {#id}
		start--
		closureClose := -1
		closureOpen := -1
		// Look for a space-preceded closing '#' run before the attributes.
		for j := start; j < stop; {
			c := line[j]
			if util.IsEscapedPunctuation(line, j) {
				j += 2
			} else if util.IsSpace(c) && j < stop-1 && line[j+1] == '#' {
				closureOpen = j + 1
				k := j + 1
				for ; k < stop && line[k] == '#'; k++ {
				}
				closureClose = k
				break
			} else {
				j++
			}
		}
		if closureClose > 0 {
			reader.Advance(closureClose)
			attrs, ok := ParseAttributes(reader)
			rest, _ := reader.PeekLine()
			// Attributes only count when nothing but whitespace follows.
			parsed = ok && util.IsBlank(rest)
			if parsed {
				for _, attr := range attrs {
					node.SetAttribute(attr.Name, attr.Value)
				}
				node.Lines().Append(text.NewSegment(segment.Start+start+1-segment.Padding, segment.Start+closureOpen-segment.Padding))
			}
		}
	}
	if !parsed {
		// Plain heading path: trim an optional trailing '#' run.
		start = origstart
		stop := len(line) - util.TrimRightSpaceLength(line)
		if stop <= start { // empty headings like '##[space]'
			stop = start
		} else {
			i = stop - 1
			// NOTE(review): line[i] is evaluated before the i >= start bound;
			// safe only because start > 0 here — confirm that invariant holds.
			for ; line[i] == '#' && i >= start; i-- {
			}
			// A trailing run not preceded by a space is heading content.
			if i != stop-1 && !util.IsSpace(line[i]) {
				i = stop - 1
			}
			i++
			stop = i
		}

		if len(util.TrimRight(line[start:stop], []byte{'#'})) != 0 { // empty heading like '### ###'
			node.Lines().Append(text.NewSegment(segment.Start+start-segment.Padding, segment.Start+stop-segment.Padding))
		}
	}
	return node, NoChildren
}
+
+func (b *atxHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ return Close
+}
+
+func (b *atxHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ if b.Attribute {
+ _, ok := node.AttributeString("id")
+ if !ok {
+ parseLastLineAttributes(node, reader, pc)
+ }
+ }
+
+ if b.AutoHeadingID {
+ id, ok := node.AttributeString("id")
+ if !ok {
+ generateAutoHeadingID(node.(*ast.Heading), reader, pc)
+ } else {
+ pc.IDs().Put(id.([]byte))
+ }
+ }
+}
+
+func (b *atxHeadingParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *atxHeadingParser) CanAcceptIndentedLine() bool {
+ return false
+}
+
+func generateAutoHeadingID(node *ast.Heading, reader text.Reader, pc Context) {
+ var line []byte
+ lastIndex := node.Lines().Len() - 1
+ if lastIndex > -1 {
+ lastLine := node.Lines().At(lastIndex)
+ line = lastLine.Value(reader.Source())
+ }
+ headingID := pc.IDs().Generate(line, ast.KindHeading)
+ node.SetAttribute(attrNameID, headingID)
+}
+
// parseLastLineAttributes scans the node's last line for a trailing '{...}'
// attribute list (e.g. "## heading {#id}"). When a list parses successfully
// and only whitespace follows it, the attributes are applied to node and
// the attribute text is trimmed off the stored line.
func parseLastLineAttributes(node ast.Node, reader text.Reader, pc Context) {
	lastIndex := node.Lines().Len() - 1
	if lastIndex < 0 { // empty headings
		return
	}
	lastLine := node.Lines().At(lastIndex)
	line := lastLine.Value(reader.Source())
	lr := text.NewReader(line)
	var attrs Attributes
	var ok bool
	var start text.Segment
	var sl int
	var end text.Segment
	for {
		c := lr.Peek()
		if c == text.EOF {
			break
		}
		// '\{' escapes an attribute-list opener.
		if c == '\\' {
			lr.Advance(1)
			if lr.Peek() == '{' {
				lr.Advance(1)
			}
			continue
		}
		if c == '{' {
			// Remember where this candidate list starts, try to parse it,
			// then rewind so a later '{' on the line can supersede it.
			sl, start = lr.Position()
			attrs, ok = ParseAttributes(lr)
			_, end = lr.Position()
			lr.SetPosition(sl, start)
		}
		lr.Advance(1)
	}
	// Only the last successful parse counts, and only when nothing but
	// whitespace follows it on the line.
	if ok && util.IsBlank(line[end.Start:]) {
		for _, attr := range attrs {
			node.SetAttribute(attr.Name, attr.Value)
		}
		lastLine.Stop = lastLine.Start + start.Start
		node.Lines().Set(lastIndex, lastLine)
	}
}
diff --git a/vendor/github.com/yuin/goldmark/parser/auto_link.go b/vendor/github.com/yuin/goldmark/parser/auto_link.go
new file mode 100644
index 000000000..726a50571
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/auto_link.go
@@ -0,0 +1,42 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type autoLinkParser struct {
+}
+
+var defaultAutoLinkParser = &autoLinkParser{}
+
+// NewAutoLinkParser returns a new InlineParser that parses autolinks
+// surrounded by '<' and '>' .
+func NewAutoLinkParser() InlineParser {
+ return defaultAutoLinkParser
+}
+
+func (s *autoLinkParser) Trigger() []byte {
+ return []byte{'<'}
+}
+
+func (s *autoLinkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ line, segment := block.PeekLine()
+ stop := util.FindEmailIndex(line[1:])
+ typ := ast.AutoLinkType(ast.AutoLinkEmail)
+ if stop < 0 {
+ stop = util.FindURLIndex(line[1:])
+ typ = ast.AutoLinkURL
+ }
+ if stop < 0 {
+ return nil
+ }
+ stop++
+ if stop >= len(line) || line[stop] != '>' {
+ return nil
+ }
+ value := ast.NewTextSegment(text.NewSegment(segment.Start+1, segment.Start+stop))
+ block.Advance(stop + 1)
+ return ast.NewAutoLink(typ, value)
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/blockquote.go b/vendor/github.com/yuin/goldmark/parser/blockquote.go
new file mode 100644
index 000000000..e7778dca7
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/blockquote.go
@@ -0,0 +1,69 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type blockquoteParser struct {
+}
+
+var defaultBlockquoteParser = &blockquoteParser{}
+
+// NewBlockquoteParser returns a new BlockParser that
+// parses blockquotes.
+func NewBlockquoteParser() BlockParser {
+ return defaultBlockquoteParser
+}
+
// process consumes a blockquote marker on the current line: up to 3 columns
// of indent, a '>', and one optional following space or tab. It reports
// whether the line belongs to a blockquote, advancing the reader past the
// marker on success.
func (b *blockquoteParser) process(reader text.Reader) bool {
	line, _ := reader.PeekLine()
	w, pos := util.IndentWidth(line, reader.LineOffset())
	if w > 3 || pos >= len(line) || line[pos] != '>' {
		return false
	}
	pos++
	if pos >= len(line) || line[pos] == '\n' {
		// Bare '>' marker with no content on the line.
		reader.Advance(pos)
		return true
	}
	if line[pos] == ' ' || line[pos] == '\t' {
		pos++
	}
	reader.Advance(pos)
	if line[pos-1] == '\t' {
		// A consumed tab spans more than one column; credit the extra
		// width back to the content as padding.
		reader.SetPadding(2)
	}
	return true
}
+
+func (b *blockquoteParser) Trigger() []byte {
+ return []byte{'>'}
+}
+
+func (b *blockquoteParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ if b.process(reader) {
+ return ast.NewBlockquote(), HasChildren
+ }
+ return nil, NoChildren
+}
+
+func (b *blockquoteParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ if b.process(reader) {
+ return Continue | HasChildren
+ }
+ return Close
+}
+
+func (b *blockquoteParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *blockquoteParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *blockquoteParser) CanAcceptIndentedLine() bool {
+ return false
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/code_block.go b/vendor/github.com/yuin/goldmark/parser/code_block.go
new file mode 100644
index 000000000..732f18c65
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/code_block.go
@@ -0,0 +1,100 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type codeBlockParser struct {
+}
+
+// CodeBlockParser is a BlockParser implementation that parses indented code blocks.
+var defaultCodeBlockParser = &codeBlockParser{}
+
+// NewCodeBlockParser returns a new BlockParser that
+// parses code blocks.
+func NewCodeBlockParser() BlockParser {
+ return defaultCodeBlockParser
+}
+
+func (b *codeBlockParser) Trigger() []byte {
+ return nil
+}
+
// Open starts an indented code block when the current line is non-blank and
// indented by at least 4 columns.
func (b *codeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	line, segment := reader.PeekLine()
	pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
	if pos < 0 || util.IsBlank(line) {
		return nil, NoChildren
	}
	node := ast.NewCodeBlock()
	reader.AdvanceAndSetPadding(pos, padding)
	_, segment = reader.PeekLine()
	// if code block line starts with a tab, keep a tab as it is.
	if segment.Padding != 0 {
		preserveLeadingTabInCodeBlock(&segment, reader, 0)
	}
	node.Lines().Append(segment)
	// Leave the trailing newline for the framework to consume.
	reader.Advance(segment.Len() - 1)
	return node, NoChildren

}
+
// Continue keeps consuming lines for the code block. Blank lines are kept
// (with up to 4 columns of indent trimmed) so interior blank lines survive;
// a non-blank line indented less than 4 columns closes the block.
func (b *codeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	line, segment := reader.PeekLine()
	if util.IsBlank(line) {
		node.Lines().Append(segment.TrimLeftSpaceWidth(4, reader.Source()))
		return Continue | NoChildren
	}
	pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
	if pos < 0 {
		return Close
	}
	reader.AdvanceAndSetPadding(pos, padding)
	_, segment = reader.PeekLine()

	// if code block line starts with a tab, keep a tab as it is.
	if segment.Padding != 0 {
		preserveLeadingTabInCodeBlock(&segment, reader, 0)
	}

	node.Lines().Append(segment)
	// Leave the trailing newline for the framework to consume.
	reader.Advance(segment.Len() - 1)
	return Continue | NoChildren
}
+
+func (b *codeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // trim trailing blank lines
+ lines := node.Lines()
+ length := lines.Len() - 1
+ source := reader.Source()
+ for length >= 0 {
+ line := lines.At(length)
+ if util.IsBlank(line.Value(source)) {
+ length--
+ } else {
+ break
+ }
+ }
+ lines.SetSliced(0, length+1)
+}
+
+func (b *codeBlockParser) CanInterruptParagraph() bool {
+ return false
+}
+
+func (b *codeBlockParser) CanAcceptIndentedLine() bool {
+ return true
+}
+
+func preserveLeadingTabInCodeBlock(segment *text.Segment, reader text.Reader, indent int) {
+ offsetWithPadding := reader.LineOffset() + indent
+ sl, ss := reader.Position()
+ reader.SetPosition(sl, text.NewSegment(ss.Start-1, ss.Stop))
+ if offsetWithPadding == reader.LineOffset() {
+ segment.Padding = 0
+ segment.Start--
+ }
+ reader.SetPosition(sl, ss)
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/code_span.go b/vendor/github.com/yuin/goldmark/parser/code_span.go
new file mode 100644
index 000000000..a74b09bc4
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/code_span.go
@@ -0,0 +1,84 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+)
+
+type codeSpanParser struct {
+}
+
+var defaultCodeSpanParser = &codeSpanParser{}
+
+// NewCodeSpanParser return a new InlineParser that parses inline codes
+// surrounded by '`' .
+func NewCodeSpanParser() InlineParser {
+ return defaultCodeSpanParser
+}
+
+func (s *codeSpanParser) Trigger() []byte {
+ return []byte{'`'}
+}
+
// Parse parses an inline code span: an opening backtick run, raw content
// (possibly spanning lines), and a closing run of exactly the same length.
// If no closer is found before EOF, the opening run is emitted as plain text.
func (s *codeSpanParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
	line, startSegment := block.PeekLine()
	opener := 0
	for ; opener < len(line) && line[opener] == '`'; opener++ {
	}
	block.Advance(opener)
	l, pos := block.Position()
	node := ast.NewCodeSpan()
	for {
		line, segment := block.PeekLine()
		if line == nil {
			// EOF without a closer: rewind and return the literal backticks.
			block.SetPosition(l, pos)
			return ast.NewTextSegment(startSegment.WithStop(startSegment.Start + opener))
		}
		for i := 0; i < len(line); i++ {
			c := line[i]
			if c == '`' {
				oldi := i
				for ; i < len(line) && line[i] == '`'; i++ {
				}
				closure := i - oldi
				// Only a run of exactly the opener's length closes the span.
				if closure == opener && (i >= len(line) || line[i] != '`') {
					segment = segment.WithStop(segment.Start + i - closure)
					if !segment.IsEmpty() {
						node.AppendChild(node, ast.NewRawTextSegment(segment))
					}
					block.Advance(i)
					goto end
				}
			}
		}
		// No closer on this line: keep the whole line as raw content.
		node.AppendChild(node, ast.NewRawTextSegment(segment))
		block.AdvanceLine()
	}
end:
	if !node.IsBlank(block.Source()) {
		// trim first halfspace and last halfspace
		// (one leading and one trailing space/newline are stripped only
		// when BOTH ends have one, per the CommonMark code-span rule)
		segment := node.FirstChild().(*ast.Text).Segment
		shouldTrimmed := true
		if !(!segment.IsEmpty() && isSpaceOrNewline(block.Source()[segment.Start])) {
			shouldTrimmed = false
		}
		segment = node.LastChild().(*ast.Text).Segment
		if !(!segment.IsEmpty() && isSpaceOrNewline(block.Source()[segment.Stop-1])) {
			shouldTrimmed = false
		}
		if shouldTrimmed {
			t := node.FirstChild().(*ast.Text)
			segment := t.Segment
			t.Segment = segment.WithStart(segment.Start + 1)
			t = node.LastChild().(*ast.Text)
			segment = node.LastChild().(*ast.Text).Segment
			t.Segment = segment.WithStop(segment.Stop - 1)
		}

	}
	return node
}
+
// isSpaceOrNewline reports whether c is an ASCII space or a newline byte.
func isSpaceOrNewline(c byte) bool {
	switch c {
	case ' ', '\n':
		return true
	}
	return false
}
diff --git a/vendor/github.com/yuin/goldmark/parser/delimiter.go b/vendor/github.com/yuin/goldmark/parser/delimiter.go
new file mode 100644
index 000000000..eb843af44
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/delimiter.go
@@ -0,0 +1,238 @@
+package parser
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A DelimiterProcessor interface provides a set of functions about
+// Delimiter nodes.
+type DelimiterProcessor interface {
+ // IsDelimiter returns true if given character is a delimiter, otherwise false.
+ IsDelimiter(byte) bool
+
+ // CanOpenCloser returns true if given opener can close given closer, otherwise false.
+ CanOpenCloser(opener, closer *Delimiter) bool
+
+ // OnMatch will be called when new matched delimiter found.
+ // OnMatch should return a new Node correspond to the matched delimiter.
+ OnMatch(consumes int) ast.Node
+}
+
+// A Delimiter struct represents a delimiter like '*' of the Markdown text.
+type Delimiter struct {
+ ast.BaseInline
+
+ Segment text.Segment
+
+ // CanOpen is set true if this delimiter can open a span for a new node.
+ // See https://spec.commonmark.org/0.30/#can-open-emphasis for details.
+ CanOpen bool
+
+ // CanClose is set true if this delimiter can close a span for a new node.
+ // See https://spec.commonmark.org/0.30/#can-open-emphasis for details.
+ CanClose bool
+
+ // Length is a remaining length of this delimiter.
+ Length int
+
+ // OriginalLength is a original length of this delimiter.
+ OriginalLength int
+
+ // Char is a character of this delimiter.
+ Char byte
+
+ // PreviousDelimiter is a previous sibling delimiter node of this delimiter.
+ PreviousDelimiter *Delimiter
+
+ // NextDelimiter is a next sibling delimiter node of this delimiter.
+ NextDelimiter *Delimiter
+
+ // Processor is a DelimiterProcessor associated with this delimiter.
+ Processor DelimiterProcessor
+}
+
+// Inline implements Inline.Inline.
+func (d *Delimiter) Inline() {}
+
+// Dump implements Node.Dump.
+func (d *Delimiter) Dump(source []byte, level int) {
+ fmt.Printf("%sDelimiter: \"%s\"\n", strings.Repeat(" ", level), string(d.Text(source)))
+}
+
+var kindDelimiter = ast.NewNodeKind("Delimiter")
+
+// Kind implements Node.Kind
+func (d *Delimiter) Kind() ast.NodeKind {
+ return kindDelimiter
+}
+
+// Text implements Node.Text
+func (d *Delimiter) Text(source []byte) []byte {
+ return d.Segment.Value(source)
+}
+
+// ConsumeCharacters consumes delimiters.
+func (d *Delimiter) ConsumeCharacters(n int) {
+ d.Length -= n
+ d.Segment = d.Segment.WithStop(d.Segment.Start + d.Length)
+}
+
// CalcComsumption calculates how many characters should be used for opening
// a new span correspond to given closer.
//
// It applies the CommonMark "rule of three": when either delimiter can both
// open and close, a pair whose combined original length is a multiple of 3
// cannot match unless both lengths are themselves multiples of 3 (checking
// the closer suffices, since the sum is a multiple of 3), so 0 is returned.
// Otherwise 2 characters are consumed when both runs still have at least 2
// left, else 1.
// (The misspelled name is exported API and is kept for compatibility.)
func (d *Delimiter) CalcComsumption(closer *Delimiter) int {
	if (d.CanClose || closer.CanOpen) && (d.OriginalLength+closer.OriginalLength)%3 == 0 && closer.OriginalLength%3 != 0 {
		return 0
	}
	if d.Length >= 2 && closer.Length >= 2 {
		return 2
	}
	return 1
}
+
+// NewDelimiter returns a new Delimiter node.
+func NewDelimiter(canOpen, canClose bool, length int, char byte, processor DelimiterProcessor) *Delimiter {
+ c := &Delimiter{
+ BaseInline: ast.BaseInline{},
+ CanOpen: canOpen,
+ CanClose: canClose,
+ Length: length,
+ OriginalLength: length,
+ Char: char,
+ PreviousDelimiter: nil,
+ NextDelimiter: nil,
+ Processor: processor,
+ }
+ return c
+}
+
// ScanDelimiter scans a delimiter run at the start of line using the given
// DelimiterProcessor. before is the rune immediately preceding the run and
// min is the minimum acceptable run length. It returns a new Delimiter
// whose CanOpen/CanClose flags follow the CommonMark left-/right-flanking
// rules, or nil when the first byte is not a delimiter or the run is too
// short.
func ScanDelimiter(line []byte, before rune, min int, processor DelimiterProcessor) *Delimiter {
	i := 0
	c := line[i]
	j := i
	if !processor.IsDelimiter(c) {
		return nil
	}
	for ; j < len(line) && c == line[j]; j++ {
	}
	if (j - i) >= min {
		// A run at end-of-line is treated as followed by a space.
		after := rune(' ')
		if j != len(line) {
			after = util.ToRune(line, j)
		}

		canOpen, canClose := false, false
		beforeIsPunctuation := util.IsPunctRune(before)
		beforeIsWhitespace := util.IsSpaceRune(before)
		afterIsPunctuation := util.IsPunctRune(after)
		afterIsWhitespace := util.IsSpaceRune(after)

		// Left-flanking: not followed by whitespace, and not followed by
		// punctuation unless preceded by whitespace or punctuation.
		isLeft := !afterIsWhitespace &&
			(!afterIsPunctuation || beforeIsWhitespace || beforeIsPunctuation)
		// Right-flanking is the mirror image.
		isRight := !beforeIsWhitespace &&
			(!beforeIsPunctuation || afterIsWhitespace || afterIsPunctuation)

		// '_' additionally may not open/close inside a word; '*' may.
		if line[i] == '_' {
			canOpen = isLeft && (!isRight || beforeIsPunctuation)
			canClose = isRight && (!isLeft || afterIsPunctuation)
		} else {
			canOpen = isLeft
			canClose = isRight
		}
		return NewDelimiter(canOpen, canClose, j-i, c, processor)
	}
	return nil
}
+
// ProcessDelimiters processes the delimiter list in the context, pairing
// openers with closers and replacing matched pairs with the nodes produced
// by their DelimiterProcessor. Processing stops when reaching bottom.
//
// If you implement an inline parser that can have other inline nodes as
// children, you should call this function when a nesting span has closed.
func ProcessDelimiters(bottom ast.Node, pc Context) {
	lastDelimiter := pc.LastDelimiter()
	if lastDelimiter == nil {
		return
	}
	// Locate the left-most delimiter above bottom; matching proceeds
	// left-to-right from there.
	var closer *Delimiter
	if bottom != nil {
		if bottom != lastDelimiter {
			for c := lastDelimiter.PreviousSibling(); c != nil && c != bottom; {
				if d, ok := c.(*Delimiter); ok {
					closer = d
				}
				c = c.PreviousSibling()
			}
		}
	} else {
		closer = pc.FirstDelimiter()
	}
	if closer == nil {
		pc.ClearDelimiters(bottom)
		return
	}
	for closer != nil {
		if !closer.CanClose {
			closer = closer.NextDelimiter
			continue
		}
		// Walk backwards to find the nearest compatible opener with a
		// non-zero consumption (the "rule of three" can veto a pair).
		consume := 0
		found := false
		maybeOpener := false
		var opener *Delimiter
		for opener = closer.PreviousDelimiter; opener != nil && opener != bottom; opener = opener.PreviousDelimiter {
			if opener.CanOpen && opener.Processor.CanOpenCloser(opener, closer) {
				maybeOpener = true
				consume = opener.CalcComsumption(closer)
				if consume > 0 {
					found = true
					break
				}
			}
		}
		if !found {
			next := closer.NextDelimiter
			// A delimiter that can neither close anything nor open a later
			// span is dead and can be dropped now.
			if !maybeOpener && !closer.CanOpen {
				pc.RemoveDelimiter(closer)
			}
			closer = next
			continue
		}
		opener.ConsumeCharacters(consume)
		closer.ConsumeCharacters(consume)

		node := opener.Processor.OnMatch(consume)

		// Reparent everything between opener and closer under the new node.
		parent := opener.Parent()
		child := opener.NextSibling()

		for child != nil && child != closer {
			next := child.NextSibling()
			node.AppendChild(node, child)
			child = next
		}
		parent.InsertAfter(parent, opener, node)

		// Delimiters strictly between a matched pair can never match again.
		for c := opener.NextDelimiter; c != nil && c != closer; {
			next := c.NextDelimiter
			pc.RemoveDelimiter(c)
			c = next
		}

		// Fully consumed delimiters are removed; a partially consumed
		// closer is retried against earlier openers on the next iteration.
		if opener.Length == 0 {
			pc.RemoveDelimiter(opener)
		}

		if closer.Length == 0 {
			next := closer.NextDelimiter
			pc.RemoveDelimiter(closer)
			closer = next
		}
	}
	pc.ClearDelimiters(bottom)
}
diff --git a/vendor/github.com/yuin/goldmark/parser/emphasis.go b/vendor/github.com/yuin/goldmark/parser/emphasis.go
new file mode 100644
index 000000000..488647117
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/emphasis.go
@@ -0,0 +1,50 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+)
+
+type emphasisDelimiterProcessor struct {
+}
+
+func (p *emphasisDelimiterProcessor) IsDelimiter(b byte) bool {
+ return b == '*' || b == '_'
+}
+
+func (p *emphasisDelimiterProcessor) CanOpenCloser(opener, closer *Delimiter) bool {
+ return opener.Char == closer.Char
+}
+
+func (p *emphasisDelimiterProcessor) OnMatch(consumes int) ast.Node {
+ return ast.NewEmphasis(consumes)
+}
+
+var defaultEmphasisDelimiterProcessor = &emphasisDelimiterProcessor{}
+
+type emphasisParser struct {
+}
+
+var defaultEmphasisParser = &emphasisParser{}
+
+// NewEmphasisParser return a new InlineParser that parses emphasises.
+func NewEmphasisParser() InlineParser {
+ return defaultEmphasisParser
+}
+
+func (s *emphasisParser) Trigger() []byte {
+ return []byte{'*', '_'}
+}
+
// Parse scans a run of '*' or '_' at the current position, wraps it in a
// Delimiter node and pushes it onto the context's delimiter stack for later
// pairing by ProcessDelimiters.
func (s *emphasisParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
	// The preceding character decides the flanking rules inside ScanDelimiter.
	before := block.PrecendingCharacter()
	line, segment := block.PeekLine()
	node := ScanDelimiter(line, before, 1, defaultEmphasisDelimiterProcessor)
	if node == nil {
		return nil
	}
	node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
	block.Advance(node.OriginalLength)
	pc.PushDelimiter(node)
	return node
}
diff --git a/vendor/github.com/yuin/goldmark/parser/fcode_block.go b/vendor/github.com/yuin/goldmark/parser/fcode_block.go
new file mode 100644
index 000000000..e51a35ace
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/fcode_block.go
@@ -0,0 +1,121 @@
+package parser
+
+import (
+ "bytes"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type fencedCodeBlockParser struct {
+}
+
+var defaultFencedCodeBlockParser = &fencedCodeBlockParser{}
+
+// NewFencedCodeBlockParser returns a new BlockParser that
+// parses fenced code blocks.
+func NewFencedCodeBlockParser() BlockParser {
+ return defaultFencedCodeBlockParser
+}
+
+type fenceData struct {
+ char byte
+ indent int
+ length int
+ node ast.Node
+}
+
+var fencedCodeBlockInfoKey = NewContextKey()
+
+func (b *fencedCodeBlockParser) Trigger() []byte {
+ return []byte{'~', '`'}
+}
+
+func (b *fencedCodeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ line, segment := reader.PeekLine()
+ pos := pc.BlockOffset()
+ if pos < 0 || (line[pos] != '`' && line[pos] != '~') {
+ return nil, NoChildren
+ }
+ findent := pos
+ fenceChar := line[pos]
+ i := pos
+ for ; i < len(line) && line[i] == fenceChar; i++ {
+ }
+ oFenceLength := i - pos
+ if oFenceLength < 3 {
+ return nil, NoChildren
+ }
+ var info *ast.Text
+ if i < len(line)-1 {
+ rest := line[i:]
+ left := util.TrimLeftSpaceLength(rest)
+ right := util.TrimRightSpaceLength(rest)
+ if left < len(rest)-right {
+ infoStart, infoStop := segment.Start-segment.Padding+i+left, segment.Stop-right
+ value := rest[left : len(rest)-right]
+ if fenceChar == '`' && bytes.IndexByte(value, '`') > -1 {
+ return nil, NoChildren
+ } else if infoStart != infoStop {
+ info = ast.NewTextSegment(text.NewSegment(infoStart, infoStop))
+ }
+ }
+ }
+ node := ast.NewFencedCodeBlock(info)
+ pc.Set(fencedCodeBlockInfoKey, &fenceData{fenceChar, findent, oFenceLength, node})
+ return node, NoChildren
+
+}
+
+func (b *fencedCodeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ line, segment := reader.PeekLine()
+ fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
+
+ w, pos := util.IndentWidth(line, reader.LineOffset())
+ if w < 4 {
+ i := pos
+ for ; i < len(line) && line[i] == fdata.char; i++ {
+ }
+ length := i - pos
+ if length >= fdata.length && util.IsBlank(line[i:]) {
+ newline := 1
+ if line[len(line)-1] != '\n' {
+ newline = 0
+ }
+ reader.Advance(segment.Stop - segment.Start - newline + segment.Padding)
+ return Close
+ }
+ }
+ pos, padding := util.IndentPositionPadding(line, reader.LineOffset(), segment.Padding, fdata.indent)
+ if pos < 0 {
+ pos = util.FirstNonSpacePosition(line)
+ if pos < 0 {
+ pos = 0
+ }
+ padding = 0
+ }
+ seg := text.NewSegmentPadding(segment.Start+pos, segment.Stop, padding)
+ // if code block line starts with a tab, keep a tab as it is.
+ if padding != 0 {
+ preserveLeadingTabInCodeBlock(&seg, reader, fdata.indent)
+ }
+ node.Lines().Append(seg)
+ reader.AdvanceAndSetPadding(segment.Stop-segment.Start-pos-1, padding)
+ return Continue | NoChildren
+}
+
+func (b *fencedCodeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
+ if fdata.node == node {
+ pc.Set(fencedCodeBlockInfoKey, nil)
+ }
+}
+
+func (b *fencedCodeBlockParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *fencedCodeBlockParser) CanAcceptIndentedLine() bool {
+ return false
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/html_block.go b/vendor/github.com/yuin/goldmark/parser/html_block.go
new file mode 100644
index 000000000..380e723f2
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/html_block.go
@@ -0,0 +1,228 @@
+package parser
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var allowedBlockTags = map[string]bool{
+ "address": true,
+ "article": true,
+ "aside": true,
+ "base": true,
+ "basefont": true,
+ "blockquote": true,
+ "body": true,
+ "caption": true,
+ "center": true,
+ "col": true,
+ "colgroup": true,
+ "dd": true,
+ "details": true,
+ "dialog": true,
+ "dir": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "fieldset": true,
+ "figcaption": true,
+ "figure": true,
+ "footer": true,
+ "form": true,
+ "frame": true,
+ "frameset": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "header": true,
+ "hr": true,
+ "html": true,
+ "iframe": true,
+ "legend": true,
+ "li": true,
+ "link": true,
+ "main": true,
+ "menu": true,
+ "menuitem": true,
+ "meta": true,
+ "nav": true,
+ "noframes": true,
+ "ol": true,
+ "optgroup": true,
+ "option": true,
+ "p": true,
+ "param": true,
+ "section": true,
+ "source": true,
+ "summary": true,
+ "table": true,
+ "tbody": true,
+ "td": true,
+ "tfoot": true,
+ "th": true,
+ "thead": true,
+ "title": true,
+ "tr": true,
+ "track": true,
+ "ul": true,
+}
+
+var htmlBlockType1OpenRegexp = regexp.MustCompile(`(?i)^[ ]{0,3}<(script|pre|style|textarea)(?:\s.*|>.*|/>.*|)(?:\r\n|\n)?$`)
+var htmlBlockType1CloseRegexp = regexp.MustCompile(`(?i)^.*</(?:script|pre|style|textarea)>.*`)
+
+var htmlBlockType2OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<!\-\-`)
+var htmlBlockType2Close = []byte{'-', '-', '>'}
+
+var htmlBlockType3OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\?`)
+var htmlBlockType3Close = []byte{'?', '>'}
+
+var htmlBlockType4OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<![A-Z]+.*(?:\r\n|\n)?$`)
+var htmlBlockType4Close = []byte{'>'}
+
+var htmlBlockType5OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\!\[CDATA\[`)
+var htmlBlockType5Close = []byte{']', ']', '>'}
+
+var htmlBlockType6Regexp = regexp.MustCompile(`^[ ]{0,3}<(?:/[ ]*)?([a-zA-Z]+[a-zA-Z0-9\-]*)(?:[ ].*|>.*|/>.*|)(?:\r\n|\n)?$`)
+
+var htmlBlockType7Regexp = regexp.MustCompile(`^[ ]{0,3}<(/[ ]*)?([a-zA-Z]+[a-zA-Z0-9\-]*)(` + attributePattern + `*)[ ]*(?:>|/>)[ ]*(?:\r\n|\n)?$`)
+
+type htmlBlockParser struct {
+}
+
+var defaultHTMLBlockParser = &htmlBlockParser{}
+
+// NewHTMLBlockParser returns a new BlockParser that can parse HTML
+// blocks.
+func NewHTMLBlockParser() BlockParser {
+ return defaultHTMLBlockParser
+}
+
+func (b *htmlBlockParser) Trigger() []byte {
+ return []byte{'<'}
+}
+
+func (b *htmlBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ var node *ast.HTMLBlock
+ line, segment := reader.PeekLine()
+ last := pc.LastOpenedBlock().Node
+ if pos := pc.BlockOffset(); pos < 0 || line[pos] != '<' {
+ return nil, NoChildren
+ }
+
+ if m := htmlBlockType1OpenRegexp.FindSubmatchIndex(line); m != nil {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType1)
+ } else if htmlBlockType2OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType2)
+ } else if htmlBlockType3OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType3)
+ } else if htmlBlockType4OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType4)
+ } else if htmlBlockType5OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType5)
+ } else if match := htmlBlockType7Regexp.FindSubmatchIndex(line); match != nil {
+ isCloseTag := match[2] > -1 && bytes.Equal(line[match[2]:match[3]], []byte("/"))
+ hasAttr := match[6] != match[7]
+ tagName := strings.ToLower(string(line[match[4]:match[5]]))
+ _, ok := allowedBlockTags[tagName]
+ if ok {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType6)
+ } else if tagName != "script" && tagName != "style" && tagName != "pre" && !ast.IsParagraph(last) && !(isCloseTag && hasAttr) { // type 7 can not interrupt paragraph
+ node = ast.NewHTMLBlock(ast.HTMLBlockType7)
+ }
+ }
+ if node == nil {
+ if match := htmlBlockType6Regexp.FindSubmatchIndex(line); match != nil {
+ tagName := string(line[match[2]:match[3]])
+ _, ok := allowedBlockTags[strings.ToLower(tagName)]
+ if ok {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType6)
+ }
+ }
+ }
+ if node != nil {
+ reader.Advance(segment.Len() - 1)
+ node.Lines().Append(segment)
+ return node, NoChildren
+ }
+ return nil, NoChildren
+}
+
+func (b *htmlBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ htmlBlock := node.(*ast.HTMLBlock)
+ lines := htmlBlock.Lines()
+ line, segment := reader.PeekLine()
+ var closurePattern []byte
+
+ switch htmlBlock.HTMLBlockType {
+ case ast.HTMLBlockType1:
+ if lines.Len() == 1 {
+ firstLine := lines.At(0)
+ if htmlBlockType1CloseRegexp.Match(firstLine.Value(reader.Source())) {
+ return Close
+ }
+ }
+ if htmlBlockType1CloseRegexp.Match(line) {
+ htmlBlock.ClosureLine = segment
+ reader.Advance(segment.Len() - 1)
+ return Close
+ }
+ case ast.HTMLBlockType2:
+ closurePattern = htmlBlockType2Close
+ fallthrough
+ case ast.HTMLBlockType3:
+ if closurePattern == nil {
+ closurePattern = htmlBlockType3Close
+ }
+ fallthrough
+ case ast.HTMLBlockType4:
+ if closurePattern == nil {
+ closurePattern = htmlBlockType4Close
+ }
+ fallthrough
+ case ast.HTMLBlockType5:
+ if closurePattern == nil {
+ closurePattern = htmlBlockType5Close
+ }
+
+ if lines.Len() == 1 {
+ firstLine := lines.At(0)
+ if bytes.Contains(firstLine.Value(reader.Source()), closurePattern) {
+ return Close
+ }
+ }
+ if bytes.Contains(line, closurePattern) {
+ htmlBlock.ClosureLine = segment
+ reader.Advance(segment.Len())
+ return Close
+ }
+
+ case ast.HTMLBlockType6, ast.HTMLBlockType7:
+ if util.IsBlank(line) {
+ return Close
+ }
+ }
+ node.Lines().Append(segment)
+ reader.Advance(segment.Len() - 1)
+ return Continue | NoChildren
+}
+
+func (b *htmlBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *htmlBlockParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *htmlBlockParser) CanAcceptIndentedLine() bool {
+ return false
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/link.go b/vendor/github.com/yuin/goldmark/parser/link.go
new file mode 100644
index 000000000..99583ac2a
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/link.go
@@ -0,0 +1,409 @@
+package parser
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var linkLabelStateKey = NewContextKey()
+
+type linkLabelState struct {
+ ast.BaseInline
+
+ Segment text.Segment
+
+ IsImage bool
+
+ Prev *linkLabelState
+
+ Next *linkLabelState
+
+ First *linkLabelState
+
+ Last *linkLabelState
+}
+
+func newLinkLabelState(segment text.Segment, isImage bool) *linkLabelState {
+ return &linkLabelState{
+ Segment: segment,
+ IsImage: isImage,
+ }
+}
+
+func (s *linkLabelState) Text(source []byte) []byte {
+ return s.Segment.Value(source)
+}
+
+func (s *linkLabelState) Dump(source []byte, level int) {
+ fmt.Printf("%slinkLabelState: \"%s\"\n", strings.Repeat(" ", level), s.Text(source))
+}
+
+var kindLinkLabelState = ast.NewNodeKind("LinkLabelState")
+
+func (s *linkLabelState) Kind() ast.NodeKind {
+ return kindLinkLabelState
+}
+
+func linkLabelStateLength(v *linkLabelState) int {
+ if v == nil || v.Last == nil || v.First == nil {
+ return 0
+ }
+ return v.Last.Segment.Stop - v.First.Segment.Start
+}
+
+func pushLinkLabelState(pc Context, v *linkLabelState) {
+ tlist := pc.Get(linkLabelStateKey)
+ var list *linkLabelState
+ if tlist == nil {
+ list = v
+ v.First = v
+ v.Last = v
+ pc.Set(linkLabelStateKey, list)
+ } else {
+ list = tlist.(*linkLabelState)
+ l := list.Last
+ list.Last = v
+ l.Next = v
+ v.Prev = l
+ }
+}
+
+func removeLinkLabelState(pc Context, d *linkLabelState) {
+ tlist := pc.Get(linkLabelStateKey)
+ var list *linkLabelState
+ if tlist == nil {
+ return
+ }
+ list = tlist.(*linkLabelState)
+
+ if d.Prev == nil {
+ list = d.Next
+ if list != nil {
+ list.First = d
+ list.Last = d.Last
+ list.Prev = nil
+ pc.Set(linkLabelStateKey, list)
+ } else {
+ pc.Set(linkLabelStateKey, nil)
+ }
+ } else {
+ d.Prev.Next = d.Next
+ if d.Next != nil {
+ d.Next.Prev = d.Prev
+ }
+ }
+ if list != nil && d.Next == nil {
+ list.Last = d.Prev
+ }
+ d.Next = nil
+ d.Prev = nil
+ d.First = nil
+ d.Last = nil
+}
+
+type linkParser struct {
+}
+
+var defaultLinkParser = &linkParser{}
+
+// NewLinkParser returns a new InlineParser that parses links.
+func NewLinkParser() InlineParser {
+ return defaultLinkParser
+}
+
+func (s *linkParser) Trigger() []byte {
+ return []byte{'!', '[', ']'}
+}
+
+var linkBottom = NewContextKey()
+
+func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ line, segment := block.PeekLine()
+ if line[0] == '!' {
+ if len(line) > 1 && line[1] == '[' {
+ block.Advance(1)
+ pc.Set(linkBottom, pc.LastDelimiter())
+ return processLinkLabelOpen(block, segment.Start+1, true, pc)
+ }
+ return nil
+ }
+ if line[0] == '[' {
+ pc.Set(linkBottom, pc.LastDelimiter())
+ return processLinkLabelOpen(block, segment.Start, false, pc)
+ }
+
+ // line[0] == ']'
+ tlist := pc.Get(linkLabelStateKey)
+ if tlist == nil {
+ return nil
+ }
+ last := tlist.(*linkLabelState).Last
+ if last == nil {
+ return nil
+ }
+ block.Advance(1)
+ removeLinkLabelState(pc, last)
+ // CommonMark spec says:
+ // > A link label can have at most 999 characters inside the square brackets.
+ if linkLabelStateLength(tlist.(*linkLabelState)) > 998 {
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+
+ if !last.IsImage && s.containsLink(last) { // a link in a link text is not allowed
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+
+ c := block.Peek()
+ l, pos := block.Position()
+ var link *ast.Link
+ var hasValue bool
+ if c == '(' { // normal link
+ link = s.parseLink(parent, last, block, pc)
+ } else if c == '[' { // reference link
+ link, hasValue = s.parseReferenceLink(parent, last, block, pc)
+ if link == nil && hasValue {
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+ }
+
+ if link == nil {
+ // maybe shortcut reference link
+ block.SetPosition(l, pos)
+ ssegment := text.NewSegment(last.Segment.Stop, segment.Start)
+ maybeReference := block.Value(ssegment)
+ // CommonMark spec says:
+ // > A link label can have at most 999 characters inside the square brackets.
+ if len(maybeReference) > 999 {
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+
+ ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
+ if !ok {
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+ link = ast.NewLink()
+ s.processLinkLabel(parent, link, last, pc)
+ link.Title = ref.Title()
+ link.Destination = ref.Destination()
+ }
+ if last.IsImage {
+ last.Parent().RemoveChild(last.Parent(), last)
+ return ast.NewImage(link)
+ }
+ last.Parent().RemoveChild(last.Parent(), last)
+ return link
+}
+
+func (s *linkParser) containsLink(n ast.Node) bool {
+ if n == nil {
+ return false
+ }
+ for c := n; c != nil; c = c.NextSibling() {
+ if _, ok := c.(*ast.Link); ok {
+ return true
+ }
+ if s.containsLink(c.FirstChild()) {
+ return true
+ }
+ }
+ return false
+}
+
+func processLinkLabelOpen(block text.Reader, pos int, isImage bool, pc Context) *linkLabelState {
+ start := pos
+ if isImage {
+ start--
+ }
+ state := newLinkLabelState(text.NewSegment(start, pos+1), isImage)
+ pushLinkLabelState(pc, state)
+ block.Advance(1)
+ return state
+}
+
+func (s *linkParser) processLinkLabel(parent ast.Node, link *ast.Link, last *linkLabelState, pc Context) {
+ var bottom ast.Node
+ if v := pc.Get(linkBottom); v != nil {
+ bottom = v.(ast.Node)
+ }
+ pc.Set(linkBottom, nil)
+ ProcessDelimiters(bottom, pc)
+ for c := last.NextSibling(); c != nil; {
+ next := c.NextSibling()
+ parent.RemoveChild(parent, c)
+ link.AppendChild(link, c)
+ c = next
+ }
+}
+
+var linkFindClosureOptions text.FindClosureOptions = text.FindClosureOptions{
+ Nesting: false,
+ Newline: true,
+ Advance: true,
+}
+
+func (s *linkParser) parseReferenceLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) (*ast.Link, bool) {
+ _, orgpos := block.Position()
+ block.Advance(1) // skip '['
+ segments, found := block.FindClosure('[', ']', linkFindClosureOptions)
+ if !found {
+ return nil, false
+ }
+
+ var maybeReference []byte
+ if segments.Len() == 1 { // avoid allocate a new byte slice
+ maybeReference = block.Value(segments.At(0))
+ } else {
+ maybeReference = []byte{}
+ for i := 0; i < segments.Len(); i++ {
+ s := segments.At(i)
+ maybeReference = append(maybeReference, block.Value(s)...)
+ }
+ }
+ if util.IsBlank(maybeReference) { // collapsed reference link
+ s := text.NewSegment(last.Segment.Stop, orgpos.Start-1)
+ maybeReference = block.Value(s)
+ }
+ // CommonMark spec says:
+ // > A link label can have at most 999 characters inside the square brackets.
+ if len(maybeReference) > 999 {
+ return nil, true
+ }
+
+ ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
+ if !ok {
+ return nil, true
+ }
+
+ link := ast.NewLink()
+ s.processLinkLabel(parent, link, last, pc)
+ link.Title = ref.Title()
+ link.Destination = ref.Destination()
+ return link, true
+}
+
+func (s *linkParser) parseLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) *ast.Link {
+ block.Advance(1) // skip '('
+ block.SkipSpaces()
+ var title []byte
+ var destination []byte
+ var ok bool
+ if block.Peek() == ')' { // empty link like '[link]()'
+ block.Advance(1)
+ } else {
+ destination, ok = parseLinkDestination(block)
+ if !ok {
+ return nil
+ }
+ block.SkipSpaces()
+ if block.Peek() == ')' {
+ block.Advance(1)
+ } else {
+ title, ok = parseLinkTitle(block)
+ if !ok {
+ return nil
+ }
+ block.SkipSpaces()
+ if block.Peek() == ')' {
+ block.Advance(1)
+ } else {
+ return nil
+ }
+ }
+ }
+
+ link := ast.NewLink()
+ s.processLinkLabel(parent, link, last, pc)
+ link.Destination = destination
+ link.Title = title
+ return link
+}
+
+func parseLinkDestination(block text.Reader) ([]byte, bool) {
+ block.SkipSpaces()
+ line, _ := block.PeekLine()
+ if block.Peek() == '<' {
+ i := 1
+ for i < len(line) {
+ c := line[i]
+ if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
+ i += 2
+ continue
+ } else if c == '>' {
+ block.Advance(i + 1)
+ return line[1:i], true
+ }
+ i++
+ }
+ return nil, false
+ }
+ opened := 0
+ i := 0
+ for i < len(line) {
+ c := line[i]
+ if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
+ i += 2
+ continue
+ } else if c == '(' {
+ opened++
+ } else if c == ')' {
+ opened--
+ if opened < 0 {
+ break
+ }
+ } else if util.IsSpace(c) {
+ break
+ }
+ i++
+ }
+ block.Advance(i)
+ return line[:i], len(line[:i]) != 0
+}
+
+func parseLinkTitle(block text.Reader) ([]byte, bool) {
+ block.SkipSpaces()
+ opener := block.Peek()
+ if opener != '"' && opener != '\'' && opener != '(' {
+ return nil, false
+ }
+ closer := opener
+ if opener == '(' {
+ closer = ')'
+ }
+ block.Advance(1)
+ segments, found := block.FindClosure(opener, closer, linkFindClosureOptions)
+ if found {
+ if segments.Len() == 1 {
+ return block.Value(segments.At(0)), true
+ }
+ var title []byte
+ for i := 0; i < segments.Len(); i++ {
+ s := segments.At(i)
+ title = append(title, block.Value(s)...)
+ }
+ return title, true
+ }
+ return nil, false
+}
+
+func (s *linkParser) CloseBlock(parent ast.Node, block text.Reader, pc Context) {
+ pc.Set(linkBottom, nil)
+ tlist := pc.Get(linkLabelStateKey)
+ if tlist == nil {
+ return
+ }
+ for s := tlist.(*linkLabelState); s != nil; {
+ next := s.Next
+ removeLinkLabelState(pc, s)
+ s.Parent().ReplaceChild(s.Parent(), s, ast.NewTextSegment(s.Segment))
+ s = next
+ }
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/link_ref.go b/vendor/github.com/yuin/goldmark/parser/link_ref.go
new file mode 100644
index 000000000..ea3f6544a
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/link_ref.go
@@ -0,0 +1,152 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type linkReferenceParagraphTransformer struct {
+}
+
+// LinkReferenceParagraphTransformer is a ParagraphTransformer implementation
+// that parses and extracts link references from paragraphs.
+var LinkReferenceParagraphTransformer = &linkReferenceParagraphTransformer{}
+
+func (p *linkReferenceParagraphTransformer) Transform(node *ast.Paragraph, reader text.Reader, pc Context) {
+ lines := node.Lines()
+ block := text.NewBlockReader(reader.Source(), lines)
+ removes := [][2]int{}
+ for {
+ start, end := parseLinkReferenceDefinition(block, pc)
+ if start > -1 {
+ if start == end {
+ end++
+ }
+ removes = append(removes, [2]int{start, end})
+ continue
+ }
+ break
+ }
+
+ offset := 0
+ for _, remove := range removes {
+ if lines.Len() == 0 {
+ break
+ }
+ s := lines.Sliced(remove[1]-offset, lines.Len())
+ lines.SetSliced(0, remove[0]-offset)
+ lines.AppendAll(s)
+ offset = remove[1]
+ }
+
+ if lines.Len() == 0 {
+ t := ast.NewTextBlock()
+ t.SetBlankPreviousLines(node.HasBlankPreviousLines())
+ node.Parent().ReplaceChild(node.Parent(), node, t)
+ return
+ }
+
+ node.SetLines(lines)
+}
+
+func parseLinkReferenceDefinition(block text.Reader, pc Context) (int, int) {
+ block.SkipSpaces()
+ line, _ := block.PeekLine()
+ if line == nil {
+ return -1, -1
+ }
+ startLine, _ := block.Position()
+ width, pos := util.IndentWidth(line, 0)
+ if width > 3 {
+ return -1, -1
+ }
+ if width != 0 {
+ pos++
+ }
+ if line[pos] != '[' {
+ return -1, -1
+ }
+ block.Advance(pos + 1)
+ segments, found := block.FindClosure('[', ']', linkFindClosureOptions)
+ if !found {
+ return -1, -1
+ }
+ var label []byte
+ if segments.Len() == 1 {
+ label = block.Value(segments.At(0))
+ } else {
+ for i := 0; i < segments.Len(); i++ {
+ s := segments.At(i)
+ label = append(label, block.Value(s)...)
+ }
+ }
+ if util.IsBlank(label) {
+ return -1, -1
+ }
+ if block.Peek() != ':' {
+ return -1, -1
+ }
+ block.Advance(1)
+ block.SkipSpaces()
+ destination, ok := parseLinkDestination(block)
+ if !ok {
+ return -1, -1
+ }
+ line, _ = block.PeekLine()
+ isNewLine := line == nil || util.IsBlank(line)
+
+ endLine, _ := block.Position()
+ _, spaces, _ := block.SkipSpaces()
+ opener := block.Peek()
+ if opener != '"' && opener != '\'' && opener != '(' {
+ if !isNewLine {
+ return -1, -1
+ }
+ ref := NewReference(label, destination, nil)
+ pc.AddReference(ref)
+ return startLine, endLine + 1
+ }
+ if spaces == 0 {
+ return -1, -1
+ }
+ block.Advance(1)
+ closer := opener
+ if opener == '(' {
+ closer = ')'
+ }
+ segments, found = block.FindClosure(opener, closer, linkFindClosureOptions)
+ if !found {
+ if !isNewLine {
+ return -1, -1
+ }
+ ref := NewReference(label, destination, nil)
+ pc.AddReference(ref)
+ block.AdvanceLine()
+ return startLine, endLine + 1
+ }
+ var title []byte
+ if segments.Len() == 1 {
+ title = block.Value(segments.At(0))
+ } else {
+ for i := 0; i < segments.Len(); i++ {
+ s := segments.At(i)
+ title = append(title, block.Value(s)...)
+ }
+ }
+
+ line, _ = block.PeekLine()
+ if line != nil && !util.IsBlank(line) {
+ if !isNewLine {
+ return -1, -1
+ }
+ ref := NewReference(label, destination, title)
+ pc.AddReference(ref)
+ return startLine, endLine
+ }
+
+ endLine, _ = block.Position()
+ ref := NewReference(label, destination, title)
+ pc.AddReference(ref)
+ return startLine, endLine + 1
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/list.go b/vendor/github.com/yuin/goldmark/parser/list.go
new file mode 100644
index 000000000..e5cad1173
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/list.go
@@ -0,0 +1,287 @@
+package parser
+
+import (
+ "strconv"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type listItemType int
+
+const (
+ notList listItemType = iota
+ bulletList
+ orderedList
+)
+
+var skipListParserKey = NewContextKey()
+var emptyListItemWithBlankLines = NewContextKey()
+var listItemFlagValue interface{} = true
+
+// Same as
+// `^(([ ]*)([\-\*\+]))(\s+.*)?\n?$`.FindSubmatchIndex or
+// `^(([ ]*)(\d{1,9}[\.\)]))(\s+.*)?\n?$`.FindSubmatchIndex
+func parseListItem(line []byte) ([6]int, listItemType) {
+ i := 0
+ l := len(line)
+ ret := [6]int{}
+ for ; i < l && line[i] == ' '; i++ {
+ c := line[i]
+ if c == '\t' {
+ return ret, notList
+ }
+ }
+ if i > 3 {
+ return ret, notList
+ }
+ ret[0] = 0
+ ret[1] = i
+ ret[2] = i
+ var typ listItemType
+ if i < l && (line[i] == '-' || line[i] == '*' || line[i] == '+') {
+ i++
+ ret[3] = i
+ typ = bulletList
+ } else if i < l {
+ for ; i < l && util.IsNumeric(line[i]); i++ {
+ }
+ ret[3] = i
+ if ret[3] == ret[2] || ret[3]-ret[2] > 9 {
+ return ret, notList
+ }
+ if i < l && (line[i] == '.' || line[i] == ')') {
+ i++
+ ret[3] = i
+ } else {
+ return ret, notList
+ }
+ typ = orderedList
+ } else {
+ return ret, notList
+ }
+ if i < l && line[i] != '\n' {
+ w, _ := util.IndentWidth(line[i:], 0)
+ if w == 0 {
+ return ret, notList
+ }
+ }
+ if i >= l {
+ ret[4] = -1
+ ret[5] = -1
+ return ret, typ
+ }
+ ret[4] = i
+ ret[5] = len(line)
+ if line[ret[5]-1] == '\n' && line[i] != '\n' {
+ ret[5]--
+ }
+ return ret, typ
+}
+
+func matchesListItem(source []byte, strict bool) ([6]int, listItemType) {
+ m, typ := parseListItem(source)
+ if typ != notList && (!strict || strict && m[1] < 4) {
+ return m, typ
+ }
+ return m, notList
+}
+
+func calcListOffset(source []byte, match [6]int) int {
+ offset := 0
+ if match[4] < 0 || util.IsBlank(source[match[4]:]) { // list item starts with a blank line
+ offset = 1
+ } else {
+ offset, _ = util.IndentWidth(source[match[4]:], match[4])
+ if offset > 4 { // offseted codeblock
+ offset = 1
+ }
+ }
+ return offset
+}
+
+func lastOffset(node ast.Node) int {
+ lastChild := node.LastChild()
+ if lastChild != nil {
+ return lastChild.(*ast.ListItem).Offset
+ }
+ return 0
+}
+
+type listParser struct {
+}
+
+var defaultListParser = &listParser{}
+
+// NewListParser returns a new BlockParser that
+// parses lists.
+// This parser must take precedence over the ListItemParser.
+func NewListParser() BlockParser {
+ return defaultListParser
+}
+
+func (b *listParser) Trigger() []byte {
+ return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+}
+
+func (b *listParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ last := pc.LastOpenedBlock().Node
+ if _, lok := last.(*ast.List); lok || pc.Get(skipListParserKey) != nil {
+ pc.Set(skipListParserKey, nil)
+ return nil, NoChildren
+ }
+ line, _ := reader.PeekLine()
+ match, typ := matchesListItem(line, true)
+ if typ == notList {
+ return nil, NoChildren
+ }
+ start := -1
+ if typ == orderedList {
+ number := line[match[2] : match[3]-1]
+ start, _ = strconv.Atoi(string(number))
+ }
+
+ if ast.IsParagraph(last) && last.Parent() == parent {
+ // we allow only lists starting with 1 to interrupt paragraphs.
+ if typ == orderedList && start != 1 {
+ return nil, NoChildren
+ }
+		// an empty list item cannot interrupt a paragraph:
+ if match[4] < 0 || util.IsBlank(line[match[4]:match[5]]) {
+ return nil, NoChildren
+ }
+ }
+
+ marker := line[match[3]-1]
+ node := ast.NewList(marker)
+ if start > -1 {
+ node.Start = start
+ }
+ pc.Set(emptyListItemWithBlankLines, nil)
+ return node, HasChildren
+}
+
+func (b *listParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ list := node.(*ast.List)
+ line, _ := reader.PeekLine()
+ if util.IsBlank(line) {
+ if node.LastChild().ChildCount() == 0 {
+ pc.Set(emptyListItemWithBlankLines, listItemFlagValue)
+ }
+ return Continue | HasChildren
+ }
+
+ // "offset" means a width that bar indicates.
+ // - aaaaaaaa
+ // |----|
+ //
+ // If the indent is less than the last offset like
+ // - a
+ // - b <--- current line
+	// it may be a new child of the list.
+ //
+	// Empty list items can have multiple blank lines
+ //
+ // - <--- 1st item is an empty thus "offset" is unknown
+ //
+ //
+ // - <--- current line
+ //
+ // -> 1 list with 2 blank items
+ //
+	// So if the last item is empty, it may be a new child of the list.
+ //
+ offset := lastOffset(node)
+ lastIsEmpty := node.LastChild().ChildCount() == 0
+ indent, _ := util.IndentWidth(line, reader.LineOffset())
+
+ if indent < offset || lastIsEmpty {
+ if indent < 4 {
+ match, typ := matchesListItem(line, false) // may have a leading spaces more than 3
+ if typ != notList && match[1]-offset < 4 {
+ marker := line[match[3]-1]
+ if !list.CanContinue(marker, typ == orderedList) {
+ return Close
+ }
+ // Thematic Breaks take precedence over lists
+ if isThematicBreak(line[match[3]-1:], 0) {
+ isHeading := false
+ last := pc.LastOpenedBlock().Node
+ if ast.IsParagraph(last) {
+ c, ok := matchesSetextHeadingBar(line[match[3]-1:])
+ if ok && c == '-' {
+ isHeading = true
+ }
+ }
+ if !isHeading {
+ return Close
+ }
+ }
+ return Continue | HasChildren
+ }
+ }
+ if !lastIsEmpty {
+ return Close
+ }
+ }
+
+ if lastIsEmpty && indent < offset {
+ return Close
+ }
+
+	// Non-empty items cannot exist next to an empty list item
+ // with blank lines. So we need to close the current list
+ //
+ // -
+ //
+ // foo
+ //
+	// -> 1 list with 1 blank item and 1 paragraph
+ if pc.Get(emptyListItemWithBlankLines) != nil {
+ return Close
+ }
+ return Continue | HasChildren
+}
+
+func (b *listParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ list := node.(*ast.List)
+
+ for c := node.FirstChild(); c != nil && list.IsTight; c = c.NextSibling() {
+ if c.FirstChild() != nil && c.FirstChild() != c.LastChild() {
+ for c1 := c.FirstChild().NextSibling(); c1 != nil; c1 = c1.NextSibling() {
+ if bl, ok := c1.(ast.Node); ok && bl.HasBlankPreviousLines() {
+ list.IsTight = false
+ break
+ }
+ }
+ }
+ if c != node.FirstChild() {
+ if bl, ok := c.(ast.Node); ok && bl.HasBlankPreviousLines() {
+ list.IsTight = false
+ }
+ }
+ }
+
+ if list.IsTight {
+ for child := node.FirstChild(); child != nil; child = child.NextSibling() {
+ for gc := child.FirstChild(); gc != nil; {
+ paragraph, ok := gc.(*ast.Paragraph)
+ gc = gc.NextSibling()
+ if ok {
+ textBlock := ast.NewTextBlock()
+ textBlock.SetLines(paragraph.Lines())
+ child.ReplaceChild(child, paragraph, textBlock)
+ }
+ }
+ }
+ }
+}
+
+func (b *listParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *listParser) CanAcceptIndentedLine() bool {
+ return false
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/list_item.go b/vendor/github.com/yuin/goldmark/parser/list_item.go
new file mode 100644
index 000000000..81357a9ad
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/list_item.go
@@ -0,0 +1,90 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type listItemParser struct {
+}
+
+var defaultListItemParser = &listItemParser{}
+
+// NewListItemParser returns a new BlockParser that
+// parses list items.
+func NewListItemParser() BlockParser {
+ return defaultListItemParser
+}
+
+func (b *listItemParser) Trigger() []byte {
+ return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+}
+
+func (b *listItemParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ list, lok := parent.(*ast.List)
+ if !lok { // list item must be a child of a list
+ return nil, NoChildren
+ }
+ offset := lastOffset(list)
+ line, _ := reader.PeekLine()
+ match, typ := matchesListItem(line, false)
+ if typ == notList {
+ return nil, NoChildren
+ }
+ if match[1]-offset > 3 {
+ return nil, NoChildren
+ }
+
+ pc.Set(emptyListItemWithBlankLines, nil)
+
+ itemOffset := calcListOffset(line, match)
+ node := ast.NewListItem(match[3] + itemOffset)
+ if match[4] < 0 || util.IsBlank(line[match[4]:match[5]]) {
+ return node, NoChildren
+ }
+
+ pos, padding := util.IndentPosition(line[match[4]:], match[4], itemOffset)
+ child := match[3] + pos
+ reader.AdvanceAndSetPadding(child, padding)
+ return node, HasChildren
+}
+
+func (b *listItemParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ line, _ := reader.PeekLine()
+ if util.IsBlank(line) {
+ reader.Advance(len(line) - 1)
+ return Continue | HasChildren
+ }
+
+ offset := lastOffset(node.Parent())
+ isEmpty := node.ChildCount() == 0
+ indent, _ := util.IndentWidth(line, reader.LineOffset())
+ if (isEmpty || indent < offset) && indent < 4 {
+ _, typ := matchesListItem(line, true)
+ // new list item found
+ if typ != notList {
+ pc.Set(skipListParserKey, listItemFlagValue)
+ return Close
+ }
+ if !isEmpty {
+ return Close
+ }
+ }
+ pos, padding := util.IndentPosition(line, reader.LineOffset(), offset)
+ reader.AdvanceAndSetPadding(pos, padding)
+
+ return Continue | HasChildren
+}
+
+func (b *listItemParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *listItemParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *listItemParser) CanAcceptIndentedLine() bool {
+ return false
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/paragraph.go b/vendor/github.com/yuin/goldmark/parser/paragraph.go
new file mode 100644
index 000000000..9d3fa38e0
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/paragraph.go
@@ -0,0 +1,72 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// paragraphParser is a BlockParser for paragraphs. It is stateless, so a
+// single shared instance serves all parsers.
+type paragraphParser struct {
+}
+
+var defaultParagraphParser = &paragraphParser{}
+
+// NewParagraphParser returns a new BlockParser that
+// parses paragraphs.
+func NewParagraphParser() BlockParser {
+	return defaultParagraphParser
+}
+
+// Trigger returns nil: a paragraph can begin with any character, so this
+// parser is consulted for every line.
+func (b *paragraphParser) Trigger() []byte {
+	return nil
+}
+
+// Open starts a paragraph on any line that is not blank, recording the
+// line (left-trimmed) as the paragraph's first line.
+func (b *paragraphParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+	_, segment := reader.PeekLine()
+	segment = segment.TrimLeftSpace(reader.Source())
+	if segment.IsEmpty() {
+		return nil, NoChildren
+	}
+	node := ast.NewParagraph()
+	node.Lines().Append(segment)
+	// Advance by Len()-1 so the trailing newline is left for the caller's
+	// AdvanceLine.
+	reader.Advance(segment.Len() - 1)
+	return node, NoChildren
+}
+
+// Continue appends each non-blank line to the paragraph; a blank line
+// closes it.
+func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+	line, segment := reader.PeekLine()
+	if util.IsBlank(line) {
+		return Close
+	}
+	node.Lines().Append(segment)
+	reader.Advance(segment.Len() - 1)
+	return Continue | NoChildren
+}
+
+// Close trims surrounding whitespace from the collected lines and removes
+// the paragraph from the tree if it has no lines left (e.g. its content
+// was consumed elsewhere — presumably by link reference parsing; verify).
+func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) {
+	lines := node.Lines()
+	if lines.Len() != 0 {
+		// trim leading spaces
+		for i := 0; i < lines.Len(); i++ {
+			l := lines.At(i)
+			lines.Set(i, l.TrimLeftSpace(reader.Source()))
+		}
+
+		// trim trailing spaces (only the last line; inner lines keep
+		// their trailing newline)
+		length := lines.Len()
+		lastLine := node.Lines().At(length - 1)
+		node.Lines().Set(length-1, lastLine.TrimRightSpace(reader.Source()))
+	}
+	if lines.Len() == 0 {
+		node.Parent().RemoveChild(node.Parent(), node)
+		return
+	}
+}
+
+// CanInterruptParagraph reports false: a paragraph never interrupts another.
+func (b *paragraphParser) CanInterruptParagraph() bool {
+	return false
+}
+
+// CanAcceptIndentedLine reports false: a paragraph cannot open on a line
+// indented more than 3 spaces.
+func (b *paragraphParser) CanAcceptIndentedLine() bool {
+	return false
+}
diff --git a/vendor/github.com/yuin/goldmark/parser/parser.go b/vendor/github.com/yuin/goldmark/parser/parser.go
new file mode 100644
index 000000000..a82369266
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/parser.go
@@ -0,0 +1,1253 @@
+// Package parser provides functionality related to parsing Markdown text.
+package parser
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A Reference interface represents a link reference in Markdown text.
+type Reference interface {
+ // String implements Stringer.
+ String() string
+
+ // Label returns a label of the reference.
+ Label() []byte
+
+ // Destination returns a destination(URL) of the reference.
+ Destination() []byte
+
+ // Title returns a title of the reference.
+ Title() []byte
+}
+
+type reference struct {
+ label []byte
+ destination []byte
+ title []byte
+}
+
+// NewReference returns a new Reference.
+func NewReference(label, destination, title []byte) Reference {
+ return &reference{label, destination, title}
+}
+
+func (r *reference) Label() []byte {
+ return r.label
+}
+
+func (r *reference) Destination() []byte {
+ return r.destination
+}
+
+func (r *reference) Title() []byte {
+ return r.title
+}
+
+func (r *reference) String() string {
+ return fmt.Sprintf("Reference{Label:%s, Destination:%s, Title:%s}", r.label, r.destination, r.title)
+}
+
+// An IDs interface is a collection of the element ids.
+type IDs interface {
+ // Generate generates a new element id.
+ Generate(value []byte, kind ast.NodeKind) []byte
+
+ // Put puts a given element id to the used ids table.
+ Put(value []byte)
+}
+
+type ids struct {
+ values map[string]bool
+}
+
+func newIDs() IDs {
+ return &ids{
+ values: map[string]bool{},
+ }
+}
+
+// Generate builds a unique, URL-friendly element id from the given text.
+// ASCII letters and digits are kept (letters lowercased); spaces, '-' and
+// '_' become '-'; every other byte — including all multi-byte UTF-8
+// sequences — is dropped. When nothing remains, a kind-based fallback
+// ("heading" or "id") is used, and duplicates are disambiguated with a
+// numeric "-N" suffix. The chosen id is recorded as used.
+func (s *ids) Generate(value []byte, kind ast.NodeKind) []byte {
+	value = util.TrimLeftSpace(value)
+	value = util.TrimRightSpace(value)
+	result := []byte{}
+	for i := 0; i < len(value); {
+		v := value[i]
+		l := util.UTF8Len(v)
+		i += int(l)
+		if l != 1 {
+			continue // skip non-ASCII runes entirely
+		}
+		if util.IsAlphaNumeric(v) {
+			if 'A' <= v && v <= 'Z' {
+				v += 'a' - 'A' // lowercase ASCII letters
+			}
+			result = append(result, v)
+		} else if util.IsSpace(v) || v == '-' || v == '_' {
+			result = append(result, '-')
+		}
+	}
+	if len(result) == 0 {
+		if kind == ast.KindHeading {
+			result = []byte("heading")
+		} else {
+			result = []byte("id")
+		}
+	}
+	if _, ok := s.values[util.BytesToReadOnlyString(result)]; !ok {
+		s.values[util.BytesToReadOnlyString(result)] = true
+		return result
+	}
+	// The id is taken: probe "-1", "-2", … until an unused one is found.
+	for i := 1; ; i++ {
+		newResult := fmt.Sprintf("%s-%d", result, i)
+		if _, ok := s.values[newResult]; !ok {
+			s.values[newResult] = true
+			return []byte(newResult)
+		}
+
+	}
+}
+
+func (s *ids) Put(value []byte) {
+ s.values[util.BytesToReadOnlyString(value)] = true
+}
+
+// ContextKey is a key that is used to set arbitrary values to the context.
+type ContextKey int
+
+// ContextKeyMax is a maximum value of the ContextKey.
+var ContextKeyMax ContextKey
+
+// NewContextKey return a new ContextKey value.
+func NewContextKey() ContextKey {
+ ContextKeyMax++
+ return ContextKeyMax
+}
+
+// A Context interface holds a information that are necessary to parse
+// Markdown text.
+type Context interface {
+ // String implements Stringer.
+ String() string
+
+ // Get returns a value associated with the given key.
+ Get(ContextKey) interface{}
+
+ // ComputeIfAbsent computes a value if a value associated with the given key is absent and returns the value.
+ ComputeIfAbsent(ContextKey, func() interface{}) interface{}
+
+ // Set sets the given value to the context.
+ Set(ContextKey, interface{})
+
+ // AddReference adds the given reference to this context.
+ AddReference(Reference)
+
+ // Reference returns (a reference, true) if a reference associated with
+ // the given label exists, otherwise (nil, false).
+ Reference(label string) (Reference, bool)
+
+ // References returns a list of references.
+ References() []Reference
+
+ // IDs returns a collection of the element ids.
+ IDs() IDs
+
+ // BlockOffset returns a first non-space character position on current line.
+ // This value is valid only for BlockParser.Open.
+ // BlockOffset returns -1 if current line is blank.
+ BlockOffset() int
+
+	// SetBlockOffset sets a first non-space character position on current line.
+ // This value is valid only for BlockParser.Open.
+ SetBlockOffset(int)
+
+ // BlockIndent returns an indent width on current line.
+ // This value is valid only for BlockParser.Open.
+ // BlockIndent returns -1 if current line is blank.
+ BlockIndent() int
+
+	// SetBlockIndent sets an indent width on current line.
+ // This value is valid only for BlockParser.Open.
+ SetBlockIndent(int)
+
+ // FirstDelimiter returns a first delimiter of the current delimiter list.
+ FirstDelimiter() *Delimiter
+
+ // LastDelimiter returns a last delimiter of the current delimiter list.
+ LastDelimiter() *Delimiter
+
+ // PushDelimiter appends the given delimiter to the tail of the current
+ // delimiter list.
+ PushDelimiter(delimiter *Delimiter)
+
+ // RemoveDelimiter removes the given delimiter from the current delimiter list.
+ RemoveDelimiter(d *Delimiter)
+
+ // ClearDelimiters clears the current delimiter list.
+ ClearDelimiters(bottom ast.Node)
+
+ // OpenedBlocks returns a list of nodes that are currently in parsing.
+ OpenedBlocks() []Block
+
+ // SetOpenedBlocks sets a list of nodes that are currently in parsing.
+ SetOpenedBlocks([]Block)
+
+ // LastOpenedBlock returns a last node that is currently in parsing.
+ LastOpenedBlock() Block
+
+ // IsInLinkLabel returns true if current position seems to be in link label.
+ IsInLinkLabel() bool
+}
+
+// A ContextConfig struct is a data structure that holds configuration of the Context.
+type ContextConfig struct {
+ IDs IDs
+}
+
+// An ContextOption is a functional option type for the Context.
+type ContextOption func(*ContextConfig)
+
+// WithIDs is a functional option for the Context.
+func WithIDs(ids IDs) ContextOption {
+ return func(c *ContextConfig) {
+ c.IDs = ids
+ }
+}
+
+type parseContext struct {
+ store []interface{}
+ ids IDs
+ refs map[string]Reference
+ blockOffset int
+ blockIndent int
+ delimiters *Delimiter
+ lastDelimiter *Delimiter
+ openedBlocks []Block
+}
+
+// NewContext returns a new Context.
+func NewContext(options ...ContextOption) Context {
+ cfg := &ContextConfig{
+ IDs: newIDs(),
+ }
+ for _, option := range options {
+ option(cfg)
+ }
+
+ return &parseContext{
+ store: make([]interface{}, ContextKeyMax+1),
+ refs: map[string]Reference{},
+ ids: cfg.IDs,
+ blockOffset: -1,
+ blockIndent: -1,
+ delimiters: nil,
+ lastDelimiter: nil,
+ openedBlocks: []Block{},
+ }
+}
+
+func (p *parseContext) Get(key ContextKey) interface{} {
+ return p.store[key]
+}
+
+func (p *parseContext) ComputeIfAbsent(key ContextKey, f func() interface{}) interface{} {
+ v := p.store[key]
+ if v == nil {
+ v = f()
+ p.store[key] = v
+ }
+ return v
+}
+
+func (p *parseContext) Set(key ContextKey, value interface{}) {
+ p.store[key] = value
+}
+
+func (p *parseContext) IDs() IDs {
+ return p.ids
+}
+
+func (p *parseContext) BlockOffset() int {
+ return p.blockOffset
+}
+
+func (p *parseContext) SetBlockOffset(v int) {
+ p.blockOffset = v
+}
+
+func (p *parseContext) BlockIndent() int {
+ return p.blockIndent
+}
+
+func (p *parseContext) SetBlockIndent(v int) {
+ p.blockIndent = v
+}
+
+func (p *parseContext) LastDelimiter() *Delimiter {
+ return p.lastDelimiter
+}
+
+func (p *parseContext) FirstDelimiter() *Delimiter {
+ return p.delimiters
+}
+
+// PushDelimiter appends d to the tail of the doubly-linked delimiter list.
+func (p *parseContext) PushDelimiter(d *Delimiter) {
+	if p.delimiters == nil {
+		p.delimiters = d
+		p.lastDelimiter = d
+	} else {
+		l := p.lastDelimiter
+		p.lastDelimiter = d
+		l.NextDelimiter = d
+		d.PreviousDelimiter = l
+	}
+}
+
+// RemoveDelimiter unlinks d from the delimiter list and replaces the node
+// in the AST: a delimiter with remaining Length becomes (or merges into) a
+// plain text segment, while an exhausted one is removed outright.
+func (p *parseContext) RemoveDelimiter(d *Delimiter) {
+	if d.PreviousDelimiter == nil {
+		p.delimiters = d.NextDelimiter
+	} else {
+		d.PreviousDelimiter.NextDelimiter = d.NextDelimiter
+		if d.NextDelimiter != nil {
+			d.NextDelimiter.PreviousDelimiter = d.PreviousDelimiter
+		}
+	}
+	if d.NextDelimiter == nil {
+		p.lastDelimiter = d.PreviousDelimiter
+	}
+	// Re-establish the list invariants at both ends.
+	if p.delimiters != nil {
+		p.delimiters.PreviousDelimiter = nil
+	}
+	if p.lastDelimiter != nil {
+		p.lastDelimiter.NextDelimiter = nil
+	}
+	d.NextDelimiter = nil
+	d.PreviousDelimiter = nil
+	if d.Length != 0 {
+		ast.MergeOrReplaceTextSegment(d.Parent(), d, d.Segment)
+	} else {
+		d.Parent().RemoveChild(d.Parent(), d)
+	}
+}
+
+// ClearDelimiters removes every delimiter from the tail of the list down
+// to (but not including) bottom, walking previous siblings in the AST.
+func (p *parseContext) ClearDelimiters(bottom ast.Node) {
+	if p.lastDelimiter == nil {
+		return
+	}
+	var c ast.Node
+	for c = p.lastDelimiter; c != nil && c != bottom; {
+		// Capture the previous sibling first: RemoveDelimiter detaches c
+		// from the tree.
+		prev := c.PreviousSibling()
+		if d, ok := c.(*Delimiter); ok {
+			p.RemoveDelimiter(d)
+		}
+		c = prev
+	}
+}
+
+func (p *parseContext) AddReference(ref Reference) {
+ key := util.ToLinkReference(ref.Label())
+ if _, ok := p.refs[key]; !ok {
+ p.refs[key] = ref
+ }
+}
+
+func (p *parseContext) Reference(label string) (Reference, bool) {
+ v, ok := p.refs[label]
+ return v, ok
+}
+
+func (p *parseContext) References() []Reference {
+ ret := make([]Reference, 0, len(p.refs))
+ for _, v := range p.refs {
+ ret = append(ret, v)
+ }
+ return ret
+}
+
+func (p *parseContext) String() string {
+ refs := []string{}
+ for _, r := range p.refs {
+ refs = append(refs, r.String())
+ }
+
+ return fmt.Sprintf("Context{Store:%#v, Refs:%s}", p.store, strings.Join(refs, ","))
+}
+
+func (p *parseContext) OpenedBlocks() []Block {
+ return p.openedBlocks
+}
+
+func (p *parseContext) SetOpenedBlocks(v []Block) {
+ p.openedBlocks = v
+}
+
+func (p *parseContext) LastOpenedBlock() Block {
+ if l := len(p.openedBlocks); l != 0 {
+ return p.openedBlocks[l-1]
+ }
+ return Block{}
+}
+
+func (p *parseContext) IsInLinkLabel() bool {
+ tlist := p.Get(linkLabelStateKey)
+ return tlist != nil
+}
+
+// State represents parser's state.
+// State is designed to use as a bit flag.
+type State int
+
+const (
+ none State = 1 << iota
+
+ // Continue indicates parser can continue parsing.
+ Continue
+
+ // Close indicates parser cannot parse anymore.
+ Close
+
+ // HasChildren indicates parser may have child blocks.
+ HasChildren
+
+ // NoChildren indicates parser does not have child blocks.
+ NoChildren
+
+ // RequireParagraph indicates parser requires that the last node
+ // must be a paragraph and is not converted to other nodes by
+ // ParagraphTransformers.
+ RequireParagraph
+)
+
+// A Config struct is a data structure that holds configuration of the Parser.
+type Config struct {
+ Options map[OptionName]interface{}
+ BlockParsers util.PrioritizedSlice /*<BlockParser>*/
+ InlineParsers util.PrioritizedSlice /*<InlineParser>*/
+ ParagraphTransformers util.PrioritizedSlice /*<ParagraphTransformer>*/
+ ASTTransformers util.PrioritizedSlice /*<ASTTransformer>*/
+ EscapedSpace bool
+}
+
+// NewConfig returns a new Config.
+func NewConfig() *Config {
+ return &Config{
+ Options: map[OptionName]interface{}{},
+ BlockParsers: util.PrioritizedSlice{},
+ InlineParsers: util.PrioritizedSlice{},
+ ParagraphTransformers: util.PrioritizedSlice{},
+ ASTTransformers: util.PrioritizedSlice{},
+ }
+}
+
+// An Option interface is a functional option type for the Parser.
+type Option interface {
+ SetParserOption(*Config)
+}
+
+// OptionName is a name of parser options.
+type OptionName string
+
+// Attribute is an option name that specifies attributes of elements.
+const optAttribute OptionName = "Attribute"
+
+type withAttribute struct {
+}
+
+func (o *withAttribute) SetParserOption(c *Config) {
+ c.Options[optAttribute] = true
+}
+
+// WithAttribute is a functional option that enables custom attributes.
+func WithAttribute() Option {
+ return &withAttribute{}
+}
+
+// A Parser interface parses Markdown text into AST nodes.
+type Parser interface {
+ // Parse parses the given Markdown text into AST nodes.
+ Parse(reader text.Reader, opts ...ParseOption) ast.Node
+
+	// AddOptions adds the given options to this parser.
+ AddOptions(...Option)
+}
+
+// A SetOptioner interface sets the given option to the object.
+type SetOptioner interface {
+ // SetOption sets the given option to the object.
+ // Unacceptable options may be passed.
+ // Thus implementations must ignore unacceptable options.
+ SetOption(name OptionName, value interface{})
+}
+
+// A BlockParser interface parses a block level element like Paragraph, List,
+// Blockquote etc.
+type BlockParser interface {
+ // Trigger returns a list of characters that triggers Parse method of
+ // this parser.
+ // If Trigger returns a nil, Open will be called with any lines.
+ Trigger() []byte
+
+ // Open parses the current line and returns a result of parsing.
+ //
+ // Open must not parse beyond the current line.
+ // If Open has been able to parse the current line, Open must advance a reader
+ // position by consumed byte length.
+ //
+	// If Open has not been able to parse the current line, Open should return
+	// (nil, NoChildren). If Open has been able to parse the current line, Open
+	// should return a new Block node and HasChildren or NoChildren.
+ Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State)
+
+ // Continue parses the current line and returns a result of parsing.
+ //
+ // Continue must not parse beyond the current line.
+ // If Continue has been able to parse the current line, Continue must advance
+ // a reader position by consumed byte length.
+ //
+	// If Continue has not been able to parse the current line, Continue should
+	// return Close. If Continue has been able to parse the current line,
+	// Continue should return (Continue | NoChildren) or
+ // (Continue | HasChildren)
+ Continue(node ast.Node, reader text.Reader, pc Context) State
+
+ // Close will be called when the parser returns Close.
+ Close(node ast.Node, reader text.Reader, pc Context)
+
+ // CanInterruptParagraph returns true if the parser can interrupt paragraphs,
+ // otherwise false.
+ CanInterruptParagraph() bool
+
+ // CanAcceptIndentedLine returns true if the parser can open new node when
+ // the given line is being indented more than 3 spaces.
+ CanAcceptIndentedLine() bool
+}
+
+// An InlineParser interface parses an inline level element like CodeSpan, Link etc.
+type InlineParser interface {
+ // Trigger returns a list of characters that triggers Parse method of
+ // this parser.
+ // Trigger characters must be a punctuation or a halfspace.
+ // Halfspaces triggers this parser when character is any spaces characters or
+ // a head of line
+ Trigger() []byte
+
+ // Parse parse the given block into an inline node.
+ //
+ // Parse can parse beyond the current line.
+ // If Parse has been able to parse the current line, it must advance a reader
+ // position by consumed byte length.
+ Parse(parent ast.Node, block text.Reader, pc Context) ast.Node
+}
+
+// A CloseBlocker interface is a callback function that will be
+// called when block is closed in the inline parsing.
+type CloseBlocker interface {
+ // CloseBlock will be called when a block is closed.
+ CloseBlock(parent ast.Node, block text.Reader, pc Context)
+}
+
+// A ParagraphTransformer transforms parsed Paragraph nodes.
+// For example, link references are searched in parsed Paragraphs.
+type ParagraphTransformer interface {
+ // Transform transforms the given paragraph.
+ Transform(node *ast.Paragraph, reader text.Reader, pc Context)
+}
+
+// ASTTransformer transforms entire Markdown document AST tree.
+type ASTTransformer interface {
+ // Transform transforms the given AST tree.
+ Transform(node *ast.Document, reader text.Reader, pc Context)
+}
+
+// DefaultBlockParsers returns a new list of default BlockParsers.
+// Priorities of default BlockParsers are:
+//
+// SetextHeadingParser, 100
+// ThematicBreakParser, 200
+// ListParser, 300
+// ListItemParser, 400
+// CodeBlockParser, 500
+// ATXHeadingParser, 600
+// FencedCodeBlockParser, 700
+// BlockquoteParser, 800
+// HTMLBlockParser, 900
+// ParagraphParser, 1000
+func DefaultBlockParsers() []util.PrioritizedValue {
+ return []util.PrioritizedValue{
+ util.Prioritized(NewSetextHeadingParser(), 100),
+ util.Prioritized(NewThematicBreakParser(), 200),
+ util.Prioritized(NewListParser(), 300),
+ util.Prioritized(NewListItemParser(), 400),
+ util.Prioritized(NewCodeBlockParser(), 500),
+ util.Prioritized(NewATXHeadingParser(), 600),
+ util.Prioritized(NewFencedCodeBlockParser(), 700),
+ util.Prioritized(NewBlockquoteParser(), 800),
+ util.Prioritized(NewHTMLBlockParser(), 900),
+ util.Prioritized(NewParagraphParser(), 1000),
+ }
+}
+
+// DefaultInlineParsers returns a new list of default InlineParsers.
+// Priorities of default InlineParsers are:
+//
+// CodeSpanParser, 100
+// LinkParser, 200
+// AutoLinkParser, 300
+// RawHTMLParser, 400
+// EmphasisParser, 500
+func DefaultInlineParsers() []util.PrioritizedValue {
+ return []util.PrioritizedValue{
+ util.Prioritized(NewCodeSpanParser(), 100),
+ util.Prioritized(NewLinkParser(), 200),
+ util.Prioritized(NewAutoLinkParser(), 300),
+ util.Prioritized(NewRawHTMLParser(), 400),
+ util.Prioritized(NewEmphasisParser(), 500),
+ }
+}
+
+// DefaultParagraphTransformers returns a new list of default ParagraphTransformers.
+// Priorities of default ParagraphTransformers are:
+//
+// LinkReferenceParagraphTransformer, 100
+func DefaultParagraphTransformers() []util.PrioritizedValue {
+ return []util.PrioritizedValue{
+ util.Prioritized(LinkReferenceParagraphTransformer, 100),
+ }
+}
+
+// A Block struct holds a node and correspond parser pair.
+type Block struct {
+ // Node is a BlockNode.
+ Node ast.Node
+ // Parser is a BlockParser.
+ Parser BlockParser
+}
+
+type parser struct {
+ options map[OptionName]interface{}
+ blockParsers [256][]BlockParser
+ freeBlockParsers []BlockParser
+ inlineParsers [256][]InlineParser
+ closeBlockers []CloseBlocker
+ paragraphTransformers []ParagraphTransformer
+ astTransformers []ASTTransformer
+ escapedSpace bool
+ config *Config
+ initSync sync.Once
+}
+
+type withBlockParsers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withBlockParsers) SetParserOption(c *Config) {
+ c.BlockParsers = append(c.BlockParsers, o.value...)
+}
+
+// WithBlockParsers is a functional option that allow you to add
+// BlockParsers to the parser.
+func WithBlockParsers(bs ...util.PrioritizedValue) Option {
+ return &withBlockParsers{bs}
+}
+
+type withInlineParsers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withInlineParsers) SetParserOption(c *Config) {
+ c.InlineParsers = append(c.InlineParsers, o.value...)
+}
+
+// WithInlineParsers is a functional option that allow you to add
+// InlineParsers to the parser.
+func WithInlineParsers(bs ...util.PrioritizedValue) Option {
+ return &withInlineParsers{bs}
+}
+
+type withParagraphTransformers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withParagraphTransformers) SetParserOption(c *Config) {
+ c.ParagraphTransformers = append(c.ParagraphTransformers, o.value...)
+}
+
+// WithParagraphTransformers is a functional option that allow you to add
+// ParagraphTransformers to the parser.
+func WithParagraphTransformers(ps ...util.PrioritizedValue) Option {
+ return &withParagraphTransformers{ps}
+}
+
+type withASTTransformers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withASTTransformers) SetParserOption(c *Config) {
+ c.ASTTransformers = append(c.ASTTransformers, o.value...)
+}
+
+// WithASTTransformers is a functional option that allow you to add
+// ASTTransformers to the parser.
+func WithASTTransformers(ps ...util.PrioritizedValue) Option {
+ return &withASTTransformers{ps}
+}
+
+type withEscapedSpace struct {
+}
+
+func (o *withEscapedSpace) SetParserOption(c *Config) {
+ c.EscapedSpace = true
+}
+
+// WithEscapedSpace is a functional option indicates that a '\' escaped half-space(0x20) should not trigger parsers.
+func WithEscapedSpace() Option {
+ return &withEscapedSpace{}
+}
+
+type withOption struct {
+ name OptionName
+ value interface{}
+}
+
+func (o *withOption) SetParserOption(c *Config) {
+ c.Options[o.name] = o.value
+}
+
+// WithOption is a functional option that allow you to set
+// an arbitrary option to the parser.
+func WithOption(name OptionName, value interface{}) Option {
+ return &withOption{name, value}
+}
+
+// NewParser returns a new Parser with given options.
+func NewParser(options ...Option) Parser {
+ config := NewConfig()
+ for _, opt := range options {
+ opt.SetParserOption(config)
+ }
+
+ p := &parser{
+ options: map[OptionName]interface{}{},
+ config: config,
+ }
+
+ return p
+}
+
+func (p *parser) AddOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt.SetParserOption(p.config)
+ }
+}
+
+func (p *parser) addBlockParser(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ bp, ok := v.Value.(BlockParser)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a BlockParser", v.Value))
+ }
+ tcs := bp.Trigger()
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ if tcs == nil {
+ p.freeBlockParsers = append(p.freeBlockParsers, bp)
+ } else {
+ for _, tc := range tcs {
+ if p.blockParsers[tc] == nil {
+ p.blockParsers[tc] = []BlockParser{}
+ }
+ p.blockParsers[tc] = append(p.blockParsers[tc], bp)
+ }
+ }
+}
+
+func (p *parser) addInlineParser(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ ip, ok := v.Value.(InlineParser)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a InlineParser", v.Value))
+ }
+ tcs := ip.Trigger()
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ if cb, ok := ip.(CloseBlocker); ok {
+ p.closeBlockers = append(p.closeBlockers, cb)
+ }
+ for _, tc := range tcs {
+ if p.inlineParsers[tc] == nil {
+ p.inlineParsers[tc] = []InlineParser{}
+ }
+ p.inlineParsers[tc] = append(p.inlineParsers[tc], ip)
+ }
+}
+
+func (p *parser) addParagraphTransformer(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ pt, ok := v.Value.(ParagraphTransformer)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a ParagraphTransformer", v.Value))
+ }
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ p.paragraphTransformers = append(p.paragraphTransformers, pt)
+}
+
+func (p *parser) addASTTransformer(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ at, ok := v.Value.(ASTTransformer)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a ASTTransformer", v.Value))
+ }
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ p.astTransformers = append(p.astTransformers, at)
+}
+
+// A ParseConfig struct is a data structure that holds configuration of the Parser.Parse.
+type ParseConfig struct {
+ Context Context
+}
+
+// A ParseOption is a functional option type for the Parser.Parse.
+type ParseOption func(c *ParseConfig)
+
+// WithContext is a functional option that allow you to override
+// a default context.
+func WithContext(context Context) ParseOption {
+ return func(c *ParseConfig) {
+ c.Context = context
+ }
+}
+
+// Parse parses the given Markdown text into an AST.
+//
+// Parser setup (sorting and registering the configured parsers and
+// transformers) runs lazily, exactly once, on the first call; the Config
+// is released afterwards.
+func (p *parser) Parse(reader text.Reader, opts ...ParseOption) ast.Node {
+	p.initSync.Do(func() {
+		p.config.BlockParsers.Sort()
+		for _, v := range p.config.BlockParsers {
+			p.addBlockParser(v, p.config.Options)
+		}
+		// Trigger-less (free) block parsers are appended after every
+		// triggered parser list so they are always consulted last.
+		for i := range p.blockParsers {
+			if p.blockParsers[i] != nil {
+				p.blockParsers[i] = append(p.blockParsers[i], p.freeBlockParsers...)
+			}
+		}
+
+		p.config.InlineParsers.Sort()
+		for _, v := range p.config.InlineParsers {
+			p.addInlineParser(v, p.config.Options)
+		}
+		p.config.ParagraphTransformers.Sort()
+		for _, v := range p.config.ParagraphTransformers {
+			p.addParagraphTransformer(v, p.config.Options)
+		}
+		p.config.ASTTransformers.Sort()
+		for _, v := range p.config.ASTTransformers {
+			p.addASTTransformer(v, p.config.Options)
+		}
+		p.escapedSpace = p.config.EscapedSpace
+		p.config = nil
+	})
+	c := &ParseConfig{}
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.Context == nil {
+		c.Context = NewContext()
+	}
+	pc := c.Context
+	root := ast.NewDocument()
+	// Phase 1: block structure.
+	p.parseBlocks(root, reader, pc)
+
+	// Phase 2: inline parsing, applied to each block bottom-up.
+	blockReader := text.NewBlockReader(reader.Source(), nil)
+	p.walkBlock(root, func(node ast.Node) {
+		p.parseBlock(blockReader, node, pc)
+	})
+	for _, at := range p.astTransformers {
+		at.Transform(root, reader, pc)
+	}
+	// root.Dump(reader.Source(), 0)
+	return root
+}
+
+func (p *parser) transformParagraph(node *ast.Paragraph, reader text.Reader, pc Context) bool {
+ for _, pt := range p.paragraphTransformers {
+ pt.Transform(node, reader, pc)
+ if node.Parent() == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// closeBlocks closes opened blocks[from..to] (from >= to) in child-to-
+// parent order and removes them from the opened-block stack. Paragraphs
+// are run through the paragraph transformers first; a node detached by a
+// transformer is not closed again.
+func (p *parser) closeBlocks(from, to int, reader text.Reader, pc Context) {
+	blocks := pc.OpenedBlocks()
+	for i := from; i >= to; i-- {
+		node := blocks[i].Node
+		paragraph, ok := node.(*ast.Paragraph)
+		if ok && node.Parent() != nil {
+			p.transformParagraph(paragraph, reader, pc)
+		}
+		if node.Parent() != nil { // closes only if node has not been transformed
+			blocks[i].Parser.Close(blocks[i].Node, reader, pc)
+		}
+	}
+	// Splice the closed range out of the stack.
+	if from == len(blocks)-1 {
+		blocks = blocks[0:to]
+	} else {
+		blocks = append(blocks[0:to], blocks[from+1:]...)
+	}
+	pc.SetOpenedBlocks(blocks)
+}
+
+type blockOpenResult int
+
+const (
+ paragraphContinuation blockOpenResult = iota + 1
+ newBlocksOpened
+ noBlocksOpened
+)
+
+// openBlocks tries to open new blocks on the current line under parent,
+// descending into children while parsers report HasChildren. It returns
+// newBlocksOpened, paragraphContinuation (the line lazily continues the
+// last open paragraph), or noBlocksOpened.
+func (p *parser) openBlocks(parent ast.Node, blankLine bool, reader text.Reader, pc Context) blockOpenResult {
+	result := blockOpenResult(noBlocksOpened)
+	continuable := false
+	lastBlock := pc.LastOpenedBlock()
+	if lastBlock.Node != nil {
+		continuable = ast.IsParagraph(lastBlock.Node)
+	}
+retry:
+	var bps []BlockParser
+	line, _ := reader.PeekLine()
+	w, pos := util.IndentWidth(line, reader.LineOffset())
+	if w >= len(line) {
+		// Blank/whitespace-only line: no meaningful offset or indent.
+		pc.SetBlockOffset(-1)
+		pc.SetBlockIndent(-1)
+	} else {
+		pc.SetBlockOffset(pos)
+		pc.SetBlockIndent(w)
+	}
+	if line == nil || line[0] == '\n' {
+		goto continuable
+	}
+	// Select parsers triggered by the first non-space byte, falling back
+	// to trigger-less (free) parsers.
+	bps = p.freeBlockParsers
+	if pos < len(line) {
+		bps = p.blockParsers[line[pos]]
+		if bps == nil {
+			bps = p.freeBlockParsers
+		}
+	}
+	if bps == nil {
+		goto continuable
+	}
+
+	for _, bp := range bps {
+		if continuable && result == noBlocksOpened && !bp.CanInterruptParagraph() {
+			continue
+		}
+		if w > 3 && !bp.CanAcceptIndentedLine() {
+			continue
+		}
+		lastBlock = pc.LastOpenedBlock()
+		last := lastBlock.Node
+		node, state := bp.Open(parent, reader, pc)
+		if node != nil {
+			// Parser requires last node to be a paragraph.
+			// With table extension:
+			//
+			//     0
+			//     -:
+			//     -
+			//
+			// '-' on 3rd line seems a Setext heading because 1st and 2nd lines
+			// are being paragraph when the Settext heading parser tries to parse the 3rd
+			// line.
+			// But 1st line and 2nd line are a table. Thus this paragraph will be transformed
+			// by a paragraph transformer. So this text should be converted to a table and
+			// an empty list.
+			if state&RequireParagraph != 0 {
+				if last == parent.LastChild() {
+					// Opened paragraph may be transformed by ParagraphTransformers in
+					// closeBlocks().
+					lastBlock.Parser.Close(last, reader, pc)
+					blocks := pc.OpenedBlocks()
+					pc.SetOpenedBlocks(blocks[0 : len(blocks)-1])
+					if p.transformParagraph(last.(*ast.Paragraph), reader, pc) {
+						// Paragraph has been transformed.
+						// So this parser is considered as failing.
+						continuable = false
+						goto retry
+					}
+				}
+			}
+			node.SetBlankPreviousLines(blankLine)
+			if last != nil && last.Parent() == nil {
+				// The previous last block was detached (transformed);
+				// pop and close it.
+				lastPos := len(pc.OpenedBlocks()) - 1
+				p.closeBlocks(lastPos, lastPos, reader, pc)
+			}
+			parent.AppendChild(parent, node)
+			result = newBlocksOpened
+			be := Block{node, bp}
+			pc.SetOpenedBlocks(append(pc.OpenedBlocks(), be))
+			if state&HasChildren != 0 {
+				parent = node
+				goto retry // try child block
+			}
+			break // no children, can not open more blocks on this line
+		}
+	}
+
+continuable:
+	if result == noBlocksOpened && continuable {
+		state := lastBlock.Parser.Continue(lastBlock.Node, reader, pc)
+		if state&Continue != 0 {
+			result = paragraphContinuation
+		}
+	}
+	return result
+}
+
+// lineStat records, per (line number, block nesting level), whether that
+// line was blank.
+type lineStat struct {
+	lineNum int
+	level int
+	isBlank bool
+}
+
+// isBlankLine reports whether the line lineNum at the given nesting level
+// should be treated as blank, scanning stats from newest to oldest and
+// stopping at the first entry for an earlier line.
+func isBlankLine(lineNum, level int, stats []lineStat) bool {
+	ret := true
+	for i := len(stats) - 1 - level; i >= 0; i-- {
+		ret = false
+		s := stats[i]
+		if s.lineNum == lineNum {
+			if s.level < level && s.isBlank {
+				// A blank at a shallower level makes deeper levels blank too.
+				return true
+			} else if s.level == level {
+				return s.isBlank
+			}
+		}
+		if s.lineNum < lineNum {
+			return ret
+		}
+	}
+	return ret
+}
+
+// parseBlocks builds the block-level structure of the document under
+// parent: the outer loop handles groups of blocks separated by blank
+// lines; the inner loops continue, open, and close blocks line by line.
+func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
+	pc.SetOpenedBlocks([]Block{})
+	blankLines := make([]lineStat, 0, 128)
+	isBlank := false
+	for { // process blocks separated by blank lines
+		_, lines, ok := reader.SkipBlankLines()
+		if !ok {
+			return
+		}
+		lineNum, _ := reader.Position()
+		if lines != 0 {
+			// Blank lines were skipped: reset the per-level stats and mark
+			// the previous line blank for every currently opened block.
+			blankLines = blankLines[0:0]
+			l := len(pc.OpenedBlocks())
+			for i := 0; i < l; i++ {
+				blankLines = append(blankLines, lineStat{lineNum - 1, i, lines != 0})
+			}
+		}
+		isBlank = isBlankLine(lineNum-1, 0, blankLines)
+		// first, we try to open blocks
+		if p.openBlocks(parent, isBlank, reader, pc) != newBlocksOpened {
+			return
+		}
+		reader.AdvanceLine()
+		for { // process opened blocks line by line
+			openedBlocks := pc.OpenedBlocks()
+			l := len(openedBlocks)
+			if l == 0 {
+				break
+			}
+			lastIndex := l - 1
+			for i := 0; i < l; i++ {
+				be := openedBlocks[i]
+				line, _ := reader.PeekLine()
+				if line == nil {
+					// End of input: close everything and stop.
+					p.closeBlocks(lastIndex, 0, reader, pc)
+					reader.AdvanceLine()
+					return
+				}
+				lineNum, _ := reader.Position()
+				blankLines = append(blankLines, lineStat{lineNum, i, util.IsBlank(line)})
+				// If node is a paragraph, p.openBlocks determines whether it is continuable.
+				// So we do not process paragraphs here.
+				if !ast.IsParagraph(be.Node) {
+					state := be.Parser.Continue(be.Node, reader, pc)
+					if state&Continue != 0 {
+						// When current node is a container block and has no children,
+						// we try to open new child nodes
+						if state&HasChildren != 0 && i == lastIndex {
+							isBlank = isBlankLine(lineNum-1, i, blankLines)
+							p.openBlocks(be.Node, isBlank, reader, pc)
+							break
+						}
+						continue
+					}
+				}
+				// current node may be closed or lazy continuation
+				isBlank = isBlankLine(lineNum-1, i, blankLines)
+				thisParent := parent
+				if i != 0 {
+					thisParent = openedBlocks[i-1].Node
+				}
+				lastNode := openedBlocks[lastIndex].Node
+				result := p.openBlocks(thisParent, isBlank, reader, pc)
+				if result != paragraphContinuation {
+					// lastNode is a paragraph and was transformed by the paragraph
+					// transformers.
+					if openedBlocks[lastIndex].Node != lastNode {
+						lastIndex--
+					}
+					p.closeBlocks(lastIndex, i, reader, pc)
+				}
+				break
+			}
+
+			reader.AdvanceLine()
+		}
+	}
+}
+
+func (p *parser) walkBlock(block ast.Node, cb func(node ast.Node)) {
+ for c := block.FirstChild(); c != nil; c = c.NextSibling() {
+ p.walkBlock(c, cb)
+ }
+ cb(block)
+}
+
// Line-break classification flags recorded on Text nodes by parseBlock.
const (
	lineBreakHard    uint8 = 1 << iota // hard break: trailing backslash or two trailing spaces before newline
	lineBreakSoft                      // soft break: plain newline that is not part of a hard break
	lineBreakVisible                   // break marker is visible in the source (backslash form)
)
+
// parseBlock parses the inline content of a leaf block: it walks the
// block's lines, classifies line endings as hard/soft breaks, dispatches
// registered inline parsers on their trigger characters (honoring
// backslash escaping), and emits plain Text segments for the remaining
// runs. Delimiters (emphasis etc.) are resolved at the end, then any
// CloseBlocker parsers are notified.
func (p *parser) parseBlock(block text.BlockReader, parent ast.Node, pc Context) {
	if parent.IsRaw() {
		// Raw nodes (e.g. code blocks) get no inline parsing.
		return
	}
	escaped := false // previous byte was an unescaped backslash
	source := block.Source()
	block.Reset(parent.Lines())
	for {
	retry:
		line, _ := block.PeekLine()
		if line == nil {
			break
		}
		lineLength := len(line)
		var lineBreakFlags uint8 = 0
		hasNewLine := line[lineLength-1] == '\n'
		if ((lineLength >= 3 && line[lineLength-2] == '\\' && line[lineLength-3] != '\\') || (lineLength == 2 && line[lineLength-2] == '\\')) && hasNewLine { // ends with \\n
			lineLength -= 2
			lineBreakFlags |= lineBreakHard | lineBreakVisible
		} else if ((lineLength >= 4 && line[lineLength-3] == '\\' && line[lineLength-2] == '\r' && line[lineLength-4] != '\\') || (lineLength == 3 && line[lineLength-3] == '\\' && line[lineLength-2] == '\r')) && hasNewLine { // ends with \\r\n
			lineLength -= 3
			lineBreakFlags |= lineBreakHard | lineBreakVisible
		} else if lineLength >= 3 && line[lineLength-3] == ' ' && line[lineLength-2] == ' ' && hasNewLine { // ends with [space][space]\n
			lineLength -= 3
			lineBreakFlags |= lineBreakHard
		} else if lineLength >= 4 && line[lineLength-4] == ' ' && line[lineLength-3] == ' ' && line[lineLength-2] == '\r' && hasNewLine { // ends with [space][space]\r\n
			lineLength -= 4
			lineBreakFlags |= lineBreakHard
		} else if hasNewLine {
			// If the line ends with a newline character, but it is not a hardlineBreak, then it is a softLinebreak
			// If the line ends with a hardlineBreak, then it cannot end with a softLinebreak
			// See https://spec.commonmark.org/0.30/#soft-line-breaks
			lineBreakFlags |= lineBreakSoft
		}

		l, startPosition := block.Position()
		n := 0 // count of pending plain-text bytes not yet advanced past
		for i := 0; i < lineLength; i++ {
			c := line[i]
			if c == '\n' {
				break
			}
			isSpace := util.IsSpace(c) && c != '\r' && c != '\n'
			isPunct := util.IsPunct(c)
			// Inline parsers can trigger on punctuation, on space, or at
			// the start of the line (treated as the ' ' trigger).
			if (isPunct && !escaped) || isSpace && !(escaped && p.escapedSpace) || i == 0 {
				parserChar := c
				if isSpace || (i == 0 && !isPunct) {
					parserChar = ' '
				}
				ips := p.inlineParsers[parserChar]
				if ips != nil {
					block.Advance(n)
					n = 0
					savedLine, savedPosition := block.Position()
					if i != 0 {
						// Flush the plain text preceding the trigger.
						_, currentPosition := block.Position()
						ast.MergeOrAppendTextSegment(parent, startPosition.Between(currentPosition))
						_, startPosition = block.Position()
					}
					var inlineNode ast.Node
					for _, ip := range ips {
						inlineNode = ip.Parse(parent, block, pc)
						if inlineNode != nil {
							break
						}
						// Parser declined: rewind and try the next one.
						block.SetPosition(savedLine, savedPosition)
					}
					if inlineNode != nil {
						parent.AppendChild(parent, inlineNode)
						// A parser consumed input; rescan from the new position.
						goto retry
					}
				}
			}
			if escaped {
				escaped = false
				n++
				continue
			}

			if c == '\\' {
				escaped = true
				n++
				continue
			}

			escaped = false
			n++
		}
		if n != 0 {
			block.Advance(n)
		}
		currentL, currentPosition := block.Position()
		if l != currentL {
			// An inline parser advanced past this line; nothing to flush.
			continue
		}
		diff := startPosition.Between(currentPosition)
		var text *ast.Text
		if lineBreakFlags&(lineBreakHard|lineBreakVisible) == lineBreakHard|lineBreakVisible {
			// Visible (backslash) hard break: keep trailing bytes intact.
			text = ast.NewTextSegment(diff)
		} else {
			text = ast.NewTextSegment(diff.TrimRightSpace(source))
		}
		text.SetSoftLineBreak(lineBreakFlags&lineBreakSoft != 0)
		text.SetHardLineBreak(lineBreakFlags&lineBreakHard != 0)
		parent.AppendChild(parent, text)
		block.AdvanceLine()
	}

	// Resolve emphasis/strong and other delimiter pairs for this block.
	ProcessDelimiters(nil, pc)
	for _, ip := range p.closeBlockers {
		ip.CloseBlock(parent, block, pc)
	}
}
diff --git a/vendor/github.com/yuin/goldmark/parser/raw_html.go b/vendor/github.com/yuin/goldmark/parser/raw_html.go
new file mode 100644
index 000000000..55b9a9967
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/raw_html.go
@@ -0,0 +1,163 @@
+package parser
+
+import (
+ "bytes"
+ "regexp"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
// rawHTMLParser parses inline raw HTML as defined by the CommonMark
// spec (https://spec.commonmark.org/0.30/#raw-html).
type rawHTMLParser struct {
}

// defaultRawHTMLParser is shared by all parsers; the type is stateless.
var defaultRawHTMLParser = &rawHTMLParser{}

// NewRawHTMLParser return a new InlineParser that can parse
// inline htmls
func NewRawHTMLParser() InlineParser {
	return defaultRawHTMLParser
}

// Trigger reports the trigger bytes for this parser: raw HTML always
// starts with '<'.
func (s *rawHTMLParser) Trigger() []byte {
	return []byte{'<'}
}
+
+func (s *rawHTMLParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ line, _ := block.PeekLine()
+ if len(line) > 1 && util.IsAlphaNumeric(line[1]) {
+ return s.parseMultiLineRegexp(openTagRegexp, block, pc)
+ }
+ if len(line) > 2 && line[1] == '/' && util.IsAlphaNumeric(line[2]) {
+ return s.parseMultiLineRegexp(closeTagRegexp, block, pc)
+ }
+ if bytes.HasPrefix(line, openComment) {
+ return s.parseComment(block, pc)
+ }
+ if bytes.HasPrefix(line, openProcessingInstruction) {
+ return s.parseUntil(block, closeProcessingInstruction, pc)
+ }
+ if len(line) > 2 && line[1] == '!' && line[2] >= 'A' && line[2] <= 'Z' {
+ return s.parseUntil(block, closeDecl, pc)
+ }
+ if bytes.HasPrefix(line, openCDATA) {
+ return s.parseUntil(block, closeCDATA, pc)
+ }
+ return nil
+}
+
// tagnamePattern matches an HTML tag name: an ASCII letter followed by
// letters, digits or hyphens.
var tagnamePattern = `([A-Za-z][A-Za-z0-9-]*)`

// attributePattern matches one HTML attribute, with an optional
// single-quoted, double-quoted or unquoted value.
var attributePattern = `(?:[\r\n \t]+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:[\r\n \t]*=[\r\n \t]*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)`

// openTagRegexp matches a complete (possibly self-closing) open tag;
// closeTagRegexp matches a complete closing tag.
var openTagRegexp = regexp.MustCompile("^<" + tagnamePattern + attributePattern + `*[ \t]*/?>`)
var closeTagRegexp = regexp.MustCompile("^</" + tagnamePattern + `\s*>`)

// Byte-slice delimiters of the raw-HTML constructs recognized by Parse.
var openProcessingInstruction = []byte("<?")
var closeProcessingInstruction = []byte("?>")
var openCDATA = []byte("<![CDATA[")
var closeCDATA = []byte("]]>")
var closeDecl = []byte(">")
var emptyComment = []byte("<!---->")
var invalidComment1 = []byte("<!-->")
var invalidComment2 = []byte("<!--->")
var openComment = []byte("<!--")
var closeComment = []byte("-->")
var doubleHyphen = []byte("--")
+
// parseComment parses an HTML comment "<!-- ... -->", possibly spanning
// multiple lines. Comments whose text contains "--", or the invalid
// forms "<!-->" / "<!--->", are rejected; on failure the reader position
// is restored and nil is returned.
func (s *rawHTMLParser) parseComment(block text.Reader, pc Context) ast.Node {
	savedLine, savedSegment := block.Position()
	node := ast.NewRawHTML()
	line, segment := block.PeekLine()
	if bytes.HasPrefix(line, emptyComment) {
		// "<!---->" is a valid empty comment.
		node.Segments.Append(segment.WithStop(segment.Start + len(emptyComment)))
		block.Advance(len(emptyComment))
		return node
	}
	if bytes.HasPrefix(line, invalidComment1) || bytes.HasPrefix(line, invalidComment2) {
		return nil
	}
	// offset skips the "<!--" opener on the first line only.
	offset := len(openComment)
	line = line[offset:]
	for {
		// hindex: position of the first "--" (relative to the full line);
		// for a valid comment it must belong to the "-->" closer.
		hindex := bytes.Index(line, doubleHyphen)
		if hindex > -1 {
			hindex += offset
		}
		// NOTE(review): offset is added before the -1 "not found" check, so
		// on the first line a missing "-->" yields index==3 (> -1); the
		// hindex == index guard is what keeps this from matching — verify
		// against upstream before changing.
		index := bytes.Index(line, closeComment) + offset
		if index > -1 && hindex == index {
			// Reject "--->": the comment text must not end with '-'.
			if index == 0 || len(line) < 2 || line[index-offset-1] != '-' {
				node.Segments.Append(segment.WithStop(segment.Start + index + len(closeComment)))
				block.Advance(index + len(closeComment))
				return node
			}
		}
		if hindex > 0 {
			// A bare "--" inside the comment text: invalid comment.
			break
		}
		node.Segments.Append(segment)
		block.AdvanceLine()
		line, segment = block.PeekLine()
		offset = 0
		if line == nil {
			break
		}
	}
	block.SetPosition(savedLine, savedSegment)
	return nil
}
+
+func (s *rawHTMLParser) parseUntil(block text.Reader, closer []byte, pc Context) ast.Node {
+ savedLine, savedSegment := block.Position()
+ node := ast.NewRawHTML()
+ for {
+ line, segment := block.PeekLine()
+ if line == nil {
+ break
+ }
+ index := bytes.Index(line, closer)
+ if index > -1 {
+ node.Segments.Append(segment.WithStop(segment.Start + index + len(closer)))
+ block.Advance(index + len(closer))
+ return node
+ }
+ node.Segments.Append(segment)
+ block.AdvanceLine()
+ }
+ block.SetPosition(savedLine, savedSegment)
+ return nil
+}
+
// parseMultiLineRegexp matches reg (which may span multiple lines) at the
// current position. On success it rewinds and re-walks the matched lines,
// appending to the node one segment per line, clipped to the match start
// on the first line and the match end on the last, and leaves the reader
// positioned at the end of the match. Returns nil when reg does not match.
func (s *rawHTMLParser) parseMultiLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
	sline, ssegment := block.Position()
	if block.Match(reg) {
		node := ast.NewRawHTML()
		// Match advanced the reader to the end of the match; remember that
		// end, then rewind to collect the covered segments line by line.
		eline, esegment := block.Position()
		block.SetPosition(sline, ssegment)
		for {
			line, segment := block.PeekLine()
			if line == nil {
				break
			}
			l, _ := block.Position()
			start := segment.Start
			if l == sline {
				start = ssegment.Start
			}
			end := segment.Stop
			if l == eline {
				end = esegment.Start
			}

			node.Segments.Append(text.NewSegment(start, end))
			if l == eline {
				block.Advance(end - start)
				break
			} else {
				block.AdvanceLine()
			}
		}
		return node
	}
	return nil
}
diff --git a/vendor/github.com/yuin/goldmark/parser/setext_headings.go b/vendor/github.com/yuin/goldmark/parser/setext_headings.go
new file mode 100644
index 000000000..686efe179
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/setext_headings.go
@@ -0,0 +1,126 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
// temporaryParagraphKey stashes the paragraph preceding a setext heading
// underline between Open and Close.
var temporaryParagraphKey = NewContextKey()

// setextHeadingParser parses Setext headings: a paragraph followed by an
// underline of '=' (level 1) or '-' (level 2) characters.
type setextHeadingParser struct {
	HeadingConfig
}
+
+func matchesSetextHeadingBar(line []byte) (byte, bool) {
+ start := 0
+ end := len(line)
+ space := util.TrimLeftLength(line, []byte{' '})
+ if space > 3 {
+ return 0, false
+ }
+ start += space
+ level1 := util.TrimLeftLength(line[start:end], []byte{'='})
+ c := byte('=')
+ var level2 int
+ if level1 == 0 {
+ level2 = util.TrimLeftLength(line[start:end], []byte{'-'})
+ c = '-'
+ }
+ if util.IsSpace(line[end-1]) {
+ end -= util.TrimRightSpaceLength(line[start:end])
+ }
+ if !((level1 > 0 && start+level1 == end) || (level2 > 0 && start+level2 == end)) {
+ return 0, false
+ }
+ return c, true
+}
+
+// NewSetextHeadingParser return a new BlockParser that can parse Setext headings.
+func NewSetextHeadingParser(opts ...HeadingOption) BlockParser {
+ p := &setextHeadingParser{}
+ for _, o := range opts {
+ o.SetHeadingOption(&p.HeadingConfig)
+ }
+ return p
+}
+
// Trigger reports the bytes that may start a setext heading underline.
func (b *setextHeadingParser) Trigger() []byte {
	return []byte{'-', '='}
}
+
// Open opens a setext heading when the current line is an underline bar
// and the most recently opened block is a paragraph that is a direct
// child of parent. The paragraph is stashed in the context so Close can
// move its lines into the heading.
func (b *setextHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	last := pc.LastOpenedBlock().Node
	if last == nil {
		return nil, NoChildren
	}
	// An underline is only meaningful directly beneath a paragraph.
	paragraph, ok := last.(*ast.Paragraph)
	if !ok || paragraph.Parent() != parent {
		return nil, NoChildren
	}
	line, segment := reader.PeekLine()
	c, ok := matchesSetextHeadingBar(line)
	if !ok {
		return nil, NoChildren
	}
	level := 1 // '=' underline -> level 1
	if c == '-' {
		level = 2 // '-' underline -> level 2
	}
	node := ast.NewHeading(level)
	// Temporarily store the underline segment; Close replaces it with the
	// paragraph's lines.
	node.Lines().Append(segment)
	pc.Set(temporaryParagraphKey, last)
	return node, NoChildren | RequireParagraph
}
+
// Continue always closes: a setext heading underline is a single line.
func (b *setextHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	return Close
}
+
// Close replaces the heading's temporary content (the underline segment
// stored by Open) with the lines of the stashed paragraph and removes
// that paragraph from the tree. If the paragraph ended up empty (e.g.
// its lines were consumed as link reference definitions), the underline
// is demoted back to paragraph text instead. Finally, heading attributes
// and auto heading IDs are processed when enabled.
func (b *setextHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
	heading := node.(*ast.Heading)
	segment := node.Lines().At(0) // the underline segment stored by Open
	heading.Lines().Clear()
	tmp := pc.Get(temporaryParagraphKey).(*ast.Paragraph)
	pc.Set(temporaryParagraphKey, nil)
	if tmp.Lines().Len() == 0 {
		// No heading text remains: keep the bar itself as plain text.
		next := heading.NextSibling()
		segment = segment.TrimLeftSpace(reader.Source())
		if next == nil || !ast.IsParagraph(next) {
			para := ast.NewParagraph()
			para.Lines().Append(segment)
			heading.Parent().InsertAfter(heading.Parent(), heading, para)
		} else {
			next.(ast.Node).Lines().Unshift(segment)
		}
		heading.Parent().RemoveChild(heading.Parent(), heading)
	} else {
		// Normal case: the paragraph's lines become the heading text.
		heading.SetLines(tmp.Lines())
		heading.SetBlankPreviousLines(tmp.HasBlankPreviousLines())
		tp := tmp.Parent()
		if tp != nil {
			tp.RemoveChild(tp, tmp)
		}
	}

	if b.Attribute {
		parseLastLineAttributes(node, reader, pc)
	}

	if b.AutoHeadingID {
		id, ok := node.AttributeString("id")
		if !ok {
			generateAutoHeadingID(heading, reader, pc)
		} else {
			// An explicit id attribute was parsed; register it so auto IDs
			// do not collide with it.
			pc.IDs().Put(id.([]byte))
		}
	}
}
+
// CanInterruptParagraph reports that a setext underline may interrupt a
// paragraph (it in fact requires one).
func (b *setextHeadingParser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine reports that an indented (4+ spaces) line is
// never a setext underline.
func (b *setextHeadingParser) CanAcceptIndentedLine() bool {
	return false
}
diff --git a/vendor/github.com/yuin/goldmark/parser/thematic_break.go b/vendor/github.com/yuin/goldmark/parser/thematic_break.go
new file mode 100644
index 000000000..db20a1e7a
--- /dev/null
+++ b/vendor/github.com/yuin/goldmark/parser/thematic_break.go
@@ -0,0 +1,75 @@
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type thematicBreakPraser struct {
+}
+
+var defaultThematicBreakPraser = &thematicBreakPraser{}
+
+// NewThematicBreakParser returns a new BlockParser that
+// parses thematic breaks.
+func NewThematicBreakParser() BlockParser {
+ return defaultThematicBreakPraser
+}
+
+func isThematicBreak(line []byte, offset int) bool {
+ w, pos := util.IndentWidth(line, offset)
+ if w > 3 {
+ return false
+ }
+ mark := byte(0)
+ count := 0
+ for i := pos; i < len(line); i++ {
+ c := line[i]
+ if util.IsSpace(c) {
+ continue
+ }
+ if mark == 0 {
+ mark = c
+ count = 1
+ if mark == '*' || mark == '-' || mark == '_' {
+ continue
+ }
+ return false
+ }
+ if c != mark {
+ return false
+ }
+ count++
+ }
+ return count > 2
+}
+
+func (b *thematicBreakPraser) Trigger() []byte {
+ return []byte{'-', '*', '_'}
+}
+
+func (b *thematicBreakPraser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ line, segment := reader.PeekLine()
+ if isThematicBreak(line, reader.LineOffset()) {
+ reader.Advance(segment.Len() - 1)
+ return ast.NewThematicBreak(), NoChildren
+ }
+ return nil, NoChildren
+}
+
+func (b *thematicBreakPraser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ return Close
+}
+
+func (b *thematicBreakPraser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *thematicBreakPraser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *thematicBreakPraser) CanAcceptIndentedLine() bool {
+ return false
+}