mirror of
https://codeberg.org/forgejo/forgejo.git
synced 2024-12-01 05:36:19 +01:00
Change markdown rendering from blackfriday to goldmark (#9533)
* Move to goldmark Markdown rendering moved from blackfriday to the goldmark. Multiple subtle changes required to the goldmark extensions to keep current rendering and defaults. Can go further with goldmark linkify and have this work within markdown rendering making the link processor unnecessary. Need to think about how to go about allowing extensions - at present it seems that these would be hard to do without recompilation. * linter fixes Co-authored-by: Lauris BH <lauris@nix.lv>
This commit is contained in:
parent
0c07f1de5b
commit
27757714d0
83 changed files with 13838 additions and 6297 deletions
3
go.mod
3
go.mod
|
@ -79,11 +79,9 @@ require (
|
||||||
github.com/prometheus/procfs v0.0.4 // indirect
|
github.com/prometheus/procfs v0.0.4 // indirect
|
||||||
github.com/quasoft/websspi v1.0.0
|
github.com/quasoft/websspi v1.0.0
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
|
||||||
github.com/russross/blackfriday/v2 v2.0.1
|
|
||||||
github.com/satori/go.uuid v1.2.0
|
github.com/satori/go.uuid v1.2.0
|
||||||
github.com/sergi/go-diff v1.0.0
|
github.com/sergi/go-diff v1.0.0
|
||||||
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
|
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
|
|
||||||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
|
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
|
||||||
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
|
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
|
||||||
github.com/stretchr/testify v1.4.0
|
github.com/stretchr/testify v1.4.0
|
||||||
|
@ -95,6 +93,7 @@ require (
|
||||||
github.com/unknwon/paginater v0.0.0-20151104151617-7748a72e0141
|
github.com/unknwon/paginater v0.0.0-20151104151617-7748a72e0141
|
||||||
github.com/urfave/cli v1.20.0
|
github.com/urfave/cli v1.20.0
|
||||||
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
|
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
|
||||||
|
github.com/yuin/goldmark v1.1.19
|
||||||
go.etcd.io/bbolt v1.3.3 // indirect
|
go.etcd.io/bbolt v1.3.3 // indirect
|
||||||
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876
|
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876
|
||||||
golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9
|
golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9
|
||||||
|
|
6
go.sum
6
go.sum
|
@ -462,16 +462,12 @@ github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qq
|
||||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
|
||||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnPAvcRWakIPpokB9w780/KwrNLnfPA=
|
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnPAvcRWakIPpokB9w780/KwrNLnfPA=
|
||||||
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
|
||||||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
|
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
|
||||||
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
|
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
|
||||||
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
|
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
|
||||||
|
@ -550,6 +546,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 h1:HsIQ6yAjfjQ3IxPGrTusxp6Qxn92gNVq2x5CbvQvx3w=
|
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 h1:HsIQ6yAjfjQ3IxPGrTusxp6Qxn92gNVq2x5CbvQvx3w=
|
||||||
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53/go.mod h1:f6elajwZV+xceiaqgRL090YzLEDGSbqr3poGL3ZgXYo=
|
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53/go.mod h1:f6elajwZV+xceiaqgRL090YzLEDGSbqr3poGL3ZgXYo=
|
||||||
|
github.com/yuin/goldmark v1.1.19 h1:0s2/60x0XsFCXHeFut+F3azDVAAyIMyUfJRbRexiTYs=
|
||||||
|
github.com/yuin/goldmark v1.1.19/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
||||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
|
|
507
modules/markup/common/footnote.go
Normal file
507
modules/markup/common/footnote.go
Normal file
|
@ -0,0 +1,507 @@
|
||||||
|
// Copyright 2019 Yusuke Inuzuka
|
||||||
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Most of what follows is a subtly changed version of github.com/yuin/goldmark/extension/footnote.go
|
||||||
|
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CleanValue will clean a value to make it safe to be an id
// This function is quite different from the original goldmark function
// and more closely matches the output from the shurcooL sanitizer
// In particular Unicode letters and numbers are a lot more than a-zA-Z0-9...
//
// Non-alphanumeric runs collapse to a single '-' between words; leading and
// trailing separators are dropped, and letters are lowercased.
func CleanValue(value []byte) []byte {
	cleaned := make([]rune, 0, len(value))
	pendingDash := false
	for _, r := range bytes.Runes(bytes.TrimSpace(value)) {
		if !unicode.IsLetter(r) && !unicode.IsNumber(r) {
			// Remember that a separator was seen; emit at most one dash
			// before the next alphanumeric rune.
			pendingDash = true
			continue
		}
		if pendingDash && len(cleaned) > 0 {
			cleaned = append(cleaned, '-')
		}
		pendingDash = false
		cleaned = append(cleaned, unicode.ToLower(r))
	}
	return []byte(string(cleaned))
}
|
||||||
|
|
||||||
|
// Most of what follows is a subtly changed version of github.com/yuin/goldmark/extension/footnote.go
|
||||||
|
|
||||||
|
// A FootnoteLink struct represents a link to a footnote of Markdown
// (PHP Markdown Extra) text.
type FootnoteLink struct {
	ast.BaseInline
	Index int    // 1-based footnote number assigned on first reference
	Name  []byte // sanitized id used to build the fnref:/fn: anchors
}

// Dump implements Node.Dump.
func (n *FootnoteLink) Dump(source []byte, level int) {
	m := map[string]string{}
	m["Index"] = fmt.Sprintf("%v", n.Index)
	m["Name"] = fmt.Sprintf("%v", n.Name)
	ast.DumpHelper(n, source, level, m, nil)
}

// KindFootnoteLink is a NodeKind of the FootnoteLink node.
var KindFootnoteLink = ast.NewNodeKind("GiteaFootnoteLink")

// Kind implements Node.Kind.
func (n *FootnoteLink) Kind() ast.NodeKind {
	return KindFootnoteLink
}

// NewFootnoteLink returns a new FootnoteLink node.
func NewFootnoteLink(index int, name []byte) *FootnoteLink {
	return &FootnoteLink{
		Index: index,
		Name:  name,
	}
}
|
||||||
|
|
||||||
|
// A FootnoteBackLink struct represents a link to a footnote of Markdown
// (PHP Markdown Extra) text.
type FootnoteBackLink struct {
	ast.BaseInline
	Index int    // footnote number this back link belongs to
	Name  []byte // sanitized id used to build the fnref: anchor target
}

// Dump implements Node.Dump.
func (n *FootnoteBackLink) Dump(source []byte, level int) {
	m := map[string]string{}
	m["Index"] = fmt.Sprintf("%v", n.Index)
	m["Name"] = fmt.Sprintf("%v", n.Name)
	ast.DumpHelper(n, source, level, m, nil)
}

// KindFootnoteBackLink is a NodeKind of the FootnoteBackLink node.
var KindFootnoteBackLink = ast.NewNodeKind("GiteaFootnoteBackLink")

// Kind implements Node.Kind.
func (n *FootnoteBackLink) Kind() ast.NodeKind {
	return KindFootnoteBackLink
}

// NewFootnoteBackLink returns a new FootnoteBackLink node.
func NewFootnoteBackLink(index int, name []byte) *FootnoteBackLink {
	return &FootnoteBackLink{
		Index: index,
		Name:  name,
	}
}
|
||||||
|
|
||||||
|
// A Footnote struct represents a footnote of Markdown
// (PHP Markdown Extra) text.
type Footnote struct {
	ast.BaseBlock
	Ref   []byte // raw footnote label as written in the source
	Index int    // 1-based number, or -1 until the footnote is referenced
	Name  []byte // sanitized id; initially equal to Ref, replaced on first use
}

// Dump implements Node.Dump.
func (n *Footnote) Dump(source []byte, level int) {
	m := map[string]string{}
	m["Index"] = fmt.Sprintf("%v", n.Index)
	m["Ref"] = fmt.Sprintf("%s", n.Ref)
	m["Name"] = fmt.Sprintf("%v", n.Name)
	ast.DumpHelper(n, source, level, m, nil)
}

// KindFootnote is a NodeKind of the Footnote node.
var KindFootnote = ast.NewNodeKind("GiteaFootnote")

// Kind implements Node.Kind.
func (n *Footnote) Kind() ast.NodeKind {
	return KindFootnote
}

// NewFootnote returns a new Footnote node.
// Index is -1 until the footnote is first referenced in the document.
func NewFootnote(ref []byte) *Footnote {
	return &Footnote{
		Ref:   ref,
		Index: -1,
		Name:  ref,
	}
}
|
||||||
|
|
||||||
|
// A FootnoteList struct represents footnotes of Markdown
// (PHP Markdown Extra) text.
type FootnoteList struct {
	ast.BaseBlock
	Count int // number of footnotes that were actually referenced
}

// Dump implements Node.Dump.
func (n *FootnoteList) Dump(source []byte, level int) {
	m := map[string]string{}
	m["Count"] = fmt.Sprintf("%v", n.Count)
	ast.DumpHelper(n, source, level, m, nil)
}

// KindFootnoteList is a NodeKind of the FootnoteList node.
var KindFootnoteList = ast.NewNodeKind("GiteaFootnoteList")

// Kind implements Node.Kind.
func (n *FootnoteList) Kind() ast.NodeKind {
	return KindFootnoteList
}

// NewFootnoteList returns a new FootnoteList node.
func NewFootnoteList() *FootnoteList {
	return &FootnoteList{
		Count: 0,
	}
}
|
||||||
|
|
||||||
|
// footnoteListKey is the parser.Context key under which the per-document
// FootnoteList is stashed while parsing.
var footnoteListKey = parser.NewContextKey()

type footnoteBlockParser struct {
}

var defaultFootnoteBlockParser = &footnoteBlockParser{}

// NewFootnoteBlockParser returns a new parser.BlockParser that can parse
// footnotes of the Markdown(PHP Markdown Extra) text.
func NewFootnoteBlockParser() parser.BlockParser {
	return defaultFootnoteBlockParser
}

// Trigger reports the bytes that cause this block parser to be invoked:
// footnote definitions always start with '['.
func (b *footnoteBlockParser) Trigger() []byte {
	return []byte{'['}
}
|
||||||
|
|
||||||
|
// Open tries to start a footnote definition block of the form "[^label]: ...".
// It returns the new Footnote node, or (nil, NoChildren) if the current line
// is not a footnote definition.
func (b *footnoteBlockParser) Open(parent ast.Node, reader text.Reader, pc parser.Context) (ast.Node, parser.State) {
	line, segment := reader.PeekLine()
	pos := pc.BlockOffset()
	// Must start with '[' at the block offset.
	if pos < 0 || line[pos] != '[' {
		return nil, parser.NoChildren
	}
	pos++
	// Followed immediately by '^'.
	if pos > len(line)-1 || line[pos] != '^' {
		return nil, parser.NoChildren
	}
	open := pos + 1
	closes := 0
	// Find the matching ']' for the label.
	closure := util.FindClosure(line[pos+1:], '[', ']', false, false)
	closes = pos + 1 + closure
	next := closes + 1
	if closure > -1 {
		// The ']' must be directly followed by ':'.
		if next >= len(line) || line[next] != ':' {
			return nil, parser.NoChildren
		}
	} else {
		return nil, parser.NoChildren
	}
	padding := segment.Padding
	// Extract the label text between "[^" and "]", adjusting for padding.
	label := reader.Value(text.NewSegment(segment.Start+open-padding, segment.Start+closes-padding))
	if util.IsBlank(label) {
		return nil, parser.NoChildren
	}
	item := NewFootnote(label)

	pos = next + 1 - padding
	if pos >= len(line) {
		// Definition body starts on the following line(s).
		reader.Advance(pos)
		return item, parser.NoChildren
	}
	// Definition body starts on this line, after the ':'.
	reader.AdvanceAndSetPadding(pos, padding)
	return item, parser.HasChildren
}
|
||||||
|
|
||||||
|
// Continue keeps the footnote definition open while subsequent lines are
// blank or indented by at least 4 columns; any other line closes the block.
func (b *footnoteBlockParser) Continue(node ast.Node, reader text.Reader, pc parser.Context) parser.State {
	line, _ := reader.PeekLine()
	if util.IsBlank(line) {
		return parser.Continue | parser.HasChildren
	}
	childpos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
	if childpos < 0 {
		return parser.Close
	}
	// Strip the indent so child blocks parse at the correct column.
	reader.AdvanceAndSetPadding(childpos, padding)
	return parser.Continue | parser.HasChildren
}
|
||||||
|
|
||||||
|
func (b *footnoteBlockParser) Close(node ast.Node, reader text.Reader, pc parser.Context) {
|
||||||
|
var list *FootnoteList
|
||||||
|
if tlist := pc.Get(footnoteListKey); tlist != nil {
|
||||||
|
list = tlist.(*FootnoteList)
|
||||||
|
} else {
|
||||||
|
list = NewFootnoteList()
|
||||||
|
pc.Set(footnoteListKey, list)
|
||||||
|
node.Parent().InsertBefore(node.Parent(), node, list)
|
||||||
|
}
|
||||||
|
node.Parent().RemoveChild(node.Parent(), node)
|
||||||
|
list.AppendChild(list, node)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *footnoteBlockParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *footnoteBlockParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type footnoteParser struct {
}

var defaultFootnoteParser = &footnoteParser{}

// NewFootnoteParser returns a new parser.InlineParser that can parse
// footnote links of the Markdown(PHP Markdown Extra) text.
func NewFootnoteParser() parser.InlineParser {
	return defaultFootnoteParser
}

// Trigger reports the bytes that invoke this inline parser.
func (s *footnoteParser) Trigger() []byte {
	// footnote syntax probably conflict with the image syntax.
	// So we need trigger this parser with '!'.
	return []byte{'!', '['}
}
|
||||||
|
|
||||||
|
// Parse recognizes an inline footnote reference "[^label]" (optionally
// preceded by '!'), resolves it against the definitions collected so far,
// assigns the definition its number and sanitized id on first use, and
// returns a FootnoteLink node — or nil if this is not a valid reference.
func (s *footnoteParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
	line, segment := block.PeekLine()
	pos := 1
	// Skip a leading '!' (the trigger may fire on the image syntax).
	if len(line) > 0 && line[0] == '!' {
		pos++
	}
	// Require the '^' marker right after the '['.
	if pos >= len(line) || line[pos] != '^' {
		return nil
	}
	pos++
	if pos >= len(line) {
		return nil
	}
	open := pos
	closure := util.FindClosure(line[pos:], '[', ']', false, false)
	if closure < 0 {
		return nil
	}
	closes := pos + closure
	// The label between "[^" and "]".
	value := block.Value(text.NewSegment(segment.Start+open, segment.Start+closes))
	block.Advance(closes + 1)

	var list *FootnoteList
	if tlist := pc.Get(footnoteListKey); tlist != nil {
		list = tlist.(*FootnoteList)
	}
	// No definitions seen yet: the reference cannot resolve.
	if list == nil {
		return nil
	}
	index := 0
	name := []byte{}
	for def := list.FirstChild(); def != nil; def = def.NextSibling() {
		d := def.(*Footnote)
		if bytes.Equal(d.Ref, value) {
			if d.Index < 0 {
				// First reference to this definition: number it and give it
				// a unique sanitized id (falling back to the number if the
				// label sanitizes to nothing).
				list.Count++
				d.Index = list.Count
				val := CleanValue(d.Name)
				if len(val) == 0 {
					val = []byte(strconv.Itoa(d.Index))
				}
				d.Name = pc.IDs().Generate(val, KindFootnote)
			}
			index = d.Index
			name = d.Name
			break
		}
	}
	// No matching definition found.
	if index == 0 {
		return nil
	}

	return NewFootnoteLink(index, name)
}
|
||||||
|
|
||||||
|
type footnoteASTTransformer struct {
}

var defaultFootnoteASTTransformer = &footnoteASTTransformer{}

// NewFootnoteASTTransformer returns a new parser.ASTTransformer that
// insert a footnote list to the last of the document.
func NewFootnoteASTTransformer() parser.ASTTransformer {
	return defaultFootnoteASTTransformer
}
|
||||||
|
|
||||||
|
// Transform finalizes the footnote list after parsing: it drops definitions
// that were never referenced, appends a back link to each referenced
// definition, sorts definitions by reference order, and moves the list to
// the end of the document (or removes it entirely if nothing was referenced).
func (a *footnoteASTTransformer) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
	var list *FootnoteList
	if tlist := pc.Get(footnoteListKey); tlist != nil {
		list = tlist.(*FootnoteList)
	} else {
		return
	}
	// Clear the context entry so the list cannot leak into another parse.
	pc.Set(footnoteListKey, nil)
	for footnote := list.FirstChild(); footnote != nil; {
		var container ast.Node = footnote
		next := footnote.NextSibling()
		// Attach the back link inside the trailing paragraph when present.
		if fc := container.LastChild(); fc != nil && ast.IsParagraph(fc) {
			container = fc
		}
		footnoteNode := footnote.(*Footnote)
		index := footnoteNode.Index
		name := footnoteNode.Name
		if index < 0 {
			// Never referenced: drop the definition.
			list.RemoveChild(list, footnote)
		} else {
			container.AppendChild(container, NewFootnoteBackLink(index, name))
		}
		footnote = next
	}
	// Order definitions by the order in which they were first referenced.
	list.SortChildren(func(n1, n2 ast.Node) int {
		if n1.(*Footnote).Index < n2.(*Footnote).Index {
			return -1
		}
		return 1
	})
	if list.Count <= 0 {
		list.Parent().RemoveChild(list.Parent(), list)
		return
	}

	node.AppendChild(node, list)
}
|
||||||
|
|
||||||
|
// FootnoteHTMLRenderer is a renderer.NodeRenderer implementation that
// renders FootnoteLink nodes.
type FootnoteHTMLRenderer struct {
	html.Config
}

// NewFootnoteHTMLRenderer returns a new FootnoteHTMLRenderer.
func NewFootnoteHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
	r := &FootnoteHTMLRenderer{
		Config: html.NewConfig(),
	}
	for _, opt := range opts {
		opt.SetHTMLOption(&r.Config)
	}
	return r
}

// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
func (r *FootnoteHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
	reg.Register(KindFootnoteLink, r.renderFootnoteLink)
	reg.Register(KindFootnoteBackLink, r.renderFootnoteBackLink)
	reg.Register(KindFootnote, r.renderFootnote)
	reg.Register(KindFootnoteList, r.renderFootnoteList)
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
n := node.(*FootnoteLink)
|
||||||
|
n.Dump(source, 0)
|
||||||
|
is := strconv.Itoa(n.Index)
|
||||||
|
_, _ = w.WriteString(`<sup id="fnref:`)
|
||||||
|
_, _ = w.Write(n.Name)
|
||||||
|
_, _ = w.WriteString(`"><a href="#fn:`)
|
||||||
|
_, _ = w.Write(n.Name)
|
||||||
|
_, _ = w.WriteString(`" class="footnote-ref" role="doc-noteref">`)
|
||||||
|
_, _ = w.WriteString(is)
|
||||||
|
_, _ = w.WriteString(`</a></sup>`)
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnoteBackLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
n := node.(*FootnoteBackLink)
|
||||||
|
fmt.Fprintf(os.Stdout, "source:\n%s\n", string(n.Text(source)))
|
||||||
|
_, _ = w.WriteString(` <a href="#fnref:`)
|
||||||
|
_, _ = w.Write(n.Name)
|
||||||
|
_, _ = w.WriteString(`" class="footnote-backref" role="doc-backlink">`)
|
||||||
|
_, _ = w.WriteString("↩︎")
|
||||||
|
_, _ = w.WriteString(`</a>`)
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*Footnote)
|
||||||
|
if entering {
|
||||||
|
fmt.Fprintf(os.Stdout, "source:\n%s\n", string(n.Text(source)))
|
||||||
|
_, _ = w.WriteString(`<li id="fn:`)
|
||||||
|
_, _ = w.Write(n.Name)
|
||||||
|
_, _ = w.WriteString(`" role="doc-endnote"`)
|
||||||
|
if node.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, node, html.ListItemAttributeFilter)
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</li>\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnoteList(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
tag := "div"
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<")
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
_, _ = w.WriteString(` class="footnotes" role="doc-endnotes"`)
|
||||||
|
if node.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, node, html.GlobalAttributeFilter)
|
||||||
|
}
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
if r.Config.XHTML {
|
||||||
|
_, _ = w.WriteString("\n<hr />\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("\n<hr>\n")
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString("<ol>\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</ol>\n")
|
||||||
|
_, _ = w.WriteString("</")
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type footnoteExtension struct{}

// FootnoteExtension represents the Gitea Footnote
var FootnoteExtension = &footnoteExtension{}

// Extend extends the markdown converter with the Gitea Footnote parser
func (e *footnoteExtension) Extend(m goldmark.Markdown) {
	m.Parser().AddOptions(
		parser.WithBlockParsers(
			util.Prioritized(NewFootnoteBlockParser(), 999),
		),
		parser.WithInlineParsers(
			util.Prioritized(NewFootnoteParser(), 101),
		),
		parser.WithASTTransformers(
			util.Prioritized(NewFootnoteASTTransformer(), 999),
		),
	)
	m.Renderer().AddOptions(renderer.WithNodeRenderers(
		util.Prioritized(NewFootnoteHTMLRenderer(), 500),
	))
}
|
19
modules/markup/common/html.go
Normal file
19
modules/markup/common/html.go
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"mvdan.cc/xurls/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// NOTE: All below regex matching do not perform any extra validation.
	// Thus a link is produced even if the linked entity does not exist.
	// While fast, this is also incorrect and leads to false positives.
	// TODO: fix invalid linking issue

	// LinkRegex is a regexp matching a valid link
	LinkRegex, _ = xurls.StrictMatchingScheme("https?://")
)
|
156
modules/markup/common/linkify.go
Normal file
156
modules/markup/common/linkify.go
Normal file
|
@ -0,0 +1,156 @@
|
||||||
|
// Copyright 2019 Yusuke Inuzuka
|
||||||
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Most of this file is a subtly changed version of github.com/yuin/goldmark/extension/linkify.go
|
||||||
|
|
||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// wwwURLRegxp matches schemeless "www." links; an implicit http protocol is
// attached when this pattern fires.
var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}((?:/|[#?])[-a-zA-Z0-9@:%_\+.~#!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)

type linkifyParser struct {
}

var defaultLinkifyParser = &linkifyParser{}

// NewLinkifyParser return a new InlineParser can parse
// text that seems like a URL.
func NewLinkifyParser() parser.InlineParser {
	return defaultLinkifyParser
}

// Trigger reports the bytes that invoke this inline parser.
func (s *linkifyParser) Trigger() []byte {
	// ' ' indicates any white spaces and a line head
	return []byte{' ', '*', '_', '~', '('}
}

// Byte prefixes recognized as explicit-protocol or www links.
var protoHTTP = []byte("http:")
var protoHTTPS = []byte("https:")
var protoFTP = []byte("ftp:")
var domainWWW = []byte("www.")
|
||||||
|
|
||||||
|
func (s *linkifyParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
|
||||||
|
if pc.IsInLinkLabel() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
consumes := 0
|
||||||
|
start := segment.Start
|
||||||
|
c := line[0]
|
||||||
|
// advance if current position is not a line head.
|
||||||
|
if c == ' ' || c == '*' || c == '_' || c == '~' || c == '(' {
|
||||||
|
consumes++
|
||||||
|
start++
|
||||||
|
line = line[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
var m []int
|
||||||
|
var protocol []byte
|
||||||
|
var typ ast.AutoLinkType = ast.AutoLinkURL
|
||||||
|
if bytes.HasPrefix(line, protoHTTP) || bytes.HasPrefix(line, protoHTTPS) || bytes.HasPrefix(line, protoFTP) {
|
||||||
|
m = LinkRegex.FindSubmatchIndex(line)
|
||||||
|
}
|
||||||
|
if m == nil && bytes.HasPrefix(line, domainWWW) {
|
||||||
|
m = wwwURLRegxp.FindSubmatchIndex(line)
|
||||||
|
protocol = []byte("http")
|
||||||
|
}
|
||||||
|
if m != nil {
|
||||||
|
lastChar := line[m[1]-1]
|
||||||
|
if lastChar == '.' {
|
||||||
|
m[1]--
|
||||||
|
} else if lastChar == ')' {
|
||||||
|
closing := 0
|
||||||
|
for i := m[1] - 1; i >= m[0]; i-- {
|
||||||
|
if line[i] == ')' {
|
||||||
|
closing++
|
||||||
|
} else if line[i] == '(' {
|
||||||
|
closing--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if closing > 0 {
|
||||||
|
m[1] -= closing
|
||||||
|
}
|
||||||
|
} else if lastChar == ';' {
|
||||||
|
i := m[1] - 2
|
||||||
|
for ; i >= m[0]; i-- {
|
||||||
|
if util.IsAlphaNumeric(line[i]) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if i != m[1]-2 {
|
||||||
|
if line[i] == '&' {
|
||||||
|
m[1] -= m[1] - i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m == nil {
|
||||||
|
if len(line) > 0 && util.IsPunct(line[0]) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
typ = ast.AutoLinkEmail
|
||||||
|
stop := util.FindEmailIndex(line)
|
||||||
|
if stop < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
at := bytes.IndexByte(line, '@')
|
||||||
|
m = []int{0, stop, at, stop - 1}
|
||||||
|
if m == nil || bytes.IndexByte(line[m[2]:m[3]], '.') < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
lastChar := line[m[1]-1]
|
||||||
|
if lastChar == '.' {
|
||||||
|
m[1]--
|
||||||
|
}
|
||||||
|
if m[1] < len(line) {
|
||||||
|
nextChar := line[m[1]]
|
||||||
|
if nextChar == '-' || nextChar == '_' {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if consumes != 0 {
|
||||||
|
s := segment.WithStop(segment.Start + 1)
|
||||||
|
ast.MergeOrAppendTextSegment(parent, s)
|
||||||
|
}
|
||||||
|
consumes += m[1]
|
||||||
|
block.Advance(consumes)
|
||||||
|
n := ast.NewTextSegment(text.NewSegment(start, start+m[1]))
|
||||||
|
link := ast.NewAutoLink(typ, n)
|
||||||
|
link.Protocol = protocol
|
||||||
|
return link
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseBlock implements parser.InlineParser.CloseBlock.
func (s *linkifyParser) CloseBlock(parent ast.Node, pc parser.Context) {
	// nothing to do
}

type linkify struct {
}

// Linkify is an extension that allow you to parse text that seems like a URL.
var Linkify = &linkify{}

// Extend registers the linkify inline parser with the markdown converter.
func (e *linkify) Extend(m goldmark.Markdown) {
	m.Parser().AddOptions(
		parser.WithInlineParsers(
			util.Prioritized(NewLinkifyParser(), 999),
		),
	)
}
|
|
@ -15,6 +15,7 @@ import (
|
||||||
"code.gitea.io/gitea/modules/base"
|
"code.gitea.io/gitea/modules/base"
|
||||||
"code.gitea.io/gitea/modules/git"
|
"code.gitea.io/gitea/modules/git"
|
||||||
"code.gitea.io/gitea/modules/log"
|
"code.gitea.io/gitea/modules/log"
|
||||||
|
"code.gitea.io/gitea/modules/markup/common"
|
||||||
"code.gitea.io/gitea/modules/references"
|
"code.gitea.io/gitea/modules/references"
|
||||||
"code.gitea.io/gitea/modules/setting"
|
"code.gitea.io/gitea/modules/setting"
|
||||||
"code.gitea.io/gitea/modules/util"
|
"code.gitea.io/gitea/modules/util"
|
||||||
|
@ -57,8 +58,6 @@ var (
|
||||||
// https://html.spec.whatwg.org/multipage/input.html#e-mail-state-(type%3Demail)
|
// https://html.spec.whatwg.org/multipage/input.html#e-mail-state-(type%3Demail)
|
||||||
emailRegex = regexp.MustCompile("(?:\\s|^|\\(|\\[)([a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]{2,}(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+)(?:\\s|$|\\)|\\]|\\.(\\s|$))")
|
emailRegex = regexp.MustCompile("(?:\\s|^|\\(|\\[)([a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]{2,}(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+)(?:\\s|$|\\)|\\]|\\.(\\s|$))")
|
||||||
|
|
||||||
linkRegex, _ = xurls.StrictMatchingScheme("https?://")
|
|
||||||
|
|
||||||
// blackfriday extensions create IDs like fn:user-content-footnote
|
// blackfriday extensions create IDs like fn:user-content-footnote
|
||||||
blackfridayExtRegex = regexp.MustCompile(`[^:]*:user-content-`)
|
blackfridayExtRegex = regexp.MustCompile(`[^:]*:user-content-`)
|
||||||
)
|
)
|
||||||
|
@ -118,7 +117,7 @@ func CustomLinkURLSchemes(schemes []string) {
|
||||||
}
|
}
|
||||||
withAuth = append(withAuth, s)
|
withAuth = append(withAuth, s)
|
||||||
}
|
}
|
||||||
linkRegex, _ = xurls.StrictMatchingScheme(strings.Join(withAuth, "|"))
|
common.LinkRegex, _ = xurls.StrictMatchingScheme(strings.Join(withAuth, "|"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsSameDomain checks if given url string has the same hostname as current Gitea instance
|
// IsSameDomain checks if given url string has the same hostname as current Gitea instance
|
||||||
|
@ -509,6 +508,12 @@ func shortLinkProcessorFull(ctx *postProcessCtx, node *html.Node, noLink bool) {
|
||||||
(strings.HasPrefix(val, "‘") && strings.HasSuffix(val, "’")) {
|
(strings.HasPrefix(val, "‘") && strings.HasSuffix(val, "’")) {
|
||||||
const lenQuote = len("‘")
|
const lenQuote = len("‘")
|
||||||
val = val[lenQuote : len(val)-lenQuote]
|
val = val[lenQuote : len(val)-lenQuote]
|
||||||
|
} else if (strings.HasPrefix(val, "\"") && strings.HasSuffix(val, "\"")) ||
|
||||||
|
(strings.HasPrefix(val, "'") && strings.HasSuffix(val, "'")) {
|
||||||
|
val = val[1 : len(val)-1]
|
||||||
|
} else if strings.HasPrefix(val, "'") && strings.HasSuffix(val, "’") {
|
||||||
|
const lenQuote = len("‘")
|
||||||
|
val = val[1 : len(val)-lenQuote]
|
||||||
}
|
}
|
||||||
props[key] = val
|
props[key] = val
|
||||||
}
|
}
|
||||||
|
@ -803,7 +808,7 @@ func emailAddressProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||||
// linkProcessor creates links for any HTTP or HTTPS URL not captured by
|
// linkProcessor creates links for any HTTP or HTTPS URL not captured by
|
||||||
// markdown.
|
// markdown.
|
||||||
func linkProcessor(ctx *postProcessCtx, node *html.Node) {
|
func linkProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||||
m := linkRegex.FindStringIndex(node.Data)
|
m := common.LinkRegex.FindStringIndex(node.Data)
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -832,7 +837,7 @@ func genDefaultLinkProcessor(defaultLink string) processor {
|
||||||
|
|
||||||
// descriptionLinkProcessor creates links for DescriptionHTML
|
// descriptionLinkProcessor creates links for DescriptionHTML
|
||||||
func descriptionLinkProcessor(ctx *postProcessCtx, node *html.Node) {
|
func descriptionLinkProcessor(ctx *postProcessCtx, node *html.Node) {
|
||||||
m := linkRegex.FindStringIndex(node.Data)
|
m := common.LinkRegex.FindStringIndex(node.Data)
|
||||||
if m == nil {
|
if m == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -323,6 +323,6 @@ func TestRender_ShortLinks(t *testing.T) {
|
||||||
`<p><a href="`+notencodedImgurlWiki+`" rel="nofollow"><img src="`+notencodedImgurlWiki+`"/></a></p>`)
|
`<p><a href="`+notencodedImgurlWiki+`" rel="nofollow"><img src="`+notencodedImgurlWiki+`"/></a></p>`)
|
||||||
test(
|
test(
|
||||||
"<p><a href=\"https://example.org\">[[foobar]]</a></p>",
|
"<p><a href=\"https://example.org\">[[foobar]]</a></p>",
|
||||||
`<p></p><p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p><p></p>`,
|
`<p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p>`,
|
||||||
`<p></p><p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p><p></p>`)
|
`<p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p>`)
|
||||||
}
|
}
|
||||||
|
|
178
modules/markup/markdown/goldmark.go
Normal file
178
modules/markup/markdown/goldmark.go
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package markdown
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"code.gitea.io/gitea/modules/markup"
|
||||||
|
"code.gitea.io/gitea/modules/markup/common"
|
||||||
|
giteautil "code.gitea.io/gitea/modules/util"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
east "github.com/yuin/goldmark/extension/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
var byteMailto = []byte("mailto:")
|
||||||
|
|
||||||
|
// GiteaASTTransformer is a default transformer of the goldmark tree.
|
||||||
|
type GiteaASTTransformer struct{}
|
||||||
|
|
||||||
|
// Transform transforms the given AST tree.
|
||||||
|
func (g *GiteaASTTransformer) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
|
||||||
|
_ = ast.Walk(node, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v := n.(type) {
|
||||||
|
case *ast.Image:
|
||||||
|
// Images need two things:
|
||||||
|
//
|
||||||
|
// 1. Their src needs to munged to be a real value
|
||||||
|
// 2. If they're not wrapped with a link they need a link wrapper
|
||||||
|
|
||||||
|
// Check if the destination is a real link
|
||||||
|
link := v.Destination
|
||||||
|
if len(link) > 0 && !markup.IsLink(link) {
|
||||||
|
prefix := pc.Get(urlPrefixKey).(string)
|
||||||
|
if pc.Get(isWikiKey).(bool) {
|
||||||
|
prefix = giteautil.URLJoin(prefix, "wiki", "raw")
|
||||||
|
}
|
||||||
|
prefix = strings.Replace(prefix, "/src/", "/media/", 1)
|
||||||
|
|
||||||
|
lnk := string(link)
|
||||||
|
lnk = giteautil.URLJoin(prefix, lnk)
|
||||||
|
lnk = strings.Replace(lnk, " ", "+", -1)
|
||||||
|
link = []byte(lnk)
|
||||||
|
}
|
||||||
|
v.Destination = link
|
||||||
|
|
||||||
|
parent := n.Parent()
|
||||||
|
// Create a link around image only if parent is not already a link
|
||||||
|
if _, ok := parent.(*ast.Link); !ok && parent != nil {
|
||||||
|
wrap := ast.NewLink()
|
||||||
|
wrap.Destination = link
|
||||||
|
wrap.Title = v.Title
|
||||||
|
parent.ReplaceChild(parent, n, wrap)
|
||||||
|
wrap.AppendChild(wrap, n)
|
||||||
|
}
|
||||||
|
case *ast.Link:
|
||||||
|
// Links need their href to munged to be a real value
|
||||||
|
link := v.Destination
|
||||||
|
if len(link) > 0 && !markup.IsLink(link) &&
|
||||||
|
link[0] != '#' && !bytes.HasPrefix(link, byteMailto) {
|
||||||
|
// special case: this is not a link, a hash link or a mailto:, so it's a
|
||||||
|
// relative URL
|
||||||
|
lnk := string(link)
|
||||||
|
if pc.Get(isWikiKey).(bool) {
|
||||||
|
lnk = giteautil.URLJoin("wiki", lnk)
|
||||||
|
}
|
||||||
|
link = []byte(giteautil.URLJoin(pc.Get(urlPrefixKey).(string), lnk))
|
||||||
|
}
|
||||||
|
v.Destination = link
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type prefixedIDs struct {
|
||||||
|
values map[string]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate generates a new element id.
|
||||||
|
func (p *prefixedIDs) Generate(value []byte, kind ast.NodeKind) []byte {
|
||||||
|
dft := []byte("id")
|
||||||
|
if kind == ast.KindHeading {
|
||||||
|
dft = []byte("heading")
|
||||||
|
}
|
||||||
|
return p.GenerateWithDefault(value, dft)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate generates a new element id.
|
||||||
|
func (p *prefixedIDs) GenerateWithDefault(value []byte, dft []byte) []byte {
|
||||||
|
result := common.CleanValue(value)
|
||||||
|
if len(result) == 0 {
|
||||||
|
result = dft
|
||||||
|
}
|
||||||
|
if !bytes.HasPrefix(result, []byte("user-content-")) {
|
||||||
|
result = append([]byte("user-content-"), result...)
|
||||||
|
}
|
||||||
|
if _, ok := p.values[util.BytesToReadOnlyString(result)]; !ok {
|
||||||
|
p.values[util.BytesToReadOnlyString(result)] = true
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
for i := 1; ; i++ {
|
||||||
|
newResult := fmt.Sprintf("%s-%d", result, i)
|
||||||
|
if _, ok := p.values[newResult]; !ok {
|
||||||
|
p.values[newResult] = true
|
||||||
|
return []byte(newResult)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put puts a given element id to the used ids table.
|
||||||
|
func (p *prefixedIDs) Put(value []byte) {
|
||||||
|
p.values[util.BytesToReadOnlyString(value)] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPrefixedIDs() *prefixedIDs {
|
||||||
|
return &prefixedIDs{
|
||||||
|
values: map[string]bool{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTaskCheckBoxHTMLRenderer creates a TaskCheckBoxHTMLRenderer to render tasklists
|
||||||
|
// in the gitea form.
|
||||||
|
func NewTaskCheckBoxHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
|
||||||
|
r := &TaskCheckBoxHTMLRenderer{
|
||||||
|
Config: html.NewConfig(),
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt.SetHTMLOption(&r.Config)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskCheckBoxHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||||
|
// renders checkboxes in list items.
|
||||||
|
// Overrides the default goldmark one to present the gitea format
|
||||||
|
type TaskCheckBoxHTMLRenderer struct {
|
||||||
|
html.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||||
|
func (r *TaskCheckBoxHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||||
|
reg.Register(east.KindTaskCheckBox, r.renderTaskCheckBox)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
n := node.(*east.TaskCheckBox)
|
||||||
|
|
||||||
|
end := ">"
|
||||||
|
if r.XHTML {
|
||||||
|
end = " />"
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if n.IsChecked {
|
||||||
|
_, err = w.WriteString(`<span class="ui fitted disabled checkbox"><input type="checkbox" disabled="disabled"` + end + `<label` + end + `</span>`)
|
||||||
|
} else {
|
||||||
|
_, err = w.WriteString(`<span class="ui checked fitted disabled checkbox"><input type="checkbox" checked="" disabled="disabled"` + end + `<label` + end + `</span>`)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return ast.WalkStop, err
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
|
@ -7,161 +7,83 @@ package markdown
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
"sync"
|
||||||
"strings"
|
|
||||||
|
|
||||||
|
"code.gitea.io/gitea/modules/log"
|
||||||
"code.gitea.io/gitea/modules/markup"
|
"code.gitea.io/gitea/modules/markup"
|
||||||
|
"code.gitea.io/gitea/modules/markup/common"
|
||||||
"code.gitea.io/gitea/modules/setting"
|
"code.gitea.io/gitea/modules/setting"
|
||||||
"code.gitea.io/gitea/modules/util"
|
giteautil "code.gitea.io/gitea/modules/util"
|
||||||
|
|
||||||
"github.com/russross/blackfriday/v2"
|
"github.com/yuin/goldmark"
|
||||||
|
"github.com/yuin/goldmark/extension"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Renderer is a extended version of underlying render object.
|
var converter goldmark.Markdown
|
||||||
type Renderer struct {
|
var once = sync.Once{}
|
||||||
blackfriday.Renderer
|
|
||||||
URLPrefix string
|
var urlPrefixKey = parser.NewContextKey()
|
||||||
IsWiki bool
|
var isWikiKey = parser.NewContextKey()
|
||||||
|
|
||||||
|
// NewGiteaParseContext creates a parser.Context with the gitea context set
|
||||||
|
func NewGiteaParseContext(urlPrefix string, isWiki bool) parser.Context {
|
||||||
|
pc := parser.NewContext(parser.WithIDs(newPrefixedIDs()))
|
||||||
|
pc.Set(urlPrefixKey, urlPrefix)
|
||||||
|
pc.Set(isWikiKey, isWiki)
|
||||||
|
return pc
|
||||||
}
|
}
|
||||||
|
|
||||||
var byteMailto = []byte("mailto:")
|
|
||||||
|
|
||||||
var htmlEscaper = [256][]byte{
|
|
||||||
'&': []byte("&"),
|
|
||||||
'<': []byte("<"),
|
|
||||||
'>': []byte(">"),
|
|
||||||
'"': []byte("""),
|
|
||||||
}
|
|
||||||
|
|
||||||
func escapeHTML(w io.Writer, s []byte) {
|
|
||||||
var start, end int
|
|
||||||
for end < len(s) {
|
|
||||||
escSeq := htmlEscaper[s[end]]
|
|
||||||
if escSeq != nil {
|
|
||||||
_, _ = w.Write(s[start:end])
|
|
||||||
_, _ = w.Write(escSeq)
|
|
||||||
start = end + 1
|
|
||||||
}
|
|
||||||
end++
|
|
||||||
}
|
|
||||||
if start < len(s) && end <= len(s) {
|
|
||||||
_, _ = w.Write(s[start:end])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenderNode is a default renderer of a single node of a syntax tree. For
|
|
||||||
// block nodes it will be called twice: first time with entering=true, second
|
|
||||||
// time with entering=false, so that it could know when it's working on an open
|
|
||||||
// tag and when on close. It writes the result to w.
|
|
||||||
//
|
|
||||||
// The return value is a way to tell the calling walker to adjust its walk
|
|
||||||
// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
|
|
||||||
// can ask the walker to skip a subtree of this node by returning SkipChildren.
|
|
||||||
// The typical behavior is to return GoToNext, which asks for the usual
|
|
||||||
// traversal to the next node.
|
|
||||||
func (r *Renderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
|
||||||
switch node.Type {
|
|
||||||
case blackfriday.Image:
|
|
||||||
prefix := r.URLPrefix
|
|
||||||
if r.IsWiki {
|
|
||||||
prefix = util.URLJoin(prefix, "wiki", "raw")
|
|
||||||
}
|
|
||||||
prefix = strings.Replace(prefix, "/src/", "/media/", 1)
|
|
||||||
link := node.LinkData.Destination
|
|
||||||
if len(link) > 0 && !markup.IsLink(link) {
|
|
||||||
lnk := string(link)
|
|
||||||
lnk = util.URLJoin(prefix, lnk)
|
|
||||||
lnk = strings.Replace(lnk, " ", "+", -1)
|
|
||||||
link = []byte(lnk)
|
|
||||||
}
|
|
||||||
node.LinkData.Destination = link
|
|
||||||
// Render link around image only if parent is not link already
|
|
||||||
if node.Parent != nil && node.Parent.Type != blackfriday.Link {
|
|
||||||
if entering {
|
|
||||||
_, _ = w.Write([]byte(`<a href="`))
|
|
||||||
escapeHTML(w, link)
|
|
||||||
_, _ = w.Write([]byte(`">`))
|
|
||||||
return r.Renderer.RenderNode(w, node, entering)
|
|
||||||
}
|
|
||||||
s := r.Renderer.RenderNode(w, node, entering)
|
|
||||||
_, _ = w.Write([]byte(`</a>`))
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return r.Renderer.RenderNode(w, node, entering)
|
|
||||||
case blackfriday.Link:
|
|
||||||
// special case: this is not a link, a hash link or a mailto:, so it's a
|
|
||||||
// relative URL
|
|
||||||
link := node.LinkData.Destination
|
|
||||||
if len(link) > 0 && !markup.IsLink(link) &&
|
|
||||||
link[0] != '#' && !bytes.HasPrefix(link, byteMailto) &&
|
|
||||||
node.LinkData.Footnote == nil {
|
|
||||||
lnk := string(link)
|
|
||||||
if r.IsWiki {
|
|
||||||
lnk = util.URLJoin("wiki", lnk)
|
|
||||||
}
|
|
||||||
link = []byte(util.URLJoin(r.URLPrefix, lnk))
|
|
||||||
}
|
|
||||||
node.LinkData.Destination = link
|
|
||||||
return r.Renderer.RenderNode(w, node, entering)
|
|
||||||
case blackfriday.Text:
|
|
||||||
isListItem := false
|
|
||||||
for n := node.Parent; n != nil; n = n.Parent {
|
|
||||||
if n.Type == blackfriday.Item {
|
|
||||||
isListItem = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if isListItem {
|
|
||||||
text := node.Literal
|
|
||||||
switch {
|
|
||||||
case bytes.HasPrefix(text, []byte("[ ] ")):
|
|
||||||
_, _ = w.Write([]byte(`<span class="ui fitted disabled checkbox"><input type="checkbox" disabled="disabled" /><label /></span>`))
|
|
||||||
text = text[3:]
|
|
||||||
case bytes.HasPrefix(text, []byte("[x] ")):
|
|
||||||
_, _ = w.Write([]byte(`<span class="ui checked fitted disabled checkbox"><input type="checkbox" checked="" disabled="disabled" /><label /></span>`))
|
|
||||||
text = text[3:]
|
|
||||||
}
|
|
||||||
node.Literal = text
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r.Renderer.RenderNode(w, node, entering)
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
blackfridayExtensions = 0 |
|
|
||||||
blackfriday.NoIntraEmphasis |
|
|
||||||
blackfriday.Tables |
|
|
||||||
blackfriday.FencedCode |
|
|
||||||
blackfriday.Strikethrough |
|
|
||||||
blackfriday.NoEmptyLineBeforeBlock |
|
|
||||||
blackfriday.DefinitionLists |
|
|
||||||
blackfriday.Footnotes |
|
|
||||||
blackfriday.HeadingIDs |
|
|
||||||
blackfriday.AutoHeadingIDs
|
|
||||||
blackfridayHTMLFlags = 0 |
|
|
||||||
blackfriday.Smartypants
|
|
||||||
)
|
|
||||||
|
|
||||||
// RenderRaw renders Markdown to HTML without handling special links.
|
// RenderRaw renders Markdown to HTML without handling special links.
|
||||||
func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte {
|
func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte {
|
||||||
renderer := &Renderer{
|
once.Do(func() {
|
||||||
Renderer: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
|
converter = goldmark.New(
|
||||||
Flags: blackfridayHTMLFlags,
|
goldmark.WithExtensions(extension.Table,
|
||||||
FootnoteAnchorPrefix: "user-content-",
|
extension.Strikethrough,
|
||||||
HeadingIDPrefix: "user-content-",
|
extension.TaskList,
|
||||||
}),
|
extension.DefinitionList,
|
||||||
URLPrefix: urlPrefix,
|
common.FootnoteExtension,
|
||||||
IsWiki: wikiMarkdown,
|
extension.NewTypographer(
|
||||||
|
extension.WithTypographicSubstitutions(extension.TypographicSubstitutions{
|
||||||
|
extension.EnDash: nil,
|
||||||
|
extension.EmDash: nil,
|
||||||
|
}),
|
||||||
|
),
|
||||||
|
),
|
||||||
|
goldmark.WithParserOptions(
|
||||||
|
parser.WithAttribute(),
|
||||||
|
parser.WithAutoHeadingID(),
|
||||||
|
parser.WithASTTransformers(
|
||||||
|
util.Prioritized(&GiteaASTTransformer{}, 10000),
|
||||||
|
),
|
||||||
|
),
|
||||||
|
goldmark.WithRendererOptions(
|
||||||
|
html.WithUnsafe(),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
// Override the original Tasklist renderer!
|
||||||
|
converter.Renderer().AddOptions(
|
||||||
|
renderer.WithNodeRenderers(
|
||||||
|
util.Prioritized(NewTaskCheckBoxHTMLRenderer(), 1000),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
if setting.Markdown.EnableHardLineBreak {
|
||||||
|
converter.Renderer().AddOptions(html.WithHardWraps())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
pc := NewGiteaParseContext(urlPrefix, wikiMarkdown)
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := converter.Convert(giteautil.NormalizeEOL(body), &buf, parser.WithContext(pc)); err != nil {
|
||||||
|
log.Error("Unable to render: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
exts := blackfridayExtensions
|
return markup.SanitizeReader(&buf).Bytes()
|
||||||
if setting.Markdown.EnableHardLineBreak {
|
|
||||||
exts |= blackfriday.HardLineBreak
|
|
||||||
}
|
|
||||||
|
|
||||||
// Need to normalize EOL to UNIX LF to have consistent results in rendering
|
|
||||||
body = blackfriday.Run(util.NormalizeEOL(body), blackfriday.WithRenderer(renderer), blackfriday.WithExtensions(exts))
|
|
||||||
return markup.SanitizeBytes(body)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -174,8 +96,7 @@ func init() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parser implements markup.Parser
|
// Parser implements markup.Parser
|
||||||
type Parser struct {
|
type Parser struct{}
|
||||||
}
|
|
||||||
|
|
||||||
// Name implements markup.Parser
|
// Name implements markup.Parser
|
||||||
func (Parser) Name() string {
|
func (Parser) Name() string {
|
||||||
|
|
|
@ -98,16 +98,12 @@ func TestRender_Images(t *testing.T) {
|
||||||
func testAnswers(baseURLContent, baseURLImages string) []string {
|
func testAnswers(baseURLContent, baseURLImages string) []string {
|
||||||
return []string{
|
return []string{
|
||||||
`<p>Wiki! Enjoy :)</p>
|
`<p>Wiki! Enjoy :)</p>
|
||||||
|
|
||||||
<ul>
|
<ul>
|
||||||
<li><a href="` + baseURLContent + `/Links" rel="nofollow">Links, Language bindings, Engine bindings</a></li>
|
<li><a href="` + baseURLContent + `/Links" rel="nofollow">Links, Language bindings, Engine bindings</a></li>
|
||||||
<li><a href="` + baseURLContent + `/Tips" rel="nofollow">Tips</a></li>
|
<li><a href="` + baseURLContent + `/Tips" rel="nofollow">Tips</a></li>
|
||||||
</ul>
|
</ul>
|
||||||
|
|
||||||
<p>See commit <a href="http://localhost:3000/gogits/gogs/commit/65f1bf27bc" rel="nofollow"><code>65f1bf27bc</code></a></p>
|
<p>See commit <a href="http://localhost:3000/gogits/gogs/commit/65f1bf27bc" rel="nofollow"><code>65f1bf27bc</code></a></p>
|
||||||
|
|
||||||
<p>Ideas and codes</p>
|
<p>Ideas and codes</p>
|
||||||
|
|
||||||
<ul>
|
<ul>
|
||||||
<li>Bezier widget (by <a href="` + AppURL + `r-lyeh" rel="nofollow">@r-lyeh</a>) <a href="http://localhost:3000/ocornut/imgui/issues/786" rel="nofollow">ocornut/imgui#786</a></li>
|
<li>Bezier widget (by <a href="` + AppURL + `r-lyeh" rel="nofollow">@r-lyeh</a>) <a href="http://localhost:3000/ocornut/imgui/issues/786" rel="nofollow">ocornut/imgui#786</a></li>
|
||||||
<li>Bezier widget (by <a href="` + AppURL + `r-lyeh" rel="nofollow">@r-lyeh</a>) <a href="http://localhost:3000/gogits/gogs/issues/786" rel="nofollow">#786</a></li>
|
<li>Bezier widget (by <a href="` + AppURL + `r-lyeh" rel="nofollow">@r-lyeh</a>) <a href="http://localhost:3000/gogits/gogs/issues/786" rel="nofollow">#786</a></li>
|
||||||
|
@ -117,13 +113,9 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
|
||||||
</ul>
|
</ul>
|
||||||
`,
|
`,
|
||||||
`<h2 id="user-content-what-is-wine-staging">What is Wine Staging?</h2>
|
`<h2 id="user-content-what-is-wine-staging">What is Wine Staging?</h2>
|
||||||
|
|
||||||
<p><strong>Wine Staging</strong> on website <a href="http://wine-staging.com" rel="nofollow">wine-staging.com</a>.</p>
|
<p><strong>Wine Staging</strong> on website <a href="http://wine-staging.com" rel="nofollow">wine-staging.com</a>.</p>
|
||||||
|
|
||||||
<h2 id="user-content-quick-links">Quick Links</h2>
|
<h2 id="user-content-quick-links">Quick Links</h2>
|
||||||
|
|
||||||
<p>Here are some links to the most important topics. You can find the full list of pages at the sidebar.</p>
|
<p>Here are some links to the most important topics. You can find the full list of pages at the sidebar.</p>
|
||||||
|
|
||||||
<table>
|
<table>
|
||||||
<thead>
|
<thead>
|
||||||
<tr>
|
<tr>
|
||||||
|
@ -131,7 +123,6 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
|
||||||
<th><a href="` + baseURLContent + `/Installation" rel="nofollow">Installation</a></th>
|
<th><a href="` + baseURLContent + `/Installation" rel="nofollow">Installation</a></th>
|
||||||
</tr>
|
</tr>
|
||||||
</thead>
|
</thead>
|
||||||
|
|
||||||
<tbody>
|
<tbody>
|
||||||
<tr>
|
<tr>
|
||||||
<td><a href="` + baseURLImages + `/images/icon-usage.png" rel="nofollow"><img src="` + baseURLImages + `/images/icon-usage.png" title="icon-usage.png" alt="images/icon-usage.png"/></a></td>
|
<td><a href="` + baseURLImages + `/images/icon-usage.png" rel="nofollow"><img src="` + baseURLImages + `/images/icon-usage.png" title="icon-usage.png" alt="images/icon-usage.png"/></a></td>
|
||||||
|
@ -141,20 +132,15 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
|
||||||
</table>
|
</table>
|
||||||
`,
|
`,
|
||||||
`<p><a href="http://www.excelsiorjet.com/" rel="nofollow">Excelsior JET</a> allows you to create native executables for Windows, Linux and Mac OS X.</p>
|
`<p><a href="http://www.excelsiorjet.com/" rel="nofollow">Excelsior JET</a> allows you to create native executables for Windows, Linux and Mac OS X.</p>
|
||||||
|
|
||||||
<ol>
|
<ol>
|
||||||
<li><a href="https://github.com/libgdx/libgdx/wiki/Gradle-on-the-Commandline#packaging-for-the-desktop" rel="nofollow">Package your libGDX application</a>
|
<li><a href="https://github.com/libgdx/libgdx/wiki/Gradle-on-the-Commandline#packaging-for-the-desktop" rel="nofollow">Package your libGDX application</a>
|
||||||
<a href="` + baseURLImages + `/images/1.png" rel="nofollow"><img src="` + baseURLImages + `/images/1.png" title="1.png" alt="images/1.png"/></a></li>
|
<a href="` + baseURLImages + `/images/1.png" rel="nofollow"><img src="` + baseURLImages + `/images/1.png" title="1.png" alt="images/1.png"/></a></li>
|
||||||
<li>Perform a test run by hitting the Run! button.
|
<li>Perform a test run by hitting the Run! button.
|
||||||
<a href="` + baseURLImages + `/images/2.png" rel="nofollow"><img src="` + baseURLImages + `/images/2.png" title="2.png" alt="images/2.png"/></a></li>
|
<a href="` + baseURLImages + `/images/2.png" rel="nofollow"><img src="` + baseURLImages + `/images/2.png" title="2.png" alt="images/2.png"/></a></li>
|
||||||
</ol>
|
</ol>
|
||||||
|
|
||||||
<h2 id="user-content-custom-id">More tests</h2>
|
<h2 id="user-content-custom-id">More tests</h2>
|
||||||
|
|
||||||
<p>(from <a href="https://www.markdownguide.org/extended-syntax/" rel="nofollow">https://www.markdownguide.org/extended-syntax/</a>)</p>
|
<p>(from <a href="https://www.markdownguide.org/extended-syntax/" rel="nofollow">https://www.markdownguide.org/extended-syntax/</a>)</p>
|
||||||
|
|
||||||
<h3 id="user-content-definition-list">Definition list</h3>
|
<h3 id="user-content-definition-list">Definition list</h3>
|
||||||
|
|
||||||
<dl>
|
<dl>
|
||||||
<dt>First Term</dt>
|
<dt>First Term</dt>
|
||||||
<dd>This is the definition of the first term.</dd>
|
<dd>This is the definition of the first term.</dd>
|
||||||
|
@ -162,27 +148,21 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
|
||||||
<dd>This is one definition of the second term.</dd>
|
<dd>This is one definition of the second term.</dd>
|
||||||
<dd>This is another definition of the second term.</dd>
|
<dd>This is another definition of the second term.</dd>
|
||||||
</dl>
|
</dl>
|
||||||
|
|
||||||
<h3 id="user-content-footnotes">Footnotes</h3>
|
<h3 id="user-content-footnotes">Footnotes</h3>
|
||||||
|
|
||||||
<p>Here is a simple footnote,<sup id="fnref:user-content-1"><a href="#fn:user-content-1" rel="nofollow">1</a></sup> and here is a longer one.<sup id="fnref:user-content-bignote"><a href="#fn:user-content-bignote" rel="nofollow">2</a></sup></p>
|
<p>Here is a simple footnote,<sup id="fnref:user-content-1"><a href="#fn:user-content-1" rel="nofollow">1</a></sup> and here is a longer one.<sup id="fnref:user-content-bignote"><a href="#fn:user-content-bignote" rel="nofollow">2</a></sup></p>
|
||||||
|
|
||||||
<div>
|
<div>
|
||||||
|
|
||||||
<hr/>
|
<hr/>
|
||||||
|
|
||||||
<ol>
|
<ol>
|
||||||
<li id="fn:user-content-1">This is the first footnote.</li>
|
<li id="fn:user-content-1">
|
||||||
|
<p>This is the first footnote. <a href="#fnref:user-content-1" rel="nofollow">↩︎</a></p>
|
||||||
<li id="fn:user-content-bignote"><p>Here is one with multiple paragraphs and code.</p>
|
</li>
|
||||||
|
<li id="fn:user-content-bignote">
|
||||||
|
<p>Here is one with multiple paragraphs and code.</p>
|
||||||
<p>Indent paragraphs to include them in the footnote.</p>
|
<p>Indent paragraphs to include them in the footnote.</p>
|
||||||
|
|
||||||
<p><code>{ my code }</code></p>
|
<p><code>{ my code }</code></p>
|
||||||
|
<p>Add as many paragraphs as you like. <a href="#fnref:user-content-bignote" rel="nofollow">↩︎</a></p>
|
||||||
<p>Add as many paragraphs as you like.</p></li>
|
</li>
|
||||||
</ol>
|
</ol>
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
`,
|
`,
|
||||||
}
|
}
|
||||||
|
@ -299,15 +279,15 @@ func TestRender_RenderParagraphs(t *testing.T) {
|
||||||
test := func(t *testing.T, str string, cnt int) {
|
test := func(t *testing.T, str string, cnt int) {
|
||||||
unix := []byte(str)
|
unix := []byte(str)
|
||||||
res := string(RenderRaw(unix, "", false))
|
res := string(RenderRaw(unix, "", false))
|
||||||
assert.Equal(t, strings.Count(res, "<p"), cnt)
|
assert.Equal(t, strings.Count(res, "<p"), cnt, "Rendered result for unix should have %d paragraph(s) but has %d:\n%s\n", cnt, strings.Count(res, "<p"), res)
|
||||||
|
|
||||||
mac := []byte(strings.ReplaceAll(str, "\n", "\r"))
|
mac := []byte(strings.ReplaceAll(str, "\n", "\r"))
|
||||||
res = string(RenderRaw(mac, "", false))
|
res = string(RenderRaw(mac, "", false))
|
||||||
assert.Equal(t, strings.Count(res, "<p"), cnt)
|
assert.Equal(t, strings.Count(res, "<p"), cnt, "Rendered result for mac should have %d paragraph(s) but has %d:\n%s\n", cnt, strings.Count(res, "<p"), res)
|
||||||
|
|
||||||
dos := []byte(strings.ReplaceAll(str, "\n", "\r\n"))
|
dos := []byte(strings.ReplaceAll(str, "\n", "\r\n"))
|
||||||
res = string(RenderRaw(dos, "", false))
|
res = string(RenderRaw(dos, "", false))
|
||||||
assert.Equal(t, strings.Count(res, "<p"), cnt)
|
assert.Equal(t, strings.Count(res, "<p"), cnt, "Rendered result for windows should have %d paragraph(s) but has %d:\n%s\n", cnt, strings.Count(res, "<p"), res)
|
||||||
}
|
}
|
||||||
|
|
||||||
test(t, "\nOne\nTwo\nThree", 1)
|
test(t, "\nOne\nTwo\nThree", 1)
|
||||||
|
|
|
@ -6,33 +6,86 @@ package mdstripper
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"sync"
|
||||||
|
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/russross/blackfriday/v2"
|
"code.gitea.io/gitea/modules/log"
|
||||||
|
"code.gitea.io/gitea/modules/markup/common"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/extension"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MarkdownStripper extends blackfriday.Renderer
|
type stripRenderer struct {
|
||||||
type MarkdownStripper struct {
|
links []string
|
||||||
links []string
|
empty bool
|
||||||
coallesce bool
|
|
||||||
empty bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
func (r *stripRenderer) Render(w io.Writer, source []byte, doc ast.Node) error {
|
||||||
blackfridayExtensions = 0 |
|
return ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
blackfriday.NoIntraEmphasis |
|
if !entering {
|
||||||
blackfriday.Tables |
|
return ast.WalkContinue, nil
|
||||||
blackfriday.FencedCode |
|
}
|
||||||
blackfriday.Strikethrough |
|
switch v := n.(type) {
|
||||||
blackfriday.NoEmptyLineBeforeBlock |
|
case *ast.Text:
|
||||||
blackfriday.DefinitionLists |
|
if !v.IsRaw() {
|
||||||
blackfriday.Footnotes |
|
_, prevSibIsText := n.PreviousSibling().(*ast.Text)
|
||||||
blackfriday.HeadingIDs |
|
coalesce := prevSibIsText
|
||||||
blackfriday.AutoHeadingIDs |
|
r.processString(
|
||||||
// Not included in modules/markup/markdown/markdown.go;
|
w,
|
||||||
// required here to process inline links
|
v.Text(source),
|
||||||
blackfriday.Autolink
|
coalesce)
|
||||||
)
|
if v.SoftLineBreak() {
|
||||||
|
r.doubleSpace(w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
case *ast.Link:
|
||||||
|
r.processLink(w, v.Destination)
|
||||||
|
return ast.WalkSkipChildren, nil
|
||||||
|
case *ast.AutoLink:
|
||||||
|
r.processLink(w, v.URL(source))
|
||||||
|
return ast.WalkSkipChildren, nil
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *stripRenderer) doubleSpace(w io.Writer) {
|
||||||
|
if !r.empty {
|
||||||
|
_, _ = w.Write([]byte{'\n'})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *stripRenderer) processString(w io.Writer, text []byte, coalesce bool) {
|
||||||
|
// Always break-up words
|
||||||
|
if !coalesce {
|
||||||
|
r.doubleSpace(w)
|
||||||
|
}
|
||||||
|
_, _ = w.Write(text)
|
||||||
|
r.empty = false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *stripRenderer) processLink(w io.Writer, link []byte) {
|
||||||
|
// Links are processed out of band
|
||||||
|
r.links = append(r.links, string(link))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLinks returns the list of link data collected while parsing
|
||||||
|
func (r *stripRenderer) GetLinks() []string {
|
||||||
|
return r.links
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddOptions adds given option to this renderer.
|
||||||
|
func (r *stripRenderer) AddOptions(...renderer.Option) {
|
||||||
|
// no-op
|
||||||
|
}
|
||||||
|
|
||||||
// StripMarkdown parses markdown content by removing all markup and code blocks
|
// StripMarkdown parses markdown content by removing all markup and code blocks
|
||||||
// in order to extract links and other references
|
// in order to extract links and other references
|
||||||
|
@ -41,78 +94,40 @@ func StripMarkdown(rawBytes []byte) (string, []string) {
|
||||||
return string(buf), links
|
return string(buf), links
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var stripParser parser.Parser
|
||||||
|
var once = sync.Once{}
|
||||||
|
|
||||||
// StripMarkdownBytes parses markdown content by removing all markup and code blocks
|
// StripMarkdownBytes parses markdown content by removing all markup and code blocks
|
||||||
// in order to extract links and other references
|
// in order to extract links and other references
|
||||||
func StripMarkdownBytes(rawBytes []byte) ([]byte, []string) {
|
func StripMarkdownBytes(rawBytes []byte) ([]byte, []string) {
|
||||||
stripper := &MarkdownStripper{
|
once.Do(func() {
|
||||||
|
gdMarkdown := goldmark.New(
|
||||||
|
goldmark.WithExtensions(extension.Table,
|
||||||
|
extension.Strikethrough,
|
||||||
|
extension.TaskList,
|
||||||
|
extension.DefinitionList,
|
||||||
|
common.FootnoteExtension,
|
||||||
|
common.Linkify,
|
||||||
|
),
|
||||||
|
goldmark.WithParserOptions(
|
||||||
|
parser.WithAttribute(),
|
||||||
|
parser.WithAutoHeadingID(),
|
||||||
|
),
|
||||||
|
goldmark.WithRendererOptions(
|
||||||
|
html.WithUnsafe(),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
stripParser = gdMarkdown.Parser()
|
||||||
|
})
|
||||||
|
stripper := &stripRenderer{
|
||||||
links: make([]string, 0, 10),
|
links: make([]string, 0, 10),
|
||||||
empty: true,
|
empty: true,
|
||||||
}
|
}
|
||||||
|
reader := text.NewReader(rawBytes)
|
||||||
parser := blackfriday.New(blackfriday.WithRenderer(stripper), blackfriday.WithExtensions(blackfridayExtensions))
|
doc := stripParser.Parse(reader)
|
||||||
ast := parser.Parse(rawBytes)
|
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
stripper.RenderHeader(&buf, ast)
|
if err := stripper.Render(&buf, rawBytes, doc); err != nil {
|
||||||
ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
log.Error("Unable to strip: %v", err)
|
||||||
return stripper.RenderNode(&buf, node, entering)
|
}
|
||||||
})
|
|
||||||
stripper.RenderFooter(&buf, ast)
|
|
||||||
return buf.Bytes(), stripper.GetLinks()
|
return buf.Bytes(), stripper.GetLinks()
|
||||||
}
|
}
|
||||||
|
|
||||||
// RenderNode is the main rendering method. It will be called once for
|
|
||||||
// every leaf node and twice for every non-leaf node (first with
|
|
||||||
// entering=true, then with entering=false). The method should write its
|
|
||||||
// rendition of the node to the supplied writer w.
|
|
||||||
func (r *MarkdownStripper) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
|
||||||
if !entering {
|
|
||||||
return blackfriday.GoToNext
|
|
||||||
}
|
|
||||||
switch node.Type {
|
|
||||||
case blackfriday.Text:
|
|
||||||
r.processString(w, node.Literal, node.Parent == nil)
|
|
||||||
return blackfriday.GoToNext
|
|
||||||
case blackfriday.Link:
|
|
||||||
r.processLink(w, node.LinkData.Destination)
|
|
||||||
r.coallesce = false
|
|
||||||
return blackfriday.SkipChildren
|
|
||||||
}
|
|
||||||
r.coallesce = false
|
|
||||||
return blackfriday.GoToNext
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenderHeader is a method that allows the renderer to produce some
|
|
||||||
// content preceding the main body of the output document.
|
|
||||||
func (r *MarkdownStripper) RenderHeader(w io.Writer, ast *blackfriday.Node) {
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenderFooter is a symmetric counterpart of RenderHeader.
|
|
||||||
func (r *MarkdownStripper) RenderFooter(w io.Writer, ast *blackfriday.Node) {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *MarkdownStripper) doubleSpace(w io.Writer) {
|
|
||||||
if !r.empty {
|
|
||||||
_, _ = w.Write([]byte{'\n'})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *MarkdownStripper) processString(w io.Writer, text []byte, coallesce bool) {
|
|
||||||
// Always break-up words
|
|
||||||
if !coallesce || !r.coallesce {
|
|
||||||
r.doubleSpace(w)
|
|
||||||
}
|
|
||||||
_, _ = w.Write(text)
|
|
||||||
r.coallesce = coallesce
|
|
||||||
r.empty = false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *MarkdownStripper) processLink(w io.Writer, link []byte) {
|
|
||||||
// Links are processed out of band
|
|
||||||
r.links = append(r.links, string(link))
|
|
||||||
r.coallesce = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLinks returns the list of link data collected while parsing
|
|
||||||
func (r *MarkdownStripper) GetLinks() []string {
|
|
||||||
return r.links
|
|
||||||
}
|
|
||||||
|
|
|
@ -53,6 +53,20 @@ A HIDDEN ` + "`" + `GHOST` + "`" + ` IN THIS LINE.
|
||||||
[]string{
|
[]string{
|
||||||
"link",
|
"link",
|
||||||
}},
|
}},
|
||||||
|
{
|
||||||
|
"Simply closes: #29 yes",
|
||||||
|
[]string{
|
||||||
|
"Simply closes: #29 yes",
|
||||||
|
},
|
||||||
|
[]string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Simply closes: !29 yes",
|
||||||
|
[]string{
|
||||||
|
"Simply closes: !29 yes",
|
||||||
|
},
|
||||||
|
[]string{},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range list {
|
for _, test := range list {
|
||||||
|
|
|
@ -6,6 +6,8 @@
|
||||||
package markup
|
package markup
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
"regexp"
|
"regexp"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
@ -67,6 +69,12 @@ func Sanitize(s string) string {
|
||||||
return sanitizer.policy.Sanitize(s)
|
return sanitizer.policy.Sanitize(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SanitizeReader sanitizes a Reader
|
||||||
|
func SanitizeReader(r io.Reader) *bytes.Buffer {
|
||||||
|
NewSanitizer()
|
||||||
|
return sanitizer.policy.SanitizeReader(r)
|
||||||
|
}
|
||||||
|
|
||||||
// SanitizeBytes takes a []byte slice that contains a HTML fragment or document and applies policy whitelist.
|
// SanitizeBytes takes a []byte slice that contains a HTML fragment or document and applies policy whitelist.
|
||||||
func SanitizeBytes(b []byte) []byte {
|
func SanitizeBytes(b []byte) []byte {
|
||||||
if len(b) == 0 {
|
if len(b) == 0 {
|
||||||
|
|
|
@ -43,10 +43,6 @@ func TestFindAllIssueReferences(t *testing.T) {
|
||||||
{29, "", "", "29", true, XRefActionCloses, &RefSpan{Start: 15, End: 18}, &RefSpan{Start: 7, End: 13}},
|
{29, "", "", "29", true, XRefActionCloses, &RefSpan{Start: 15, End: 18}, &RefSpan{Start: 7, End: 13}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"#123 no, this is a title.",
|
|
||||||
[]testResult{},
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
" #124 yes, this is a reference.",
|
" #124 yes, this is a reference.",
|
||||||
[]testResult{
|
[]testResult{
|
||||||
|
|
|
@ -69,7 +69,6 @@ func TestAPI_RenderGFM(t *testing.T) {
|
||||||
- Bezier widget (by @r-lyeh) https://github.com/ocornut/imgui/issues/786`,
|
- Bezier widget (by @r-lyeh) https://github.com/ocornut/imgui/issues/786`,
|
||||||
// rendered
|
// rendered
|
||||||
`<p>Wiki! Enjoy :)</p>
|
`<p>Wiki! Enjoy :)</p>
|
||||||
|
|
||||||
<ul>
|
<ul>
|
||||||
<li><a href="` + AppSubURL + `wiki/Links" rel="nofollow">Links, Language bindings, Engine bindings</a></li>
|
<li><a href="` + AppSubURL + `wiki/Links" rel="nofollow">Links, Language bindings, Engine bindings</a></li>
|
||||||
<li><a href="` + AppSubURL + `wiki/Tips" rel="nofollow">Tips</a></li>
|
<li><a href="` + AppSubURL + `wiki/Tips" rel="nofollow">Tips</a></li>
|
||||||
|
@ -88,13 +87,9 @@ Here are some links to the most important topics. You can find the full list of
|
||||||
`,
|
`,
|
||||||
// rendered
|
// rendered
|
||||||
`<h2 id="user-content-what-is-wine-staging">What is Wine Staging?</h2>
|
`<h2 id="user-content-what-is-wine-staging">What is Wine Staging?</h2>
|
||||||
|
|
||||||
<p><strong>Wine Staging</strong> on website <a href="http://wine-staging.com" rel="nofollow">wine-staging.com</a>.</p>
|
<p><strong>Wine Staging</strong> on website <a href="http://wine-staging.com" rel="nofollow">wine-staging.com</a>.</p>
|
||||||
|
|
||||||
<h2 id="user-content-quick-links">Quick Links</h2>
|
<h2 id="user-content-quick-links">Quick Links</h2>
|
||||||
|
|
||||||
<p>Here are some links to the most important topics. You can find the full list of pages at the sidebar.</p>
|
<p>Here are some links to the most important topics. You can find the full list of pages at the sidebar.</p>
|
||||||
|
|
||||||
<p><a href="` + AppSubURL + `wiki/Configuration" rel="nofollow">Configuration</a>
|
<p><a href="` + AppSubURL + `wiki/Configuration" rel="nofollow">Configuration</a>
|
||||||
<a href="` + AppSubURL + `wiki/raw/images/icon-bug.png" rel="nofollow"><img src="` + AppSubURL + `wiki/raw/images/icon-bug.png" title="icon-bug.png" alt="images/icon-bug.png"/></a></p>
|
<a href="` + AppSubURL + `wiki/raw/images/icon-bug.png" rel="nofollow"><img src="` + AppSubURL + `wiki/raw/images/icon-bug.png" title="icon-bug.png" alt="images/icon-bug.png"/></a></p>
|
||||||
`,
|
`,
|
||||||
|
|
8
vendor/github.com/russross/blackfriday/v2/.gitignore
generated
vendored
8
vendor/github.com/russross/blackfriday/v2/.gitignore
generated
vendored
|
@ -1,8 +0,0 @@
|
||||||
*.out
|
|
||||||
*.swp
|
|
||||||
*.8
|
|
||||||
*.6
|
|
||||||
_obj
|
|
||||||
_test*
|
|
||||||
markdown
|
|
||||||
tags
|
|
17
vendor/github.com/russross/blackfriday/v2/.travis.yml
generated
vendored
17
vendor/github.com/russross/blackfriday/v2/.travis.yml
generated
vendored
|
@ -1,17 +0,0 @@
|
||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- "1.10.x"
|
|
||||||
- "1.11.x"
|
|
||||||
- tip
|
|
||||||
matrix:
|
|
||||||
fast_finish: true
|
|
||||||
allow_failures:
|
|
||||||
- go: tip
|
|
||||||
install:
|
|
||||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
|
||||||
script:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- diff -u <(echo -n) <(gofmt -d -s .)
|
|
||||||
- go tool vet .
|
|
||||||
- go test -v ./...
|
|
29
vendor/github.com/russross/blackfriday/v2/LICENSE.txt
generated
vendored
29
vendor/github.com/russross/blackfriday/v2/LICENSE.txt
generated
vendored
|
@ -1,29 +0,0 @@
|
||||||
Blackfriday is distributed under the Simplified BSD License:
|
|
||||||
|
|
||||||
> Copyright © 2011 Russ Ross
|
|
||||||
> All rights reserved.
|
|
||||||
>
|
|
||||||
> Redistribution and use in source and binary forms, with or without
|
|
||||||
> modification, are permitted provided that the following conditions
|
|
||||||
> are met:
|
|
||||||
>
|
|
||||||
> 1. Redistributions of source code must retain the above copyright
|
|
||||||
> notice, this list of conditions and the following disclaimer.
|
|
||||||
>
|
|
||||||
> 2. Redistributions in binary form must reproduce the above
|
|
||||||
> copyright notice, this list of conditions and the following
|
|
||||||
> disclaimer in the documentation and/or other materials provided with
|
|
||||||
> the distribution.
|
|
||||||
>
|
|
||||||
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
||||||
> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
||||||
> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
||||||
> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
||||||
> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
||||||
> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
||||||
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
||||||
> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
|
||||||
> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
> POSSIBILITY OF SUCH DAMAGE.
|
|
291
vendor/github.com/russross/blackfriday/v2/README.md
generated
vendored
291
vendor/github.com/russross/blackfriday/v2/README.md
generated
vendored
|
@ -1,291 +0,0 @@
|
||||||
Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday)
|
|
||||||
===========
|
|
||||||
|
|
||||||
Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
|
|
||||||
is paranoid about its input (so you can safely feed it user-supplied
|
|
||||||
data), it is fast, it supports common extensions (tables, smart
|
|
||||||
punctuation substitutions, etc.), and it is safe for all utf-8
|
|
||||||
(unicode) input.
|
|
||||||
|
|
||||||
HTML output is currently supported, along with Smartypants
|
|
||||||
extensions.
|
|
||||||
|
|
||||||
It started as a translation from C of [Sundown][3].
|
|
||||||
|
|
||||||
|
|
||||||
Installation
|
|
||||||
------------
|
|
||||||
|
|
||||||
Blackfriday is compatible with any modern Go release. With Go 1.7 and git
|
|
||||||
installed:
|
|
||||||
|
|
||||||
go get gopkg.in/russross/blackfriday.v2
|
|
||||||
|
|
||||||
will download, compile, and install the package into your `$GOPATH`
|
|
||||||
directory hierarchy. Alternatively, you can achieve the same if you
|
|
||||||
import it into a project:
|
|
||||||
|
|
||||||
import "gopkg.in/russross/blackfriday.v2"
|
|
||||||
|
|
||||||
and `go get` without parameters.
|
|
||||||
|
|
||||||
|
|
||||||
Versions
|
|
||||||
--------
|
|
||||||
|
|
||||||
Currently maintained and recommended version of Blackfriday is `v2`. It's being
|
|
||||||
developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
|
|
||||||
documentation is available at
|
|
||||||
https://godoc.org/gopkg.in/russross/blackfriday.v2.
|
|
||||||
|
|
||||||
It is `go get`-able via via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
|
|
||||||
but we highly recommend using package management tool like [dep][7] or
|
|
||||||
[Glide][8] and make use of semantic versioning. With package management you
|
|
||||||
should import `github.com/russross/blackfriday` and specify that you're using
|
|
||||||
version 2.0.0.
|
|
||||||
|
|
||||||
Version 2 offers a number of improvements over v1:
|
|
||||||
|
|
||||||
* Cleaned up API
|
|
||||||
* A separate call to [`Parse`][4], which produces an abstract syntax tree for
|
|
||||||
the document
|
|
||||||
* Latest bug fixes
|
|
||||||
* Flexibility to easily add your own rendering extensions
|
|
||||||
|
|
||||||
Potential drawbacks:
|
|
||||||
|
|
||||||
* Our benchmarks show v2 to be slightly slower than v1. Currently in the
|
|
||||||
ballpark of around 15%.
|
|
||||||
* API breakage. If you can't afford modifying your code to adhere to the new API
|
|
||||||
and don't care too much about the new features, v2 is probably not for you.
|
|
||||||
* Several bug fixes are trailing behind and still need to be forward-ported to
|
|
||||||
v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
|
|
||||||
tracking.
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
For the most sensible markdown processing, it is as simple as getting your input
|
|
||||||
into a byte slice and calling:
|
|
||||||
|
|
||||||
```go
|
|
||||||
output := blackfriday.Run(input)
|
|
||||||
```
|
|
||||||
|
|
||||||
Your input will be parsed and the output rendered with a set of most popular
|
|
||||||
extensions enabled. If you want the most basic feature set, corresponding with
|
|
||||||
the bare Markdown specification, use:
|
|
||||||
|
|
||||||
```go
|
|
||||||
output := blackfriday.Run(input, blackfriday.WithNoExtensions())
|
|
||||||
```
|
|
||||||
|
|
||||||
### Sanitize untrusted content
|
|
||||||
|
|
||||||
Blackfriday itself does nothing to protect against malicious content. If you are
|
|
||||||
dealing with user-supplied markdown, we recommend running Blackfriday's output
|
|
||||||
through HTML sanitizer such as [Bluemonday][5].
|
|
||||||
|
|
||||||
Here's an example of simple usage of Blackfriday together with Bluemonday:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
"github.com/microcosm-cc/bluemonday"
|
|
||||||
"github.com/russross/blackfriday"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ...
|
|
||||||
unsafe := blackfriday.Run(input)
|
|
||||||
html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Custom options
|
|
||||||
|
|
||||||
If you want to customize the set of options, use `blackfriday.WithExtensions`,
|
|
||||||
`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
|
|
||||||
|
|
||||||
You can also check out `blackfriday-tool` for a more complete example
|
|
||||||
of how to use it. Download and install it using:
|
|
||||||
|
|
||||||
go get github.com/russross/blackfriday-tool
|
|
||||||
|
|
||||||
This is a simple command-line tool that allows you to process a
|
|
||||||
markdown file using a standalone program. You can also browse the
|
|
||||||
source directly on github if you are just looking for some example
|
|
||||||
code:
|
|
||||||
|
|
||||||
* <http://github.com/russross/blackfriday-tool>
|
|
||||||
|
|
||||||
Note that if you have not already done so, installing
|
|
||||||
`blackfriday-tool` will be sufficient to download and install
|
|
||||||
blackfriday in addition to the tool itself. The tool binary will be
|
|
||||||
installed in `$GOPATH/bin`. This is a statically-linked binary that
|
|
||||||
can be copied to wherever you need it without worrying about
|
|
||||||
dependencies and library versions.
|
|
||||||
|
|
||||||
|
|
||||||
Features
|
|
||||||
--------
|
|
||||||
|
|
||||||
All features of Sundown are supported, including:
|
|
||||||
|
|
||||||
* **Compatibility**. The Markdown v1.0.3 test suite passes with
|
|
||||||
the `--tidy` option. Without `--tidy`, the differences are
|
|
||||||
mostly in whitespace and entity escaping, where blackfriday is
|
|
||||||
more consistent and cleaner.
|
|
||||||
|
|
||||||
* **Common extensions**, including table support, fenced code
|
|
||||||
blocks, autolinks, strikethroughs, non-strict emphasis, etc.
|
|
||||||
|
|
||||||
* **Safety**. Blackfriday is paranoid when parsing, making it safe
|
|
||||||
to feed untrusted user input without fear of bad things
|
|
||||||
happening. The test suite stress tests this and there are no
|
|
||||||
known inputs that make it crash. If you find one, please let me
|
|
||||||
know and send me the input that does it.
|
|
||||||
|
|
||||||
NOTE: "safety" in this context means *runtime safety only*. In order to
|
|
||||||
protect yourself against JavaScript injection in untrusted content, see
|
|
||||||
[this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
|
|
||||||
|
|
||||||
* **Fast processing**. It is fast enough to render on-demand in
|
|
||||||
most web applications without having to cache the output.
|
|
||||||
|
|
||||||
* **Thread safety**. You can run multiple parsers in different
|
|
||||||
goroutines without ill effect. There is no dependence on global
|
|
||||||
shared state.
|
|
||||||
|
|
||||||
* **Minimal dependencies**. Blackfriday only depends on standard
|
|
||||||
library packages in Go. The source code is pretty
|
|
||||||
self-contained, so it is easy to add to any project, including
|
|
||||||
Google App Engine projects.
|
|
||||||
|
|
||||||
* **Standards compliant**. Output successfully validates using the
|
|
||||||
W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
|
|
||||||
|
|
||||||
|
|
||||||
Extensions
|
|
||||||
----------
|
|
||||||
|
|
||||||
In addition to the standard markdown syntax, this package
|
|
||||||
implements the following extensions:
|
|
||||||
|
|
||||||
* **Intra-word emphasis supression**. The `_` character is
|
|
||||||
commonly used inside words when discussing code, so having
|
|
||||||
markdown interpret it as an emphasis command is usually the
|
|
||||||
wrong thing. Blackfriday lets you treat all emphasis markers as
|
|
||||||
normal characters when they occur inside a word.
|
|
||||||
|
|
||||||
* **Tables**. Tables can be created by drawing them in the input
|
|
||||||
using a simple syntax:
|
|
||||||
|
|
||||||
```
|
|
||||||
Name | Age
|
|
||||||
--------|------
|
|
||||||
Bob | 27
|
|
||||||
Alice | 23
|
|
||||||
```
|
|
||||||
|
|
||||||
* **Fenced code blocks**. In addition to the normal 4-space
|
|
||||||
indentation to mark code blocks, you can explicitly mark them
|
|
||||||
and supply a language (to make syntax highlighting simple). Just
|
|
||||||
mark it like this:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func getTrue() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
You can use 3 or more backticks to mark the beginning of the
|
|
||||||
block, and the same number to mark the end of the block.
|
|
||||||
|
|
||||||
* **Definition lists**. A simple definition list is made of a single-line
|
|
||||||
term followed by a colon and the definition for that term.
|
|
||||||
|
|
||||||
Cat
|
|
||||||
: Fluffy animal everyone likes
|
|
||||||
|
|
||||||
Internet
|
|
||||||
: Vector of transmission for pictures of cats
|
|
||||||
|
|
||||||
Terms must be separated from the previous definition by a blank line.
|
|
||||||
|
|
||||||
* **Footnotes**. A marker in the text that will become a superscript number;
|
|
||||||
a footnote definition that will be placed in a list of footnotes at the
|
|
||||||
end of the document. A footnote looks like this:
|
|
||||||
|
|
||||||
This is a footnote.[^1]
|
|
||||||
|
|
||||||
[^1]: the footnote text.
|
|
||||||
|
|
||||||
* **Autolinking**. Blackfriday can find URLs that have not been
|
|
||||||
explicitly marked as links and turn them into links.
|
|
||||||
|
|
||||||
* **Strikethrough**. Use two tildes (`~~`) to mark text that
|
|
||||||
should be crossed out.
|
|
||||||
|
|
||||||
* **Hard line breaks**. With this extension enabled newlines in the input
|
|
||||||
translate into line breaks in the output. This extension is off by default.
|
|
||||||
|
|
||||||
* **Smart quotes**. Smartypants-style punctuation substitution is
|
|
||||||
supported, turning normal double- and single-quote marks into
|
|
||||||
curly quotes, etc.
|
|
||||||
|
|
||||||
* **LaTeX-style dash parsing** is an additional option, where `--`
|
|
||||||
is translated into `–`, and `---` is translated into
|
|
||||||
`—`. This differs from most smartypants processors, which
|
|
||||||
turn a single hyphen into an ndash and a double hyphen into an
|
|
||||||
mdash.
|
|
||||||
|
|
||||||
* **Smart fractions**, where anything that looks like a fraction
|
|
||||||
is translated into suitable HTML (instead of just a few special
|
|
||||||
cases like most smartypant processors). For example, `4/5`
|
|
||||||
becomes `<sup>4</sup>⁄<sub>5</sub>`, which renders as
|
|
||||||
<sup>4</sup>⁄<sub>5</sub>.
|
|
||||||
|
|
||||||
|
|
||||||
Other renderers
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Blackfriday is structured to allow alternative rendering engines. Here
|
|
||||||
are a few of note:
|
|
||||||
|
|
||||||
* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
|
|
||||||
provides a GitHub Flavored Markdown renderer with fenced code block
|
|
||||||
highlighting, clickable heading anchor links.
|
|
||||||
|
|
||||||
It's not customizable, and its goal is to produce HTML output
|
|
||||||
equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
|
|
||||||
except the rendering is performed locally.
|
|
||||||
|
|
||||||
* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
|
|
||||||
but for markdown.
|
|
||||||
|
|
||||||
* [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
|
|
||||||
renders output as LaTeX.
|
|
||||||
|
|
||||||
* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
|
|
||||||
|
|
||||||
|
|
||||||
Todo
|
|
||||||
----
|
|
||||||
|
|
||||||
* More unit testing
|
|
||||||
* Improve unicode support. It does not understand all unicode
|
|
||||||
rules (about what constitutes a letter, a punctuation symbol,
|
|
||||||
etc.), so it may fail to detect word boundaries correctly in
|
|
||||||
some instances. It is safe on all utf-8 input.
|
|
||||||
|
|
||||||
|
|
||||||
License
|
|
||||||
-------
|
|
||||||
|
|
||||||
[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
|
|
||||||
|
|
||||||
|
|
||||||
[1]: https://daringfireball.net/projects/markdown/ "Markdown"
|
|
||||||
[2]: https://golang.org/ "Go Language"
|
|
||||||
[3]: https://github.com/vmg/sundown "Sundown"
|
|
||||||
[4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
|
|
||||||
[5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
|
|
||||||
[6]: https://labix.org/gopkg.in "gopkg.in"
|
|
1590
vendor/github.com/russross/blackfriday/v2/block.go
generated
vendored
1590
vendor/github.com/russross/blackfriday/v2/block.go
generated
vendored
File diff suppressed because it is too large
Load diff
18
vendor/github.com/russross/blackfriday/v2/doc.go
generated
vendored
18
vendor/github.com/russross/blackfriday/v2/doc.go
generated
vendored
|
@ -1,18 +0,0 @@
|
||||||
// Package blackfriday is a markdown processor.
|
|
||||||
//
|
|
||||||
// It translates plain text with simple formatting rules into an AST, which can
|
|
||||||
// then be further processed to HTML (provided by Blackfriday itself) or other
|
|
||||||
// formats (provided by the community).
|
|
||||||
//
|
|
||||||
// The simplest way to invoke Blackfriday is to call the Run function. It will
|
|
||||||
// take a text input and produce a text output in HTML (or other format).
|
|
||||||
//
|
|
||||||
// A slightly more sophisticated way to use Blackfriday is to create a Markdown
|
|
||||||
// processor and to call Parse, which returns a syntax tree for the input
|
|
||||||
// document. You can leverage Blackfriday's parsing for content extraction from
|
|
||||||
// markdown documents. You can assign a custom renderer and set various options
|
|
||||||
// to the Markdown processor.
|
|
||||||
//
|
|
||||||
// If you're interested in calling Blackfriday from command line, see
|
|
||||||
// https://github.com/russross/blackfriday-tool.
|
|
||||||
package blackfriday
|
|
34
vendor/github.com/russross/blackfriday/v2/esc.go
generated
vendored
34
vendor/github.com/russross/blackfriday/v2/esc.go
generated
vendored
|
@ -1,34 +0,0 @@
|
||||||
package blackfriday
|
|
||||||
|
|
||||||
import (
|
|
||||||
"html"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
var htmlEscaper = [256][]byte{
|
|
||||||
'&': []byte("&"),
|
|
||||||
'<': []byte("<"),
|
|
||||||
'>': []byte(">"),
|
|
||||||
'"': []byte("""),
|
|
||||||
}
|
|
||||||
|
|
||||||
func escapeHTML(w io.Writer, s []byte) {
|
|
||||||
var start, end int
|
|
||||||
for end < len(s) {
|
|
||||||
escSeq := htmlEscaper[s[end]]
|
|
||||||
if escSeq != nil {
|
|
||||||
w.Write(s[start:end])
|
|
||||||
w.Write(escSeq)
|
|
||||||
start = end + 1
|
|
||||||
}
|
|
||||||
end++
|
|
||||||
}
|
|
||||||
if start < len(s) && end <= len(s) {
|
|
||||||
w.Write(s[start:end])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func escLink(w io.Writer, text []byte) {
|
|
||||||
unesc := html.UnescapeString(string(text))
|
|
||||||
escapeHTML(w, []byte(unesc))
|
|
||||||
}
|
|
1
vendor/github.com/russross/blackfriday/v2/go.mod
generated
vendored
1
vendor/github.com/russross/blackfriday/v2/go.mod
generated
vendored
|
@ -1 +0,0 @@
|
||||||
module github.com/russross/blackfriday/v2
|
|
949
vendor/github.com/russross/blackfriday/v2/html.go
generated
vendored
949
vendor/github.com/russross/blackfriday/v2/html.go
generated
vendored
|
@ -1,949 +0,0 @@
|
||||||
//
|
|
||||||
// Blackfriday Markdown Processor
|
|
||||||
// Available at http://github.com/russross/blackfriday
|
|
||||||
//
|
|
||||||
// Copyright © 2011 Russ Ross <russ@russross.com>.
|
|
||||||
// Distributed under the Simplified BSD License.
|
|
||||||
// See README.md for details.
|
|
||||||
//
|
|
||||||
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// HTML rendering backend
|
|
||||||
//
|
|
||||||
//
|
|
||||||
|
|
||||||
package blackfriday
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HTMLFlags control optional behavior of HTML renderer.
|
|
||||||
type HTMLFlags int
|
|
||||||
|
|
||||||
// HTML renderer configuration options.
|
|
||||||
const (
|
|
||||||
HTMLFlagsNone HTMLFlags = 0
|
|
||||||
SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
|
|
||||||
SkipImages // Skip embedded images
|
|
||||||
SkipLinks // Skip all links
|
|
||||||
Safelink // Only link to trusted protocols
|
|
||||||
NofollowLinks // Only link with rel="nofollow"
|
|
||||||
NoreferrerLinks // Only link with rel="noreferrer"
|
|
||||||
NoopenerLinks // Only link with rel="noopener"
|
|
||||||
HrefTargetBlank // Add a blank target
|
|
||||||
CompletePage // Generate a complete HTML page
|
|
||||||
UseXHTML // Generate XHTML output instead of HTML
|
|
||||||
FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
|
|
||||||
Smartypants // Enable smart punctuation substitutions
|
|
||||||
SmartypantsFractions // Enable smart fractions (with Smartypants)
|
|
||||||
SmartypantsDashes // Enable smart dashes (with Smartypants)
|
|
||||||
SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
|
|
||||||
SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
|
|
||||||
SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
|
|
||||||
TOC // Generate a table of contents
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
|
|
||||||
processingInstruction + "|" + declaration + "|" + cdata + ")"
|
|
||||||
closeTag = "</" + tagName + "\\s*[>]"
|
|
||||||
openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
|
|
||||||
attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
|
|
||||||
attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
|
|
||||||
attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
|
|
||||||
attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
|
|
||||||
cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
|
|
||||||
declaration = "<![A-Z]+" + "\\s+[^>]*>"
|
|
||||||
doubleQuotedValue = "\"[^\"]*\""
|
|
||||||
htmlComment = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
|
|
||||||
processingInstruction = "[<][?].*?[?][>]"
|
|
||||||
singleQuotedValue = "'[^']*'"
|
|
||||||
tagName = "[A-Za-z][A-Za-z0-9-]*"
|
|
||||||
unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HTMLRendererParameters is a collection of supplementary parameters tweaking
// the behavior of various parts of HTML renderer.
type HTMLRendererParameters struct {
	// Prepend this text to each relative URL.
	AbsolutePrefix string
	// Add this text to each footnote anchor, to ensure uniqueness.
	FootnoteAnchorPrefix string
	// Show this text inside the <a> tag for a footnote return link, if the
	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
	// <sup>[return]</sup> is used.
	FootnoteReturnLinkContents string
	// If set, add this text to the front of each Heading ID, to ensure
	// uniqueness.
	HeadingIDPrefix string
	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
	HeadingIDSuffix string
	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
	// Negative offset is also valid.
	// Resulting levels are clipped between 1 and 6.
	HeadingLevelOffset int

	Title string // Document title (used if CompletePage is set)
	CSS   string // Optional CSS file URL (used if CompletePage is set)
	Icon  string // Optional icon file URL (used if CompletePage is set)

	Flags HTMLFlags // Flags allow customizing this renderer's behavior
}
|
|
||||||
|
|
||||||
// HTMLRenderer is a type that implements the Renderer interface for HTML output.
//
// Do not create this directly, instead use the NewHTMLRenderer function.
type HTMLRenderer struct {
	HTMLRendererParameters

	closeTag string // how to end singleton tags: either " />" or ">"

	// Track heading IDs to prevent ID collision in a single generation.
	headingIDs map[string]int

	// lastOutputLen is the length of the most recent write; cr() emits a
	// newline only when it is non-zero.
	lastOutputLen int
	// disableTags is incremented while rendering an image's alt text; while
	// it is positive, out() strips HTML tags from everything written.
	disableTags int

	// sr applies Smartypants punctuation substitutions to text output.
	sr *SPRenderer
}
|
|
||||||
|
|
||||||
// Endings for singleton (void) tags, selected by the UseXHTML flag and
// stored in HTMLRenderer.closeTag.
const (
	xhtmlClose = " />"
	htmlClose  = ">"
)
|
|
||||||
|
|
||||||
// NewHTMLRenderer creates and configures an HTMLRenderer object, which
|
|
||||||
// satisfies the Renderer interface.
|
|
||||||
func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
|
|
||||||
// configure the rendering engine
|
|
||||||
closeTag := htmlClose
|
|
||||||
if params.Flags&UseXHTML != 0 {
|
|
||||||
closeTag = xhtmlClose
|
|
||||||
}
|
|
||||||
|
|
||||||
if params.FootnoteReturnLinkContents == "" {
|
|
||||||
params.FootnoteReturnLinkContents = `<sup>[return]</sup>`
|
|
||||||
}
|
|
||||||
|
|
||||||
return &HTMLRenderer{
|
|
||||||
HTMLRendererParameters: params,
|
|
||||||
|
|
||||||
closeTag: closeTag,
|
|
||||||
headingIDs: make(map[string]int),
|
|
||||||
|
|
||||||
sr: NewSmartypantsRenderer(params.Flags),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isHTMLTag(tag []byte, tagname string) bool {
|
|
||||||
found, _ := findHTMLTagPos(tag, tagname)
|
|
||||||
return found
|
|
||||||
}
|
|
||||||
|
|
||||||
// skipUntilCharIgnoreQuotes scans html from index start for char, ignoring
// any occurrence that lies inside single, double, or backtick quotes (the
// quoted content might be JavaScript). It returns the index of the first
// unquoted occurrence, or start when none is found.
func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
	var inSingle, inDouble, inGrave bool
	for i := start; i < len(html); i++ {
		c := html[i]
		if c == char && !inSingle && !inDouble && !inGrave {
			return i
		}
		switch c {
		case '\'':
			inSingle = !inSingle
		case '"':
			inDouble = !inDouble
		case '`':
			inGrave = !inGrave
		}
	}
	return start
}
|
|
||||||
|
|
||||||
// findHTMLTagPos reports whether tag starts with an HTML tag (open or close)
// whose name begins with tagname, case-insensitively, and returns the index
// of the closing '>' (ignoring '>' inside quotes).
func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
	i := 0
	if i < len(tag) && tag[0] != '<' {
		return false, -1
	}
	i++
	i = skipSpace(tag, i)

	// A leading '/' marks a closing tag; both kinds are accepted.
	if i < len(tag) && tag[i] == '/' {
		i++
	}

	i = skipSpace(tag, i)
	j := 0
	// Compare the tag name byte-by-byte, lowercasing the input side.
	// NOTE(review): tagname is assumed to be already lowercase.
	for ; i < len(tag); i, j = i+1, j+1 {
		if j >= len(tagname) {
			break
		}

		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
			return false, -1
		}
	}

	if i == len(tag) {
		return false, -1
	}

	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
	// NOTE(review): skipUntilCharIgnoreQuotes returns start (== i) when no
	// unquoted '>' exists, so this condition is always true and the final
	// "return false, -1" is unreachable. Kept as-is to match upstream
	// blackfriday behavior.
	if rightAngle >= i {
		return true, rightAngle
	}

	return false, -1
}
|
|
||||||
|
|
||||||
func skipSpace(tag []byte, i int) int {
|
|
||||||
for i < len(tag) && isspace(tag[i]) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
// isRelativeLink reports whether link is a relative destination: a fragment
// ("#..."), an absolute path ("/..." but not the protocol-relative "//..."),
// the bare root "/", or a current/parent-directory path ("./", "../").
func isRelativeLink(link []byte) (yes bool) {
	// Guard against an empty destination; the unguarded index below would
	// otherwise panic with an out-of-range access.
	if len(link) == 0 {
		return false
	}

	// a tag begins with '#'
	if link[0] == '#' {
		return true
	}

	// link begins with '/' but not '//'; the latter may be a protocol-relative link
	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
		return true
	}

	// only the root '/'
	if len(link) == 1 && link[0] == '/' {
		return true
	}

	// current directory : begin with "./"
	if bytes.HasPrefix(link, []byte("./")) {
		return true
	}

	// parent directory : begin with "../"
	if bytes.HasPrefix(link, []byte("../")) {
		return true
	}

	return false
}
|
|
||||||
|
|
||||||
// ensureUniqueHeadingID returns a heading ID that has not yet been used in
// this render, derived from id by appending "-N" suffixes, and records the
// result in r.headingIDs so later collisions are detected.
func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
	// Each map entry holds the count of IDs already derived from that base,
	// so the next candidate is "<id>-<count+1>".
	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
		tmp := fmt.Sprintf("%s-%d", id, count+1)

		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
			r.headingIDs[id] = count + 1
			id = tmp
		} else {
			// The candidate itself is taken (e.g. an explicit "foo-1"
			// heading exists); retry from "<id>-1" on the next iteration.
			id = id + "-1"
		}
	}

	// Register the final ID so future headings can't reuse it.
	if _, found := r.headingIDs[id]; !found {
		r.headingIDs[id] = 0
	}

	return id
}
|
|
||||||
|
|
||||||
func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
|
|
||||||
if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
|
|
||||||
newDest := r.AbsolutePrefix
|
|
||||||
if link[0] != '/' {
|
|
||||||
newDest += "/"
|
|
||||||
}
|
|
||||||
newDest += string(link)
|
|
||||||
return []byte(newDest)
|
|
||||||
}
|
|
||||||
return link
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
|
|
||||||
if isRelativeLink(link) {
|
|
||||||
return attrs
|
|
||||||
}
|
|
||||||
val := []string{}
|
|
||||||
if flags&NofollowLinks != 0 {
|
|
||||||
val = append(val, "nofollow")
|
|
||||||
}
|
|
||||||
if flags&NoreferrerLinks != 0 {
|
|
||||||
val = append(val, "noreferrer")
|
|
||||||
}
|
|
||||||
if flags&NoopenerLinks != 0 {
|
|
||||||
val = append(val, "noopener")
|
|
||||||
}
|
|
||||||
if flags&HrefTargetBlank != 0 {
|
|
||||||
attrs = append(attrs, "target=\"_blank\"")
|
|
||||||
}
|
|
||||||
if len(val) == 0 {
|
|
||||||
return attrs
|
|
||||||
}
|
|
||||||
attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
|
|
||||||
return append(attrs, attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// isMailto reports whether the link destination uses the mailto: scheme.
func isMailto(link []byte) bool {
	scheme := []byte("mailto:")
	return bytes.HasPrefix(link, scheme)
}
|
|
||||||
|
|
||||||
func needSkipLink(flags HTMLFlags, dest []byte) bool {
|
|
||||||
if flags&SkipLinks != 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isSmartypantable(node *Node) bool {
|
|
||||||
pt := node.Parent.Type
|
|
||||||
return pt != Link && pt != CodeBlock && pt != Code
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendLanguageAttr appends a `class="language-X"` attribute derived from
// the first whitespace-delimited word of a fenced code block's info string.
// An empty info string leaves attrs unchanged.
func appendLanguageAttr(attrs []string, info []byte) []string {
	if len(info) == 0 {
		return attrs
	}
	lang := info
	if cut := bytes.IndexAny(info, "\t "); cut >= 0 {
		lang = info[:cut]
	}
	return append(attrs, fmt.Sprintf("class=\"language-%s\"", lang))
}
|
|
||||||
|
|
||||||
func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
|
|
||||||
w.Write(name)
|
|
||||||
if len(attrs) > 0 {
|
|
||||||
w.Write(spaceBytes)
|
|
||||||
w.Write([]byte(strings.Join(attrs, " ")))
|
|
||||||
}
|
|
||||||
w.Write(gtBytes)
|
|
||||||
r.lastOutputLen = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func footnoteRef(prefix string, node *Node) []byte {
|
|
||||||
urlFrag := prefix + string(slugify(node.Destination))
|
|
||||||
anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
|
|
||||||
return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
|
|
||||||
}
|
|
||||||
|
|
||||||
// footnoteItem builds the opening <li> element for a footnote definition,
// identified as "fn:<prefix><slug>".
func footnoteItem(prefix string, slug []byte) []byte {
	id := prefix + string(slug)
	return []byte(fmt.Sprintf(`<li id="fn:%s">`, id))
}
|
|
||||||
|
|
||||||
// footnoteReturnLink builds the backlink from a footnote body to its
// in-text reference; returnLink supplies the (already-HTML) link text.
func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
	return []byte(fmt.Sprintf(format, prefix, string(slug), returnLink))
}
|
|
||||||
|
|
||||||
func itemOpenCR(node *Node) bool {
|
|
||||||
if node.Prev == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
ld := node.Parent.ListData
|
|
||||||
return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func skipParagraphTags(node *Node) bool {
|
|
||||||
grandparent := node.Parent.Parent
|
|
||||||
if grandparent == nil || grandparent.Type != List {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
|
|
||||||
return grandparent.Type == List && tightOrTerm
|
|
||||||
}
|
|
||||||
|
|
||||||
func cellAlignment(align CellAlignFlags) string {
|
|
||||||
switch align {
|
|
||||||
case TableAlignmentLeft:
|
|
||||||
return "left"
|
|
||||||
case TableAlignmentRight:
|
|
||||||
return "right"
|
|
||||||
case TableAlignmentCenter:
|
|
||||||
return "center"
|
|
||||||
default:
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *HTMLRenderer) out(w io.Writer, text []byte) {
|
|
||||||
if r.disableTags > 0 {
|
|
||||||
w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
|
|
||||||
} else {
|
|
||||||
w.Write(text)
|
|
||||||
}
|
|
||||||
r.lastOutputLen = len(text)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *HTMLRenderer) cr(w io.Writer) {
|
|
||||||
if r.lastOutputLen > 0 {
|
|
||||||
r.out(w, nlBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Single-byte write buffers reused by out/cr/tag to avoid a per-call
// []byte allocation.
var (
	nlBytes    = []byte{'\n'}
	gtBytes    = []byte{'>'}
	spaceBytes = []byte{' '}
)
|
|
||||||
|
|
||||||
// Pre-built tag fragments used by RenderNode. Entries without a closing '>'
// (aTag, tdTag, thTag, h1Tag..h6Tag) are deliberately left open so tag() can
// append attributes before emitting the '>'.
var (
	brTag              = []byte("<br>")
	brXHTMLTag         = []byte("<br />")
	emTag              = []byte("<em>")
	emCloseTag         = []byte("</em>")
	strongTag          = []byte("<strong>")
	strongCloseTag     = []byte("</strong>")
	delTag             = []byte("<del>")
	delCloseTag        = []byte("</del>")
	ttTag              = []byte("<tt>")
	ttCloseTag         = []byte("</tt>")
	aTag               = []byte("<a")
	aCloseTag          = []byte("</a>")
	preTag             = []byte("<pre>")
	preCloseTag        = []byte("</pre>")
	codeTag            = []byte("<code>")
	codeCloseTag       = []byte("</code>")
	pTag               = []byte("<p>")
	pCloseTag          = []byte("</p>")
	blockquoteTag      = []byte("<blockquote>")
	blockquoteCloseTag = []byte("</blockquote>")
	hrTag              = []byte("<hr>")
	hrXHTMLTag         = []byte("<hr />")
	ulTag              = []byte("<ul>")
	ulCloseTag         = []byte("</ul>")
	olTag              = []byte("<ol>")
	olCloseTag         = []byte("</ol>")
	dlTag              = []byte("<dl>")
	dlCloseTag         = []byte("</dl>")
	liTag              = []byte("<li>")
	liCloseTag         = []byte("</li>")
	ddTag              = []byte("<dd>")
	ddCloseTag         = []byte("</dd>")
	dtTag              = []byte("<dt>")
	dtCloseTag         = []byte("</dt>")
	tableTag           = []byte("<table>")
	tableCloseTag      = []byte("</table>")
	tdTag              = []byte("<td")
	tdCloseTag         = []byte("</td>")
	thTag              = []byte("<th")
	thCloseTag         = []byte("</th>")
	theadTag           = []byte("<thead>")
	theadCloseTag      = []byte("</thead>")
	tbodyTag           = []byte("<tbody>")
	tbodyCloseTag      = []byte("</tbody>")
	trTag              = []byte("<tr>")
	trCloseTag         = []byte("</tr>")
	h1Tag              = []byte("<h1")
	h1CloseTag         = []byte("</h1>")
	h2Tag              = []byte("<h2")
	h2CloseTag         = []byte("</h2>")
	h3Tag              = []byte("<h3")
	h3CloseTag         = []byte("</h3>")
	h4Tag              = []byte("<h4")
	h4CloseTag         = []byte("</h4>")
	h5Tag              = []byte("<h5")
	h5CloseTag         = []byte("</h5>")
	h6Tag              = []byte("<h6")
	h6CloseTag         = []byte("</h6>")

	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
	footnotesCloseDivBytes = []byte("\n</div>\n")
)
|
|
||||||
|
|
||||||
func headingTagsFromLevel(level int) ([]byte, []byte) {
|
|
||||||
if level <= 1 {
|
|
||||||
return h1Tag, h1CloseTag
|
|
||||||
}
|
|
||||||
switch level {
|
|
||||||
case 2:
|
|
||||||
return h2Tag, h2CloseTag
|
|
||||||
case 3:
|
|
||||||
return h3Tag, h3CloseTag
|
|
||||||
case 4:
|
|
||||||
return h4Tag, h4CloseTag
|
|
||||||
case 5:
|
|
||||||
return h5Tag, h5CloseTag
|
|
||||||
}
|
|
||||||
return h6Tag, h6CloseTag
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *HTMLRenderer) outHRTag(w io.Writer) {
|
|
||||||
if r.Flags&UseXHTML == 0 {
|
|
||||||
r.out(w, hrTag)
|
|
||||||
} else {
|
|
||||||
r.out(w, hrXHTMLTag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenderNode is a default renderer of a single node of a syntax tree. For
// block nodes it will be called twice: first time with entering=true, second
// time with entering=false, so that it could know when it's working on an open
// tag and when on close. It writes the result to w.
//
// The return value is a way to tell the calling walker to adjust its walk
// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
// can ask the walker to skip a subtree of this node by returning SkipChildren.
// The typical behavior is to return GoToNext, which asks for the usual
// traversal to the next node.
func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
	attrs := []string{}
	switch node.Type {
	case Text:
		if r.Flags&Smartypants != 0 {
			// Escape first, then apply smart punctuation to the escaped text.
			var tmp bytes.Buffer
			escapeHTML(&tmp, node.Literal)
			r.sr.Process(w, tmp.Bytes())
		} else {
			if node.Parent.Type == Link {
				escLink(w, node.Literal)
			} else {
				escapeHTML(w, node.Literal)
			}
		}
	case Softbreak:
		r.cr(w)
		// TODO: make it configurable via out(renderer.softbreak)
	case Hardbreak:
		if r.Flags&UseXHTML == 0 {
			r.out(w, brTag)
		} else {
			r.out(w, brXHTMLTag)
		}
		r.cr(w)
	case Emph:
		if entering {
			r.out(w, emTag)
		} else {
			r.out(w, emCloseTag)
		}
	case Strong:
		if entering {
			r.out(w, strongTag)
		} else {
			r.out(w, strongCloseTag)
		}
	case Del:
		if entering {
			r.out(w, delTag)
		} else {
			r.out(w, delCloseTag)
		}
	case HTMLSpan:
		if r.Flags&SkipHTML != 0 {
			break
		}
		// Inline raw HTML is passed through unescaped.
		r.out(w, node.Literal)
	case Link:
		// mark it but don't link it if it is not a safe link: no smartypants
		dest := node.LinkData.Destination
		if needSkipLink(r.Flags, dest) {
			// Skipped links render their text inside <tt> instead of <a>.
			if entering {
				r.out(w, ttTag)
			} else {
				r.out(w, ttCloseTag)
			}
		} else {
			if entering {
				dest = r.addAbsPrefix(dest)
				var hrefBuf bytes.Buffer
				hrefBuf.WriteString("href=\"")
				escLink(&hrefBuf, dest)
				hrefBuf.WriteByte('"')
				attrs = append(attrs, hrefBuf.String())
				// Footnote references get dedicated markup; children are
				// not rendered as a normal anchor.
				if node.NoteID != 0 {
					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
					break
				}
				attrs = appendLinkAttrs(attrs, r.Flags, dest)
				if len(node.LinkData.Title) > 0 {
					var titleBuff bytes.Buffer
					titleBuff.WriteString("title=\"")
					escapeHTML(&titleBuff, node.LinkData.Title)
					titleBuff.WriteByte('"')
					attrs = append(attrs, titleBuff.String())
				}
				r.tag(w, aTag, attrs)
			} else {
				if node.NoteID != 0 {
					break
				}
				r.out(w, aCloseTag)
			}
		}
	case Image:
		if r.Flags&SkipImages != 0 {
			return SkipChildren
		}
		if entering {
			dest := node.LinkData.Destination
			dest = r.addAbsPrefix(dest)
			if r.disableTags == 0 {
				//if options.safe && potentiallyUnsafe(dest) {
				//out(w, `<img src="" alt="`)
				//} else {
				r.out(w, []byte(`<img src="`))
				escLink(w, dest)
				r.out(w, []byte(`" alt="`))
				//}
			}
			// While > 0, out() strips tags so the alt text stays plain.
			r.disableTags++
		} else {
			r.disableTags--
			if r.disableTags == 0 {
				if node.LinkData.Title != nil {
					r.out(w, []byte(`" title="`))
					escapeHTML(w, node.LinkData.Title)
				}
				r.out(w, []byte(`" />`))
			}
		}
	case Code:
		r.out(w, codeTag)
		escapeHTML(w, node.Literal)
		r.out(w, codeCloseTag)
	case Document:
		break
	case Paragraph:
		if skipParagraphTags(node) {
			break
		}
		if entering {
			// TODO: untangle this clusterfuck about when the newlines need
			// to be added and when not.
			if node.Prev != nil {
				switch node.Prev.Type {
				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
					r.cr(w)
				}
			}
			if node.Parent.Type == BlockQuote && node.Prev == nil {
				r.cr(w)
			}
			r.out(w, pTag)
		} else {
			r.out(w, pCloseTag)
			// The last paragraph of a list item omits the trailing newline
			// so </li> hugs it.
			if !(node.Parent.Type == Item && node.Next == nil) {
				r.cr(w)
			}
		}
	case BlockQuote:
		if entering {
			r.cr(w)
			r.out(w, blockquoteTag)
		} else {
			r.out(w, blockquoteCloseTag)
			r.cr(w)
		}
	case HTMLBlock:
		if r.Flags&SkipHTML != 0 {
			break
		}
		r.cr(w)
		r.out(w, node.Literal)
		r.cr(w)
	case Heading:
		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
		openTag, closeTag := headingTagsFromLevel(headingLevel)
		if entering {
			if node.IsTitleblock {
				attrs = append(attrs, `class="title"`)
			}
			if node.HeadingID != "" {
				// De-duplicate first, then decorate with the configured
				// prefix/suffix.
				id := r.ensureUniqueHeadingID(node.HeadingID)
				if r.HeadingIDPrefix != "" {
					id = r.HeadingIDPrefix + id
				}
				if r.HeadingIDSuffix != "" {
					id = id + r.HeadingIDSuffix
				}
				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
			}
			r.cr(w)
			r.tag(w, openTag, attrs)
		} else {
			r.out(w, closeTag)
			if !(node.Parent.Type == Item && node.Next == nil) {
				r.cr(w)
			}
		}
	case HorizontalRule:
		r.cr(w)
		r.outHRTag(w)
		r.cr(w)
	case List:
		openTag := ulTag
		closeTag := ulCloseTag
		if node.ListFlags&ListTypeOrdered != 0 {
			openTag = olTag
			closeTag = olCloseTag
		}
		if node.ListFlags&ListTypeDefinition != 0 {
			openTag = dlTag
			closeTag = dlCloseTag
		}
		if entering {
			if node.IsFootnotesList {
				r.out(w, footnotesDivBytes)
				r.outHRTag(w)
				r.cr(w)
			}
			r.cr(w)
			if node.Parent.Type == Item && node.Parent.Parent.Tight {
				r.cr(w)
			}
			// Strip the trailing '>' so tag() can append attributes.
			r.tag(w, openTag[:len(openTag)-1], attrs)
			r.cr(w)
		} else {
			r.out(w, closeTag)
			//cr(w)
			//if node.parent.Type != Item {
			//	cr(w)
			//}
			if node.Parent.Type == Item && node.Next != nil {
				r.cr(w)
			}
			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
				r.cr(w)
			}
			if node.IsFootnotesList {
				r.out(w, footnotesCloseDivBytes)
			}
		}
	case Item:
		openTag := liTag
		closeTag := liCloseTag
		if node.ListFlags&ListTypeDefinition != 0 {
			openTag = ddTag
			closeTag = ddCloseTag
		}
		if node.ListFlags&ListTypeTerm != 0 {
			openTag = dtTag
			closeTag = dtCloseTag
		}
		if entering {
			if itemOpenCR(node) {
				r.cr(w)
			}
			// A RefLink marks this item as a footnote definition.
			if node.ListData.RefLink != nil {
				slug := slugify(node.ListData.RefLink)
				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
				break
			}
			r.out(w, openTag)
		} else {
			if node.ListData.RefLink != nil {
				slug := slugify(node.ListData.RefLink)
				if r.Flags&FootnoteReturnLinks != 0 {
					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
				}
			}
			r.out(w, closeTag)
			r.cr(w)
		}
	case CodeBlock:
		attrs = appendLanguageAttr(attrs, node.Info)
		r.cr(w)
		r.out(w, preTag)
		r.tag(w, codeTag[:len(codeTag)-1], attrs)
		escapeHTML(w, node.Literal)
		r.out(w, codeCloseTag)
		r.out(w, preCloseTag)
		if node.Parent.Type != Item {
			r.cr(w)
		}
	case Table:
		if entering {
			r.cr(w)
			r.out(w, tableTag)
		} else {
			r.out(w, tableCloseTag)
			r.cr(w)
		}
	case TableCell:
		openTag := tdTag
		closeTag := tdCloseTag
		if node.IsHeader {
			openTag = thTag
			closeTag = thCloseTag
		}
		if entering {
			align := cellAlignment(node.Align)
			if align != "" {
				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
			}
			if node.Prev == nil {
				r.cr(w)
			}
			r.tag(w, openTag, attrs)
		} else {
			r.out(w, closeTag)
			r.cr(w)
		}
	case TableHead:
		if entering {
			r.cr(w)
			r.out(w, theadTag)
		} else {
			r.out(w, theadCloseTag)
			r.cr(w)
		}
	case TableBody:
		if entering {
			r.cr(w)
			r.out(w, tbodyTag)
			// XXX: this is to adhere to a rather silly test. Should fix test.
			if node.FirstChild == nil {
				r.cr(w)
			}
		} else {
			r.out(w, tbodyCloseTag)
			r.cr(w)
		}
	case TableRow:
		if entering {
			r.cr(w)
			r.out(w, trTag)
		} else {
			r.out(w, trCloseTag)
			r.cr(w)
		}
	default:
		panic("Unknown node type " + node.Type.String())
	}
	return GoToNext
}
|
|
||||||
|
|
||||||
// RenderHeader writes HTML document preamble and TOC if requested.
|
|
||||||
func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
|
|
||||||
r.writeDocumentHeader(w)
|
|
||||||
if r.Flags&TOC != 0 {
|
|
||||||
r.writeTOC(w, ast)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenderFooter writes HTML document footer.
|
|
||||||
func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
|
|
||||||
if r.Flags&CompletePage == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
io.WriteString(w, "\n</body>\n</html>\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeDocumentHeader emits the doctype, <html>, and <head> sections when
// CompletePage output is enabled; otherwise it writes nothing. The title,
// CSS link, and icon link come from HTMLRendererParameters.
func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
	if r.Flags&CompletePage == 0 {
		return
	}
	// ending becomes " /" in XHTML mode so void elements self-close.
	ending := ""
	if r.Flags&UseXHTML != 0 {
		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
		ending = " /"
	} else {
		io.WriteString(w, "<!DOCTYPE html>\n")
		io.WriteString(w, "<html>\n")
	}
	io.WriteString(w, "<head>\n")
	io.WriteString(w, "  <title>")
	if r.Flags&Smartypants != 0 {
		r.sr.Process(w, []byte(r.Title))
	} else {
		escapeHTML(w, []byte(r.Title))
	}
	io.WriteString(w, "</title>\n")
	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
	io.WriteString(w, Version)
	io.WriteString(w, "\"")
	io.WriteString(w, ending)
	io.WriteString(w, ">\n")
	io.WriteString(w, "  <meta charset=\"utf-8\"")
	io.WriteString(w, ending)
	io.WriteString(w, ">\n")
	if r.CSS != "" {
		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
		escapeHTML(w, []byte(r.CSS))
		io.WriteString(w, "\"")
		io.WriteString(w, ending)
		io.WriteString(w, ">\n")
	}
	if r.Icon != "" {
		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
		escapeHTML(w, []byte(r.Icon))
		io.WriteString(w, "\"")
		io.WriteString(w, ending)
		io.WriteString(w, ">\n")
	}
	io.WriteString(w, "</head>\n")
	io.WriteString(w, "<body>\n\n")
}
|
|
||||||
|
|
||||||
// writeTOC walks ast, assigns each non-titleblock heading a synthetic
// "toc_N" ID, and writes a nested <nav><ul> table of contents linking to
// those IDs. Heading text is rendered through RenderNode into the TOC.
// Note: it mutates node.HeadingID as a side effect, so the document body
// rendered afterwards uses the same anchors.
func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
	buf := bytes.Buffer{}

	inHeading := false
	tocLevel := 0
	headingCount := 0

	ast.Walk(func(node *Node, entering bool) WalkStatus {
		if node.Type == Heading && !node.HeadingData.IsTitleblock {
			inHeading = entering
			if entering {
				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
				// Open/close <ul>/<li> nesting to match the change in
				// heading level relative to the previous entry.
				if node.Level == tocLevel {
					buf.WriteString("</li>\n\n<li>")
				} else if node.Level < tocLevel {
					for node.Level < tocLevel {
						tocLevel--
						buf.WriteString("</li>\n</ul>")
					}
					buf.WriteString("</li>\n\n<li>")
				} else {
					for node.Level > tocLevel {
						tocLevel++
						buf.WriteString("\n<ul>\n<li>")
					}
				}

				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
				headingCount++
			} else {
				buf.WriteString("</a>")
			}
			return GoToNext
		}

		// Render the heading's inline children (its visible text) into the
		// TOC entry; everything outside headings is skipped.
		if inHeading {
			return r.RenderNode(&buf, node, entering)
		}

		return GoToNext
	})

	// Close any lists still open after the final heading.
	for ; tocLevel > 0; tocLevel-- {
		buf.WriteString("</li>\n</ul>")
	}

	if buf.Len() > 0 {
		io.WriteString(w, "<nav>\n")
		w.Write(buf.Bytes())
		io.WriteString(w, "\n\n</nav>\n")
	}
	r.lastOutputLen = buf.Len()
}
|
|
1228
vendor/github.com/russross/blackfriday/v2/inline.go
generated
vendored
1228
vendor/github.com/russross/blackfriday/v2/inline.go
generated
vendored
File diff suppressed because it is too large
Load diff
950
vendor/github.com/russross/blackfriday/v2/markdown.go
generated
vendored
950
vendor/github.com/russross/blackfriday/v2/markdown.go
generated
vendored
|
@ -1,950 +0,0 @@
|
||||||
// Blackfriday Markdown Processor
|
|
||||||
// Available at http://github.com/russross/blackfriday
|
|
||||||
//
|
|
||||||
// Copyright © 2011 Russ Ross <russ@russross.com>.
|
|
||||||
// Distributed under the Simplified BSD License.
|
|
||||||
// See README.md for details.
|
|
||||||
|
|
||||||
package blackfriday
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
//
// Markdown parsing and processing
//

// Version string of the package. Appears in the rendered document when
// CompletePage flag is on (written into the GENERATOR meta tag by the HTML
// renderer's writeDocumentHeader).
const Version = "2.0"

// Extensions is a bitwise or'ed collection of enabled Blackfriday's
// extensions.
type Extensions int
|
|
||||||
|
|
||||||
// These are the supported markdown parsing extensions.
|
|
||||||
// OR these values together to select multiple extensions.
|
|
||||||
const (
|
|
||||||
NoExtensions Extensions = 0
|
|
||||||
NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
|
|
||||||
Tables // Render tables
|
|
||||||
FencedCode // Render fenced code blocks
|
|
||||||
Autolink // Detect embedded URLs that are not explicitly marked
|
|
||||||
Strikethrough // Strikethrough text using ~~test~~
|
|
||||||
LaxHTMLBlocks // Loosen up HTML block parsing rules
|
|
||||||
SpaceHeadings // Be strict about prefix heading rules
|
|
||||||
HardLineBreak // Translate newlines into line breaks
|
|
||||||
TabSizeEight // Expand tabs to eight spaces instead of four
|
|
||||||
Footnotes // Pandoc-style footnotes
|
|
||||||
NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
|
|
||||||
HeadingIDs // specify heading IDs with {#id}
|
|
||||||
Titleblock // Titleblock ala pandoc
|
|
||||||
AutoHeadingIDs // Create the heading ID from the text
|
|
||||||
BackslashLineBreak // Translate trailing backslashes into line breaks
|
|
||||||
DefinitionLists // Render definition lists
|
|
||||||
|
|
||||||
CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants |
|
|
||||||
SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
|
|
||||||
|
|
||||||
CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
|
|
||||||
Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
|
|
||||||
BackslashLineBreak | DefinitionLists
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListType contains bitwise or'ed flags for list and list item objects.
|
|
||||||
type ListType int
|
|
||||||
|
|
||||||
// These are the possible flag values for the ListItem renderer.
|
|
||||||
// Multiple flag values may be ORed together.
|
|
||||||
// These are mostly of interest if you are writing a new output format.
|
|
||||||
const (
|
|
||||||
ListTypeOrdered ListType = 1 << iota
|
|
||||||
ListTypeDefinition
|
|
||||||
ListTypeTerm
|
|
||||||
|
|
||||||
ListItemContainsBlock
|
|
||||||
ListItemBeginningOfList // TODO: figure out if this is of any use now
|
|
||||||
ListItemEndOfList
|
|
||||||
)
|
|
||||||
|
|
||||||
// CellAlignFlags holds a type of alignment in a table cell.
|
|
||||||
type CellAlignFlags int
|
|
||||||
|
|
||||||
// These are the possible flag values for the table cell renderer.
|
|
||||||
// Only a single one of these values will be used; they are not ORed together.
|
|
||||||
// These are mostly of interest if you are writing a new output format.
|
|
||||||
const (
|
|
||||||
TableAlignmentLeft CellAlignFlags = 1 << iota
|
|
||||||
TableAlignmentRight
|
|
||||||
TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
|
|
||||||
)
|
|
||||||
|
|
||||||
// The size of a tab stop.
|
|
||||||
const (
|
|
||||||
TabSizeDefault = 4
|
|
||||||
TabSizeDouble = 8
|
|
||||||
)
|
|
||||||
|
|
||||||
// blockTags is a set of tags that are recognized as HTML block tags.
|
|
||||||
// Any of these can be included in markdown text without special escaping.
|
|
||||||
var blockTags = map[string]struct{}{
|
|
||||||
"blockquote": {},
|
|
||||||
"del": {},
|
|
||||||
"div": {},
|
|
||||||
"dl": {},
|
|
||||||
"fieldset": {},
|
|
||||||
"form": {},
|
|
||||||
"h1": {},
|
|
||||||
"h2": {},
|
|
||||||
"h3": {},
|
|
||||||
"h4": {},
|
|
||||||
"h5": {},
|
|
||||||
"h6": {},
|
|
||||||
"iframe": {},
|
|
||||||
"ins": {},
|
|
||||||
"math": {},
|
|
||||||
"noscript": {},
|
|
||||||
"ol": {},
|
|
||||||
"pre": {},
|
|
||||||
"p": {},
|
|
||||||
"script": {},
|
|
||||||
"style": {},
|
|
||||||
"table": {},
|
|
||||||
"ul": {},
|
|
||||||
|
|
||||||
// HTML5
|
|
||||||
"address": {},
|
|
||||||
"article": {},
|
|
||||||
"aside": {},
|
|
||||||
"canvas": {},
|
|
||||||
"figcaption": {},
|
|
||||||
"figure": {},
|
|
||||||
"footer": {},
|
|
||||||
"header": {},
|
|
||||||
"hgroup": {},
|
|
||||||
"main": {},
|
|
||||||
"nav": {},
|
|
||||||
"output": {},
|
|
||||||
"progress": {},
|
|
||||||
"section": {},
|
|
||||||
"video": {},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Renderer is the rendering interface. This is mostly of interest if you are
|
|
||||||
// implementing a new rendering format.
|
|
||||||
//
|
|
||||||
// Only an HTML implementation is provided in this repository, see the README
|
|
||||||
// for external implementations.
|
|
||||||
type Renderer interface {
|
|
||||||
// RenderNode is the main rendering method. It will be called once for
|
|
||||||
// every leaf node and twice for every non-leaf node (first with
|
|
||||||
// entering=true, then with entering=false). The method should write its
|
|
||||||
// rendition of the node to the supplied writer w.
|
|
||||||
RenderNode(w io.Writer, node *Node, entering bool) WalkStatus
|
|
||||||
|
|
||||||
// RenderHeader is a method that allows the renderer to produce some
|
|
||||||
// content preceding the main body of the output document. The header is
|
|
||||||
// understood in the broad sense here. For example, the default HTML
|
|
||||||
// renderer will write not only the HTML document preamble, but also the
|
|
||||||
// table of contents if it was requested.
|
|
||||||
//
|
|
||||||
// The method will be passed an entire document tree, in case a particular
|
|
||||||
// implementation needs to inspect it to produce output.
|
|
||||||
//
|
|
||||||
// The output should be written to the supplied writer w. If your
|
|
||||||
// implementation has no header to write, supply an empty implementation.
|
|
||||||
RenderHeader(w io.Writer, ast *Node)
|
|
||||||
|
|
||||||
// RenderFooter is a symmetric counterpart of RenderHeader.
|
|
||||||
RenderFooter(w io.Writer, ast *Node)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Callback functions for inline parsing. One such function is defined
|
|
||||||
// for each character that triggers a response when parsing inline data.
|
|
||||||
type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node)
|
|
||||||
|
|
||||||
// Markdown is a type that holds extensions and the runtime state used by
|
|
||||||
// Parse, and the renderer. You can not use it directly, construct it with New.
|
|
||||||
type Markdown struct {
|
|
||||||
renderer Renderer
|
|
||||||
referenceOverride ReferenceOverrideFunc
|
|
||||||
refs map[string]*reference
|
|
||||||
inlineCallback [256]inlineParser
|
|
||||||
extensions Extensions
|
|
||||||
nesting int
|
|
||||||
maxNesting int
|
|
||||||
insideLink bool
|
|
||||||
|
|
||||||
// Footnotes need to be ordered as well as available to quickly check for
|
|
||||||
// presence. If a ref is also a footnote, it's stored both in refs and here
|
|
||||||
// in notes. Slice is nil if footnotes not enabled.
|
|
||||||
notes []*reference
|
|
||||||
|
|
||||||
doc *Node
|
|
||||||
tip *Node // = doc
|
|
||||||
oldTip *Node
|
|
||||||
lastMatchedContainer *Node // = doc
|
|
||||||
allClosed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Markdown) getRef(refid string) (ref *reference, found bool) {
|
|
||||||
if p.referenceOverride != nil {
|
|
||||||
r, overridden := p.referenceOverride(refid)
|
|
||||||
if overridden {
|
|
||||||
if r == nil {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
return &reference{
|
|
||||||
link: []byte(r.Link),
|
|
||||||
title: []byte(r.Title),
|
|
||||||
noteID: 0,
|
|
||||||
hasBlock: false,
|
|
||||||
text: []byte(r.Text)}, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// refs are case insensitive
|
|
||||||
ref, found = p.refs[strings.ToLower(refid)]
|
|
||||||
return ref, found
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Markdown) finalize(block *Node) {
|
|
||||||
above := block.Parent
|
|
||||||
block.open = false
|
|
||||||
p.tip = above
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Markdown) addChild(node NodeType, offset uint32) *Node {
|
|
||||||
return p.addExistingChild(NewNode(node), offset)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node {
|
|
||||||
for !p.tip.canContain(node.Type) {
|
|
||||||
p.finalize(p.tip)
|
|
||||||
}
|
|
||||||
p.tip.AppendChild(node)
|
|
||||||
p.tip = node
|
|
||||||
return node
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Markdown) closeUnmatchedBlocks() {
|
|
||||||
if !p.allClosed {
|
|
||||||
for p.oldTip != p.lastMatchedContainer {
|
|
||||||
parent := p.oldTip.Parent
|
|
||||||
p.finalize(p.oldTip)
|
|
||||||
p.oldTip = parent
|
|
||||||
}
|
|
||||||
p.allClosed = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Public interface
|
|
||||||
//
|
|
||||||
//
|
|
||||||
|
|
||||||
// Reference represents the details of a link.
|
|
||||||
// See the documentation in Options for more details on use-case.
|
|
||||||
type Reference struct {
|
|
||||||
// Link is usually the URL the reference points to.
|
|
||||||
Link string
|
|
||||||
// Title is the alternate text describing the link in more detail.
|
|
||||||
Title string
|
|
||||||
// Text is the optional text to override the ref with if the syntax used was
|
|
||||||
// [refid][]
|
|
||||||
Text string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReferenceOverrideFunc is expected to be called with a reference string and
|
|
||||||
// return either a valid Reference type that the reference string maps to or
|
|
||||||
// nil. If overridden is false, the default reference logic will be executed.
|
|
||||||
// See the documentation in Options for more details on use-case.
|
|
||||||
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
|
|
||||||
|
|
||||||
// New constructs a Markdown processor. You can use the same With* functions as
|
|
||||||
// for Run() to customize parser's behavior and the renderer.
|
|
||||||
func New(opts ...Option) *Markdown {
|
|
||||||
var p Markdown
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(&p)
|
|
||||||
}
|
|
||||||
p.refs = make(map[string]*reference)
|
|
||||||
p.maxNesting = 16
|
|
||||||
p.insideLink = false
|
|
||||||
docNode := NewNode(Document)
|
|
||||||
p.doc = docNode
|
|
||||||
p.tip = docNode
|
|
||||||
p.oldTip = docNode
|
|
||||||
p.lastMatchedContainer = docNode
|
|
||||||
p.allClosed = true
|
|
||||||
// register inline parsers
|
|
||||||
p.inlineCallback[' '] = maybeLineBreak
|
|
||||||
p.inlineCallback['*'] = emphasis
|
|
||||||
p.inlineCallback['_'] = emphasis
|
|
||||||
if p.extensions&Strikethrough != 0 {
|
|
||||||
p.inlineCallback['~'] = emphasis
|
|
||||||
}
|
|
||||||
p.inlineCallback['`'] = codeSpan
|
|
||||||
p.inlineCallback['\n'] = lineBreak
|
|
||||||
p.inlineCallback['['] = link
|
|
||||||
p.inlineCallback['<'] = leftAngle
|
|
||||||
p.inlineCallback['\\'] = escape
|
|
||||||
p.inlineCallback['&'] = entity
|
|
||||||
p.inlineCallback['!'] = maybeImage
|
|
||||||
p.inlineCallback['^'] = maybeInlineFootnote
|
|
||||||
if p.extensions&Autolink != 0 {
|
|
||||||
p.inlineCallback['h'] = maybeAutoLink
|
|
||||||
p.inlineCallback['m'] = maybeAutoLink
|
|
||||||
p.inlineCallback['f'] = maybeAutoLink
|
|
||||||
p.inlineCallback['H'] = maybeAutoLink
|
|
||||||
p.inlineCallback['M'] = maybeAutoLink
|
|
||||||
p.inlineCallback['F'] = maybeAutoLink
|
|
||||||
}
|
|
||||||
if p.extensions&Footnotes != 0 {
|
|
||||||
p.notes = make([]*reference, 0)
|
|
||||||
}
|
|
||||||
return &p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Option customizes the Markdown processor's default behavior.
|
|
||||||
type Option func(*Markdown)
|
|
||||||
|
|
||||||
// WithRenderer allows you to override the default renderer.
|
|
||||||
func WithRenderer(r Renderer) Option {
|
|
||||||
return func(p *Markdown) {
|
|
||||||
p.renderer = r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithExtensions allows you to pick some of the many extensions provided by
|
|
||||||
// Blackfriday. You can bitwise OR them.
|
|
||||||
func WithExtensions(e Extensions) Option {
|
|
||||||
return func(p *Markdown) {
|
|
||||||
p.extensions = e
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithNoExtensions turns off all extensions and custom behavior.
|
|
||||||
func WithNoExtensions() Option {
|
|
||||||
return func(p *Markdown) {
|
|
||||||
p.extensions = NoExtensions
|
|
||||||
p.renderer = NewHTMLRenderer(HTMLRendererParameters{
|
|
||||||
Flags: HTMLFlagsNone,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRefOverride sets an optional function callback that is called every
|
|
||||||
// time a reference is resolved.
|
|
||||||
//
|
|
||||||
// In Markdown, the link reference syntax can be made to resolve a link to
|
|
||||||
// a reference instead of an inline URL, in one of the following ways:
|
|
||||||
//
|
|
||||||
// * [link text][refid]
|
|
||||||
// * [refid][]
|
|
||||||
//
|
|
||||||
// Usually, the refid is defined at the bottom of the Markdown document. If
|
|
||||||
// this override function is provided, the refid is passed to the override
|
|
||||||
// function first, before consulting the defined refids at the bottom. If
|
|
||||||
// the override function indicates an override did not occur, the refids at
|
|
||||||
// the bottom will be used to fill in the link details.
|
|
||||||
func WithRefOverride(o ReferenceOverrideFunc) Option {
|
|
||||||
return func(p *Markdown) {
|
|
||||||
p.referenceOverride = o
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run is the main entry point to Blackfriday. It parses and renders a
|
|
||||||
// block of markdown-encoded text.
|
|
||||||
//
|
|
||||||
// The simplest invocation of Run takes one argument, input:
|
|
||||||
// output := Run(input)
|
|
||||||
// This will parse the input with CommonExtensions enabled and render it with
|
|
||||||
// the default HTMLRenderer (with CommonHTMLFlags).
|
|
||||||
//
|
|
||||||
// Variadic arguments opts can customize the default behavior. Since Markdown
|
|
||||||
// type does not contain exported fields, you can not use it directly. Instead,
|
|
||||||
// use the With* functions. For example, this will call the most basic
|
|
||||||
// functionality, with no extensions:
|
|
||||||
// output := Run(input, WithNoExtensions())
|
|
||||||
//
|
|
||||||
// You can use any number of With* arguments, even contradicting ones. They
|
|
||||||
// will be applied in order of appearance and the latter will override the
|
|
||||||
// former:
|
|
||||||
// output := Run(input, WithNoExtensions(), WithExtensions(exts),
|
|
||||||
// WithRenderer(yourRenderer))
|
|
||||||
func Run(input []byte, opts ...Option) []byte {
|
|
||||||
r := NewHTMLRenderer(HTMLRendererParameters{
|
|
||||||
Flags: CommonHTMLFlags,
|
|
||||||
})
|
|
||||||
optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
|
|
||||||
optList = append(optList, opts...)
|
|
||||||
parser := New(optList...)
|
|
||||||
ast := parser.Parse(input)
|
|
||||||
var buf bytes.Buffer
|
|
||||||
parser.renderer.RenderHeader(&buf, ast)
|
|
||||||
ast.Walk(func(node *Node, entering bool) WalkStatus {
|
|
||||||
return parser.renderer.RenderNode(&buf, node, entering)
|
|
||||||
})
|
|
||||||
parser.renderer.RenderFooter(&buf, ast)
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse is an entry point to the parsing part of Blackfriday. It takes an
|
|
||||||
// input markdown document and produces a syntax tree for its contents. This
|
|
||||||
// tree can then be rendered with a default or custom renderer, or
|
|
||||||
// analyzed/transformed by the caller to whatever non-standard needs they have.
|
|
||||||
// The return value is the root node of the syntax tree.
|
|
||||||
func (p *Markdown) Parse(input []byte) *Node {
|
|
||||||
p.block(input)
|
|
||||||
// Walk the tree and finish up some of unfinished blocks
|
|
||||||
for p.tip != nil {
|
|
||||||
p.finalize(p.tip)
|
|
||||||
}
|
|
||||||
// Walk the tree again and process inline markdown in each block
|
|
||||||
p.doc.Walk(func(node *Node, entering bool) WalkStatus {
|
|
||||||
if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell {
|
|
||||||
p.inline(node, node.content)
|
|
||||||
node.content = nil
|
|
||||||
}
|
|
||||||
return GoToNext
|
|
||||||
})
|
|
||||||
p.parseRefsToAST()
|
|
||||||
return p.doc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Markdown) parseRefsToAST() {
|
|
||||||
if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.tip = p.doc
|
|
||||||
block := p.addBlock(List, nil)
|
|
||||||
block.IsFootnotesList = true
|
|
||||||
block.ListFlags = ListTypeOrdered
|
|
||||||
flags := ListItemBeginningOfList
|
|
||||||
// Note: this loop is intentionally explicit, not range-form. This is
|
|
||||||
// because the body of the loop will append nested footnotes to p.notes and
|
|
||||||
// we need to process those late additions. Range form would only walk over
|
|
||||||
// the fixed initial set.
|
|
||||||
for i := 0; i < len(p.notes); i++ {
|
|
||||||
ref := p.notes[i]
|
|
||||||
p.addExistingChild(ref.footnote, 0)
|
|
||||||
block := ref.footnote
|
|
||||||
block.ListFlags = flags | ListTypeOrdered
|
|
||||||
block.RefLink = ref.link
|
|
||||||
if ref.hasBlock {
|
|
||||||
flags |= ListItemContainsBlock
|
|
||||||
p.block(ref.title)
|
|
||||||
} else {
|
|
||||||
p.inline(block, ref.title)
|
|
||||||
}
|
|
||||||
flags &^= ListItemBeginningOfList | ListItemContainsBlock
|
|
||||||
}
|
|
||||||
above := block.Parent
|
|
||||||
finalizeList(block)
|
|
||||||
p.tip = above
|
|
||||||
block.Walk(func(node *Node, entering bool) WalkStatus {
|
|
||||||
if node.Type == Paragraph || node.Type == Heading {
|
|
||||||
p.inline(node, node.content)
|
|
||||||
node.content = nil
|
|
||||||
}
|
|
||||||
return GoToNext
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
// Link references
|
|
||||||
//
|
|
||||||
// This section implements support for references that (usually) appear
|
|
||||||
// as footnotes in a document, and can be referenced anywhere in the document.
|
|
||||||
// The basic format is:
|
|
||||||
//
|
|
||||||
// [1]: http://www.google.com/ "Google"
|
|
||||||
// [2]: http://www.github.com/ "Github"
|
|
||||||
//
|
|
||||||
// Anywhere in the document, the reference can be linked by referring to its
|
|
||||||
// label, i.e., 1 and 2 in this example, as in:
|
|
||||||
//
|
|
||||||
// This library is hosted on [Github][2], a git hosting site.
|
|
||||||
//
|
|
||||||
// Actual footnotes as specified in Pandoc and supported by some other Markdown
|
|
||||||
// libraries such as php-markdown are also taken care of. They look like this:
|
|
||||||
//
|
|
||||||
// This sentence needs a bit of further explanation.[^note]
|
|
||||||
//
|
|
||||||
// [^note]: This is the explanation.
|
|
||||||
//
|
|
||||||
// Footnotes should be placed at the end of the document in an ordered list.
|
|
||||||
// Finally, there are inline footnotes such as:
|
|
||||||
//
|
|
||||||
// Inline footnotes^[Also supported.] provide a quick inline explanation,
|
|
||||||
// but are rendered at the bottom of the document.
|
|
||||||
//
|
|
||||||
|
|
||||||
// reference holds all information necessary for a reference-style links or
|
|
||||||
// footnotes.
|
|
||||||
//
|
|
||||||
// Consider this markdown with reference-style links:
|
|
||||||
//
|
|
||||||
// [link][ref]
|
|
||||||
//
|
|
||||||
// [ref]: /url/ "tooltip title"
|
|
||||||
//
|
|
||||||
// It will be ultimately converted to this HTML:
|
|
||||||
//
|
|
||||||
// <p><a href=\"/url/\" title=\"title\">link</a></p>
|
|
||||||
//
|
|
||||||
// And a reference structure will be populated as follows:
|
|
||||||
//
|
|
||||||
// p.refs["ref"] = &reference{
|
|
||||||
// link: "/url/",
|
|
||||||
// title: "tooltip title",
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Alternatively, reference can contain information about a footnote. Consider
|
|
||||||
// this markdown:
|
|
||||||
//
|
|
||||||
// Text needing a footnote.[^a]
|
|
||||||
//
|
|
||||||
// [^a]: This is the note
|
|
||||||
//
|
|
||||||
// A reference structure will be populated as follows:
|
|
||||||
//
|
|
||||||
// p.refs["a"] = &reference{
|
|
||||||
// link: "a",
|
|
||||||
// title: "This is the note",
|
|
||||||
// noteID: <some positive int>,
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// TODO: As you can see, it begs for splitting into two dedicated structures
|
|
||||||
// for refs and for footnotes.
|
|
||||||
type reference struct {
|
|
||||||
link []byte
|
|
||||||
title []byte
|
|
||||||
noteID int // 0 if not a footnote ref
|
|
||||||
hasBlock bool
|
|
||||||
footnote *Node // a link to the Item node within a list of footnotes
|
|
||||||
|
|
||||||
text []byte // only gets populated by refOverride feature with Reference.Text
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *reference) String() string {
|
|
||||||
return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
|
|
||||||
r.link, r.title, r.text, r.noteID, r.hasBlock)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether or not data starts with a reference link.
|
|
||||||
// If so, it is parsed and stored in the list of references
|
|
||||||
// (in the render struct).
|
|
||||||
// Returns the number of bytes to skip to move past it,
|
|
||||||
// or zero if the first line is not a reference.
|
|
||||||
func isReference(p *Markdown, data []byte, tabSize int) int {
|
|
||||||
// up to 3 optional leading spaces
|
|
||||||
if len(data) < 4 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i := 0
|
|
||||||
for i < 3 && data[i] == ' ' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
noteID := 0
|
|
||||||
|
|
||||||
// id part: anything but a newline between brackets
|
|
||||||
if data[i] != '[' {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
if p.extensions&Footnotes != 0 {
|
|
||||||
if i < len(data) && data[i] == '^' {
|
|
||||||
// we can set it to anything here because the proper noteIds will
|
|
||||||
// be assigned later during the second pass. It just has to be != 0
|
|
||||||
noteID = 1
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
idOffset := i
|
|
||||||
for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i >= len(data) || data[i] != ']' {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
idEnd := i
|
|
||||||
// footnotes can have empty ID, like this: [^], but a reference can not be
|
|
||||||
// empty like this: []. Break early if it's not a footnote and there's no ID
|
|
||||||
if noteID == 0 && idOffset == idEnd {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
// spacer: colon (space | tab)* newline? (space | tab)*
|
|
||||||
i++
|
|
||||||
if i >= len(data) || data[i] != ':' {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
|
|
||||||
i++
|
|
||||||
if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i >= len(data) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
linkOffset, linkEnd int
|
|
||||||
titleOffset, titleEnd int
|
|
||||||
lineEnd int
|
|
||||||
raw []byte
|
|
||||||
hasBlock bool
|
|
||||||
)
|
|
||||||
|
|
||||||
if p.extensions&Footnotes != 0 && noteID != 0 {
|
|
||||||
linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
|
|
||||||
lineEnd = linkEnd
|
|
||||||
} else {
|
|
||||||
linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
|
|
||||||
}
|
|
||||||
if lineEnd == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// a valid ref has been found
|
|
||||||
|
|
||||||
ref := &reference{
|
|
||||||
noteID: noteID,
|
|
||||||
hasBlock: hasBlock,
|
|
||||||
}
|
|
||||||
|
|
||||||
if noteID > 0 {
|
|
||||||
// reusing the link field for the id since footnotes don't have links
|
|
||||||
ref.link = data[idOffset:idEnd]
|
|
||||||
// if footnote, it's not really a title, it's the contained text
|
|
||||||
ref.title = raw
|
|
||||||
} else {
|
|
||||||
ref.link = data[linkOffset:linkEnd]
|
|
||||||
ref.title = data[titleOffset:titleEnd]
|
|
||||||
}
|
|
||||||
|
|
||||||
// id matches are case-insensitive
|
|
||||||
id := string(bytes.ToLower(data[idOffset:idEnd]))
|
|
||||||
|
|
||||||
p.refs[id] = ref
|
|
||||||
|
|
||||||
return lineEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
|
|
||||||
// link: whitespace-free sequence, optionally between angle brackets
|
|
||||||
if data[i] == '<' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
linkOffset = i
|
|
||||||
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
linkEnd = i
|
|
||||||
if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
|
|
||||||
linkOffset++
|
|
||||||
linkEnd--
|
|
||||||
}
|
|
||||||
|
|
||||||
// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
|
|
||||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// compute end-of-line
|
|
||||||
if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
|
|
||||||
lineEnd = i
|
|
||||||
}
|
|
||||||
if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
|
|
||||||
lineEnd++
|
|
||||||
}
|
|
||||||
|
|
||||||
// optional (space|tab)* spacer after a newline
|
|
||||||
if lineEnd > 0 {
|
|
||||||
i = lineEnd + 1
|
|
||||||
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// optional title: any non-newline sequence enclosed in '"() alone on its line
|
|
||||||
if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
|
|
||||||
i++
|
|
||||||
titleOffset = i
|
|
||||||
|
|
||||||
// look for EOL
|
|
||||||
for i < len(data) && data[i] != '\n' && data[i] != '\r' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
|
|
||||||
titleEnd = i + 1
|
|
||||||
} else {
|
|
||||||
titleEnd = i
|
|
||||||
}
|
|
||||||
|
|
||||||
// step back
|
|
||||||
i--
|
|
||||||
for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
|
|
||||||
lineEnd = titleEnd
|
|
||||||
titleEnd = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// The first bit of this logic is the same as Parser.listItem, but the rest
|
|
||||||
// is much simpler. This function simply finds the entire block and shifts it
|
|
||||||
// over by one tab if it is indeed a block (just returns the line if it's not).
|
|
||||||
// blockEnd is the end of the section in the input buffer, and contents is the
|
|
||||||
// extracted text that was shifted over one tab. It will need to be rendered at
|
|
||||||
// the end of the document.
|
|
||||||
func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
|
|
||||||
if i == 0 || len(data) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// skip leading whitespace on first line
|
|
||||||
for i < len(data) && data[i] == ' ' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
blockStart = i
|
|
||||||
|
|
||||||
// find the end of the line
|
|
||||||
blockEnd = i
|
|
||||||
for i < len(data) && data[i-1] != '\n' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
// get working buffer
|
|
||||||
var raw bytes.Buffer
|
|
||||||
|
|
||||||
// put the first line into the working buffer
|
|
||||||
raw.Write(data[blockEnd:i])
|
|
||||||
blockEnd = i
|
|
||||||
|
|
||||||
// process the following lines
|
|
||||||
containsBlankLine := false
|
|
||||||
|
|
||||||
gatherLines:
|
|
||||||
for blockEnd < len(data) {
|
|
||||||
i++
|
|
||||||
|
|
||||||
// find the end of this line
|
|
||||||
for i < len(data) && data[i-1] != '\n' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
// if it is an empty line, guess that it is part of this item
|
|
||||||
// and move on to the next line
|
|
||||||
if p.isEmpty(data[blockEnd:i]) > 0 {
|
|
||||||
containsBlankLine = true
|
|
||||||
blockEnd = i
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
n := 0
|
|
||||||
if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
|
|
||||||
// this is the end of the block.
|
|
||||||
// we don't want to include this last line in the index.
|
|
||||||
break gatherLines
|
|
||||||
}
|
|
||||||
|
|
||||||
// if there were blank lines before this one, insert a new one now
|
|
||||||
if containsBlankLine {
|
|
||||||
raw.WriteByte('\n')
|
|
||||||
containsBlankLine = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// get rid of that first tab, write to buffer
|
|
||||||
raw.Write(data[blockEnd+n : i])
|
|
||||||
hasBlock = true
|
|
||||||
|
|
||||||
blockEnd = i
|
|
||||||
}
|
|
||||||
|
|
||||||
if data[blockEnd-1] != '\n' {
|
|
||||||
raw.WriteByte('\n')
|
|
||||||
}
|
|
||||||
|
|
||||||
contents = raw.Bytes()
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Miscellaneous helper functions
|
|
||||||
//
|
|
||||||
//
|
|
||||||
|
|
||||||
// Test if a character is a punctuation symbol.
|
|
||||||
// Taken from a private function in regexp in the stdlib.
|
|
||||||
func ispunct(c byte) bool {
|
|
||||||
for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
|
|
||||||
if c == r {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test if a character is a whitespace character.
|
|
||||||
func isspace(c byte) bool {
|
|
||||||
return ishorizontalspace(c) || isverticalspace(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test if a character is a horizontal whitespace character.
|
|
||||||
func ishorizontalspace(c byte) bool {
|
|
||||||
return c == ' ' || c == '\t'
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test if a character is a vertical character.
|
|
||||||
func isverticalspace(c byte) bool {
|
|
||||||
return c == '\n' || c == '\r' || c == '\f' || c == '\v'
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test if a character is letter.
|
|
||||||
func isletter(c byte) bool {
|
|
||||||
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test if a character is a letter or a digit.
|
|
||||||
// TODO: check when this is looking for ASCII alnum and when it should use unicode
|
|
||||||
func isalnum(c byte) bool {
|
|
||||||
return (c >= '0' && c <= '9') || isletter(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
|
|
||||||
// always ends output with a newline
|
|
||||||
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
|
|
||||||
// first, check for common cases: no tabs, or only tabs at beginning of line
|
|
||||||
i, prefix := 0, 0
|
|
||||||
slowcase := false
|
|
||||||
for i = 0; i < len(line); i++ {
|
|
||||||
if line[i] == '\t' {
|
|
||||||
if prefix == i {
|
|
||||||
prefix++
|
|
||||||
} else {
|
|
||||||
slowcase = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// no need to decode runes if all tabs are at the beginning of the line
|
|
||||||
if !slowcase {
|
|
||||||
for i = 0; i < prefix*tabSize; i++ {
|
|
||||||
out.WriteByte(' ')
|
|
||||||
}
|
|
||||||
out.Write(line[prefix:])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// the slow case: we need to count runes to figure out how
|
|
||||||
// many spaces to insert for each tab
|
|
||||||
column := 0
|
|
||||||
i = 0
|
|
||||||
for i < len(line) {
|
|
||||||
start := i
|
|
||||||
for i < len(line) && line[i] != '\t' {
|
|
||||||
_, size := utf8.DecodeRune(line[i:])
|
|
||||||
i += size
|
|
||||||
column++
|
|
||||||
}
|
|
||||||
|
|
||||||
if i > start {
|
|
||||||
out.Write(line[start:i])
|
|
||||||
}
|
|
||||||
|
|
||||||
if i >= len(line) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
out.WriteByte(' ')
|
|
||||||
column++
|
|
||||||
if column%tabSize == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// isIndented reports how many leading bytes of data form an indent:
// 1 for a leading tab, indentSize for indentSize leading spaces, and
// 0 when the line does not count as indented.
func isIndented(data []byte, indentSize int) int {
	switch {
	case len(data) == 0:
		return 0
	case data[0] == '\t':
		return 1
	case len(data) < indentSize:
		return 0
	}
	for _, c := range data[:indentSize] {
		if c != ' ' {
			return 0
		}
	}
	return indentSize
}
|
|
||||||
|
|
||||||
// Create a url-safe slug for fragments
|
|
||||||
func slugify(in []byte) []byte {
|
|
||||||
if len(in) == 0 {
|
|
||||||
return in
|
|
||||||
}
|
|
||||||
out := make([]byte, 0, len(in))
|
|
||||||
sym := false
|
|
||||||
|
|
||||||
for _, ch := range in {
|
|
||||||
if isalnum(ch) {
|
|
||||||
sym = false
|
|
||||||
out = append(out, ch)
|
|
||||||
} else if sym {
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
out = append(out, '-')
|
|
||||||
sym = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var a, b int
|
|
||||||
var ch byte
|
|
||||||
for a, ch = range out {
|
|
||||||
if ch != '-' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for b = len(out) - 1; b > 0; b-- {
|
|
||||||
if out[b] != '-' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out[a : b+1]
|
|
||||||
}
|
|
354
vendor/github.com/russross/blackfriday/v2/node.go
generated
vendored
354
vendor/github.com/russross/blackfriday/v2/node.go
generated
vendored
|
@ -1,354 +0,0 @@
|
||||||
package blackfriday
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NodeType specifies a type of a single node of a syntax tree. Usually one
// node (and its type) corresponds to a single markdown feature, e.g. emphasis
// or code block.
type NodeType int

// Constants for identifying different types of nodes. See NodeType.
// NOTE: the declaration order fixes the iota values and must stay in
// sync with the index list in nodeTypeNames below.
const (
	Document NodeType = iota
	BlockQuote
	List
	Item
	Paragraph
	Heading
	HorizontalRule
	Emph
	Strong
	Del
	Link
	Image
	Text
	HTMLBlock
	CodeBlock
	Softbreak
	Hardbreak
	Code
	HTMLSpan
	Table
	TableCell
	TableHead
	TableBody
	TableRow
)

// nodeTypeNames maps each NodeType constant to its printable name,
// indexed by the constant's value.
var nodeTypeNames = []string{
	Document:       "Document",
	BlockQuote:     "BlockQuote",
	List:           "List",
	Item:           "Item",
	Paragraph:      "Paragraph",
	Heading:        "Heading",
	HorizontalRule: "HorizontalRule",
	Emph:           "Emph",
	Strong:         "Strong",
	Del:            "Del",
	Link:           "Link",
	Image:          "Image",
	Text:           "Text",
	HTMLBlock:      "HTMLBlock",
	CodeBlock:      "CodeBlock",
	Softbreak:      "Softbreak",
	Hardbreak:      "Hardbreak",
	Code:           "Code",
	HTMLSpan:       "HTMLSpan",
	Table:          "Table",
	TableCell:      "TableCell",
	TableHead:      "TableHead",
	TableBody:      "TableBody",
	TableRow:       "TableRow",
}

// String returns the human-readable name of the node type.
// It panics for values outside the declared constant range.
func (t NodeType) String() string {
	return nodeTypeNames[t]
}
|
|
||||||
|
|
||||||
// ListData contains fields relevant to a List and Item node type.
type ListData struct {
	ListFlags       ListType // bit flags describing the list variety
	Tight           bool     // Skip <p>s around list item data if true
	BulletChar      byte     // '*', '+' or '-' in bullet lists
	Delimiter       byte     // '.' or ')' after the number in ordered lists
	RefLink         []byte   // If not nil, turns this list item into a footnote item and triggers different rendering
	IsFootnotesList bool     // This is a list of footnotes
}

// LinkData contains fields relevant to a Link node type.
type LinkData struct {
	Destination []byte // Destination is what goes into a href
	Title       []byte // Title is the tooltip thing that goes in a title attribute
	NoteID      int    // NoteID contains a serial number of a footnote, zero if it's not a footnote
	Footnote    *Node  // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
}

// CodeBlockData contains fields relevant to a CodeBlock node type.
type CodeBlockData struct {
	IsFenced    bool   // Specifies whether it's a fenced code block or an indented one
	Info        []byte // This holds the info string
	FenceChar   byte   // byte the fence is drawn with (presumably '`' or '~' — confirm against the parser)
	FenceLength int    // number of fence bytes in the opening fence
	FenceOffset int    // indentation of the opening fence, in bytes
}

// TableCellData contains fields relevant to a TableCell node type.
type TableCellData struct {
	IsHeader bool           // This tells if it's under the header row
	Align    CellAlignFlags // This holds the value for align attribute
}

// HeadingData contains fields relevant to a Heading node type.
type HeadingData struct {
	Level        int    // This holds the heading level number
	HeadingID    string // This might hold heading ID, if present
	IsTitleblock bool   // Specifies whether it's a title block
}
|
|
||||||
|
|
||||||
// Node is a single element in the abstract syntax tree of the parsed document.
// It holds connections to the structurally neighboring nodes and, for certain
// types of nodes, additional information that might be needed when rendering.
type Node struct {
	Type       NodeType // Determines the type of the node
	Parent     *Node    // Points to the parent
	FirstChild *Node    // Points to the first child, if any
	LastChild  *Node    // Points to the last child, if any
	Prev       *Node    // Previous sibling; nil if it's the first child
	Next       *Node    // Next sibling; nil if it's the last child

	Literal []byte // Text contents of the leaf nodes

	// Per-type payloads; only the one matching Type is meaningful.
	HeadingData   // Populated if Type is Heading
	ListData      // Populated if Type is List
	CodeBlockData // Populated if Type is CodeBlock
	LinkData      // Populated if Type is Link
	TableCellData // Populated if Type is TableCell

	content []byte // Markdown content of the block nodes
	open    bool   // Specifies an open block node that has not been finished to process yet
}
|
|
||||||
|
|
||||||
// NewNode allocates a node of a specified type.
|
|
||||||
func NewNode(typ NodeType) *Node {
|
|
||||||
return &Node{
|
|
||||||
Type: typ,
|
|
||||||
open: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *Node) String() string {
|
|
||||||
ellipsis := ""
|
|
||||||
snippet := n.Literal
|
|
||||||
if len(snippet) > 16 {
|
|
||||||
snippet = snippet[:16]
|
|
||||||
ellipsis = "..."
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unlink removes node 'n' from the tree.
// It panics if the node is nil.
func (n *Node) Unlink() {
	// Splice n out of the sibling chain; when n sat at either end of
	// its parent's child list, update the parent's boundary pointer.
	if n.Prev != nil {
		n.Prev.Next = n.Next
	} else if n.Parent != nil {
		n.Parent.FirstChild = n.Next
	}
	if n.Next != nil {
		n.Next.Prev = n.Prev
	} else if n.Parent != nil {
		n.Parent.LastChild = n.Prev
	}
	// Fully detach n so it can be re-inserted elsewhere.
	n.Parent = nil
	n.Next = nil
	n.Prev = nil
}
|
|
||||||
|
|
||||||
// AppendChild adds a node 'child' as a child of 'n'.
|
|
||||||
// It panics if either node is nil.
|
|
||||||
func (n *Node) AppendChild(child *Node) {
|
|
||||||
child.Unlink()
|
|
||||||
child.Parent = n
|
|
||||||
if n.LastChild != nil {
|
|
||||||
n.LastChild.Next = child
|
|
||||||
child.Prev = n.LastChild
|
|
||||||
n.LastChild = child
|
|
||||||
} else {
|
|
||||||
n.FirstChild = child
|
|
||||||
n.LastChild = child
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertBefore inserts 'sibling' immediately before 'n', detaching it
// from any previous position first.
// It panics if either node is nil.
func (n *Node) InsertBefore(sibling *Node) {
	sibling.Unlink()
	sibling.Prev = n.Prev
	if sibling.Prev != nil {
		sibling.Prev.Next = sibling
	}
	sibling.Next = n
	n.Prev = sibling
	sibling.Parent = n.Parent
	// No previous sibling means n was the first child, so the newly
	// inserted node takes over that slot on the parent.
	if sibling.Prev == nil {
		sibling.Parent.FirstChild = sibling
	}
}
|
|
||||||
|
|
||||||
func (n *Node) isContainer() bool {
|
|
||||||
switch n.Type {
|
|
||||||
case Document:
|
|
||||||
fallthrough
|
|
||||||
case BlockQuote:
|
|
||||||
fallthrough
|
|
||||||
case List:
|
|
||||||
fallthrough
|
|
||||||
case Item:
|
|
||||||
fallthrough
|
|
||||||
case Paragraph:
|
|
||||||
fallthrough
|
|
||||||
case Heading:
|
|
||||||
fallthrough
|
|
||||||
case Emph:
|
|
||||||
fallthrough
|
|
||||||
case Strong:
|
|
||||||
fallthrough
|
|
||||||
case Del:
|
|
||||||
fallthrough
|
|
||||||
case Link:
|
|
||||||
fallthrough
|
|
||||||
case Image:
|
|
||||||
fallthrough
|
|
||||||
case Table:
|
|
||||||
fallthrough
|
|
||||||
case TableHead:
|
|
||||||
fallthrough
|
|
||||||
case TableBody:
|
|
||||||
fallthrough
|
|
||||||
case TableRow:
|
|
||||||
fallthrough
|
|
||||||
case TableCell:
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *Node) canContain(t NodeType) bool {
|
|
||||||
if n.Type == List {
|
|
||||||
return t == Item
|
|
||||||
}
|
|
||||||
if n.Type == Document || n.Type == BlockQuote || n.Type == Item {
|
|
||||||
return t != Item
|
|
||||||
}
|
|
||||||
if n.Type == Table {
|
|
||||||
return t == TableHead || t == TableBody
|
|
||||||
}
|
|
||||||
if n.Type == TableHead || n.Type == TableBody {
|
|
||||||
return t == TableRow
|
|
||||||
}
|
|
||||||
if n.Type == TableRow {
|
|
||||||
return t == TableCell
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// WalkStatus allows NodeVisitor to have some control over the tree traversal.
// It is returned from NodeVisitor and different values allow Node.Walk to
// decide which node to go to next.
type WalkStatus int

const (
	// GoToNext is the default traversal of every node.
	GoToNext WalkStatus = iota
	// SkipChildren tells walker to skip all children of current node.
	SkipChildren
	// Terminate tells walker to terminate the traversal.
	Terminate
)

// NodeVisitor is a callback to be called when traversing the syntax tree.
// Called twice for every node: once with entering=true when the branch is
// first visited, then with entering=false after all the children are done.
type NodeVisitor func(node *Node, entering bool) WalkStatus
|
|
||||||
|
|
||||||
// Walk is a convenience method that instantiates a walker and starts a
|
|
||||||
// traversal of subtree rooted at n.
|
|
||||||
func (n *Node) Walk(visitor NodeVisitor) {
|
|
||||||
w := newNodeWalker(n)
|
|
||||||
for w.current != nil {
|
|
||||||
status := visitor(w.current, w.entering)
|
|
||||||
switch status {
|
|
||||||
case GoToNext:
|
|
||||||
w.next()
|
|
||||||
case SkipChildren:
|
|
||||||
w.entering = false
|
|
||||||
w.next()
|
|
||||||
case Terminate:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// nodeWalker holds the cursor state of an in-progress Walk traversal.
type nodeWalker struct {
	current  *Node // node the visitor sees next; nil once the walk is done
	root     *Node // subtree root where the traversal started and ends
	entering bool  // true when descending into current, false when leaving it
}

// newNodeWalker returns a walker positioned at root, about to enter it.
func newNodeWalker(root *Node) *nodeWalker {
	return &nodeWalker{
		current:  root,
		root:     root,
		entering: true,
	}
}
|
|
||||||
|
|
||||||
// next advances the walker one step of a depth-first traversal: down
// into the first child of a container being entered, across to the
// next sibling, or back up to the parent (marked as leaving). It sets
// current to nil once the walk has left the root.
func (nw *nodeWalker) next() {
	// Finished when we are at the root and either leaving it or it is a
	// leaf (leaves get only a single visit).
	if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root {
		nw.current = nil
		return
	}
	if nw.entering && nw.current.isContainer() {
		if nw.current.FirstChild != nil {
			nw.current = nw.current.FirstChild
			nw.entering = true
		} else {
			// empty container: flip straight to its leaving visit
			nw.entering = false
		}
	} else if nw.current.Next == nil {
		// no further siblings: climb up and schedule the parent's exit
		nw.current = nw.current.Parent
		nw.entering = false
	} else {
		nw.current = nw.current.Next
		nw.entering = true
	}
}
|
|
||||||
|
|
||||||
func dump(ast *Node) {
|
|
||||||
fmt.Println(dumpString(ast))
|
|
||||||
}
|
|
||||||
|
|
||||||
func dumpR(ast *Node, depth int) string {
|
|
||||||
if ast == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
indent := bytes.Repeat([]byte("\t"), depth)
|
|
||||||
content := ast.Literal
|
|
||||||
if content == nil {
|
|
||||||
content = ast.content
|
|
||||||
}
|
|
||||||
result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content)
|
|
||||||
for n := ast.FirstChild; n != nil; n = n.Next {
|
|
||||||
result += dumpR(n, depth+1)
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func dumpString(ast *Node) string {
|
|
||||||
return dumpR(ast, 0)
|
|
||||||
}
|
|
457
vendor/github.com/russross/blackfriday/v2/smartypants.go
generated
vendored
457
vendor/github.com/russross/blackfriday/v2/smartypants.go
generated
vendored
|
@ -1,457 +0,0 @@
|
||||||
//
|
|
||||||
// Blackfriday Markdown Processor
|
|
||||||
// Available at http://github.com/russross/blackfriday
|
|
||||||
//
|
|
||||||
// Copyright © 2011 Russ Ross <russ@russross.com>.
|
|
||||||
// Distributed under the Simplified BSD License.
|
|
||||||
// See README.md for details.
|
|
||||||
//
|
|
||||||
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// SmartyPants rendering
|
|
||||||
//
|
|
||||||
//
|
|
||||||
|
|
||||||
package blackfriday
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SPRenderer is a struct containing state of a Smartypants renderer.
type SPRenderer struct {
	inSingleQuote bool               // currently inside an open single quote
	inDoubleQuote bool               // currently inside an open double quote
	callbacks     [256]smartCallback // per-input-byte substitution handlers; nil entries pass through
}
|
|
||||||
|
|
||||||
func wordBoundary(c byte) bool {
|
|
||||||
return c == 0 || isspace(c) || ispunct(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// tolower lowercases a single ASCII uppercase byte; every other byte
// is returned unchanged.
func tolower(c byte) byte {
	if 'A' <= c && c <= 'Z' {
		return c + ('a' - 'A')
	}
	return c
}
|
|
||||||
|
|
||||||
// isdigit reports whether c is an ASCII decimal digit.
func isdigit(c byte) bool {
	return '0' <= c && c <= '9'
}
|
|
||||||
|
|
||||||
func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
|
|
||||||
// edge of the buffer is likely to be a tag that we don't get to see,
|
|
||||||
// so we treat it like text sometimes
|
|
||||||
|
|
||||||
// enumerate all sixteen possibilities for (previousChar, nextChar)
|
|
||||||
// each can be one of {0, space, punct, other}
|
|
||||||
switch {
|
|
||||||
case previousChar == 0 && nextChar == 0:
|
|
||||||
// context is not any help here, so toggle
|
|
||||||
*isOpen = !*isOpen
|
|
||||||
case isspace(previousChar) && nextChar == 0:
|
|
||||||
// [ "] might be [ "<code>foo...]
|
|
||||||
*isOpen = true
|
|
||||||
case ispunct(previousChar) && nextChar == 0:
|
|
||||||
// [!"] hmm... could be [Run!"] or [("<code>...]
|
|
||||||
*isOpen = false
|
|
||||||
case /* isnormal(previousChar) && */ nextChar == 0:
|
|
||||||
// [a"] is probably a close
|
|
||||||
*isOpen = false
|
|
||||||
case previousChar == 0 && isspace(nextChar):
|
|
||||||
// [" ] might be [...foo</code>" ]
|
|
||||||
*isOpen = false
|
|
||||||
case isspace(previousChar) && isspace(nextChar):
|
|
||||||
// [ " ] context is not any help here, so toggle
|
|
||||||
*isOpen = !*isOpen
|
|
||||||
case ispunct(previousChar) && isspace(nextChar):
|
|
||||||
// [!" ] is probably a close
|
|
||||||
*isOpen = false
|
|
||||||
case /* isnormal(previousChar) && */ isspace(nextChar):
|
|
||||||
// [a" ] this is one of the easy cases
|
|
||||||
*isOpen = false
|
|
||||||
case previousChar == 0 && ispunct(nextChar):
|
|
||||||
// ["!] hmm... could be ["$1.95] or [</code>"!...]
|
|
||||||
*isOpen = false
|
|
||||||
case isspace(previousChar) && ispunct(nextChar):
|
|
||||||
// [ "!] looks more like [ "$1.95]
|
|
||||||
*isOpen = true
|
|
||||||
case ispunct(previousChar) && ispunct(nextChar):
|
|
||||||
// [!"!] context is not any help here, so toggle
|
|
||||||
*isOpen = !*isOpen
|
|
||||||
case /* isnormal(previousChar) && */ ispunct(nextChar):
|
|
||||||
// [a"!] is probably a close
|
|
||||||
*isOpen = false
|
|
||||||
case previousChar == 0 /* && isnormal(nextChar) */ :
|
|
||||||
// ["a] is probably an open
|
|
||||||
*isOpen = true
|
|
||||||
case isspace(previousChar) /* && isnormal(nextChar) */ :
|
|
||||||
// [ "a] this is one of the easy cases
|
|
||||||
*isOpen = true
|
|
||||||
case ispunct(previousChar) /* && isnormal(nextChar) */ :
|
|
||||||
// [!"a] is probably an open
|
|
||||||
*isOpen = true
|
|
||||||
default:
|
|
||||||
// [a'b] maybe a contraction?
|
|
||||||
*isOpen = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note that with the limited lookahead, this non-breaking
|
|
||||||
// space will also be appended to single double quotes.
|
|
||||||
if addNBSP && !*isOpen {
|
|
||||||
out.WriteString(" ")
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte('&')
|
|
||||||
if *isOpen {
|
|
||||||
out.WriteByte('l')
|
|
||||||
} else {
|
|
||||||
out.WriteByte('r')
|
|
||||||
}
|
|
||||||
out.WriteByte(quote)
|
|
||||||
out.WriteString("quo;")
|
|
||||||
|
|
||||||
if addNBSP && *isOpen {
|
|
||||||
out.WriteString(" ")
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if len(text) >= 2 {
|
|
||||||
t1 := tolower(text[1])
|
|
||||||
|
|
||||||
if t1 == '\'' {
|
|
||||||
nextChar := byte(0)
|
|
||||||
if len(text) >= 3 {
|
|
||||||
nextChar = text[2]
|
|
||||||
}
|
|
||||||
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
|
|
||||||
out.WriteString("’")
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(text) >= 3 {
|
|
||||||
t2 := tolower(text[2])
|
|
||||||
|
|
||||||
if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
|
|
||||||
(len(text) < 4 || wordBoundary(text[3])) {
|
|
||||||
out.WriteString("’")
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nextChar := byte(0)
|
|
||||||
if len(text) > 1 {
|
|
||||||
nextChar = text[1]
|
|
||||||
}
|
|
||||||
if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if len(text) >= 3 {
|
|
||||||
t1 := tolower(text[1])
|
|
||||||
t2 := tolower(text[2])
|
|
||||||
|
|
||||||
if t1 == 'c' && t2 == ')' {
|
|
||||||
out.WriteString("©")
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
|
|
||||||
if t1 == 'r' && t2 == ')' {
|
|
||||||
out.WriteString("®")
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
|
|
||||||
out.WriteString("™")
|
|
||||||
return 3
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if len(text) >= 2 {
|
|
||||||
if text[1] == '-' {
|
|
||||||
out.WriteString("—")
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if wordBoundary(previousChar) && wordBoundary(text[1]) {
|
|
||||||
out.WriteString("–")
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
|
|
||||||
out.WriteString("—")
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
if len(text) >= 2 && text[1] == '-' {
|
|
||||||
out.WriteString("–")
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
|
|
||||||
if bytes.HasPrefix(text, []byte(""")) {
|
|
||||||
nextChar := byte(0)
|
|
||||||
if len(text) >= 7 {
|
|
||||||
nextChar = text[6]
|
|
||||||
}
|
|
||||||
if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
|
|
||||||
return 5
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if bytes.HasPrefix(text, []byte("�")) {
|
|
||||||
return 3
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte('&')
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
|
|
||||||
var quote byte = 'd'
|
|
||||||
if angledQuotes {
|
|
||||||
quote = 'a'
|
|
||||||
}
|
|
||||||
|
|
||||||
return func(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
|
|
||||||
out.WriteString("…")
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
|
|
||||||
out.WriteString("…")
|
|
||||||
return 4
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if len(text) >= 2 && text[1] == '`' {
|
|
||||||
nextChar := byte(0)
|
|
||||||
if len(text) >= 3 {
|
|
||||||
nextChar = text[2]
|
|
||||||
}
|
|
||||||
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
|
|
||||||
// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
|
|
||||||
// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
|
|
||||||
// and avoid changing dates like 1/23/2005 into fractions.
|
|
||||||
numEnd := 0
|
|
||||||
for len(text) > numEnd && isdigit(text[numEnd]) {
|
|
||||||
numEnd++
|
|
||||||
}
|
|
||||||
if numEnd == 0 {
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
denStart := numEnd + 1
|
|
||||||
if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
|
|
||||||
denStart = numEnd + 3
|
|
||||||
} else if len(text) < numEnd+2 || text[numEnd] != '/' {
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
denEnd := denStart
|
|
||||||
for len(text) > denEnd && isdigit(text[denEnd]) {
|
|
||||||
denEnd++
|
|
||||||
}
|
|
||||||
if denEnd == denStart {
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
|
|
||||||
out.WriteString("<sup>")
|
|
||||||
out.Write(text[:numEnd])
|
|
||||||
out.WriteString("</sup>⁄<sub>")
|
|
||||||
out.Write(text[denStart:denEnd])
|
|
||||||
out.WriteString("</sub>")
|
|
||||||
return denEnd - 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
|
|
||||||
if text[0] == '1' && text[1] == '/' && text[2] == '2' {
|
|
||||||
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
|
|
||||||
out.WriteString("½")
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if text[0] == '1' && text[1] == '/' && text[2] == '4' {
|
|
||||||
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
|
|
||||||
out.WriteString("¼")
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if text[0] == '3' && text[1] == '/' && text[2] == '4' {
|
|
||||||
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
|
|
||||||
out.WriteString("¾")
|
|
||||||
return 2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
out.WriteByte(text[0])
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
|
|
||||||
nextChar := byte(0)
|
|
||||||
if len(text) > 1 {
|
|
||||||
nextChar = text[1]
|
|
||||||
}
|
|
||||||
if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
|
|
||||||
out.WriteString(""")
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
|
|
||||||
i := 0
|
|
||||||
|
|
||||||
for i < len(text) && text[i] != '>' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
out.Write(text[:i+1])
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
|
|
||||||
// smartCallback rewrites the smartypants sequence starting at text[0],
// writing the replacement to out; it returns the number of additional
// input bytes consumed beyond the first.
type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int

// NewSmartypantsRenderer constructs a Smartypants renderer object.
// The flags select which substitutions get installed into the
// per-byte callback table; bytes without a callback pass through.
func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
	var (
		r SPRenderer

		// pre-bound '&' handlers for each quote-style / NBSP combination
		smartAmpAngled      = r.smartAmp(true, false)
		smartAmpAngledNBSP  = r.smartAmp(true, true)
		smartAmpRegular     = r.smartAmp(false, false)
		smartAmpRegularNBSP = r.smartAmp(false, true)

		addNBSP = flags&SmartypantsQuotesNBSP != 0
	)

	if flags&SmartypantsAngledQuotes == 0 {
		r.callbacks['"'] = r.smartDoubleQuote
		if !addNBSP {
			r.callbacks['&'] = smartAmpRegular
		} else {
			r.callbacks['&'] = smartAmpRegularNBSP
		}
	} else {
		r.callbacks['"'] = r.smartAngledDoubleQuote
		if !addNBSP {
			r.callbacks['&'] = smartAmpAngled
		} else {
			r.callbacks['&'] = smartAmpAngledNBSP
		}
	}
	r.callbacks['\''] = r.smartSingleQuote
	r.callbacks['('] = r.smartParens
	if flags&SmartypantsDashes != 0 {
		if flags&SmartypantsLatexDashes == 0 {
			r.callbacks['-'] = r.smartDash
		} else {
			r.callbacks['-'] = r.smartDashLatex
		}
	}
	r.callbacks['.'] = r.smartPeriod
	if flags&SmartypantsFractions == 0 {
		// only the common fractions 1/2, 1/4 and 3/4 are recognized
		r.callbacks['1'] = r.smartNumber
		r.callbacks['3'] = r.smartNumber
	} else {
		// arbitrary digits/digits fractions on any leading digit
		for ch := '1'; ch <= '9'; ch++ {
			r.callbacks[ch] = r.smartNumberGeneric
		}
	}
	r.callbacks['<'] = r.smartLeftAngle
	r.callbacks['`'] = r.smartBacktick
	return &r
}
|
|
||||||
|
|
||||||
// Process is the entry point of the Smartypants renderer. It scans
// text byte by byte, copying runs with no registered callback straight
// to w and letting callbacks rewrite the special sequences into a
// temporary buffer. Write errors on w are ignored.
func (r *SPRenderer) Process(w io.Writer, text []byte) {
	mark := 0 // start of the pending run of untouched bytes
	for i := 0; i < len(text); i++ {
		if action := r.callbacks[text[i]]; action != nil {
			// flush the literal run that precedes this special byte
			if i > mark {
				w.Write(text[mark:i])
			}
			previousChar := byte(0)
			if i > 0 {
				previousChar = text[i-1]
			}
			var tmp bytes.Buffer
			// the callback may consume extra input bytes; skip them
			i += action(&tmp, previousChar, text[i:])
			w.Write(tmp.Bytes())
			mark = i + 1
		}
	}

	if mark < len(text) {
		w.Write(text[mark:])
	}
}
|
|
16
vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml
generated
vendored
16
vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml
generated
vendored
|
@ -1,16 +0,0 @@
|
||||||
sudo: false
|
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.x
|
|
||||||
- master
|
|
||||||
matrix:
|
|
||||||
allow_failures:
|
|
||||||
- go: master
|
|
||||||
fast_finish: true
|
|
||||||
install:
|
|
||||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
|
||||||
script:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- diff -u <(echo -n) <(gofmt -d -s .)
|
|
||||||
- go tool vet .
|
|
||||||
- go test -v -race ./...
|
|
36
vendor/github.com/shurcooL/sanitized_anchor_name/README.md
generated
vendored
36
vendor/github.com/shurcooL/sanitized_anchor_name/README.md
generated
vendored
|
@ -1,36 +0,0 @@
|
||||||
sanitized_anchor_name
|
|
||||||
=====================
|
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/shurcooL/sanitized_anchor_name.svg?branch=master)](https://travis-ci.org/shurcooL/sanitized_anchor_name) [![GoDoc](https://godoc.org/github.com/shurcooL/sanitized_anchor_name?status.svg)](https://godoc.org/github.com/shurcooL/sanitized_anchor_name)
|
|
||||||
|
|
||||||
Package sanitized_anchor_name provides a func to create sanitized anchor names.
|
|
||||||
|
|
||||||
Its logic can be reused by multiple packages to create interoperable anchor names
|
|
||||||
and links to those anchors.
|
|
||||||
|
|
||||||
At this time, it does not try to ensure that generated anchor names
|
|
||||||
are unique, that responsibility falls on the caller.
|
|
||||||
|
|
||||||
Installation
|
|
||||||
------------
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get -u github.com/shurcooL/sanitized_anchor_name
|
|
||||||
```
|
|
||||||
|
|
||||||
Example
|
|
||||||
-------
|
|
||||||
|
|
||||||
```Go
|
|
||||||
anchorName := sanitized_anchor_name.Create("This is a header")
|
|
||||||
|
|
||||||
fmt.Println(anchorName)
|
|
||||||
|
|
||||||
// Output:
|
|
||||||
// this-is-a-header
|
|
||||||
```
|
|
||||||
|
|
||||||
License
|
|
||||||
-------
|
|
||||||
|
|
||||||
- [MIT License](LICENSE)
|
|
1
vendor/github.com/shurcooL/sanitized_anchor_name/go.mod
generated
vendored
1
vendor/github.com/shurcooL/sanitized_anchor_name/go.mod
generated
vendored
|
@ -1 +0,0 @@
|
||||||
module github.com/shurcooL/sanitized_anchor_name
|
|
29
vendor/github.com/shurcooL/sanitized_anchor_name/main.go
generated
vendored
29
vendor/github.com/shurcooL/sanitized_anchor_name/main.go
generated
vendored
|
@ -1,29 +0,0 @@
|
||||||
// Package sanitized_anchor_name provides a func to create sanitized anchor names.
|
|
||||||
//
|
|
||||||
// Its logic can be reused by multiple packages to create interoperable anchor names
|
|
||||||
// and links to those anchors.
|
|
||||||
//
|
|
||||||
// At this time, it does not try to ensure that generated anchor names
|
|
||||||
// are unique, that responsibility falls on the caller.
|
|
||||||
package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name"
|
|
||||||
|
|
||||||
import "unicode"
|
|
||||||
|
|
||||||
// Create returns a sanitized anchor name for the given text.
|
|
||||||
func Create(text string) string {
|
|
||||||
var anchorName []rune
|
|
||||||
var futureDash = false
|
|
||||||
for _, r := range text {
|
|
||||||
switch {
|
|
||||||
case unicode.IsLetter(r) || unicode.IsNumber(r):
|
|
||||||
if futureDash && len(anchorName) > 0 {
|
|
||||||
anchorName = append(anchorName, '-')
|
|
||||||
}
|
|
||||||
futureDash = false
|
|
||||||
anchorName = append(anchorName, unicode.ToLower(r))
|
|
||||||
default:
|
|
||||||
futureDash = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(anchorName)
|
|
||||||
}
|
|
19
vendor/github.com/yuin/goldmark/.gitignore
generated
vendored
Normal file
19
vendor/github.com/yuin/goldmark/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
# Binaries for programs and plugins
|
||||||
|
*.exe
|
||||||
|
*.exe~
|
||||||
|
*.dll
|
||||||
|
*.so
|
||||||
|
*.dylib
|
||||||
|
|
||||||
|
# Test binary, build with `go test -c`
|
||||||
|
*.test
|
||||||
|
*.pprof
|
||||||
|
|
||||||
|
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||||
|
*.out
|
||||||
|
|
||||||
|
.DS_Store
|
||||||
|
fuzz/corpus
|
||||||
|
fuzz/crashers
|
||||||
|
fuzz/suppressions
|
||||||
|
fuzz/fuzz-fuzz.zip
|
|
@ -1,6 +1,6 @@
|
||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2015 Dmitri Shuralyov
|
Copyright (c) 2019 Yusuke Inuzuka
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
16
vendor/github.com/yuin/goldmark/Makefile
generated
vendored
Normal file
16
vendor/github.com/yuin/goldmark/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
.PHONY: test fuzz
|
||||||
|
|
||||||
|
test:
|
||||||
|
go test -coverprofile=profile.out -coverpkg=github.com/yuin/goldmark,github.com/yuin/goldmark/ast,github.com/yuin/goldmark/extension,github.com/yuin/goldmark/extension/ast,github.com/yuin/goldmark/parser,github.com/yuin/goldmark/renderer,github.com/yuin/goldmark/renderer/html,github.com/yuin/goldmark/text,github.com/yuin/goldmark/util ./...
|
||||||
|
|
||||||
|
cov: test
|
||||||
|
go tool cover -html=profile.out
|
||||||
|
|
||||||
|
fuzz:
|
||||||
|
which go-fuzz > /dev/null 2>&1 || (GO111MODULE=off go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build; GO111MODULE=off go get -d github.com/dvyukov/go-fuzz-corpus; true)
|
||||||
|
rm -rf ./fuzz/corpus
|
||||||
|
rm -rf ./fuzz/crashers
|
||||||
|
rm -rf ./fuzz/suppressions
|
||||||
|
rm -f ./fuzz/fuzz-fuzz.zip
|
||||||
|
cd ./fuzz && go-fuzz-build
|
||||||
|
cd ./fuzz && go-fuzz
|
343
vendor/github.com/yuin/goldmark/README.md
generated
vendored
Normal file
343
vendor/github.com/yuin/goldmark/README.md
generated
vendored
Normal file
|
@ -0,0 +1,343 @@
|
||||||
|
goldmark
|
||||||
|
==========================================
|
||||||
|
|
||||||
|
[![http://godoc.org/github.com/yuin/goldmark](https://godoc.org/github.com/yuin/goldmark?status.svg)](http://godoc.org/github.com/yuin/goldmark)
|
||||||
|
[![https://github.com/yuin/goldmark/actions?query=workflow:test](https://github.com/yuin/goldmark/workflows/test/badge.svg?branch=master&event=push)](https://github.com/yuin/goldmark/actions?query=workflow:test)
|
||||||
|
[![https://coveralls.io/github/yuin/goldmark](https://coveralls.io/repos/github/yuin/goldmark/badge.svg?branch=master)](https://coveralls.io/github/yuin/goldmark)
|
||||||
|
[![https://goreportcard.com/report/github.com/yuin/goldmark](https://goreportcard.com/badge/github.com/yuin/goldmark)](https://goreportcard.com/report/github.com/yuin/goldmark)
|
||||||
|
|
||||||
|
> A Markdown parser written in Go. Easy to extend, standard compliant, well structured.
|
||||||
|
|
||||||
|
goldmark is compliant with CommonMark 0.29.
|
||||||
|
|
||||||
|
Motivation
|
||||||
|
----------------------
|
||||||
|
I need a Markdown parser for Go that meets following conditions:
|
||||||
|
|
||||||
|
- Easy to extend.
|
||||||
|
- Markdown is poor in document expressions compared with other light markup languages like reStructuredText.
|
||||||
|
- We have extensions to the Markdown syntax, e.g. PHP Markdown Extra, GitHub Flavored Markdown.
|
||||||
|
- Standard compliant.
|
||||||
|
- Markdown has many dialects.
|
||||||
|
- GitHub Flavored Markdown is widely used and it is based on CommonMark aside from whether CommonMark is good specification or not.
|
||||||
|
- CommonMark is too complicated and hard to implement.
|
||||||
|
- Well structured.
|
||||||
|
- AST based, and preserves source position of nodes.
|
||||||
|
- Written in pure Go.
|
||||||
|
|
||||||
|
[golang-commonmark](https://gitlab.com/golang-commonmark/markdown) may be a good choice, but it seems to be a copy of [markdown-it](https://github.com/markdown-it).
|
||||||
|
|
||||||
|
[blackfriday.v2](https://github.com/russross/blackfriday/tree/v2) is a fast and widely used implementation, but it is not CommonMark compliant and cannot be extended from outside of the package since its AST uses structs instead of interfaces.
|
||||||
|
|
||||||
|
Furthermore, its behavior differs from other implementations in some cases, especially regarding lists: ([Deep nested lists don't output correctly #329](https://github.com/russross/blackfriday/issues/329), [List block cannot have a second line #244](https://github.com/russross/blackfriday/issues/244), etc).
|
||||||
|
|
||||||
|
This behavior sometimes causes problems. If you migrate your Markdown text to blackfriday-based wikis from GitHub, many lists will immediately be broken.
|
||||||
|
|
||||||
|
As mentioned above, CommonMark is too complicated and hard to implement, so Markdown parsers based on CommonMark barely exist.
|
||||||
|
|
||||||
|
Features
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
- **Standard compliant.** goldmark gets full compliance with the latest CommonMark spec.
|
||||||
|
- **Extensible.** Do you want to add a `@username` mention syntax to Markdown?
|
||||||
|
You can easily do it in goldmark. You can add your AST nodes,
|
||||||
|
parsers for block level elements, parsers for inline level elements,
|
||||||
|
transformers for paragraphs, transformers for whole AST structure, and
|
||||||
|
renderers.
|
||||||
|
- **Performance.** goldmark performs pretty much equally to cmark,
|
||||||
|
the CommonMark reference implementation written in C.
|
||||||
|
- **Robust.** goldmark is tested with [go-fuzz](https://github.com/dvyukov/go-fuzz), a fuzz testing tool.
|
||||||
|
- **Builtin extensions.** goldmark ships with common extensions like tables, strikethrough,
|
||||||
|
task lists, and definition lists.
|
||||||
|
- **Depends only on standard libraries.**
|
||||||
|
|
||||||
|
Installation
|
||||||
|
----------------------
|
||||||
|
```bash
|
||||||
|
$ go get github.com/yuin/goldmark
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
Usage
|
||||||
|
----------------------
|
||||||
|
Import packages:
|
||||||
|
|
||||||
|
```
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
Convert Markdown documents with the CommonMark compliant mode:
|
||||||
|
|
||||||
|
```go
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := goldmark.Convert(source, &buf); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
With options
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
```go
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := goldmark.Convert(source, &buf, parser.WithContext(ctx)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Functional option | Type | Description |
|
||||||
|
| ----------------- | ---- | ----------- |
|
||||||
|
| `parser.WithContext` | A `parser.Context` | Context for the parsing phase. |
|
||||||
|
|
||||||
|
Context options
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
| Functional option | Type | Description |
|
||||||
|
| ----------------- | ---- | ----------- |
|
||||||
|
| `parser.WithIDs` | A `parser.IDs` | `IDs` allows you to change logics that are related to element id(ex: Auto heading id generation). |
|
||||||
|
|
||||||
|
|
||||||
|
Custom parser and renderer
|
||||||
|
--------------------------
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
"github.com/yuin/goldmark/extension"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
)
|
||||||
|
|
||||||
|
md := goldmark.New(
|
||||||
|
goldmark.WithExtensions(extension.GFM),
|
||||||
|
goldmark.WithParserOptions(
|
||||||
|
parser.WithAutoHeadingID(),
|
||||||
|
),
|
||||||
|
goldmark.WithRendererOptions(
|
||||||
|
html.WithHardWraps(),
|
||||||
|
html.WithXHTML(),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := md.Convert(source, &buf); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Parser and Renderer options
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
### Parser options
|
||||||
|
|
||||||
|
| Functional option | Type | Description |
|
||||||
|
| ----------------- | ---- | ----------- |
|
||||||
|
| `parser.WithBlockParsers` | A `util.PrioritizedSlice` whose elements are `parser.BlockParser` | Parsers for parsing block level elements. |
|
||||||
|
| `parser.WithInlineParsers` | A `util.PrioritizedSlice` whose elements are `parser.InlineParser` | Parsers for parsing inline level elements. |
|
||||||
|
| `parser.WithParagraphTransformers` | A `util.PrioritizedSlice` whose elements are `parser.ParagraphTransformer` | Transformers for transforming paragraph nodes. |
|
||||||
|
| `parser.WithASTTransformers` | A `util.PrioritizedSlice` whose elements are `parser.ASTTransformer` | Transformers for transforming an AST. |
|
||||||
|
| `parser.WithAutoHeadingID` | `-` | Enables auto heading ids. |
|
||||||
|
| `parser.WithAttribute` | `-` | Enables custom attributes. Currently only headings supports attributes. |
|
||||||
|
|
||||||
|
### HTML Renderer options
|
||||||
|
|
||||||
|
| Functional option | Type | Description |
|
||||||
|
| ----------------- | ---- | ----------- |
|
||||||
|
| `html.WithWriter` | `html.Writer` | `html.Writer` for writing contents to an `io.Writer`. |
|
||||||
|
| `html.WithHardWraps` | `-` | Render new lines as `<br>`.|
|
||||||
|
| `html.WithXHTML` | `-` | Render as XHTML. |
|
||||||
|
| `html.WithUnsafe` | `-` | By default, goldmark does not render raw HTML and potentially dangerous links. With this option, goldmark renders these contents as written. |
|
||||||
|
|
||||||
|
### Built-in extensions
|
||||||
|
|
||||||
|
- `extension.Table`
|
||||||
|
- [GitHub Flavored Markdown: Tables](https://github.github.com/gfm/#tables-extension-)
|
||||||
|
- `extension.Strikethrough`
|
||||||
|
- [GitHub Flavored Markdown: Strikethrough](https://github.github.com/gfm/#strikethrough-extension-)
|
||||||
|
- `extension.Linkify`
|
||||||
|
- [GitHub Flavored Markdown: Autolinks](https://github.github.com/gfm/#autolinks-extension-)
|
||||||
|
- `extension.TaskList`
|
||||||
|
- [GitHub Flavored Markdown: Task list items](https://github.github.com/gfm/#task-list-items-extension-)
|
||||||
|
- `extension.GFM`
|
||||||
|
- This extension enables Table, Strikethrough, Linkify and TaskList.
|
||||||
|
- This extension does not filter tags defined in [6.11: Disallowed Raw HTML (extension)](https://github.github.com/gfm/#disallowed-raw-html-extension-).
|
||||||
|
If you need to filter HTML tags, see [Security](#security)
|
||||||
|
- `extension.DefinitionList`
|
||||||
|
- [PHP Markdown Extra: Definition lists](https://michelf.ca/projects/php-markdown/extra/#def-list)
|
||||||
|
- `extension.Footnote`
|
||||||
|
- [PHP Markdown Extra: Footnotes](https://michelf.ca/projects/php-markdown/extra/#footnotes)
|
||||||
|
- `extension.Typographer`
|
||||||
|
- This extension substitutes punctuations with typographic entities like [smartypants](https://daringfireball.net/projects/smartypants/).
|
||||||
|
|
||||||
|
### Attributes
|
||||||
|
`parser.WithAttribute` option allows you to define attributes on some elements.
|
||||||
|
|
||||||
|
Currently only headings support attributes.
|
||||||
|
|
||||||
|
**Attributes are being discussed in the
|
||||||
|
[CommonMark forum](https://talk.commonmark.org/t/consistent-attribute-syntax/272).
|
||||||
|
This syntax may possibly change in the future.**
|
||||||
|
|
||||||
|
|
||||||
|
#### Headings
|
||||||
|
|
||||||
|
```
|
||||||
|
## heading ## {#id .className attrName=attrValue class="class1 class2"}
|
||||||
|
|
||||||
|
## heading {#id .className attrName=attrValue class="class1 class2"}
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
heading {#id .className attrName=attrValue}
|
||||||
|
============
|
||||||
|
```
|
||||||
|
|
||||||
|
### Typographer extension
|
||||||
|
|
||||||
|
Typographer extension translates plain ASCII punctuation characters into typographic punctuation HTML entities.
|
||||||
|
|
||||||
|
Default substitutions are:
|
||||||
|
|
||||||
|
| Punctuation | Default entity |
|
||||||
|
| ------------ | ---------- |
|
||||||
|
| `'` | `‘`, `’` |
|
||||||
|
| `"` | `“`, `”` |
|
||||||
|
| `--` | `–` |
|
||||||
|
| `---` | `—` |
|
||||||
|
| `...` | `…` |
|
||||||
|
| `<<` | `«` |
|
||||||
|
| `>>` | `»` |
|
||||||
|
|
||||||
|
You can overwrite the substitutions by `extensions.WithTypographicSubstitutions`.
|
||||||
|
|
||||||
|
```go
|
||||||
|
markdown := goldmark.New(
|
||||||
|
goldmark.WithExtensions(
|
||||||
|
extension.NewTypographer(
|
||||||
|
extension.WithTypographicSubstitutions(extension.TypographicSubstitutions{
|
||||||
|
extension.LeftSingleQuote: []byte("‚"),
|
||||||
|
extension.RightSingleQuote: nil, // nil disables a substitution
|
||||||
|
}),
|
||||||
|
),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
Security
|
||||||
|
--------------------
|
||||||
|
By default, goldmark does not render raw HTML and potentially dangerous URLs.
|
||||||
|
If you need to gain more control over untrusted contents, it is recommended to
|
||||||
|
use an HTML sanitizer such as [bluemonday](https://github.com/microcosm-cc/bluemonday).
|
||||||
|
|
||||||
|
Benchmark
|
||||||
|
--------------------
|
||||||
|
You can run this benchmark in the `_benchmark` directory.
|
||||||
|
|
||||||
|
### against other golang libraries
|
||||||
|
|
||||||
|
blackfriday v2 seems to be fastest, but it is not CommonMark compliant, so the performance of
|
||||||
|
blackfriday v2 cannot simply be compared with that of the other CommonMark compliant libraries.
|
||||||
|
|
||||||
|
Though goldmark builds clean extensible AST structure and get full compliance with
|
||||||
|
CommonMark, it is reasonably fast and has lower memory consumption.
|
||||||
|
|
||||||
|
```
|
||||||
|
goos: darwin
|
||||||
|
goarch: amd64
|
||||||
|
BenchmarkMarkdown/Blackfriday-v2-12 326 3465240 ns/op 3298861 B/op 20047 allocs/op
|
||||||
|
BenchmarkMarkdown/GoldMark-12 303 3927494 ns/op 2574809 B/op 13853 allocs/op
|
||||||
|
BenchmarkMarkdown/CommonMark-12 244 4900853 ns/op 2753851 B/op 20527 allocs/op
|
||||||
|
BenchmarkMarkdown/Lute-12 130 9195245 ns/op 9175030 B/op 123534 allocs/op
|
||||||
|
BenchmarkMarkdown/GoMarkdown-12 9 113541994 ns/op 2187472 B/op 22173 allocs/op
|
||||||
|
```
|
||||||
|
|
||||||
|
### against cmark (CommonMark reference implementation written in C)
|
||||||
|
|
||||||
|
```
|
||||||
|
----------- cmark -----------
|
||||||
|
file: _data.md
|
||||||
|
iteration: 50
|
||||||
|
average: 0.0037760639 sec
|
||||||
|
go run ./goldmark_benchmark.go
|
||||||
|
------- goldmark -------
|
||||||
|
file: _data.md
|
||||||
|
iteration: 50
|
||||||
|
average: 0.0040964230 sec
|
||||||
|
```
|
||||||
|
|
||||||
|
As you can see, goldmark performs pretty much equally to cmark.
|
||||||
|
|
||||||
|
Extensions
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
- [goldmark-meta](https://github.com/yuin/goldmark-meta): A YAML metadata
|
||||||
|
extension for the goldmark Markdown parser.
|
||||||
|
- [goldmark-highlighting](https://github.com/yuin/goldmark-highlighting): A Syntax highlighting extension
|
||||||
|
for the goldmark markdown parser.
|
||||||
|
- [goldmark-mathjax](https://github.com/litao91/goldmark-mathjax): Mathjax support for goldmark markdown parser
|
||||||
|
|
||||||
|
goldmark internal(for extension developers)
|
||||||
|
----------------------------------------------
|
||||||
|
### Overview
|
||||||
|
goldmark's Markdown processing is outlined as a bellow diagram.
|
||||||
|
|
||||||
|
```
|
||||||
|
<Markdown in []byte, parser.Context>
|
||||||
|
|
|
||||||
|
V
|
||||||
|
+-------- parser.Parser ---------------------------
|
||||||
|
| 1. Parse block elements into AST
|
||||||
|
| 1. If a parsed block is a paragraph, apply
|
||||||
|
| ast.ParagraphTransformer
|
||||||
|
| 2. Traverse AST and parse blocks.
|
||||||
|
| 1. Process delimiters(emphasis) at the end of
|
||||||
|
| block parsing
|
||||||
|
| 3. Apply parser.ASTTransformers to AST
|
||||||
|
|
|
||||||
|
V
|
||||||
|
<ast.Node>
|
||||||
|
|
|
||||||
|
V
|
||||||
|
+------- renderer.Renderer ------------------------
|
||||||
|
| 1. Traverse AST and apply renderer.NodeRenderer
|
||||||
|
| corespond to the node type
|
||||||
|
|
||||||
|
|
|
||||||
|
V
|
||||||
|
<Output>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Parsing
|
||||||
|
Markdown documents are read through `text.Reader` interface.
|
||||||
|
|
||||||
|
AST nodes do not have concrete text. AST nodes have segment information of the documents. It is represented by `text.Segment` .
|
||||||
|
|
||||||
|
`text.Segment` has 3 attributes: `Start`, `End`, `Padding` .
|
||||||
|
|
||||||
|
|
||||||
|
**TODO**
|
||||||
|
|
||||||
|
See `extension` directory for examples of extensions.
|
||||||
|
|
||||||
|
Summary:
|
||||||
|
|
||||||
|
1. Define AST Node as a struct in which `ast.BaseBlock` or `ast.BaseInline` is embedded.
|
||||||
|
2. Write a parser that implements `parser.BlockParser` or `parser.InlineParser`.
|
||||||
|
3. Write a renderer that implements `renderer.NodeRenderer`.
|
||||||
|
4. Define your goldmark extension that implements `goldmark.Extender`.
|
||||||
|
|
||||||
|
|
||||||
|
Donation
|
||||||
|
--------------------
|
||||||
|
BTC: 1NEDSyUmo4SMTDP83JJQSWi1MvQUGGNMZB
|
||||||
|
|
||||||
|
License
|
||||||
|
--------------------
|
||||||
|
MIT
|
||||||
|
|
||||||
|
Author
|
||||||
|
--------------------
|
||||||
|
Yusuke Inuzuka
|
485
vendor/github.com/yuin/goldmark/ast/ast.go
generated
vendored
Normal file
485
vendor/github.com/yuin/goldmark/ast/ast.go
generated
vendored
Normal file
|
@ -0,0 +1,485 @@
|
||||||
|
// Package ast defines AST nodes that represent markdown elements.
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
textm "github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A NodeType indicates what type a node belongs to.
|
||||||
|
type NodeType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// TypeBlock indicates that a node is kind of block nodes.
|
||||||
|
TypeBlock NodeType = iota + 1
|
||||||
|
// TypeInline indicates that a node is kind of inline nodes.
|
||||||
|
TypeInline
|
||||||
|
// TypeDocument indicates that a node is kind of document nodes.
|
||||||
|
TypeDocument
|
||||||
|
)
|
||||||
|
|
||||||
|
// NodeKind indicates more specific type than NodeType.
|
||||||
|
type NodeKind int
|
||||||
|
|
||||||
|
func (k NodeKind) String() string {
|
||||||
|
return kindNames[k]
|
||||||
|
}
|
||||||
|
|
||||||
|
var kindMax NodeKind
|
||||||
|
var kindNames = []string{""}
|
||||||
|
|
||||||
|
// NewNodeKind returns a new Kind value.
|
||||||
|
func NewNodeKind(name string) NodeKind {
|
||||||
|
kindMax++
|
||||||
|
kindNames = append(kindNames, name)
|
||||||
|
return kindMax
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Attribute is an attribute of the Node
|
||||||
|
type Attribute struct {
|
||||||
|
Name []byte
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var attrNameIDS = []byte("#")
|
||||||
|
var attrNameID = []byte("id")
|
||||||
|
var attrNameClassS = []byte(".")
|
||||||
|
var attrNameClass = []byte("class")
|
||||||
|
|
||||||
|
// A Node interface defines basic AST node functionalities.
|
||||||
|
type Node interface {
|
||||||
|
// Type returns a type of this node.
|
||||||
|
Type() NodeType
|
||||||
|
|
||||||
|
// Kind returns a kind of this node.
|
||||||
|
Kind() NodeKind
|
||||||
|
|
||||||
|
// NextSibling returns a next sibling node of this node.
|
||||||
|
NextSibling() Node
|
||||||
|
|
||||||
|
// PreviousSibling returns a previous sibling node of this node.
|
||||||
|
PreviousSibling() Node
|
||||||
|
|
||||||
|
// Parent returns a parent node of this node.
|
||||||
|
Parent() Node
|
||||||
|
|
||||||
|
// SetParent sets a parent node to this node.
|
||||||
|
SetParent(Node)
|
||||||
|
|
||||||
|
// SetPreviousSibling sets a previous sibling node to this node.
|
||||||
|
SetPreviousSibling(Node)
|
||||||
|
|
||||||
|
// SetNextSibling sets a next sibling node to this node.
|
||||||
|
SetNextSibling(Node)
|
||||||
|
|
||||||
|
// HasChildren returns true if this node has any children, otherwise false.
|
||||||
|
HasChildren() bool
|
||||||
|
|
||||||
|
// ChildCount returns a total number of children.
|
||||||
|
ChildCount() int
|
||||||
|
|
||||||
|
// FirstChild returns a first child of this node.
|
||||||
|
FirstChild() Node
|
||||||
|
|
||||||
|
// LastChild returns a last child of this node.
|
||||||
|
LastChild() Node
|
||||||
|
|
||||||
|
// AppendChild append a node child to the tail of the children.
|
||||||
|
AppendChild(self, child Node)
|
||||||
|
|
||||||
|
// RemoveChild removes a node child from this node.
|
||||||
|
// If a node child is not children of this node, RemoveChild nothing to do.
|
||||||
|
RemoveChild(self, child Node)
|
||||||
|
|
||||||
|
// RemoveChildren removes all children from this node.
|
||||||
|
RemoveChildren(self Node)
|
||||||
|
|
||||||
|
// SortChildren sorts childrens by comparator.
|
||||||
|
SortChildren(comparator func(n1, n2 Node) int)
|
||||||
|
|
||||||
|
// ReplaceChild replace a node v1 with a node insertee.
|
||||||
|
// If v1 is not children of this node, ReplaceChild append a insetee to the
|
||||||
|
// tail of the children.
|
||||||
|
ReplaceChild(self, v1, insertee Node)
|
||||||
|
|
||||||
|
// InsertBefore inserts a node insertee before a node v1.
|
||||||
|
// If v1 is not children of this node, InsertBefore append a insetee to the
|
||||||
|
// tail of the children.
|
||||||
|
InsertBefore(self, v1, insertee Node)
|
||||||
|
|
||||||
|
// InsertAfterinserts a node insertee after a node v1.
|
||||||
|
// If v1 is not children of this node, InsertBefore append a insetee to the
|
||||||
|
// tail of the children.
|
||||||
|
InsertAfter(self, v1, insertee Node)
|
||||||
|
|
||||||
|
// Dump dumps an AST tree structure to stdout.
|
||||||
|
// This function completely aimed for debugging.
|
||||||
|
// level is a indent level. Implementer should indent informations with
|
||||||
|
// 2 * level spaces.
|
||||||
|
Dump(source []byte, level int)
|
||||||
|
|
||||||
|
// Text returns text values of this node.
|
||||||
|
Text(source []byte) []byte
|
||||||
|
|
||||||
|
// HasBlankPreviousLines returns true if the row before this node is blank,
|
||||||
|
// otherwise false.
|
||||||
|
// This method is valid only for block nodes.
|
||||||
|
HasBlankPreviousLines() bool
|
||||||
|
|
||||||
|
// SetBlankPreviousLines sets whether the row before this node is blank.
|
||||||
|
// This method is valid only for block nodes.
|
||||||
|
SetBlankPreviousLines(v bool)
|
||||||
|
|
||||||
|
// Lines returns text segments that hold positions in a source.
|
||||||
|
// This method is valid only for block nodes.
|
||||||
|
Lines() *textm.Segments
|
||||||
|
|
||||||
|
// SetLines sets text segments that hold positions in a source.
|
||||||
|
// This method is valid only for block nodes.
|
||||||
|
SetLines(*textm.Segments)
|
||||||
|
|
||||||
|
// IsRaw returns true if contents should be rendered as 'raw' contents.
|
||||||
|
IsRaw() bool
|
||||||
|
|
||||||
|
// SetAttribute sets the given value to the attributes.
|
||||||
|
SetAttribute(name []byte, value interface{})
|
||||||
|
|
||||||
|
// SetAttributeString sets the given value to the attributes.
|
||||||
|
SetAttributeString(name string, value interface{})
|
||||||
|
|
||||||
|
// Attribute returns a (attribute value, true) if an attribute
|
||||||
|
// associated with the given name is found, otherwise
|
||||||
|
// (nil, false)
|
||||||
|
Attribute(name []byte) (interface{}, bool)
|
||||||
|
|
||||||
|
// AttributeString returns a (attribute value, true) if an attribute
|
||||||
|
// associated with the given name is found, otherwise
|
||||||
|
// (nil, false)
|
||||||
|
AttributeString(name string) (interface{}, bool)
|
||||||
|
|
||||||
|
// Attributes returns a list of attributes.
|
||||||
|
// This may be a nil if there are no attributes.
|
||||||
|
Attributes() []Attribute
|
||||||
|
|
||||||
|
// RemoveAttributes removes all attributes from this node.
|
||||||
|
RemoveAttributes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// A BaseNode struct implements the Node interface.
|
||||||
|
type BaseNode struct {
|
||||||
|
firstChild Node
|
||||||
|
lastChild Node
|
||||||
|
parent Node
|
||||||
|
next Node
|
||||||
|
prev Node
|
||||||
|
childCount int
|
||||||
|
attributes []Attribute
|
||||||
|
}
|
||||||
|
|
||||||
|
func ensureIsolated(v Node) {
|
||||||
|
if p := v.Parent(); p != nil {
|
||||||
|
p.RemoveChild(p, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasChildren implements Node.HasChildren .
|
||||||
|
func (n *BaseNode) HasChildren() bool {
|
||||||
|
return n.firstChild != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPreviousSibling implements Node.SetPreviousSibling .
|
||||||
|
func (n *BaseNode) SetPreviousSibling(v Node) {
|
||||||
|
n.prev = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNextSibling implements Node.SetNextSibling .
|
||||||
|
func (n *BaseNode) SetNextSibling(v Node) {
|
||||||
|
n.next = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreviousSibling implements Node.PreviousSibling .
|
||||||
|
func (n *BaseNode) PreviousSibling() Node {
|
||||||
|
return n.prev
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextSibling implements Node.NextSibling .
|
||||||
|
func (n *BaseNode) NextSibling() Node {
|
||||||
|
return n.next
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveChild implements Node.RemoveChild .
|
||||||
|
func (n *BaseNode) RemoveChild(self, v Node) {
|
||||||
|
if v.Parent() != self {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n.childCount--
|
||||||
|
prev := v.PreviousSibling()
|
||||||
|
next := v.NextSibling()
|
||||||
|
if prev != nil {
|
||||||
|
prev.SetNextSibling(next)
|
||||||
|
} else {
|
||||||
|
n.firstChild = next
|
||||||
|
}
|
||||||
|
if next != nil {
|
||||||
|
next.SetPreviousSibling(prev)
|
||||||
|
} else {
|
||||||
|
n.lastChild = prev
|
||||||
|
}
|
||||||
|
v.SetParent(nil)
|
||||||
|
v.SetPreviousSibling(nil)
|
||||||
|
v.SetNextSibling(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveChildren implements Node.RemoveChildren .
|
||||||
|
func (n *BaseNode) RemoveChildren(self Node) {
|
||||||
|
for c := n.firstChild; c != nil; c = c.NextSibling() {
|
||||||
|
c.SetParent(nil)
|
||||||
|
c.SetPreviousSibling(nil)
|
||||||
|
c.SetNextSibling(nil)
|
||||||
|
}
|
||||||
|
n.firstChild = nil
|
||||||
|
n.lastChild = nil
|
||||||
|
n.childCount = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SortChildren implements Node.SortChildren
|
||||||
|
func (n *BaseNode) SortChildren(comparator func(n1, n2 Node) int) {
|
||||||
|
var sorted Node
|
||||||
|
current := n.firstChild
|
||||||
|
for current != nil {
|
||||||
|
next := current.NextSibling()
|
||||||
|
if sorted == nil || comparator(sorted, current) >= 0 {
|
||||||
|
current.SetNextSibling(sorted)
|
||||||
|
if sorted != nil {
|
||||||
|
sorted.SetPreviousSibling(current)
|
||||||
|
}
|
||||||
|
sorted = current
|
||||||
|
sorted.SetPreviousSibling(nil)
|
||||||
|
} else {
|
||||||
|
c := sorted
|
||||||
|
for c.NextSibling() != nil && comparator(c.NextSibling(), current) < 0 {
|
||||||
|
c = c.NextSibling()
|
||||||
|
}
|
||||||
|
current.SetNextSibling(c.NextSibling())
|
||||||
|
current.SetPreviousSibling(c)
|
||||||
|
if c.NextSibling() != nil {
|
||||||
|
c.NextSibling().SetPreviousSibling(current)
|
||||||
|
}
|
||||||
|
c.SetNextSibling(current)
|
||||||
|
}
|
||||||
|
current = next
|
||||||
|
}
|
||||||
|
n.firstChild = sorted
|
||||||
|
for c := n.firstChild; c != nil; c = c.NextSibling() {
|
||||||
|
n.lastChild = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstChild implements Node.FirstChild .
|
||||||
|
func (n *BaseNode) FirstChild() Node {
|
||||||
|
return n.firstChild
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastChild implements Node.LastChild .
|
||||||
|
func (n *BaseNode) LastChild() Node {
|
||||||
|
return n.lastChild
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChildCount implements Node.ChildCount .
|
||||||
|
func (n *BaseNode) ChildCount() int {
|
||||||
|
return n.childCount
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent implements Node.Parent .
|
||||||
|
func (n *BaseNode) Parent() Node {
|
||||||
|
return n.parent
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetParent implements Node.SetParent .
|
||||||
|
func (n *BaseNode) SetParent(v Node) {
|
||||||
|
n.parent = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendChild implements Node.AppendChild .
|
||||||
|
func (n *BaseNode) AppendChild(self, v Node) {
|
||||||
|
ensureIsolated(v)
|
||||||
|
if n.firstChild == nil {
|
||||||
|
n.firstChild = v
|
||||||
|
v.SetNextSibling(nil)
|
||||||
|
v.SetPreviousSibling(nil)
|
||||||
|
} else {
|
||||||
|
last := n.lastChild
|
||||||
|
last.SetNextSibling(v)
|
||||||
|
v.SetPreviousSibling(last)
|
||||||
|
}
|
||||||
|
v.SetParent(self)
|
||||||
|
n.lastChild = v
|
||||||
|
n.childCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReplaceChild implements Node.ReplaceChild .
|
||||||
|
func (n *BaseNode) ReplaceChild(self, v1, insertee Node) {
|
||||||
|
n.InsertBefore(self, v1, insertee)
|
||||||
|
n.RemoveChild(self, v1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertAfter implements Node.InsertAfter .
|
||||||
|
func (n *BaseNode) InsertAfter(self, v1, insertee Node) {
|
||||||
|
n.InsertBefore(self, v1.NextSibling(), insertee)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertBefore implements Node.InsertBefore .
|
||||||
|
func (n *BaseNode) InsertBefore(self, v1, insertee Node) {
|
||||||
|
n.childCount++
|
||||||
|
if v1 == nil {
|
||||||
|
n.AppendChild(self, insertee)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ensureIsolated(insertee)
|
||||||
|
if v1.Parent() == self {
|
||||||
|
c := v1
|
||||||
|
prev := c.PreviousSibling()
|
||||||
|
if prev != nil {
|
||||||
|
prev.SetNextSibling(insertee)
|
||||||
|
insertee.SetPreviousSibling(prev)
|
||||||
|
} else {
|
||||||
|
n.firstChild = insertee
|
||||||
|
insertee.SetPreviousSibling(nil)
|
||||||
|
}
|
||||||
|
insertee.SetNextSibling(c)
|
||||||
|
c.SetPreviousSibling(insertee)
|
||||||
|
insertee.SetParent(self)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Text implements Node.Text .
|
||||||
|
func (n *BaseNode) Text(source []byte) []byte {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for c := n.firstChild; c != nil; c = c.NextSibling() {
|
||||||
|
buf.Write(c.Text(source))
|
||||||
|
}
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAttribute implements Node.SetAttribute.
|
||||||
|
func (n *BaseNode) SetAttribute(name []byte, value interface{}) {
|
||||||
|
if n.attributes == nil {
|
||||||
|
n.attributes = make([]Attribute, 0, 10)
|
||||||
|
} else {
|
||||||
|
for i, a := range n.attributes {
|
||||||
|
if bytes.Equal(a.Name, name) {
|
||||||
|
n.attributes[i].Name = name
|
||||||
|
n.attributes[i].Value = value
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n.attributes = append(n.attributes, Attribute{name, value})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAttributeString implements Node.SetAttributeString
|
||||||
|
func (n *BaseNode) SetAttributeString(name string, value interface{}) {
|
||||||
|
n.SetAttribute(util.StringToReadOnlyBytes(name), value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attribute implements Node.Attribute.
|
||||||
|
func (n *BaseNode) Attribute(name []byte) (interface{}, bool) {
|
||||||
|
if n.attributes == nil {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
for i, a := range n.attributes {
|
||||||
|
if bytes.Equal(a.Name, name) {
|
||||||
|
return n.attributes[i].Value, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttributeString implements Node.AttributeString.
|
||||||
|
func (n *BaseNode) AttributeString(s string) (interface{}, bool) {
|
||||||
|
return n.Attribute(util.StringToReadOnlyBytes(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attributes implements Node.Attributes
|
||||||
|
func (n *BaseNode) Attributes() []Attribute {
|
||||||
|
return n.attributes
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAttributes implements Node.RemoveAttributes
|
||||||
|
func (n *BaseNode) RemoveAttributes() {
|
||||||
|
n.attributes = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DumpHelper is a helper function to implement Node.Dump.
|
||||||
|
// kv is pairs of an attribute name and an attribute value.
|
||||||
|
// cb is a function called after wrote a name and attributes.
|
||||||
|
func DumpHelper(v Node, source []byte, level int, kv map[string]string, cb func(int)) {
|
||||||
|
name := v.Kind().String()
|
||||||
|
indent := strings.Repeat(" ", level)
|
||||||
|
fmt.Printf("%s%s {\n", indent, name)
|
||||||
|
indent2 := strings.Repeat(" ", level+1)
|
||||||
|
if v.Type() == TypeBlock {
|
||||||
|
fmt.Printf("%sRawText: \"", indent2)
|
||||||
|
for i := 0; i < v.Lines().Len(); i++ {
|
||||||
|
line := v.Lines().At(i)
|
||||||
|
fmt.Printf("%s", line.Value(source))
|
||||||
|
}
|
||||||
|
fmt.Printf("\"\n")
|
||||||
|
fmt.Printf("%sHasBlankPreviousLines: %v\n", indent2, v.HasBlankPreviousLines())
|
||||||
|
}
|
||||||
|
for name, value := range kv {
|
||||||
|
fmt.Printf("%s%s: %s\n", indent2, name, value)
|
||||||
|
}
|
||||||
|
if cb != nil {
|
||||||
|
cb(level + 1)
|
||||||
|
}
|
||||||
|
for c := v.FirstChild(); c != nil; c = c.NextSibling() {
|
||||||
|
c.Dump(source, level+1)
|
||||||
|
}
|
||||||
|
fmt.Printf("%s}\n", indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WalkStatus represents a current status of the Walk function.
type WalkStatus int

const (
	// WalkStop indicates no more walking needed.
	// Note that the zero value of WalkStatus is intentionally not a valid
	// status (iota + 1), so an uninitialized status is distinguishable.
	WalkStop WalkStatus = iota + 1

	// WalkSkipChildren indicates that Walk wont walk on children of current
	// node.
	WalkSkipChildren

	// WalkContinue indicates that Walk can continue to walk.
	WalkContinue
)

// Walker is a function that will be called when Walk find a
// new node.
// entering is set true before walks children, false after walked children.
// If Walker returns error, Walk function immediately stop walking.
type Walker func(n Node, entering bool) (WalkStatus, error)
|
||||||
|
|
||||||
|
// Walk walks a AST tree by the depth first search algorithm.
|
||||||
|
func Walk(n Node, walker Walker) error {
|
||||||
|
status, err := walker(n, true)
|
||||||
|
if err != nil || status == WalkStop {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if status != WalkSkipChildren {
|
||||||
|
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||||
|
if err = Walk(c, walker); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
status, err = walker(n, false)
|
||||||
|
if err != nil || status == WalkStop {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
474
vendor/github.com/yuin/goldmark/ast/block.go
generated
vendored
Normal file
474
vendor/github.com/yuin/goldmark/ast/block.go
generated
vendored
Normal file
|
@ -0,0 +1,474 @@
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
textm "github.com/yuin/goldmark/text"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A BaseBlock struct implements the Node interface.
type BaseBlock struct {
	BaseNode
	// blankPreviousLines records whether blank lines precede this block.
	blankPreviousLines bool
	// lines holds the source segments of this block; allocated lazily.
	lines *textm.Segments
}

// Type implements Node.Type
func (b *BaseBlock) Type() NodeType {
	return TypeBlock
}

// IsRaw implements Node.IsRaw
// Blocks are not raw by default; raw block types override this.
func (b *BaseBlock) IsRaw() bool {
	return false
}

// HasBlankPreviousLines implements Node.HasBlankPreviousLines.
func (b *BaseBlock) HasBlankPreviousLines() bool {
	return b.blankPreviousLines
}

// SetBlankPreviousLines implements Node.SetBlankPreviousLines.
func (b *BaseBlock) SetBlankPreviousLines(v bool) {
	b.blankPreviousLines = v
}

// Lines implements Node.Lines
// The segment list is created on first access, so the zero value is usable.
func (b *BaseBlock) Lines() *textm.Segments {
	if b.lines == nil {
		b.lines = textm.NewSegments()
	}
	return b.lines
}

// SetLines implements Node.SetLines
func (b *BaseBlock) SetLines(v *textm.Segments) {
	b.lines = v
}
|
||||||
|
|
||||||
|
// A Document struct is a root node of Markdown text.
|
||||||
|
type Document struct {
|
||||||
|
BaseBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindDocument is a NodeKind of the Document node.
|
||||||
|
var KindDocument = NewNodeKind("Document")
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *Document) Dump(source []byte, level int) {
|
||||||
|
DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type implements Node.Type .
|
||||||
|
func (n *Document) Type() NodeType {
|
||||||
|
return TypeDocument
|
||||||
|
}
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Document) Kind() NodeKind {
|
||||||
|
return KindDocument
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDocument returns a new Document node.
|
||||||
|
func NewDocument() *Document {
|
||||||
|
return &Document{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A TextBlock struct is a node whose lines
|
||||||
|
// should be rendered without any containers.
|
||||||
|
type TextBlock struct {
|
||||||
|
BaseBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *TextBlock) Dump(source []byte, level int) {
|
||||||
|
DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindTextBlock is a NodeKind of the TextBlock node.
|
||||||
|
var KindTextBlock = NewNodeKind("TextBlock")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *TextBlock) Kind() NodeKind {
|
||||||
|
return KindTextBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTextBlock returns a new TextBlock node.
|
||||||
|
func NewTextBlock() *TextBlock {
|
||||||
|
return &TextBlock{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Paragraph struct represents a paragraph of Markdown text.
|
||||||
|
type Paragraph struct {
|
||||||
|
BaseBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *Paragraph) Dump(source []byte, level int) {
|
||||||
|
DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindParagraph is a NodeKind of the Paragraph node.
|
||||||
|
var KindParagraph = NewNodeKind("Paragraph")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Paragraph) Kind() NodeKind {
|
||||||
|
return KindParagraph
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewParagraph returns a new Paragraph node.
|
||||||
|
func NewParagraph() *Paragraph {
|
||||||
|
return &Paragraph{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsParagraph returns true if the given node implements the Paragraph interface,
|
||||||
|
// otherwise false.
|
||||||
|
func IsParagraph(node Node) bool {
|
||||||
|
_, ok := node.(*Paragraph)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Heading struct represents headings like SetextHeading and ATXHeading.
|
||||||
|
type Heading struct {
|
||||||
|
BaseBlock
|
||||||
|
// Level returns a level of this heading.
|
||||||
|
// This value is between 1 and 6.
|
||||||
|
Level int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *Heading) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{
|
||||||
|
"Level": fmt.Sprintf("%d", n.Level),
|
||||||
|
}
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindHeading is a NodeKind of the Heading node.
|
||||||
|
var KindHeading = NewNodeKind("Heading")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Heading) Kind() NodeKind {
|
||||||
|
return KindHeading
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHeading returns a new Heading node.
|
||||||
|
func NewHeading(level int) *Heading {
|
||||||
|
return &Heading{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
Level: level,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A ThematicBreak struct represents a thematic break of Markdown text.
|
||||||
|
type ThematicBreak struct {
|
||||||
|
BaseBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *ThematicBreak) Dump(source []byte, level int) {
|
||||||
|
DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindThematicBreak is a NodeKind of the ThematicBreak node.
|
||||||
|
var KindThematicBreak = NewNodeKind("ThematicBreak")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *ThematicBreak) Kind() NodeKind {
|
||||||
|
return KindThematicBreak
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewThematicBreak returns a new ThematicBreak node.
|
||||||
|
func NewThematicBreak() *ThematicBreak {
|
||||||
|
return &ThematicBreak{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A CodeBlock interface represents an indented code block of Markdown text.
|
||||||
|
type CodeBlock struct {
|
||||||
|
BaseBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRaw implements Node.IsRaw.
|
||||||
|
func (n *CodeBlock) IsRaw() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *CodeBlock) Dump(source []byte, level int) {
|
||||||
|
DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindCodeBlock is a NodeKind of the CodeBlock node.
|
||||||
|
var KindCodeBlock = NewNodeKind("CodeBlock")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *CodeBlock) Kind() NodeKind {
|
||||||
|
return KindCodeBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCodeBlock returns a new CodeBlock node.
|
||||||
|
func NewCodeBlock() *CodeBlock {
|
||||||
|
return &CodeBlock{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A FencedCodeBlock struct represents a fenced code block of Markdown text.
|
||||||
|
type FencedCodeBlock struct {
|
||||||
|
BaseBlock
|
||||||
|
// Info returns a info text of this fenced code block.
|
||||||
|
Info *Text
|
||||||
|
|
||||||
|
language []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Language returns an language in an info string.
|
||||||
|
// Language returns nil if this node does not have an info string.
|
||||||
|
func (n *FencedCodeBlock) Language(source []byte) []byte {
|
||||||
|
if n.language == nil && n.Info != nil {
|
||||||
|
segment := n.Info.Segment
|
||||||
|
info := segment.Value(source)
|
||||||
|
i := 0
|
||||||
|
for ; i < len(info); i++ {
|
||||||
|
if info[i] == ' ' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n.language = info[:i]
|
||||||
|
}
|
||||||
|
return n.language
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRaw implements Node.IsRaw.
|
||||||
|
func (n *FencedCodeBlock) IsRaw() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *FencedCodeBlock) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
if n.Info != nil {
|
||||||
|
m["Info"] = fmt.Sprintf("\"%s\"", n.Info.Text(source))
|
||||||
|
}
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindFencedCodeBlock is a NodeKind of the FencedCodeBlock node.
|
||||||
|
var KindFencedCodeBlock = NewNodeKind("FencedCodeBlock")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *FencedCodeBlock) Kind() NodeKind {
|
||||||
|
return KindFencedCodeBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFencedCodeBlock return a new FencedCodeBlock node.
|
||||||
|
func NewFencedCodeBlock(info *Text) *FencedCodeBlock {
|
||||||
|
return &FencedCodeBlock{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
Info: info,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Blockquote struct represents an blockquote block of Markdown text.
|
||||||
|
type Blockquote struct {
|
||||||
|
BaseBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump .
|
||||||
|
func (n *Blockquote) Dump(source []byte, level int) {
|
||||||
|
DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindBlockquote is a NodeKind of the Blockquote node.
|
||||||
|
var KindBlockquote = NewNodeKind("Blockquote")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Blockquote) Kind() NodeKind {
|
||||||
|
return KindBlockquote
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBlockquote returns a new Blockquote node.
|
||||||
|
func NewBlockquote() *Blockquote {
|
||||||
|
return &Blockquote{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A List structr represents a list of Markdown text.
|
||||||
|
type List struct {
|
||||||
|
BaseBlock
|
||||||
|
|
||||||
|
// Marker is a markar character like '-', '+', ')' and '.'.
|
||||||
|
Marker byte
|
||||||
|
|
||||||
|
// IsTight is a true if this list is a 'tight' list.
|
||||||
|
// See https://spec.commonmark.org/0.29/#loose for details.
|
||||||
|
IsTight bool
|
||||||
|
|
||||||
|
// Start is an initial number of this ordered list.
|
||||||
|
// If this list is not an ordered list, Start is 0.
|
||||||
|
Start int
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsOrdered returns true if this list is an ordered list, otherwise false.
|
||||||
|
func (l *List) IsOrdered() bool {
|
||||||
|
return l.Marker == '.' || l.Marker == ')'
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanContinue returns true if this list can continue with
|
||||||
|
// the given mark and a list type, otherwise false.
|
||||||
|
func (l *List) CanContinue(marker byte, isOrdered bool) bool {
|
||||||
|
return marker == l.Marker && isOrdered == l.IsOrdered()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (l *List) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{
|
||||||
|
"Ordered": fmt.Sprintf("%v", l.IsOrdered()),
|
||||||
|
"Marker": fmt.Sprintf("%c", l.Marker),
|
||||||
|
"Tight": fmt.Sprintf("%v", l.IsTight),
|
||||||
|
}
|
||||||
|
if l.IsOrdered() {
|
||||||
|
m["Start"] = fmt.Sprintf("%d", l.Start)
|
||||||
|
}
|
||||||
|
DumpHelper(l, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindList is a NodeKind of the List node.
|
||||||
|
var KindList = NewNodeKind("List")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (l *List) Kind() NodeKind {
|
||||||
|
return KindList
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewList returns a new List node.
|
||||||
|
func NewList(marker byte) *List {
|
||||||
|
return &List{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
Marker: marker,
|
||||||
|
IsTight: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A ListItem struct represents a list item of Markdown text.
|
||||||
|
type ListItem struct {
|
||||||
|
BaseBlock
|
||||||
|
|
||||||
|
// Offset is an offset potision of this item.
|
||||||
|
Offset int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *ListItem) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{
|
||||||
|
"Offset": fmt.Sprintf("%d", n.Offset),
|
||||||
|
}
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindListItem is a NodeKind of the ListItem node.
|
||||||
|
var KindListItem = NewNodeKind("ListItem")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *ListItem) Kind() NodeKind {
|
||||||
|
return KindListItem
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewListItem returns a new ListItem node.
|
||||||
|
func NewListItem(offset int) *ListItem {
|
||||||
|
return &ListItem{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
Offset: offset,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTMLBlockType represents kinds of an html blocks.
// See https://spec.commonmark.org/0.29/#html-blocks
// The zero value is intentionally unused (constants start at iota + 1).
type HTMLBlockType int

const (
	// HTMLBlockType1 represents type 1 html blocks
	HTMLBlockType1 HTMLBlockType = iota + 1
	// HTMLBlockType2 represents type 2 html blocks
	HTMLBlockType2
	// HTMLBlockType3 represents type 3 html blocks
	HTMLBlockType3
	// HTMLBlockType4 represents type 4 html blocks
	HTMLBlockType4
	// HTMLBlockType5 represents type 5 html blocks
	HTMLBlockType5
	// HTMLBlockType6 represents type 6 html blocks
	HTMLBlockType6
	// HTMLBlockType7 represents type 7 html blocks
	HTMLBlockType7
)
|
||||||
|
|
||||||
|
// An HTMLBlock struct represents an html block of Markdown text.
|
||||||
|
type HTMLBlock struct {
|
||||||
|
BaseBlock
|
||||||
|
|
||||||
|
// Type is a type of this html block.
|
||||||
|
HTMLBlockType HTMLBlockType
|
||||||
|
|
||||||
|
// ClosureLine is a line that closes this html block.
|
||||||
|
ClosureLine textm.Segment
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRaw implements Node.IsRaw.
|
||||||
|
func (n *HTMLBlock) IsRaw() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasClosure returns true if this html block has a closure line,
|
||||||
|
// otherwise false.
|
||||||
|
func (n *HTMLBlock) HasClosure() bool {
|
||||||
|
return n.ClosureLine.Start >= 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *HTMLBlock) Dump(source []byte, level int) {
|
||||||
|
indent := strings.Repeat(" ", level)
|
||||||
|
fmt.Printf("%s%s {\n", indent, "HTMLBlock")
|
||||||
|
indent2 := strings.Repeat(" ", level+1)
|
||||||
|
fmt.Printf("%sRawText: \"", indent2)
|
||||||
|
for i := 0; i < n.Lines().Len(); i++ {
|
||||||
|
s := n.Lines().At(i)
|
||||||
|
fmt.Print(string(source[s.Start:s.Stop]))
|
||||||
|
}
|
||||||
|
fmt.Printf("\"\n")
|
||||||
|
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||||
|
c.Dump(source, level+1)
|
||||||
|
}
|
||||||
|
if n.HasClosure() {
|
||||||
|
cl := n.ClosureLine
|
||||||
|
fmt.Printf("%sClosure: \"%s\"\n", indent2, string(cl.Value(source)))
|
||||||
|
}
|
||||||
|
fmt.Printf("%s}\n", indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindHTMLBlock is a NodeKind of the HTMLBlock node.
|
||||||
|
var KindHTMLBlock = NewNodeKind("HTMLBlock")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *HTMLBlock) Kind() NodeKind {
|
||||||
|
return KindHTMLBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHTMLBlock returns a new HTMLBlock node.
|
||||||
|
func NewHTMLBlock(typ HTMLBlockType) *HTMLBlock {
|
||||||
|
return &HTMLBlock{
|
||||||
|
BaseBlock: BaseBlock{},
|
||||||
|
HTMLBlockType: typ,
|
||||||
|
ClosureLine: textm.NewSegment(-1, -1),
|
||||||
|
}
|
||||||
|
}
|
548
vendor/github.com/yuin/goldmark/ast/inline.go
generated
vendored
Normal file
548
vendor/github.com/yuin/goldmark/ast/inline.go
generated
vendored
Normal file
|
@ -0,0 +1,548 @@
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
textm "github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A BaseInline struct implements the Node interface.
type BaseInline struct {
	BaseNode
}

// Type implements Node.Type
func (b *BaseInline) Type() NodeType {
	return TypeInline
}

// IsRaw implements Node.IsRaw
func (b *BaseInline) IsRaw() bool {
	return false
}

// HasBlankPreviousLines implements Node.HasBlankPreviousLines.
// Blank-line tracking is a block-level concept, so calling this on an
// inline node is a programming error and panics.
func (b *BaseInline) HasBlankPreviousLines() bool {
	panic("can not call with inline nodes.")
}

// SetBlankPreviousLines implements Node.SetBlankPreviousLines.
// Panics: blank-line tracking does not apply to inline nodes.
func (b *BaseInline) SetBlankPreviousLines(v bool) {
	panic("can not call with inline nodes.")
}

// Lines implements Node.Lines
// Panics: line segments do not apply to inline nodes.
func (b *BaseInline) Lines() *textm.Segments {
	panic("can not call with inline nodes.")
}

// SetLines implements Node.SetLines
// Panics: line segments do not apply to inline nodes.
func (b *BaseInline) SetLines(v *textm.Segments) {
	panic("can not call with inline nodes.")
}
|
||||||
|
|
||||||
|
// A Text struct represents a textual content of the Markdown text.
type Text struct {
	BaseInline
	// Segment is a position in a source text.
	Segment textm.Segment

	// flags is a bit set of the text* flag constants (soft/hard line
	// break, raw, code).
	flags uint8
}
|
||||||
|
|
||||||
|
const (
	textSoftLineBreak = 1 << iota
	textHardLineBreak
	textRaw
	textCode
)

// textFlagsString renders a flag bit set as a comma-separated list of
// human-readable flag names, for use in Dump output. An empty flag set
// yields the empty string.
func textFlagsString(flags uint8) string {
	var names []string
	for _, f := range []struct {
		bit  uint8
		name string
	}{
		{textSoftLineBreak, "SoftLineBreak"},
		{textHardLineBreak, "HardLineBreak"},
		{textRaw, "Raw"},
		{textCode, "Code"},
	} {
		if flags&f.bit != 0 {
			names = append(names, f.name)
		}
	}
	return strings.Join(names, ", ")
}
|
||||||
|
|
||||||
|
// Inline implements Inline.Inline.
|
||||||
|
func (n *Text) Inline() {
|
||||||
|
}
|
||||||
|
|
||||||
|
// SoftLineBreak returns true if this node ends with a new line,
|
||||||
|
// otherwise false.
|
||||||
|
func (n *Text) SoftLineBreak() bool {
|
||||||
|
return n.flags&textSoftLineBreak != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSoftLineBreak sets whether this node ends with a new line.
|
||||||
|
func (n *Text) SetSoftLineBreak(v bool) {
|
||||||
|
if v {
|
||||||
|
n.flags |= textSoftLineBreak
|
||||||
|
} else {
|
||||||
|
n.flags = n.flags &^ textHardLineBreak
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRaw returns true if this text should be rendered without unescaping
|
||||||
|
// back slash escapes and resolving references.
|
||||||
|
func (n *Text) IsRaw() bool {
|
||||||
|
return n.flags&textRaw != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRaw sets whether this text should be rendered as raw contents.
|
||||||
|
func (n *Text) SetRaw(v bool) {
|
||||||
|
if v {
|
||||||
|
n.flags |= textRaw
|
||||||
|
} else {
|
||||||
|
n.flags = n.flags &^ textRaw
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HardLineBreak returns true if this node ends with a hard line break.
|
||||||
|
// See https://spec.commonmark.org/0.29/#hard-line-breaks for details.
|
||||||
|
func (n *Text) HardLineBreak() bool {
|
||||||
|
return n.flags&textHardLineBreak != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetHardLineBreak sets whether this node ends with a hard line break.
|
||||||
|
func (n *Text) SetHardLineBreak(v bool) {
|
||||||
|
if v {
|
||||||
|
n.flags |= textHardLineBreak
|
||||||
|
} else {
|
||||||
|
n.flags = n.flags &^ textHardLineBreak
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge merges a Node n into this node.
// Merge returns true if the given node has been merged, otherwise false.
// Merging succeeds only when node is a *Text that starts exactly where this
// node stops in the source, has no padding, matches this node's raw-ness,
// and this node does not end in a newline.
// NOTE(review): the '\n' check indexes source[n.Segment.Stop-1]; it is only
// reached when Stop == t.Segment.Start, which presumably is never 0 for
// mergeable nodes — confirm against callers.
func (n *Text) Merge(node Node, source []byte) bool {
	t, ok := node.(*Text)
	if !ok {
		return false
	}
	if n.Segment.Stop != t.Segment.Start || t.Segment.Padding != 0 || source[n.Segment.Stop-1] == '\n' || t.IsRaw() != n.IsRaw() {
		return false
	}
	// Extend this node's segment and inherit the merged node's break flags.
	n.Segment.Stop = t.Segment.Stop
	n.SetSoftLineBreak(t.SoftLineBreak())
	n.SetHardLineBreak(t.HardLineBreak())
	return true
}
|
||||||
|
|
||||||
|
// Text implements Node.Text.
|
||||||
|
func (n *Text) Text(source []byte) []byte {
|
||||||
|
return n.Segment.Value(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *Text) Dump(source []byte, level int) {
|
||||||
|
fs := textFlagsString(n.flags)
|
||||||
|
if len(fs) != 0 {
|
||||||
|
fs = "(" + fs + ")"
|
||||||
|
}
|
||||||
|
fmt.Printf("%sText%s: \"%s\"\n", strings.Repeat(" ", level), fs, strings.TrimRight(string(n.Text(source)), "\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindText is a NodeKind of the Text node.
|
||||||
|
var KindText = NewNodeKind("Text")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Text) Kind() NodeKind {
|
||||||
|
return KindText
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewText returns a new Text node.
|
||||||
|
func NewText() *Text {
|
||||||
|
return &Text{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTextSegment returns a new Text node with the given source potision.
|
||||||
|
func NewTextSegment(v textm.Segment) *Text {
|
||||||
|
return &Text{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
Segment: v,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRawTextSegment returns a new Text node with the given source position.
|
||||||
|
// The new node should be rendered as raw contents.
|
||||||
|
func NewRawTextSegment(v textm.Segment) *Text {
|
||||||
|
t := &Text{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
Segment: v,
|
||||||
|
}
|
||||||
|
t.SetRaw(true)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeOrAppendTextSegment merges a given s into the last child of the parent if
|
||||||
|
// it can be merged, otherwise creates a new Text node and appends it to after current
|
||||||
|
// last child.
|
||||||
|
func MergeOrAppendTextSegment(parent Node, s textm.Segment) {
|
||||||
|
last := parent.LastChild()
|
||||||
|
t, ok := last.(*Text)
|
||||||
|
if ok && t.Segment.Stop == s.Start && !t.SoftLineBreak() {
|
||||||
|
t.Segment = t.Segment.WithStop(s.Stop)
|
||||||
|
} else {
|
||||||
|
parent.AppendChild(parent, NewTextSegment(s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MergeOrReplaceTextSegment merges a given s into a previous sibling of the node n
|
||||||
|
// if a previous sibling of the node n is *Text, otherwise replaces Node n with s.
|
||||||
|
func MergeOrReplaceTextSegment(parent Node, n Node, s textm.Segment) {
|
||||||
|
prev := n.PreviousSibling()
|
||||||
|
if t, ok := prev.(*Text); ok && t.Segment.Stop == s.Start && !t.SoftLineBreak() {
|
||||||
|
t.Segment = t.Segment.WithStop(s.Stop)
|
||||||
|
parent.RemoveChild(parent, n)
|
||||||
|
} else {
|
||||||
|
parent.ReplaceChild(parent, n, NewTextSegment(s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A String struct is a textual content that has a concrete value
type String struct {
	BaseInline

	// Value is the literal content of this node (not a source position).
	Value []byte
	// flags reuses the text* flag bits (raw / code).
	flags uint8
}
|
||||||
|
|
||||||
|
// Inline implements Inline.Inline.
|
||||||
|
func (n *String) Inline() {
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsRaw returns true if this text should be rendered without unescaping
|
||||||
|
// back slash escapes and resolving references.
|
||||||
|
func (n *String) IsRaw() bool {
|
||||||
|
return n.flags&textRaw != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRaw sets whether this text should be rendered as raw contents.
|
||||||
|
func (n *String) SetRaw(v bool) {
|
||||||
|
if v {
|
||||||
|
n.flags |= textRaw
|
||||||
|
} else {
|
||||||
|
n.flags = n.flags &^ textRaw
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCode returns true if this text should be rendered without any
|
||||||
|
// modifications.
|
||||||
|
func (n *String) IsCode() bool {
|
||||||
|
return n.flags&textCode != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCode sets whether this text should be rendered without any modifications.
|
||||||
|
func (n *String) SetCode(v bool) {
|
||||||
|
if v {
|
||||||
|
n.flags |= textCode
|
||||||
|
} else {
|
||||||
|
n.flags = n.flags &^ textCode
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Text implements Node.Text.
|
||||||
|
func (n *String) Text(source []byte) []byte {
|
||||||
|
return n.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *String) Dump(source []byte, level int) {
|
||||||
|
fs := textFlagsString(n.flags)
|
||||||
|
if len(fs) != 0 {
|
||||||
|
fs = "(" + fs + ")"
|
||||||
|
}
|
||||||
|
fmt.Printf("%sString%s: \"%s\"\n", strings.Repeat(" ", level), fs, strings.TrimRight(string(n.Value), "\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindString is a NodeKind of the String node.
|
||||||
|
var KindString = NewNodeKind("String")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *String) Kind() NodeKind {
|
||||||
|
return KindString
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewString returns a new String node.
|
||||||
|
func NewString(v []byte) *String {
|
||||||
|
return &String{
|
||||||
|
Value: v,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A CodeSpan struct represents a code span of Markdown text.
|
||||||
|
type CodeSpan struct {
|
||||||
|
BaseInline
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inline implements Inline.Inline .
|
||||||
|
func (n *CodeSpan) Inline() {
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBlank returns true if this node consists of spaces, otherwise false.
|
||||||
|
func (n *CodeSpan) IsBlank(source []byte) bool {
|
||||||
|
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||||
|
text := c.(*Text).Segment
|
||||||
|
if !util.IsBlank(text.Value(source)) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump
|
||||||
|
func (n *CodeSpan) Dump(source []byte, level int) {
|
||||||
|
DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindCodeSpan is a NodeKind of the CodeSpan node.
|
||||||
|
var KindCodeSpan = NewNodeKind("CodeSpan")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *CodeSpan) Kind() NodeKind {
|
||||||
|
return KindCodeSpan
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCodeSpan returns a new CodeSpan node.
|
||||||
|
func NewCodeSpan() *CodeSpan {
|
||||||
|
return &CodeSpan{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Emphasis struct represents an emphasis of Markdown text.
|
||||||
|
type Emphasis struct {
|
||||||
|
BaseInline
|
||||||
|
|
||||||
|
// Level is a level of the emphasis.
|
||||||
|
Level int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *Emphasis) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{
|
||||||
|
"Level": fmt.Sprintf("%v", n.Level),
|
||||||
|
}
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindEmphasis is a NodeKind of the Emphasis node.
|
||||||
|
var KindEmphasis = NewNodeKind("Emphasis")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Emphasis) Kind() NodeKind {
|
||||||
|
return KindEmphasis
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEmphasis returns a new Emphasis node with the given level.
|
||||||
|
func NewEmphasis(level int) *Emphasis {
|
||||||
|
return &Emphasis{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
Level: level,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type baseLink struct {
|
||||||
|
BaseInline
|
||||||
|
|
||||||
|
// Destination is a destination(URL) of this link.
|
||||||
|
Destination []byte
|
||||||
|
|
||||||
|
// Title is a title of this link.
|
||||||
|
Title []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inline implements Inline.Inline.
|
||||||
|
func (n *baseLink) Inline() {
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Link struct represents a link of the Markdown text.
|
||||||
|
type Link struct {
|
||||||
|
baseLink
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *Link) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
m["Destination"] = string(n.Destination)
|
||||||
|
m["Title"] = string(n.Title)
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindLink is a NodeKind of the Link node.
|
||||||
|
var KindLink = NewNodeKind("Link")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Link) Kind() NodeKind {
|
||||||
|
return KindLink
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewLink returns a new Link node.
|
||||||
|
func NewLink() *Link {
|
||||||
|
c := &Link{
|
||||||
|
baseLink: baseLink{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Image struct represents an image of the Markdown text.
|
||||||
|
type Image struct {
|
||||||
|
baseLink
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *Image) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
m["Destination"] = string(n.Destination)
|
||||||
|
m["Title"] = string(n.Title)
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindImage is a NodeKind of the Image node.
|
||||||
|
var KindImage = NewNodeKind("Image")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Image) Kind() NodeKind {
|
||||||
|
return KindImage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewImage returns a new Image node.
|
||||||
|
func NewImage(link *Link) *Image {
|
||||||
|
c := &Image{
|
||||||
|
baseLink: baseLink{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
c.Destination = link.Destination
|
||||||
|
c.Title = link.Title
|
||||||
|
for n := link.FirstChild(); n != nil; {
|
||||||
|
next := n.NextSibling()
|
||||||
|
link.RemoveChild(link, n)
|
||||||
|
c.AppendChild(c, n)
|
||||||
|
n = next
|
||||||
|
}
|
||||||
|
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoLinkType defines kind of auto links.
|
||||||
|
type AutoLinkType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// AutoLinkEmail indicates that an autolink is an email address.
|
||||||
|
AutoLinkEmail AutoLinkType = iota + 1
|
||||||
|
// AutoLinkURL indicates that an autolink is a generic URL.
|
||||||
|
AutoLinkURL
|
||||||
|
)
|
||||||
|
|
||||||
|
// An AutoLink struct represents an autolink of the Markdown text.
|
||||||
|
type AutoLink struct {
|
||||||
|
BaseInline
|
||||||
|
// Type is a type of this autolink.
|
||||||
|
AutoLinkType AutoLinkType
|
||||||
|
|
||||||
|
// Protocol specified a protocol of the link.
|
||||||
|
Protocol []byte
|
||||||
|
|
||||||
|
value *Text
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inline implements Inline.Inline.
|
||||||
|
func (n *AutoLink) Inline() {}
|
||||||
|
|
||||||
|
// Dump implenets Node.Dump
|
||||||
|
func (n *AutoLink) Dump(source []byte, level int) {
|
||||||
|
segment := n.value.Segment
|
||||||
|
m := map[string]string{
|
||||||
|
"Value": string(segment.Value(source)),
|
||||||
|
}
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindAutoLink is a NodeKind of the AutoLink node.
|
||||||
|
var KindAutoLink = NewNodeKind("AutoLink")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *AutoLink) Kind() NodeKind {
|
||||||
|
return KindAutoLink
|
||||||
|
}
|
||||||
|
|
||||||
|
// URL returns an url of this node.
|
||||||
|
func (n *AutoLink) URL(source []byte) []byte {
|
||||||
|
if n.Protocol != nil {
|
||||||
|
s := n.value.Segment
|
||||||
|
ret := make([]byte, 0, len(n.Protocol)+s.Len()+3)
|
||||||
|
ret = append(ret, n.Protocol...)
|
||||||
|
ret = append(ret, ':', '/', '/')
|
||||||
|
ret = append(ret, n.value.Text(source)...)
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
return n.value.Text(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Label returns a label of this node.
|
||||||
|
func (n *AutoLink) Label(source []byte) []byte {
|
||||||
|
return n.value.Text(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAutoLink returns a new AutoLink node.
|
||||||
|
func NewAutoLink(typ AutoLinkType, value *Text) *AutoLink {
|
||||||
|
return &AutoLink{
|
||||||
|
BaseInline: BaseInline{},
|
||||||
|
value: value,
|
||||||
|
AutoLinkType: typ,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A RawHTML struct represents an inline raw HTML of the Markdown text.
|
||||||
|
type RawHTML struct {
|
||||||
|
BaseInline
|
||||||
|
Segments *textm.Segments
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inline implements Inline.Inline.
|
||||||
|
func (n *RawHTML) Inline() {}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *RawHTML) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
t := []string{}
|
||||||
|
for i := 0; i < n.Segments.Len(); i++ {
|
||||||
|
segment := n.Segments.At(i)
|
||||||
|
t = append(t, string(segment.Value(source)))
|
||||||
|
}
|
||||||
|
m["RawText"] = strings.Join(t, "")
|
||||||
|
DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindRawHTML is a NodeKind of the RawHTML node.
|
||||||
|
var KindRawHTML = NewNodeKind("RawHTML")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *RawHTML) Kind() NodeKind {
|
||||||
|
return KindRawHTML
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRawHTML returns a new RawHTML node.
|
||||||
|
func NewRawHTML() *RawHTML {
|
||||||
|
return &RawHTML{
|
||||||
|
Segments: textm.NewSegments(),
|
||||||
|
}
|
||||||
|
}
|
83
vendor/github.com/yuin/goldmark/extension/ast/definition_list.go
generated
vendored
Normal file
83
vendor/github.com/yuin/goldmark/extension/ast/definition_list.go
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A DefinitionList struct represents a definition list of Markdown
|
||||||
|
// (PHPMarkdownExtra) text.
|
||||||
|
type DefinitionList struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
Offset int
|
||||||
|
TemporaryParagraph *gast.Paragraph
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *DefinitionList) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindDefinitionList is a NodeKind of the DefinitionList node.
|
||||||
|
var KindDefinitionList = gast.NewNodeKind("DefinitionList")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *DefinitionList) Kind() gast.NodeKind {
|
||||||
|
return KindDefinitionList
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDefinitionList returns a new DefinitionList node.
|
||||||
|
func NewDefinitionList(offset int, para *gast.Paragraph) *DefinitionList {
|
||||||
|
return &DefinitionList{
|
||||||
|
Offset: offset,
|
||||||
|
TemporaryParagraph: para,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A DefinitionTerm struct represents a definition list term of Markdown
|
||||||
|
// (PHPMarkdownExtra) text.
|
||||||
|
type DefinitionTerm struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *DefinitionTerm) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindDefinitionTerm is a NodeKind of the DefinitionTerm node.
|
||||||
|
var KindDefinitionTerm = gast.NewNodeKind("DefinitionTerm")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *DefinitionTerm) Kind() gast.NodeKind {
|
||||||
|
return KindDefinitionTerm
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDefinitionTerm returns a new DefinitionTerm node.
|
||||||
|
func NewDefinitionTerm() *DefinitionTerm {
|
||||||
|
return &DefinitionTerm{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A DefinitionDescription struct represents a definition list description of Markdown
|
||||||
|
// (PHPMarkdownExtra) text.
|
||||||
|
type DefinitionDescription struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
IsTight bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *DefinitionDescription) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindDefinitionDescription is a NodeKind of the DefinitionDescription node.
|
||||||
|
var KindDefinitionDescription = gast.NewNodeKind("DefinitionDescription")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *DefinitionDescription) Kind() gast.NodeKind {
|
||||||
|
return KindDefinitionDescription
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDefinitionDescription returns a new DefinitionDescription node.
|
||||||
|
func NewDefinitionDescription() *DefinitionDescription {
|
||||||
|
return &DefinitionDescription{}
|
||||||
|
}
|
125
vendor/github.com/yuin/goldmark/extension/ast/footnote.go
generated
vendored
Normal file
125
vendor/github.com/yuin/goldmark/extension/ast/footnote.go
generated
vendored
Normal file
|
@ -0,0 +1,125 @@
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A FootnoteLink struct represents a link to a footnote of Markdown
|
||||||
|
// (PHP Markdown Extra) text.
|
||||||
|
type FootnoteLink struct {
|
||||||
|
gast.BaseInline
|
||||||
|
Index int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *FootnoteLink) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
m["Index"] = fmt.Sprintf("%v", n.Index)
|
||||||
|
gast.DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindFootnoteLink is a NodeKind of the FootnoteLink node.
|
||||||
|
var KindFootnoteLink = gast.NewNodeKind("FootnoteLink")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *FootnoteLink) Kind() gast.NodeKind {
|
||||||
|
return KindFootnoteLink
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFootnoteLink returns a new FootnoteLink node.
|
||||||
|
func NewFootnoteLink(index int) *FootnoteLink {
|
||||||
|
return &FootnoteLink{
|
||||||
|
Index: index,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A FootnoteBackLink struct represents a link to a footnote of Markdown
|
||||||
|
// (PHP Markdown Extra) text.
|
||||||
|
type FootnoteBackLink struct {
|
||||||
|
gast.BaseInline
|
||||||
|
Index int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *FootnoteBackLink) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
m["Index"] = fmt.Sprintf("%v", n.Index)
|
||||||
|
gast.DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindFootnoteBackLink is a NodeKind of the FootnoteBackLink node.
|
||||||
|
var KindFootnoteBackLink = gast.NewNodeKind("FootnoteBackLink")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *FootnoteBackLink) Kind() gast.NodeKind {
|
||||||
|
return KindFootnoteBackLink
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFootnoteBackLink returns a new FootnoteBackLink node.
|
||||||
|
func NewFootnoteBackLink(index int) *FootnoteBackLink {
|
||||||
|
return &FootnoteBackLink{
|
||||||
|
Index: index,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Footnote struct represents a footnote of Markdown
|
||||||
|
// (PHP Markdown Extra) text.
|
||||||
|
type Footnote struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
Ref []byte
|
||||||
|
Index int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *Footnote) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
m["Index"] = fmt.Sprintf("%v", n.Index)
|
||||||
|
m["Ref"] = fmt.Sprintf("%s", n.Ref)
|
||||||
|
gast.DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindFootnote is a NodeKind of the Footnote node.
|
||||||
|
var KindFootnote = gast.NewNodeKind("Footnote")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Footnote) Kind() gast.NodeKind {
|
||||||
|
return KindFootnote
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFootnote returns a new Footnote node.
|
||||||
|
func NewFootnote(ref []byte) *Footnote {
|
||||||
|
return &Footnote{
|
||||||
|
Ref: ref,
|
||||||
|
Index: -1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A FootnoteList struct represents footnotes of Markdown
|
||||||
|
// (PHP Markdown Extra) text.
|
||||||
|
type FootnoteList struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
Count int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *FootnoteList) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{}
|
||||||
|
m["Count"] = fmt.Sprintf("%v", n.Count)
|
||||||
|
gast.DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindFootnoteList is a NodeKind of the FootnoteList node.
|
||||||
|
var KindFootnoteList = gast.NewNodeKind("FootnoteList")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *FootnoteList) Kind() gast.NodeKind {
|
||||||
|
return KindFootnoteList
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFootnoteList returns a new FootnoteList node.
|
||||||
|
func NewFootnoteList() *FootnoteList {
|
||||||
|
return &FootnoteList{
|
||||||
|
Count: 0,
|
||||||
|
}
|
||||||
|
}
|
29
vendor/github.com/yuin/goldmark/extension/ast/strikethrough.go
generated
vendored
Normal file
29
vendor/github.com/yuin/goldmark/extension/ast/strikethrough.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
// Package ast defines AST nodes that represents extension's elements
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Strikethrough struct represents a strikethrough of GFM text.
|
||||||
|
type Strikethrough struct {
|
||||||
|
gast.BaseInline
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *Strikethrough) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindStrikethrough is a NodeKind of the Strikethrough node.
|
||||||
|
var KindStrikethrough = gast.NewNodeKind("Strikethrough")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Strikethrough) Kind() gast.NodeKind {
|
||||||
|
return KindStrikethrough
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStrikethrough returns a new Strikethrough node.
|
||||||
|
func NewStrikethrough() *Strikethrough {
|
||||||
|
return &Strikethrough{}
|
||||||
|
}
|
157
vendor/github.com/yuin/goldmark/extension/ast/table.go
generated
vendored
Normal file
157
vendor/github.com/yuin/goldmark/extension/ast/table.go
generated
vendored
Normal file
|
@ -0,0 +1,157 @@
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Alignment is a text alignment of table cells.
|
||||||
|
type Alignment int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// AlignLeft indicates text should be left justified.
|
||||||
|
AlignLeft Alignment = iota + 1
|
||||||
|
|
||||||
|
// AlignRight indicates text should be right justified.
|
||||||
|
AlignRight
|
||||||
|
|
||||||
|
// AlignCenter indicates text should be centered.
|
||||||
|
AlignCenter
|
||||||
|
|
||||||
|
// AlignNone indicates text should be aligned by default manner.
|
||||||
|
AlignNone
|
||||||
|
)
|
||||||
|
|
||||||
|
func (a Alignment) String() string {
|
||||||
|
switch a {
|
||||||
|
case AlignLeft:
|
||||||
|
return "left"
|
||||||
|
case AlignRight:
|
||||||
|
return "right"
|
||||||
|
case AlignCenter:
|
||||||
|
return "center"
|
||||||
|
case AlignNone:
|
||||||
|
return "none"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Table struct represents a table of Markdown(GFM) text.
|
||||||
|
type Table struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
|
||||||
|
// Alignments returns alignments of the columns.
|
||||||
|
Alignments []Alignment
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump
|
||||||
|
func (n *Table) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, func(level int) {
|
||||||
|
indent := strings.Repeat(" ", level)
|
||||||
|
fmt.Printf("%sAlignments {\n", indent)
|
||||||
|
for i, alignment := range n.Alignments {
|
||||||
|
indent2 := strings.Repeat(" ", level+1)
|
||||||
|
fmt.Printf("%s%s", indent2, alignment.String())
|
||||||
|
if i != len(n.Alignments)-1 {
|
||||||
|
fmt.Println("")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Printf("\n%s}\n", indent)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindTable is a NodeKind of the Table node.
|
||||||
|
var KindTable = gast.NewNodeKind("Table")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *Table) Kind() gast.NodeKind {
|
||||||
|
return KindTable
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTable returns a new Table node.
|
||||||
|
func NewTable() *Table {
|
||||||
|
return &Table{
|
||||||
|
Alignments: []Alignment{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A TableRow struct represents a table row of Markdown(GFM) text.
|
||||||
|
type TableRow struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
Alignments []Alignment
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *TableRow) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindTableRow is a NodeKind of the TableRow node.
|
||||||
|
var KindTableRow = gast.NewNodeKind("TableRow")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *TableRow) Kind() gast.NodeKind {
|
||||||
|
return KindTableRow
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTableRow returns a new TableRow node.
|
||||||
|
func NewTableRow(alignments []Alignment) *TableRow {
|
||||||
|
return &TableRow{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A TableHeader struct represents a table header of Markdown(GFM) text.
|
||||||
|
type TableHeader struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
Alignments []Alignment
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindTableHeader is a NodeKind of the TableHeader node.
|
||||||
|
var KindTableHeader = gast.NewNodeKind("TableHeader")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *TableHeader) Kind() gast.NodeKind {
|
||||||
|
return KindTableHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *TableHeader) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTableHeader returns a new TableHeader node.
|
||||||
|
func NewTableHeader(row *TableRow) *TableHeader {
|
||||||
|
n := &TableHeader{}
|
||||||
|
for c := row.FirstChild(); c != nil; {
|
||||||
|
next := c.NextSibling()
|
||||||
|
n.AppendChild(n, c)
|
||||||
|
c = next
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// A TableCell struct represents a table cell of a Markdown(GFM) text.
|
||||||
|
type TableCell struct {
|
||||||
|
gast.BaseBlock
|
||||||
|
Alignment Alignment
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (n *TableCell) Dump(source []byte, level int) {
|
||||||
|
gast.DumpHelper(n, source, level, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindTableCell is a NodeKind of the TableCell node.
|
||||||
|
var KindTableCell = gast.NewNodeKind("TableCell")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *TableCell) Kind() gast.NodeKind {
|
||||||
|
return KindTableCell
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTableCell returns a new TableCell node.
|
||||||
|
func NewTableCell() *TableCell {
|
||||||
|
return &TableCell{
|
||||||
|
Alignment: AlignNone,
|
||||||
|
}
|
||||||
|
}
|
35
vendor/github.com/yuin/goldmark/extension/ast/tasklist.go
generated
vendored
Normal file
35
vendor/github.com/yuin/goldmark/extension/ast/tasklist.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
package ast
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A TaskCheckBox struct represents a checkbox of a task list.
|
||||||
|
type TaskCheckBox struct {
|
||||||
|
gast.BaseInline
|
||||||
|
IsChecked bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump impelemtns Node.Dump.
|
||||||
|
func (n *TaskCheckBox) Dump(source []byte, level int) {
|
||||||
|
m := map[string]string{
|
||||||
|
"Checked": fmt.Sprintf("%v", n.IsChecked),
|
||||||
|
}
|
||||||
|
gast.DumpHelper(n, source, level, m, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// KindTaskCheckBox is a NodeKind of the TaskCheckBox node.
|
||||||
|
var KindTaskCheckBox = gast.NewNodeKind("TaskCheckBox")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind.
|
||||||
|
func (n *TaskCheckBox) Kind() gast.NodeKind {
|
||||||
|
return KindTaskCheckBox
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTaskCheckBox returns a new TaskCheckBox node.
|
||||||
|
func NewTaskCheckBox(checked bool) *TaskCheckBox {
|
||||||
|
return &TaskCheckBox{
|
||||||
|
IsChecked: checked,
|
||||||
|
}
|
||||||
|
}
|
270
vendor/github.com/yuin/goldmark/extension/definition_list.go
generated
vendored
Normal file
270
vendor/github.com/yuin/goldmark/extension/definition_list.go
generated
vendored
Normal file
|
@ -0,0 +1,270 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/extension/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type definitionListParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultDefinitionListParser = &definitionListParser{}
|
||||||
|
|
||||||
|
// NewDefinitionListParser return a new parser.BlockParser that
|
||||||
|
// can parse PHP Markdown Extra Definition lists.
|
||||||
|
func NewDefinitionListParser() parser.BlockParser {
|
||||||
|
return defaultDefinitionListParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionListParser) Trigger() []byte {
|
||||||
|
return []byte{':'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionListParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
|
||||||
|
if _, ok := parent.(*ast.DefinitionList); ok {
|
||||||
|
return nil, parser.NoChildren
|
||||||
|
}
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
pos := pc.BlockOffset()
|
||||||
|
indent := pc.BlockIndent()
|
||||||
|
if pos < 0 || line[pos] != ':' || indent != 0 {
|
||||||
|
return nil, parser.NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
last := parent.LastChild()
|
||||||
|
// need 1 or more spaces after ':'
|
||||||
|
w, _ := util.IndentWidth(line[pos+1:], pos+1)
|
||||||
|
if w < 1 {
|
||||||
|
return nil, parser.NoChildren
|
||||||
|
}
|
||||||
|
if w >= 8 { // starts with indented code
|
||||||
|
w = 5
|
||||||
|
}
|
||||||
|
w += pos + 1 /* 1 = ':' */
|
||||||
|
|
||||||
|
para, lastIsParagraph := last.(*gast.Paragraph)
|
||||||
|
var list *ast.DefinitionList
|
||||||
|
status := parser.HasChildren
|
||||||
|
var ok bool
|
||||||
|
if lastIsParagraph {
|
||||||
|
list, ok = last.PreviousSibling().(*ast.DefinitionList)
|
||||||
|
if ok { // is not first item
|
||||||
|
list.Offset = w
|
||||||
|
list.TemporaryParagraph = para
|
||||||
|
} else { // is first item
|
||||||
|
list = ast.NewDefinitionList(w, para)
|
||||||
|
status |= parser.RequireParagraph
|
||||||
|
}
|
||||||
|
} else if list, ok = last.(*ast.DefinitionList); ok { // multiple description
|
||||||
|
list.Offset = w
|
||||||
|
list.TemporaryParagraph = nil
|
||||||
|
} else {
|
||||||
|
return nil, parser.NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
return list, status
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionListParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
if util.IsBlank(line) {
|
||||||
|
return parser.Continue | parser.HasChildren
|
||||||
|
}
|
||||||
|
list, _ := node.(*ast.DefinitionList)
|
||||||
|
w, _ := util.IndentWidth(line, reader.LineOffset())
|
||||||
|
if w < list.Offset {
|
||||||
|
return parser.Close
|
||||||
|
}
|
||||||
|
pos, padding := util.IndentPosition(line, reader.LineOffset(), list.Offset)
|
||||||
|
reader.AdvanceAndSetPadding(pos, padding)
|
||||||
|
return parser.Continue | parser.HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionListParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionListParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionListParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type definitionDescriptionParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultDefinitionDescriptionParser = &definitionDescriptionParser{}
|
||||||
|
|
||||||
|
// NewDefinitionDescriptionParser return a new parser.BlockParser that
|
||||||
|
// can parse definition description starts with ':'.
|
||||||
|
func NewDefinitionDescriptionParser() parser.BlockParser {
|
||||||
|
return defaultDefinitionDescriptionParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionDescriptionParser) Trigger() []byte {
|
||||||
|
return []byte{':'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionDescriptionParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
pos := pc.BlockOffset()
|
||||||
|
indent := pc.BlockIndent()
|
||||||
|
if pos < 0 || line[pos] != ':' || indent != 0 {
|
||||||
|
return nil, parser.NoChildren
|
||||||
|
}
|
||||||
|
list, _ := parent.(*ast.DefinitionList)
|
||||||
|
if list == nil {
|
||||||
|
return nil, parser.NoChildren
|
||||||
|
}
|
||||||
|
para := list.TemporaryParagraph
|
||||||
|
list.TemporaryParagraph = nil
|
||||||
|
if para != nil {
|
||||||
|
lines := para.Lines()
|
||||||
|
l := lines.Len()
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
term := ast.NewDefinitionTerm()
|
||||||
|
segment := lines.At(i)
|
||||||
|
term.Lines().Append(segment.TrimRightSpace(reader.Source()))
|
||||||
|
list.AppendChild(list, term)
|
||||||
|
}
|
||||||
|
para.Parent().RemoveChild(para.Parent(), para)
|
||||||
|
}
|
||||||
|
cpos, padding := util.IndentPosition(line[pos+1:], pos+1, list.Offset-pos-1)
|
||||||
|
reader.AdvanceAndSetPadding(cpos, padding)
|
||||||
|
|
||||||
|
return ast.NewDefinitionDescription(), parser.HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionDescriptionParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
|
||||||
|
// definitionListParser detects end of the description.
|
||||||
|
// so this method will never be called.
|
||||||
|
return parser.Continue | parser.HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionDescriptionParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
|
||||||
|
desc := node.(*ast.DefinitionDescription)
|
||||||
|
desc.IsTight = !desc.HasBlankPreviousLines()
|
||||||
|
if desc.IsTight {
|
||||||
|
for gc := desc.FirstChild(); gc != nil; gc = gc.NextSibling() {
|
||||||
|
paragraph, ok := gc.(*gast.Paragraph)
|
||||||
|
if ok {
|
||||||
|
textBlock := gast.NewTextBlock()
|
||||||
|
textBlock.SetLines(paragraph.Lines())
|
||||||
|
desc.ReplaceChild(desc, paragraph, textBlock)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionDescriptionParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *definitionDescriptionParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefinitionListHTMLRenderer is a renderer.NodeRenderer implementation that
// renders DefinitionList nodes.
type DefinitionListHTMLRenderer struct {
	html.Config
}

// NewDefinitionListHTMLRenderer returns a new DefinitionListHTMLRenderer
// configured with the default html.Config overridden by the given options.
func NewDefinitionListHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
	r := &DefinitionListHTMLRenderer{
		Config: html.NewConfig(),
	}
	// Apply each option on top of the defaults, in order.
	for _, opt := range opts {
		opt.SetHTMLOption(&r.Config)
	}
	return r
}

// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
func (r *DefinitionListHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
	reg.Register(ast.KindDefinitionList, r.renderDefinitionList)
	reg.Register(ast.KindDefinitionTerm, r.renderDefinitionTerm)
	reg.Register(ast.KindDefinitionDescription, r.renderDefinitionDescription)
}

// DefinitionListAttributeFilter defines attribute names which dl elements can have.
var DefinitionListAttributeFilter = html.GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *DefinitionListHTMLRenderer) renderDefinitionList(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_, _ = w.WriteString("<dl")
|
||||||
|
html.RenderAttributes(w, n, DefinitionListAttributeFilter)
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<dl>\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</dl>\n")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefinitionTermAttributeFilter defines attribute names which dd elements can have.
|
||||||
|
var DefinitionTermAttributeFilter = html.GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *DefinitionListHTMLRenderer) renderDefinitionTerm(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_, _ = w.WriteString("<dt")
|
||||||
|
html.RenderAttributes(w, n, DefinitionTermAttributeFilter)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<dt>")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</dt>\n")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefinitionDescriptionAttributeFilter defines attribute names which dd elements can have.
|
||||||
|
var DefinitionDescriptionAttributeFilter = html.GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *DefinitionListHTMLRenderer) renderDefinitionDescription(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
n := node.(*ast.DefinitionDescription)
|
||||||
|
_, _ = w.WriteString("<dd")
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, n, DefinitionDescriptionAttributeFilter)
|
||||||
|
}
|
||||||
|
if n.IsTight {
|
||||||
|
_, _ = w.WriteString(">")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</dd>\n")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// definitionList is the goldmark.Extender for definition-list support.
type definitionList struct {
}

// DefinitionList is an extension that allow you to use PHP Markdown Extra Definition lists.
var DefinitionList = &definitionList{}

// Extend registers the definition-list block parsers and the HTML renderer
// on the given Markdown instance.
func (e *definitionList) Extend(m goldmark.Markdown) {
	m.Parser().AddOptions(parser.WithBlockParsers(
		util.Prioritized(NewDefinitionListParser(), 101),
		util.Prioritized(NewDefinitionDescriptionParser(), 102),
	))
	m.Renderer().AddOptions(renderer.WithNodeRenderers(
		util.Prioritized(NewDefinitionListHTMLRenderer(), 500),
	))
}
|
336
vendor/github.com/yuin/goldmark/extension/footnote.go
generated
vendored
Normal file
336
vendor/github.com/yuin/goldmark/extension/footnote.go
generated
vendored
Normal file
|
@ -0,0 +1,336 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/extension/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// footnoteListKey is the parser.Context key under which the document's
// FootnoteList node is stashed while parsing.
var footnoteListKey = parser.NewContextKey()

// footnoteBlockParser parses footnote definition blocks ("[^label]: ...").
type footnoteBlockParser struct {
}

var defaultFootnoteBlockParser = &footnoteBlockParser{}

// NewFootnoteBlockParser returns a new parser.BlockParser that can parse
// footnotes of the Markdown(PHP Markdown Extra) text.
func NewFootnoteBlockParser() parser.BlockParser {
	return defaultFootnoteBlockParser
}

// Trigger reports the bytes that may start a footnote definition.
func (b *footnoteBlockParser) Trigger() []byte {
	return []byte{'['}
}
|
||||||
|
|
||||||
|
// Open implements parser.BlockParser. It recognizes a line of the form
// "[^label]: ..." and opens an ast.Footnote node for it; any text after the
// colon becomes the footnote's first child content.
func (b *footnoteBlockParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
	line, segment := reader.PeekLine()
	pos := pc.BlockOffset()
	// Must start with "[^".
	if pos < 0 || line[pos] != '[' {
		return nil, parser.NoChildren
	}
	pos++
	if pos > len(line)-1 || line[pos] != '^' {
		return nil, parser.NoChildren
	}
	open := pos + 1
	closes := 0
	closure := util.FindClosure(line[pos+1:], '[', ']', false, false)
	// closes/next are computed unconditionally but only used when closure > -1.
	closes = pos + 1 + closure
	next := closes + 1
	if closure > -1 {
		// The closing ']' must be immediately followed by ':'.
		if next >= len(line) || line[next] != ':' {
			return nil, parser.NoChildren
		}
	} else {
		return nil, parser.NoChildren
	}
	padding := segment.Padding
	label := reader.Value(text.NewSegment(segment.Start+open-padding, segment.Start+closes-padding))
	if util.IsBlank(label) {
		return nil, parser.NoChildren
	}
	item := ast.NewFootnote(label)

	// Advance past "[^label]:"; if nothing follows, the definition has no
	// inline content on this line.
	pos = next + 1 - padding
	if pos >= len(line) {
		reader.Advance(pos)
		return item, parser.NoChildren
	}
	reader.AdvanceAndSetPadding(pos, padding)
	return item, parser.HasChildren
}
|
||||||
|
|
||||||
|
// Continue implements parser.BlockParser. Blank lines and lines indented by
// at least 4 columns continue the footnote body; anything else closes it.
func (b *footnoteBlockParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
	line, _ := reader.PeekLine()
	if util.IsBlank(line) {
		return parser.Continue | parser.HasChildren
	}
	childpos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
	if childpos < 0 {
		return parser.Close
	}
	reader.AdvanceAndSetPadding(childpos, padding)
	return parser.Continue | parser.HasChildren
}

// Close implements parser.BlockParser. The finished footnote is detached
// from its parent and appended to the document-wide FootnoteList stored in
// the parser context (creating and inserting the list on first use).
func (b *footnoteBlockParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
	var list *ast.FootnoteList
	if tlist := pc.Get(footnoteListKey); tlist != nil {
		list = tlist.(*ast.FootnoteList)
	} else {
		list = ast.NewFootnoteList()
		pc.Set(footnoteListKey, list)
		node.Parent().InsertBefore(node.Parent(), node, list)
	}
	node.Parent().RemoveChild(node.Parent(), node)
	list.AppendChild(list, node)
}

// CanInterruptParagraph implements parser.BlockParser.
func (b *footnoteBlockParser) CanInterruptParagraph() bool {
	return true
}

// CanAcceptIndentedLine implements parser.BlockParser.
func (b *footnoteBlockParser) CanAcceptIndentedLine() bool {
	return false
}
|
||||||
|
|
||||||
|
// footnoteParser parses inline footnote references ("[^label]").
type footnoteParser struct {
}

var defaultFootnoteParser = &footnoteParser{}

// NewFootnoteParser returns a new parser.InlineParser that can parse
// footnote links of the Markdown(PHP Markdown Extra) text.
func NewFootnoteParser() parser.InlineParser {
	return defaultFootnoteParser
}

// Trigger reports the bytes that may start a footnote reference.
func (s *footnoteParser) Trigger() []byte {
	// footnote syntax probably conflict with the image syntax.
	// So we need trigger this parser with '!'.
	return []byte{'!', '['}
}
|
||||||
|
|
||||||
|
// Parse implements parser.InlineParser. It matches "[^label]" (optionally
// preceded by '!'), looks the label up in the FootnoteList collected by the
// block parser, assigns the footnote a 1-based index on first reference,
// and returns a FootnoteLink node — or nil when the reference is undefined.
func (s *footnoteParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
	line, segment := block.PeekLine()
	pos := 1
	// Skip an optional leading '!' (see Trigger).
	if len(line) > 0 && line[0] == '!' {
		pos++
	}
	if pos >= len(line) || line[pos] != '^' {
		return nil
	}
	pos++
	if pos >= len(line) {
		return nil
	}
	open := pos
	closure := util.FindClosure(line[pos:], '[', ']', false, false)
	if closure < 0 {
		return nil
	}
	closes := pos + closure
	value := block.Value(text.NewSegment(segment.Start+open, segment.Start+closes))
	block.Advance(closes + 1)

	var list *ast.FootnoteList
	if tlist := pc.Get(footnoteListKey); tlist != nil {
		list = tlist.(*ast.FootnoteList)
	}
	if list == nil {
		// No footnote definitions were seen in this document.
		return nil
	}
	index := 0
	for def := list.FirstChild(); def != nil; def = def.NextSibling() {
		d := def.(*ast.Footnote)
		if bytes.Equal(d.Ref, value) {
			// First reference to this definition: assign the next index.
			if d.Index < 0 {
				list.Count += 1
				d.Index = list.Count
			}
			index = d.Index
			break
		}
	}
	if index == 0 {
		return nil
	}

	return ast.NewFootnoteLink(index)
}
|
||||||
|
|
||||||
|
// footnoteASTTransformer moves the collected footnote list to the end of
// the document once parsing finishes.
type footnoteASTTransformer struct {
}

var defaultFootnoteASTTransformer = &footnoteASTTransformer{}

// NewFootnoteASTTransformer returns a new parser.ASTTransformer that
// insert a footnote list to the last of the document.
func NewFootnoteASTTransformer() parser.ASTTransformer {
	return defaultFootnoteASTTransformer
}

// Transform implements parser.ASTTransformer. It prunes unreferenced
// footnotes, appends a back-link to each referenced one, sorts the list by
// reference index, and re-attaches the list as the document's last child
// (or removes it entirely when nothing was referenced).
func (a *footnoteASTTransformer) Transform(node *gast.Document, reader text.Reader, pc parser.Context) {
	var list *ast.FootnoteList
	if tlist := pc.Get(footnoteListKey); tlist != nil {
		list = tlist.(*ast.FootnoteList)
	} else {
		return
	}
	pc.Set(footnoteListKey, nil)
	for footnote := list.FirstChild(); footnote != nil; {
		var container gast.Node = footnote
		// Capture the sibling before mutating the list.
		next := footnote.NextSibling()
		// Place the back-link inside the footnote's trailing paragraph when
		// one exists, otherwise directly on the footnote node.
		if fc := container.LastChild(); fc != nil && gast.IsParagraph(fc) {
			container = fc
		}
		index := footnote.(*ast.Footnote).Index
		if index < 0 {
			// Never referenced: drop the definition.
			list.RemoveChild(list, footnote)
		} else {
			container.AppendChild(container, ast.NewFootnoteBackLink(index))
		}
		footnote = next
	}
	list.SortChildren(func(n1, n2 gast.Node) int {
		if n1.(*ast.Footnote).Index < n2.(*ast.Footnote).Index {
			return -1
		}
		return 1
	})
	if list.Count <= 0 {
		list.Parent().RemoveChild(list.Parent(), list)
		return
	}

	node.AppendChild(node, list)
}
|
||||||
|
|
||||||
|
// FootnoteHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||||
|
// renders FootnoteLink nodes.
|
||||||
|
type FootnoteHTMLRenderer struct {
|
||||||
|
html.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFootnoteHTMLRenderer returns a new FootnoteHTMLRenderer.
|
||||||
|
func NewFootnoteHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
|
||||||
|
r := &FootnoteHTMLRenderer{
|
||||||
|
Config: html.NewConfig(),
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt.SetHTMLOption(&r.Config)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||||
|
func (r *FootnoteHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||||
|
reg.Register(ast.KindFootnoteLink, r.renderFootnoteLink)
|
||||||
|
reg.Register(ast.KindFootnoteBackLink, r.renderFootnoteBackLink)
|
||||||
|
reg.Register(ast.KindFootnote, r.renderFootnote)
|
||||||
|
reg.Register(ast.KindFootnoteList, r.renderFootnoteList)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
n := node.(*ast.FootnoteLink)
|
||||||
|
is := strconv.Itoa(n.Index)
|
||||||
|
_, _ = w.WriteString(`<sup id="fnref:`)
|
||||||
|
_, _ = w.WriteString(is)
|
||||||
|
_, _ = w.WriteString(`"><a href="#fn:`)
|
||||||
|
_, _ = w.WriteString(is)
|
||||||
|
_, _ = w.WriteString(`" class="footnote-ref" role="doc-noteref">`)
|
||||||
|
_, _ = w.WriteString(is)
|
||||||
|
_, _ = w.WriteString(`</a></sup>`)
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnoteBackLink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
n := node.(*ast.FootnoteBackLink)
|
||||||
|
is := strconv.Itoa(n.Index)
|
||||||
|
_, _ = w.WriteString(` <a href="#fnref:`)
|
||||||
|
_, _ = w.WriteString(is)
|
||||||
|
_, _ = w.WriteString(`" class="footnote-backref" role="doc-backlink">`)
|
||||||
|
_, _ = w.WriteString("↩︎")
|
||||||
|
_, _ = w.WriteString(`</a>`)
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.Footnote)
|
||||||
|
is := strconv.Itoa(n.Index)
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString(`<li id="fn:`)
|
||||||
|
_, _ = w.WriteString(is)
|
||||||
|
_, _ = w.WriteString(`" role="doc-endnote"`)
|
||||||
|
if node.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, node, html.ListItemAttributeFilter)
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</li>\n")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *FootnoteHTMLRenderer) renderFootnoteList(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
tag := "section"
|
||||||
|
if r.Config.XHTML {
|
||||||
|
tag = "div"
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<")
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
_, _ = w.WriteString(` class="footnotes" role="doc-endnotes"`)
|
||||||
|
if node.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, node, html.GlobalAttributeFilter)
|
||||||
|
}
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
if r.Config.XHTML {
|
||||||
|
_, _ = w.WriteString("\n<hr />\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("\n<hr>\n")
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString("<ol>\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</ol>\n")
|
||||||
|
_, _ = w.WriteString("</")
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// footnote is the goldmark.Extender for footnote support.
type footnote struct {
}

// Footnote is an extension that allow you to use PHP Markdown Extra Footnotes.
var Footnote = &footnote{}

// Extend registers the footnote block parser, inline parser, AST
// transformer and HTML renderer on the given Markdown instance.
func (e *footnote) Extend(m goldmark.Markdown) {
	m.Parser().AddOptions(
		parser.WithBlockParsers(
			util.Prioritized(NewFootnoteBlockParser(), 999),
		),
		parser.WithInlineParsers(
			util.Prioritized(NewFootnoteParser(), 101),
		),
		parser.WithASTTransformers(
			util.Prioritized(NewFootnoteASTTransformer(), 999),
		),
	)
	m.Renderer().AddOptions(renderer.WithNodeRenderers(
		util.Prioritized(NewFootnoteHTMLRenderer(), 500),
	))
}
|
18
vendor/github.com/yuin/goldmark/extension/gfm.go
generated
vendored
Normal file
18
vendor/github.com/yuin/goldmark/extension/gfm.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
)
|
||||||
|
|
||||||
|
// gfm bundles the GitHub Flavored Markdown extensions.
type gfm struct {
}

// GFM is an extension that provides Github Flavored markdown functionalities.
var GFM = &gfm{}

// Extend enables linkify, tables, strikethrough and task lists on the
// given Markdown instance.
func (e *gfm) Extend(m goldmark.Markdown) {
	Linkify.Extend(m)
	Table.Extend(m)
	Strikethrough.Extend(m)
	TaskList.Extend(m)
}
|
150
vendor/github.com/yuin/goldmark/extension/linkify.go
generated
vendored
Normal file
150
vendor/github.com/yuin/goldmark/extension/linkify.go
generated
vendored
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// wwwURLRegxp matches bare "www."-prefixed URLs (no scheme).
var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}((?:/|[#?])[-a-zA-Z0-9@:%_\+.~#!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)

// urlRegexp matches http/https/ftp URLs with an explicit scheme.
var urlRegexp = regexp.MustCompile(`^(?:http|https|ftp):\/\/(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}((?:/|[#?])[-a-zA-Z0-9@:%_+.~#$!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)

// linkifyParser turns plain text that looks like a URL or email address
// into an AutoLink node.
type linkifyParser struct {
}

var defaultLinkifyParser = &linkifyParser{}

// NewLinkifyParser return a new InlineParser can parse
// text that seems like a URL.
func NewLinkifyParser() parser.InlineParser {
	return defaultLinkifyParser
}

// Trigger reports the bytes that may precede a linkifiable token.
func (s *linkifyParser) Trigger() []byte {
	// ' ' indicates any white spaces and a line head
	return []byte{' ', '*', '_', '~', '('}
}

// Byte prefixes checked before running the URL regexps.
var protoHTTP = []byte("http:")
var protoHTTPS = []byte("https:")
var protoFTP = []byte("ftp:")
var domainWWW = []byte("www.")
|
||||||
|
|
||||||
|
// Parse implements parser.InlineParser. It tries, in order: a scheme URL
// (http/https/ftp), a "www." URL (given an implicit http protocol), then an
// email address; trailing punctuation ('.', unbalanced ')', HTML entity
// remnants ending in ';') is trimmed from URL matches. Returns an AutoLink
// node covering the matched span, or nil when nothing matches.
func (s *linkifyParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
	// Never linkify inside a link label.
	if pc.IsInLinkLabel() {
		return nil
	}
	line, segment := block.PeekLine()
	consumes := 0
	start := segment.Start
	c := line[0]
	// advance if current position is not a line head.
	if c == ' ' || c == '*' || c == '_' || c == '~' || c == '(' {
		consumes++
		start++
		line = line[1:]
	}

	var m []int
	var protocol []byte
	var typ ast.AutoLinkType = ast.AutoLinkURL
	if bytes.HasPrefix(line, protoHTTP) || bytes.HasPrefix(line, protoHTTPS) || bytes.HasPrefix(line, protoFTP) {
		m = urlRegexp.FindSubmatchIndex(line)
	}
	if m == nil && bytes.HasPrefix(line, domainWWW) {
		m = wwwURLRegxp.FindSubmatchIndex(line)
		// Bare www. links get an implicit protocol for rendering.
		protocol = []byte("http")
	}
	if m != nil {
		// Trim characters that commonly trail a URL in prose.
		lastChar := line[m[1]-1]
		if lastChar == '.' {
			m[1]--
		} else if lastChar == ')' {
			// Drop unbalanced closing parentheses from the match tail.
			closing := 0
			for i := m[1] - 1; i >= m[0]; i-- {
				if line[i] == ')' {
					closing++
				} else if line[i] == '(' {
					closing--
				}
			}
			if closing > 0 {
				m[1] -= closing
			}
		} else if lastChar == ';' {
			// Back up over what looks like a trailing HTML entity ("&...;").
			i := m[1] - 2
			for ; i >= m[0]; i-- {
				if util.IsAlphaNumeric(line[i]) {
					continue
				}
				break
			}
			if i != m[1]-2 {
				if line[i] == '&' {
					m[1] -= m[1] - i
				}
			}
		}
	}
	if m == nil {
		// Not a URL; try an email address instead.
		if len(line) > 0 && util.IsPunct(line[0]) {
			return nil
		}
		typ = ast.AutoLinkEmail
		stop := util.FindEmailIndex(line)
		if stop < 0 {
			return nil
		}
		at := bytes.IndexByte(line, '@')
		m = []int{0, stop, at, stop - 1}
		// NOTE: m was just assigned above, so the nil half of this check is
		// redundant; only the "dot in the domain part" test can fire.
		if m == nil || bytes.IndexByte(line[m[2]:m[3]], '.') < 0 {
			return nil
		}
		lastChar := line[m[1]-1]
		if lastChar == '.' {
			m[1]--
		}
		if m[1] < len(line) {
			nextChar := line[m[1]]
			if nextChar == '-' || nextChar == '_' {
				return nil
			}
		}
	}
	if m == nil {
		return nil
	}
	// Emit the consumed leading delimiter character as plain text.
	if consumes != 0 {
		s := segment.WithStop(segment.Start + 1)
		ast.MergeOrAppendTextSegment(parent, s)
	}
	consumes += m[1]
	block.Advance(consumes)
	n := ast.NewTextSegment(text.NewSegment(start, start+m[1]))
	link := ast.NewAutoLink(typ, n)
	link.Protocol = protocol
	return link
}
|
||||||
|
|
||||||
|
// CloseBlock implements parser.InlineParser.
func (s *linkifyParser) CloseBlock(parent ast.Node, pc parser.Context) {
	// nothing to do
}

// linkify is the goldmark.Extender for URL/email auto-linking.
type linkify struct {
}

// Linkify is an extension that allow you to parse text that seems like a URL.
var Linkify = &linkify{}

// Extend registers the linkify inline parser on the given Markdown instance.
func (e *linkify) Extend(m goldmark.Markdown) {
	m.Parser().AddOptions(
		parser.WithInlineParsers(
			util.Prioritized(NewLinkifyParser(), 999),
		),
	)
}
|
116
vendor/github.com/yuin/goldmark/extension/strikethrough.go
generated
vendored
Normal file
116
vendor/github.com/yuin/goldmark/extension/strikethrough.go
generated
vendored
Normal file
|
@ -0,0 +1,116 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/extension/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// strikethroughDelimiterProcessor handles '~' delimiter runs for
// goldmark's delimiter framework.
type strikethroughDelimiterProcessor struct {
}

// IsDelimiter reports whether b is a strikethrough delimiter byte.
func (p *strikethroughDelimiterProcessor) IsDelimiter(b byte) bool {
	return b == '~'
}

// CanOpenCloser reports whether the opener/closer delimiters pair up.
func (p *strikethroughDelimiterProcessor) CanOpenCloser(opener, closer *parser.Delimiter) bool {
	return opener.Char == closer.Char
}

// OnMatch builds the AST node produced by a matched delimiter pair.
func (p *strikethroughDelimiterProcessor) OnMatch(consumes int) gast.Node {
	return ast.NewStrikethrough()
}

var defaultStrikethroughDelimiterProcessor = &strikethroughDelimiterProcessor{}

// strikethroughParser parses '~~' delimited strikethrough spans.
type strikethroughParser struct {
}

var defaultStrikethroughParser = &strikethroughParser{}

// NewStrikethroughParser return a new InlineParser that parses
// strikethrough expressions.
func NewStrikethroughParser() parser.InlineParser {
	return defaultStrikethroughParser
}

// Trigger reports the bytes that may start a strikethrough span.
func (s *strikethroughParser) Trigger() []byte {
	return []byte{'~'}
}
|
||||||
|
|
||||||
|
// Parse implements parser.InlineParser. It scans a run of '~' delimiters
// (requiring length 2) and pushes it on the delimiter stack; pairing is
// resolved later by the delimiter processor.
func (s *strikethroughParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
	before := block.PrecendingCharacter()
	line, segment := block.PeekLine()
	node := parser.ScanDelimiter(line, before, 2, defaultStrikethroughDelimiterProcessor)
	if node == nil {
		return nil
	}
	node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
	block.Advance(node.OriginalLength)
	pc.PushDelimiter(node)
	return node
}

// CloseBlock implements parser.InlineParser.
func (s *strikethroughParser) CloseBlock(parent gast.Node, pc parser.Context) {
	// nothing to do
}
|
||||||
|
|
||||||
|
// StrikethroughHTMLRenderer is a renderer.NodeRenderer implementation that
// renders Strikethrough nodes.
type StrikethroughHTMLRenderer struct {
	html.Config
}

// NewStrikethroughHTMLRenderer returns a new StrikethroughHTMLRenderer
// configured with the default html.Config overridden by the given options.
func NewStrikethroughHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
	r := &StrikethroughHTMLRenderer{
		Config: html.NewConfig(),
	}
	for _, opt := range opts {
		opt.SetHTMLOption(&r.Config)
	}
	return r
}

// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
func (r *StrikethroughHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
	reg.Register(ast.KindStrikethrough, r.renderStrikethrough)
}

// StrikethroughAttributeFilter defines attribute names which del elements can have.
var StrikethroughAttributeFilter = html.GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *StrikethroughHTMLRenderer) renderStrikethrough(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_, _ = w.WriteString("<del")
|
||||||
|
html.RenderAttributes(w, n, StrikethroughAttributeFilter)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<del>")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</del>")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// strikethrough is the goldmark.Extender for strikethrough support.
type strikethrough struct {
}

// Strikethrough is an extension that allow you to use strikethrough expression like '~~text~~' .
var Strikethrough = &strikethrough{}

// Extend registers the strikethrough inline parser and HTML renderer on
// the given Markdown instance.
func (e *strikethrough) Extend(m goldmark.Markdown) {
	m.Parser().AddOptions(parser.WithInlineParsers(
		util.Prioritized(NewStrikethroughParser(), 500),
	))
	m.Renderer().AddOptions(renderer.WithNodeRenderers(
		util.Prioritized(NewStrikethroughHTMLRenderer(), 500),
	))
}
|
319
vendor/github.com/yuin/goldmark/extension/table.go
generated
vendored
Normal file
319
vendor/github.com/yuin/goldmark/extension/table.go
generated
vendored
Normal file
|
@ -0,0 +1,319 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/extension/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// tableDelimRegexp matches a whole table delimiter row; the others classify
// one delimiter cell into its column alignment.
var tableDelimRegexp = regexp.MustCompile(`^[\s\-\|\:]+$`)
var tableDelimLeft = regexp.MustCompile(`^\s*\:\-+\s*$`)
var tableDelimRight = regexp.MustCompile(`^\s*\-+\:\s*$`)
var tableDelimCenter = regexp.MustCompile(`^\s*\:\-+\:\s*$`)
var tableDelimNone = regexp.MustCompile(`^\s*\-+\s*$`)

// tableParagraphTransformer rewrites paragraphs that look like pipe tables
// into Table nodes.
type tableParagraphTransformer struct {
}

var defaultTableParagraphTransformer = &tableParagraphTransformer{}

// NewTableParagraphTransformer returns a new ParagraphTransformer
// that can transform pargraphs into tables.
func NewTableParagraphTransformer() parser.ParagraphTransformer {
	return defaultTableParagraphTransformer
}
|
||||||
|
|
||||||
|
// Transform implements parser.ParagraphTransformer. A paragraph becomes a
// table when its second line is a valid delimiter row and its first line
// yields a header with exactly one cell per delimiter column; remaining
// lines become body rows and the paragraph is replaced in place.
func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, reader text.Reader, pc parser.Context) {
	lines := node.Lines()
	// Need at least a header line and a delimiter line.
	if lines.Len() < 2 {
		return
	}
	alignments := b.parseDelimiter(lines.At(1), reader)
	if alignments == nil {
		return
	}
	header := b.parseRow(lines.At(0), alignments, true, reader)
	if header == nil || len(alignments) != header.ChildCount() {
		return
	}
	table := ast.NewTable()
	table.Alignments = alignments
	table.AppendChild(table, ast.NewTableHeader(header))
	for i := 2; i < lines.Len(); i++ {
		table.AppendChild(table, b.parseRow(lines.At(i), alignments, false, reader))
	}
	node.Parent().InsertBefore(node.Parent(), node, table)
	node.Parent().RemoveChild(node.Parent(), node)
}
|
||||||
|
|
||||||
|
// parseRow splits one source line on unescaped '|' into TableCell nodes,
// trimming surrounding whitespace and optional leading/trailing pipes.
// Body rows are truncated at len(alignments) columns and padded with empty
// cells when short; header rows keep all cells so the caller can reject a
// column-count mismatch.
func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []ast.Alignment, isHeader bool, reader text.Reader) *ast.TableRow {
	source := reader.Source()
	line := segment.Value(source)
	pos := 0
	pos += util.TrimLeftSpaceLength(line)
	limit := len(line)
	limit -= util.TrimRightSpaceLength(line)
	row := ast.NewTableRow(alignments)
	// Skip an optional leading pipe.
	if len(line) > 0 && line[pos] == '|' {
		pos++
	}
	// Drop an optional trailing pipe.
	if len(line) > 0 && line[limit-1] == '|' {
		limit--
	}
	i := 0
	for ; pos < limit; i++ {
		alignment := ast.AlignNone
		if i >= len(alignments) {
			// Extra cells beyond the delimiter row: body rows stop here.
			if !isHeader {
				return row
			}
		} else {
			alignment = alignments[i]
		}
		// Find the next cell boundary; the last cell runs to end of line.
		closure := util.FindClosure(line[pos:], byte(0), '|', true, false)
		if closure < 0 {
			closure = len(line[pos:])
		}
		node := ast.NewTableCell()
		seg := text.NewSegment(segment.Start+pos, segment.Start+pos+closure)
		seg = seg.TrimLeftSpace(source)
		seg = seg.TrimRightSpace(source)
		node.Lines().Append(seg)
		node.Alignment = alignment
		row.AppendChild(row, node)
		pos += closure + 1
	}
	// Pad short rows with empty cells so every row has a full column set.
	for ; i < len(alignments); i++ {
		row.AppendChild(row, ast.NewTableCell())
	}
	return row
}
|
||||||
|
|
||||||
|
// parseDelimiter validates a table delimiter row and returns one alignment
// per column (e.g. ":-" left, "-:" right, ":-:" center, "-" none), or nil
// when the line is not a well-formed delimiter row.
func (b *tableParagraphTransformer) parseDelimiter(segment text.Segment, reader text.Reader) []ast.Alignment {
	line := segment.Value(reader.Source())
	// Only '-', '|', ':' and whitespace may appear in a delimiter row.
	if !tableDelimRegexp.Match(line) {
		return nil
	}
	cols := bytes.Split(line, []byte{'|'})
	// Ignore the empty fragments produced by leading/trailing pipes.
	if util.IsBlank(cols[0]) {
		cols = cols[1:]
	}
	if len(cols) > 0 && util.IsBlank(cols[len(cols)-1]) {
		cols = cols[:len(cols)-1]
	}

	var alignments []ast.Alignment
	for _, col := range cols {
		if tableDelimLeft.Match(col) {
			alignments = append(alignments, ast.AlignLeft)
		} else if tableDelimRight.Match(col) {
			alignments = append(alignments, ast.AlignRight)
		} else if tableDelimCenter.Match(col) {
			alignments = append(alignments, ast.AlignCenter)
		} else if tableDelimNone.Match(col) {
			alignments = append(alignments, ast.AlignNone)
		} else {
			// Any malformed cell invalidates the whole delimiter row.
			return nil
		}
	}
	return alignments
}
|
||||||
|
|
||||||
|
// TableHTMLRenderer is a renderer.NodeRenderer implementation that
// renders Table nodes.
type TableHTMLRenderer struct {
	html.Config
}

// NewTableHTMLRenderer returns a new TableHTMLRenderer configured with the
// default html.Config overridden by the given options.
func NewTableHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
	r := &TableHTMLRenderer{
		Config: html.NewConfig(),
	}
	for _, opt := range opts {
		opt.SetHTMLOption(&r.Config)
	}
	return r
}

// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
func (r *TableHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
	reg.Register(ast.KindTable, r.renderTable)
	reg.Register(ast.KindTableHeader, r.renderTableHeader)
	reg.Register(ast.KindTableRow, r.renderTableRow)
	reg.Register(ast.KindTableCell, r.renderTableCell)
}
|
||||||
|
|
||||||
|
// TableAttributeFilter defines attribute names which table elements can have.
|
||||||
|
var TableAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("align"), // [Deprecated]
|
||||||
|
[]byte("bgcolor"), // [Deprecated]
|
||||||
|
[]byte("border"), // [Deprecated]
|
||||||
|
[]byte("cellpadding"), // [Deprecated]
|
||||||
|
[]byte("cellspacing"), // [Deprecated]
|
||||||
|
[]byte("frame"), // [Deprecated]
|
||||||
|
[]byte("rules"), // [Deprecated]
|
||||||
|
[]byte("summary"), // [Deprecated]
|
||||||
|
[]byte("width"), // [Deprecated]
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *TableHTMLRenderer) renderTable(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<table")
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, n, TableAttributeFilter)
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</table>\n")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableHeaderAttributeFilter defines attribute names which <thead> elements can have.
|
||||||
|
var TableHeaderAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("align"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
[]byte("bgcolor"), // [Not Standardized]
|
||||||
|
[]byte("char"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
[]byte("charoff"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
[]byte("valign"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *TableHTMLRenderer) renderTableHeader(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<thead")
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, n, TableHeaderAttributeFilter)
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
_, _ = w.WriteString("<tr>\n") // Header <tr> has no separate handle
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</tr>\n")
|
||||||
|
_, _ = w.WriteString("</thead>\n")
|
||||||
|
if n.NextSibling() != nil {
|
||||||
|
_, _ = w.WriteString("<tbody>\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableRowAttributeFilter defines attribute names which <tr> elements can have.
|
||||||
|
var TableRowAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("align"), // [Obsolete since HTML5]
|
||||||
|
[]byte("bgcolor"), // [Obsolete since HTML5]
|
||||||
|
[]byte("char"), // [Obsolete since HTML5]
|
||||||
|
[]byte("charoff"), // [Obsolete since HTML5]
|
||||||
|
[]byte("valign"), // [Obsolete since HTML5]
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *TableHTMLRenderer) renderTableRow(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<tr")
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
html.RenderAttributes(w, n, TableRowAttributeFilter)
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</tr>\n")
|
||||||
|
if n.Parent().LastChild() == n {
|
||||||
|
_, _ = w.WriteString("</tbody>\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableThCellAttributeFilter defines attribute names which table <th> cells can have.
|
||||||
|
var TableThCellAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("abbr"), // [OK] Contains a short abbreviated description of the cell's content [NOT OK in <td>]
|
||||||
|
|
||||||
|
[]byte("align"), // [Obsolete since HTML5]
|
||||||
|
[]byte("axis"), // [Obsolete since HTML5]
|
||||||
|
[]byte("bgcolor"), // [Not Standardized]
|
||||||
|
[]byte("char"), // [Obsolete since HTML5]
|
||||||
|
[]byte("charoff"), // [Obsolete since HTML5]
|
||||||
|
|
||||||
|
[]byte("colspan"), // [OK] Number of columns that the cell is to span
|
||||||
|
[]byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
|
||||||
|
|
||||||
|
[]byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
|
||||||
|
[]byte("rowspan"), // [OK] Number of rows that the cell is to span
|
||||||
|
[]byte("scope"), // [OK] This enumerated attribute defines the cells that the header (defined in the <th>) element relates to [NOT OK in <td>]
|
||||||
|
|
||||||
|
[]byte("valign"), // [Obsolete since HTML5]
|
||||||
|
[]byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
)
|
||||||
|
|
||||||
|
// TableTdCellAttributeFilter defines attribute names which table <td> cells can have.
|
||||||
|
var TableTdCellAttributeFilter = html.GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("abbr"), // [Obsolete since HTML5] [OK in <th>]
|
||||||
|
[]byte("align"), // [Obsolete since HTML5]
|
||||||
|
[]byte("axis"), // [Obsolete since HTML5]
|
||||||
|
[]byte("bgcolor"), // [Not Standardized]
|
||||||
|
[]byte("char"), // [Obsolete since HTML5]
|
||||||
|
[]byte("charoff"), // [Obsolete since HTML5]
|
||||||
|
|
||||||
|
[]byte("colspan"), // [OK] Number of columns that the cell is to span
|
||||||
|
[]byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
|
||||||
|
|
||||||
|
[]byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
|
||||||
|
[]byte("rowspan"), // [OK] Number of rows that the cell is to span
|
||||||
|
|
||||||
|
[]byte("scope"), // [Obsolete since HTML5] [OK in <th>]
|
||||||
|
[]byte("valign"), // [Obsolete since HTML5]
|
||||||
|
[]byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *TableHTMLRenderer) renderTableCell(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.TableCell)
|
||||||
|
tag := "td"
|
||||||
|
if n.Parent().Kind() == ast.KindTableHeader {
|
||||||
|
tag = "th"
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
align := ""
|
||||||
|
if n.Alignment != ast.AlignNone {
|
||||||
|
if _, ok := n.AttributeString("align"); !ok { // Skip align render if overridden
|
||||||
|
// TODO: "align" is deprecated. style="text-align:%s" instead?
|
||||||
|
align = fmt.Sprintf(` align="%s"`, n.Alignment.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "<%s", tag)
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
if tag == "td" {
|
||||||
|
html.RenderAttributes(w, n, TableTdCellAttributeFilter) // <td>
|
||||||
|
} else {
|
||||||
|
html.RenderAttributes(w, n, TableThCellAttributeFilter) // <th>
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "%s>", align)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(w, "</%s>\n", tag)
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type table struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// Table is an extension that allow you to use GFM tables .
|
||||||
|
var Table = &table{}
|
||||||
|
|
||||||
|
func (e *table) Extend(m goldmark.Markdown) {
|
||||||
|
m.Parser().AddOptions(parser.WithParagraphTransformers(
|
||||||
|
util.Prioritized(NewTableParagraphTransformer(), 200),
|
||||||
|
))
|
||||||
|
m.Renderer().AddOptions(renderer.WithNodeRenderers(
|
||||||
|
util.Prioritized(NewTableHTMLRenderer(), 500),
|
||||||
|
))
|
||||||
|
}
|
115
vendor/github.com/yuin/goldmark/extension/tasklist.go
generated
vendored
Normal file
115
vendor/github.com/yuin/goldmark/extension/tasklist.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/extension/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var taskListRegexp = regexp.MustCompile(`^\[([\sxX])\]\s*`)
|
||||||
|
|
||||||
|
type taskCheckBoxParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultTaskCheckBoxParser = &taskCheckBoxParser{}
|
||||||
|
|
||||||
|
// NewTaskCheckBoxParser returns a new InlineParser that can parse
|
||||||
|
// checkboxes in list items.
|
||||||
|
// This parser must take precedence over the parser.LinkParser.
|
||||||
|
func NewTaskCheckBoxParser() parser.InlineParser {
|
||||||
|
return defaultTaskCheckBoxParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *taskCheckBoxParser) Trigger() []byte {
|
||||||
|
return []byte{'['}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *taskCheckBoxParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
|
||||||
|
// Given AST structure must be like
|
||||||
|
// - List
|
||||||
|
// - ListItem : parent.Parent
|
||||||
|
// - TextBlock : parent
|
||||||
|
// (current line)
|
||||||
|
if parent.Parent() == nil || parent.Parent().FirstChild() != parent {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := parent.Parent().(*gast.ListItem); !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
line, _ := block.PeekLine()
|
||||||
|
m := taskListRegexp.FindSubmatchIndex(line)
|
||||||
|
if m == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
value := line[m[2]:m[3]][0]
|
||||||
|
block.Advance(m[1])
|
||||||
|
checked := value == 'x' || value == 'X'
|
||||||
|
return ast.NewTaskCheckBox(checked)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *taskCheckBoxParser) CloseBlock(parent gast.Node, pc parser.Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskCheckBoxHTMLRenderer is a renderer.NodeRenderer implementation that
|
||||||
|
// renders checkboxes in list items.
|
||||||
|
type TaskCheckBoxHTMLRenderer struct {
|
||||||
|
html.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTaskCheckBoxHTMLRenderer returns a new TaskCheckBoxHTMLRenderer.
|
||||||
|
func NewTaskCheckBoxHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
|
||||||
|
r := &TaskCheckBoxHTMLRenderer{
|
||||||
|
Config: html.NewConfig(),
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt.SetHTMLOption(&r.Config)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
|
||||||
|
func (r *TaskCheckBoxHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||||
|
reg.Register(ast.KindTaskCheckBox, r.renderTaskCheckBox)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
n := node.(*ast.TaskCheckBox)
|
||||||
|
|
||||||
|
if n.IsChecked {
|
||||||
|
w.WriteString(`<input checked="" disabled="" type="checkbox"`)
|
||||||
|
} else {
|
||||||
|
w.WriteString(`<input disabled="" type="checkbox"`)
|
||||||
|
}
|
||||||
|
if r.XHTML {
|
||||||
|
w.WriteString(" />")
|
||||||
|
} else {
|
||||||
|
w.WriteString(">")
|
||||||
|
}
|
||||||
|
return gast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type taskList struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// TaskList is an extension that allow you to use GFM task lists.
|
||||||
|
var TaskList = &taskList{}
|
||||||
|
|
||||||
|
func (e *taskList) Extend(m goldmark.Markdown) {
|
||||||
|
m.Parser().AddOptions(parser.WithInlineParsers(
|
||||||
|
util.Prioritized(NewTaskCheckBoxParser(), 0),
|
||||||
|
))
|
||||||
|
m.Renderer().AddOptions(renderer.WithNodeRenderers(
|
||||||
|
util.Prioritized(NewTaskCheckBoxHTMLRenderer(), 500),
|
||||||
|
))
|
||||||
|
}
|
245
vendor/github.com/yuin/goldmark/extension/typographer.go
generated
vendored
Normal file
245
vendor/github.com/yuin/goldmark/extension/typographer.go
generated
vendored
Normal file
|
@ -0,0 +1,245 @@
|
||||||
|
package extension
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark"
|
||||||
|
gast "github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TypographicPunctuation is a key of the punctuations that can be replaced with
|
||||||
|
// typographic entities.
|
||||||
|
type TypographicPunctuation int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// LeftSingleQuote is '
|
||||||
|
LeftSingleQuote TypographicPunctuation = iota + 1
|
||||||
|
// RightSingleQuote is '
|
||||||
|
RightSingleQuote
|
||||||
|
// LeftDoubleQuote is "
|
||||||
|
LeftDoubleQuote
|
||||||
|
// RightDoubleQuote is "
|
||||||
|
RightDoubleQuote
|
||||||
|
// EnDash is --
|
||||||
|
EnDash
|
||||||
|
// EmDash is ---
|
||||||
|
EmDash
|
||||||
|
// Ellipsis is ...
|
||||||
|
Ellipsis
|
||||||
|
// LeftAngleQuote is <<
|
||||||
|
LeftAngleQuote
|
||||||
|
// RightAngleQuote is >>
|
||||||
|
RightAngleQuote
|
||||||
|
|
||||||
|
typographicPunctuationMax
|
||||||
|
)
|
||||||
|
|
||||||
|
// An TypographerConfig struct is a data structure that holds configuration of the
|
||||||
|
// Typographer extension.
|
||||||
|
type TypographerConfig struct {
|
||||||
|
Substitutions [][]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDefaultSubstitutions() [][]byte {
|
||||||
|
replacements := make([][]byte, typographicPunctuationMax)
|
||||||
|
replacements[LeftSingleQuote] = []byte("‘")
|
||||||
|
replacements[RightSingleQuote] = []byte("’")
|
||||||
|
replacements[LeftDoubleQuote] = []byte("“")
|
||||||
|
replacements[RightDoubleQuote] = []byte("”")
|
||||||
|
replacements[EnDash] = []byte("–")
|
||||||
|
replacements[EmDash] = []byte("—")
|
||||||
|
replacements[Ellipsis] = []byte("…")
|
||||||
|
replacements[LeftAngleQuote] = []byte("«")
|
||||||
|
replacements[RightAngleQuote] = []byte("»")
|
||||||
|
|
||||||
|
return replacements
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetOption implements SetOptioner.
|
||||||
|
func (b *TypographerConfig) SetOption(name parser.OptionName, value interface{}) {
|
||||||
|
switch name {
|
||||||
|
case optTypographicSubstitutions:
|
||||||
|
b.Substitutions = value.([][]byte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A TypographerOption interface sets options for the TypographerParser.
|
||||||
|
type TypographerOption interface {
|
||||||
|
parser.Option
|
||||||
|
SetTypographerOption(*TypographerConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
const optTypographicSubstitutions parser.OptionName = "TypographicSubstitutions"
|
||||||
|
|
||||||
|
// TypographicSubstitutions is a list of the substitutions for the Typographer extension.
|
||||||
|
type TypographicSubstitutions map[TypographicPunctuation][]byte
|
||||||
|
|
||||||
|
type withTypographicSubstitutions struct {
|
||||||
|
value [][]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withTypographicSubstitutions) SetParserOption(c *parser.Config) {
|
||||||
|
c.Options[optTypographicSubstitutions] = o.value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withTypographicSubstitutions) SetTypographerOption(p *TypographerConfig) {
|
||||||
|
p.Substitutions = o.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTypographicSubstitutions is a functional otpion that specify replacement text
|
||||||
|
// for punctuations.
|
||||||
|
func WithTypographicSubstitutions(values map[TypographicPunctuation][]byte) TypographerOption {
|
||||||
|
replacements := newDefaultSubstitutions()
|
||||||
|
for k, v := range values {
|
||||||
|
replacements[k] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return &withTypographicSubstitutions{replacements}
|
||||||
|
}
|
||||||
|
|
||||||
|
type typographerDelimiterProcessor struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *typographerDelimiterProcessor) IsDelimiter(b byte) bool {
|
||||||
|
return b == '\'' || b == '"'
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *typographerDelimiterProcessor) CanOpenCloser(opener, closer *parser.Delimiter) bool {
|
||||||
|
return opener.Char == closer.Char
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *typographerDelimiterProcessor) OnMatch(consumes int) gast.Node {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultTypographerDelimiterProcessor = &typographerDelimiterProcessor{}
|
||||||
|
|
||||||
|
type typographerParser struct {
|
||||||
|
TypographerConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTypographerParser return a new InlineParser that parses
|
||||||
|
// typographer expressions.
|
||||||
|
func NewTypographerParser(opts ...TypographerOption) parser.InlineParser {
|
||||||
|
p := &typographerParser{
|
||||||
|
TypographerConfig: TypographerConfig{
|
||||||
|
Substitutions: newDefaultSubstitutions(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, o := range opts {
|
||||||
|
o.SetTypographerOption(&p.TypographerConfig)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *typographerParser) Trigger() []byte {
|
||||||
|
return []byte{'\'', '"', '-', '.', '<', '>'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
|
||||||
|
before := block.PrecendingCharacter()
|
||||||
|
line, _ := block.PeekLine()
|
||||||
|
c := line[0]
|
||||||
|
if len(line) > 2 {
|
||||||
|
if c == '-' {
|
||||||
|
if s.Substitutions[EmDash] != nil && line[1] == '-' && line[2] == '-' { // ---
|
||||||
|
node := gast.NewString(s.Substitutions[EmDash])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(3)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
} else if c == '.' {
|
||||||
|
if s.Substitutions[Ellipsis] != nil && line[1] == '.' && line[2] == '.' { // ...
|
||||||
|
node := gast.NewString(s.Substitutions[Ellipsis])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(3)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(line) > 1 {
|
||||||
|
if c == '<' {
|
||||||
|
if s.Substitutions[LeftAngleQuote] != nil && line[1] == '<' { // <<
|
||||||
|
node := gast.NewString(s.Substitutions[LeftAngleQuote])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(2)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
} else if c == '>' {
|
||||||
|
if s.Substitutions[RightAngleQuote] != nil && line[1] == '>' { // >>
|
||||||
|
node := gast.NewString(s.Substitutions[RightAngleQuote])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(2)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
} else if s.Substitutions[EnDash] != nil && c == '-' && line[1] == '-' { // --
|
||||||
|
node := gast.NewString(s.Substitutions[EnDash])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(2)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c == '\'' || c == '"' {
|
||||||
|
d := parser.ScanDelimiter(line, before, 1, defaultTypographerDelimiterProcessor)
|
||||||
|
if d == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if c == '\'' {
|
||||||
|
if s.Substitutions[LeftSingleQuote] != nil && d.CanOpen && !d.CanClose {
|
||||||
|
node := gast.NewString(s.Substitutions[LeftSingleQuote])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(1)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
if s.Substitutions[RightSingleQuote] != nil && d.CanClose && !d.CanOpen {
|
||||||
|
node := gast.NewString(s.Substitutions[RightSingleQuote])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(1)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c == '"' {
|
||||||
|
if s.Substitutions[LeftDoubleQuote] != nil && d.CanOpen && !d.CanClose {
|
||||||
|
node := gast.NewString(s.Substitutions[LeftDoubleQuote])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(1)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
if s.Substitutions[RightDoubleQuote] != nil && d.CanClose && !d.CanOpen {
|
||||||
|
node := gast.NewString(s.Substitutions[RightDoubleQuote])
|
||||||
|
node.SetCode(true)
|
||||||
|
block.Advance(1)
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *typographerParser) CloseBlock(parent gast.Node, pc parser.Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
type typographer struct {
|
||||||
|
options []TypographerOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// Typographer is an extension that repalace punctuations with typographic entities.
|
||||||
|
var Typographer = &typographer{}
|
||||||
|
|
||||||
|
// NewTypographer returns a new Entender that repalace punctuations with typographic entities.
|
||||||
|
func NewTypographer(opts ...TypographerOption) goldmark.Extender {
|
||||||
|
return &typographer{
|
||||||
|
options: opts,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *typographer) Extend(m goldmark.Markdown) {
|
||||||
|
m.Parser().AddOptions(parser.WithInlineParsers(
|
||||||
|
util.Prioritized(NewTypographerParser(e.options...), 9999),
|
||||||
|
))
|
||||||
|
}
|
3
vendor/github.com/yuin/goldmark/go.mod
generated
vendored
Normal file
3
vendor/github.com/yuin/goldmark/go.mod
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
module github.com/yuin/goldmark
|
||||||
|
|
||||||
|
go 1.13
|
0
vendor/github.com/yuin/goldmark/go.sum
generated
vendored
Normal file
0
vendor/github.com/yuin/goldmark/go.sum
generated
vendored
Normal file
140
vendor/github.com/yuin/goldmark/markdown.go
generated
vendored
Normal file
140
vendor/github.com/yuin/goldmark/markdown.go
generated
vendored
Normal file
|
@ -0,0 +1,140 @@
|
||||||
|
// Package goldmark implements functions to convert markdown text to a desired format.
|
||||||
|
package goldmark
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/parser"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/renderer/html"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultParser returns a new Parser that is configured by default values.
|
||||||
|
func DefaultParser() parser.Parser {
|
||||||
|
return parser.NewParser(parser.WithBlockParsers(parser.DefaultBlockParsers()...),
|
||||||
|
parser.WithInlineParsers(parser.DefaultInlineParsers()...),
|
||||||
|
parser.WithParagraphTransformers(parser.DefaultParagraphTransformers()...),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultRenderer returns a new Renderer that is configured by default values.
|
||||||
|
func DefaultRenderer() renderer.Renderer {
|
||||||
|
return renderer.NewRenderer(renderer.WithNodeRenderers(util.Prioritized(html.NewRenderer(), 1000)))
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultMarkdown = New()
|
||||||
|
|
||||||
|
// Convert interprets a UTF-8 bytes source in Markdown and
|
||||||
|
// write rendered contents to a writer w.
|
||||||
|
func Convert(source []byte, w io.Writer, opts ...parser.ParseOption) error {
|
||||||
|
return defaultMarkdown.Convert(source, w, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Markdown interface offers functions to convert Markdown text to
|
||||||
|
// a desired format.
|
||||||
|
type Markdown interface {
|
||||||
|
// Convert interprets a UTF-8 bytes source in Markdown and write rendered
|
||||||
|
// contents to a writer w.
|
||||||
|
Convert(source []byte, writer io.Writer, opts ...parser.ParseOption) error
|
||||||
|
|
||||||
|
// Parser returns a Parser that will be used for conversion.
|
||||||
|
Parser() parser.Parser
|
||||||
|
|
||||||
|
// SetParser sets a Parser to this object.
|
||||||
|
SetParser(parser.Parser)
|
||||||
|
|
||||||
|
// Parser returns a Renderer that will be used for conversion.
|
||||||
|
Renderer() renderer.Renderer
|
||||||
|
|
||||||
|
// SetRenderer sets a Renderer to this object.
|
||||||
|
SetRenderer(renderer.Renderer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Option is a functional option type for Markdown objects.
|
||||||
|
type Option func(*markdown)
|
||||||
|
|
||||||
|
// WithExtensions adds extensions.
|
||||||
|
func WithExtensions(ext ...Extender) Option {
|
||||||
|
return func(m *markdown) {
|
||||||
|
m.extensions = append(m.extensions, ext...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithParser allows you to override the default parser.
|
||||||
|
func WithParser(p parser.Parser) Option {
|
||||||
|
return func(m *markdown) {
|
||||||
|
m.parser = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithParserOptions applies options for the parser.
|
||||||
|
func WithParserOptions(opts ...parser.Option) Option {
|
||||||
|
return func(m *markdown) {
|
||||||
|
m.parser.AddOptions(opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRenderer allows you to override the default renderer.
|
||||||
|
func WithRenderer(r renderer.Renderer) Option {
|
||||||
|
return func(m *markdown) {
|
||||||
|
m.renderer = r
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRendererOptions applies options for the renderer.
|
||||||
|
func WithRendererOptions(opts ...renderer.Option) Option {
|
||||||
|
return func(m *markdown) {
|
||||||
|
m.renderer.AddOptions(opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type markdown struct {
|
||||||
|
parser parser.Parser
|
||||||
|
renderer renderer.Renderer
|
||||||
|
extensions []Extender
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new Markdown with given options.
|
||||||
|
func New(options ...Option) Markdown {
|
||||||
|
md := &markdown{
|
||||||
|
parser: DefaultParser(),
|
||||||
|
renderer: DefaultRenderer(),
|
||||||
|
extensions: []Extender{},
|
||||||
|
}
|
||||||
|
for _, opt := range options {
|
||||||
|
opt(md)
|
||||||
|
}
|
||||||
|
for _, e := range md.extensions {
|
||||||
|
e.Extend(md)
|
||||||
|
}
|
||||||
|
return md
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *markdown) Convert(source []byte, writer io.Writer, opts ...parser.ParseOption) error {
|
||||||
|
reader := text.NewReader(source)
|
||||||
|
doc := m.parser.Parse(reader, opts...)
|
||||||
|
return m.renderer.Render(writer, source, doc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *markdown) Parser() parser.Parser {
|
||||||
|
return m.parser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *markdown) SetParser(v parser.Parser) {
|
||||||
|
m.parser = v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *markdown) Renderer() renderer.Renderer {
|
||||||
|
return m.renderer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *markdown) SetRenderer(v renderer.Renderer) {
|
||||||
|
m.renderer = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Extender interface is used for extending Markdown.
|
||||||
|
type Extender interface {
|
||||||
|
// Extend extends the Markdown.
|
||||||
|
Extend(Markdown)
|
||||||
|
}
|
319
vendor/github.com/yuin/goldmark/parser/attribute.go
generated
vendored
Normal file
319
vendor/github.com/yuin/goldmark/parser/attribute.go
generated
vendored
Normal file
|
@ -0,0 +1,319 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
var attrNameID = []byte("id")
|
||||||
|
var attrNameClass = []byte("class")
|
||||||
|
|
||||||
|
// An Attribute is an attribute of the markdown elements
|
||||||
|
type Attribute struct {
|
||||||
|
Name []byte
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Attributes is a collection of attributes.
|
||||||
|
type Attributes []Attribute
|
||||||
|
|
||||||
|
// Find returns a (value, true) if an attribute correspond with given name is found, otherwise (nil, false).
|
||||||
|
func (as Attributes) Find(name []byte) (interface{}, bool) {
|
||||||
|
for _, a := range as {
|
||||||
|
if bytes.Equal(a.Name, name) {
|
||||||
|
return a.Value, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (as Attributes) findUpdate(name []byte, cb func(v interface{}) interface{}) bool {
|
||||||
|
for i, a := range as {
|
||||||
|
if bytes.Equal(a.Name, name) {
|
||||||
|
as[i].Value = cb(a.Value)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseAttributes parses attributes into a map.
|
||||||
|
// ParseAttributes returns a parsed attributes and true if could parse
|
||||||
|
// attributes, otherwise nil and false.
|
||||||
|
func ParseAttributes(reader text.Reader) (Attributes, bool) {
|
||||||
|
savedLine, savedPosition := reader.Position()
|
||||||
|
reader.SkipSpaces()
|
||||||
|
if reader.Peek() != '{' {
|
||||||
|
reader.SetPosition(savedLine, savedPosition)
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
reader.Advance(1)
|
||||||
|
attrs := Attributes{}
|
||||||
|
for {
|
||||||
|
if reader.Peek() == '}' {
|
||||||
|
reader.Advance(1)
|
||||||
|
return attrs, true
|
||||||
|
}
|
||||||
|
attr, ok := parseAttribute(reader)
|
||||||
|
if !ok {
|
||||||
|
reader.SetPosition(savedLine, savedPosition)
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
if bytes.Equal(attr.Name, attrNameClass) {
|
||||||
|
if !attrs.findUpdate(attrNameClass, func(v interface{}) interface{} {
|
||||||
|
ret := make([]byte, 0, len(v.([]byte))+1+len(attr.Value.([]byte)))
|
||||||
|
ret = append(ret, v.([]byte)...)
|
||||||
|
return append(append(ret, ' '), attr.Value.([]byte)...)
|
||||||
|
}) {
|
||||||
|
attrs = append(attrs, attr)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
attrs = append(attrs, attr)
|
||||||
|
}
|
||||||
|
reader.SkipSpaces()
|
||||||
|
if reader.Peek() == ',' {
|
||||||
|
reader.Advance(1)
|
||||||
|
reader.SkipSpaces()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAttribute(reader text.Reader) (Attribute, bool) {
|
||||||
|
reader.SkipSpaces()
|
||||||
|
c := reader.Peek()
|
||||||
|
if c == '#' || c == '.' {
|
||||||
|
reader.Advance(1)
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
i := 0
|
||||||
|
for ; i < len(line) && !util.IsSpace(line[i]) && (!util.IsPunct(line[i]) || line[i] == '_' || line[i] == '-'); i++ {
|
||||||
|
}
|
||||||
|
name := attrNameClass
|
||||||
|
if c == '#' {
|
||||||
|
name = attrNameID
|
||||||
|
}
|
||||||
|
reader.Advance(i)
|
||||||
|
return Attribute{Name: name, Value: line[0:i]}, true
|
||||||
|
}
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
if len(line) == 0 {
|
||||||
|
return Attribute{}, false
|
||||||
|
}
|
||||||
|
c = line[0]
|
||||||
|
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||||
|
c == '_' || c == ':') {
|
||||||
|
return Attribute{}, false
|
||||||
|
}
|
||||||
|
i := 0
|
||||||
|
for ; i < len(line); i++ {
|
||||||
|
c = line[i]
|
||||||
|
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||||
|
(c >= '0' && c <= '9') ||
|
||||||
|
c == '_' || c == ':' || c == '.' || c == '-') {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
name := line[:i]
|
||||||
|
reader.Advance(i)
|
||||||
|
reader.SkipSpaces()
|
||||||
|
c = reader.Peek()
|
||||||
|
if c != '=' {
|
||||||
|
return Attribute{}, false
|
||||||
|
}
|
||||||
|
reader.Advance(1)
|
||||||
|
reader.SkipSpaces()
|
||||||
|
value, ok := parseAttributeValue(reader)
|
||||||
|
if !ok {
|
||||||
|
return Attribute{}, false
|
||||||
|
}
|
||||||
|
return Attribute{Name: name, Value: value}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAttributeValue(reader text.Reader) (interface{}, bool) {
|
||||||
|
reader.SkipSpaces()
|
||||||
|
c := reader.Peek()
|
||||||
|
var value interface{}
|
||||||
|
ok := false
|
||||||
|
switch c {
|
||||||
|
case text.EOF:
|
||||||
|
return Attribute{}, false
|
||||||
|
case '{':
|
||||||
|
value, ok = ParseAttributes(reader)
|
||||||
|
case '[':
|
||||||
|
value, ok = parseAttributeArray(reader)
|
||||||
|
case '"':
|
||||||
|
value, ok = parseAttributeString(reader)
|
||||||
|
default:
|
||||||
|
if c == '-' || c == '+' || util.IsNumeric(c) {
|
||||||
|
value, ok = parseAttributeNumber(reader)
|
||||||
|
} else {
|
||||||
|
value, ok = parseAttributeOthers(reader)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return value, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAttributeArray(reader text.Reader) ([]interface{}, bool) {
|
||||||
|
reader.Advance(1) // skip [
|
||||||
|
ret := []interface{}{}
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
c := reader.Peek()
|
||||||
|
comma := false
|
||||||
|
if i != 0 && c == ',' {
|
||||||
|
reader.Advance(1)
|
||||||
|
comma = true
|
||||||
|
}
|
||||||
|
if c == ']' {
|
||||||
|
if !comma {
|
||||||
|
reader.Advance(1)
|
||||||
|
return ret, true
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
reader.SkipSpaces()
|
||||||
|
value, ok := parseAttributeValue(reader)
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
ret = append(ret, value)
|
||||||
|
reader.SkipSpaces()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAttributeString(reader text.Reader) ([]byte, bool) {
|
||||||
|
reader.Advance(1) // skip "
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
i := 0
|
||||||
|
l := len(line)
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for i < l {
|
||||||
|
c := line[i]
|
||||||
|
if c == '\\' && i != l-1 {
|
||||||
|
n := line[i+1]
|
||||||
|
switch n {
|
||||||
|
case '"', '/', '\\':
|
||||||
|
buf.WriteByte(n)
|
||||||
|
i += 2
|
||||||
|
case 'b':
|
||||||
|
buf.WriteString("\b")
|
||||||
|
i += 2
|
||||||
|
case 'f':
|
||||||
|
buf.WriteString("\f")
|
||||||
|
i += 2
|
||||||
|
case 'n':
|
||||||
|
buf.WriteString("\n")
|
||||||
|
i += 2
|
||||||
|
case 'r':
|
||||||
|
buf.WriteString("\r")
|
||||||
|
i += 2
|
||||||
|
case 't':
|
||||||
|
buf.WriteString("\t")
|
||||||
|
i += 2
|
||||||
|
default:
|
||||||
|
buf.WriteByte('\\')
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c == '"' {
|
||||||
|
reader.Advance(i + 1)
|
||||||
|
return buf.Bytes(), true
|
||||||
|
}
|
||||||
|
buf.WriteByte(c)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func scanAttributeDecimal(reader text.Reader, w io.ByteWriter) {
|
||||||
|
for {
|
||||||
|
c := reader.Peek()
|
||||||
|
if util.IsNumeric(c) {
|
||||||
|
w.WriteByte(c)
|
||||||
|
} else {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
reader.Advance(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseAttributeNumber(reader text.Reader) (float64, bool) {
|
||||||
|
sign := 1
|
||||||
|
c := reader.Peek()
|
||||||
|
if c == '-' {
|
||||||
|
sign = -1
|
||||||
|
reader.Advance(1)
|
||||||
|
} else if c == '+' {
|
||||||
|
reader.Advance(1)
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if !util.IsNumeric(reader.Peek()) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
scanAttributeDecimal(reader, &buf)
|
||||||
|
if buf.Len() == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
c = reader.Peek()
|
||||||
|
if c == '.' {
|
||||||
|
buf.WriteByte(c)
|
||||||
|
reader.Advance(1)
|
||||||
|
scanAttributeDecimal(reader, &buf)
|
||||||
|
}
|
||||||
|
c = reader.Peek()
|
||||||
|
if c == 'e' || c == 'E' {
|
||||||
|
buf.WriteByte(c)
|
||||||
|
reader.Advance(1)
|
||||||
|
c = reader.Peek()
|
||||||
|
if c == '-' || c == '+' {
|
||||||
|
buf.WriteByte(c)
|
||||||
|
reader.Advance(1)
|
||||||
|
}
|
||||||
|
scanAttributeDecimal(reader, &buf)
|
||||||
|
}
|
||||||
|
f, err := strconv.ParseFloat(buf.String(), 10)
|
||||||
|
if err != nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return float64(sign) * f, true
|
||||||
|
}
|
||||||
|
|
||||||
|
var bytesTrue = []byte("true")
|
||||||
|
var bytesFalse = []byte("false")
|
||||||
|
var bytesNull = []byte("null")
|
||||||
|
|
||||||
|
func parseAttributeOthers(reader text.Reader) (interface{}, bool) {
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
c := line[0]
|
||||||
|
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||||
|
c == '_' || c == ':') {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
i := 0
|
||||||
|
for ; i < len(line); i++ {
|
||||||
|
c := line[i]
|
||||||
|
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
|
||||||
|
(c >= '0' && c <= '9') ||
|
||||||
|
c == '_' || c == ':' || c == '.' || c == '-') {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
value := line[:i]
|
||||||
|
reader.Advance(i)
|
||||||
|
if bytes.Equal(value, bytesTrue) {
|
||||||
|
return true, true
|
||||||
|
}
|
||||||
|
if bytes.Equal(value, bytesFalse) {
|
||||||
|
return false, true
|
||||||
|
}
|
||||||
|
if bytes.Equal(value, bytesNull) {
|
||||||
|
return nil, true
|
||||||
|
}
|
||||||
|
return value, true
|
||||||
|
}
|
242
vendor/github.com/yuin/goldmark/parser/atx_heading.go
generated
vendored
Normal file
242
vendor/github.com/yuin/goldmark/parser/atx_heading.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A HeadingConfig struct is a data structure that holds configuration of the renderers related to headings.
|
||||||
|
type HeadingConfig struct {
|
||||||
|
AutoHeadingID bool
|
||||||
|
Attribute bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetOption implements SetOptioner.
|
||||||
|
func (b *HeadingConfig) SetOption(name OptionName, value interface{}) {
|
||||||
|
switch name {
|
||||||
|
case optAutoHeadingID:
|
||||||
|
b.AutoHeadingID = true
|
||||||
|
case optAttribute:
|
||||||
|
b.Attribute = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A HeadingOption interface sets options for heading parsers.
|
||||||
|
type HeadingOption interface {
|
||||||
|
Option
|
||||||
|
SetHeadingOption(*HeadingConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoHeadingID is an option name that enables auto IDs for headings.
|
||||||
|
const optAutoHeadingID OptionName = "AutoHeadingID"
|
||||||
|
|
||||||
|
type withAutoHeadingID struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withAutoHeadingID) SetParserOption(c *Config) {
|
||||||
|
c.Options[optAutoHeadingID] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withAutoHeadingID) SetHeadingOption(p *HeadingConfig) {
|
||||||
|
p.AutoHeadingID = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAutoHeadingID is a functional option that enables custom heading ids and
|
||||||
|
// auto generated heading ids.
|
||||||
|
func WithAutoHeadingID() HeadingOption {
|
||||||
|
return &withAutoHeadingID{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type withHeadingAttribute struct {
|
||||||
|
Option
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withHeadingAttribute) SetHeadingOption(p *HeadingConfig) {
|
||||||
|
p.Attribute = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHeadingAttribute is a functional option that enables custom heading attributes.
|
||||||
|
func WithHeadingAttribute() HeadingOption {
|
||||||
|
return &withHeadingAttribute{WithAttribute()}
|
||||||
|
}
|
||||||
|
|
||||||
|
type atxHeadingParser struct {
|
||||||
|
HeadingConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewATXHeadingParser return a new BlockParser that can parse ATX headings.
|
||||||
|
func NewATXHeadingParser(opts ...HeadingOption) BlockParser {
|
||||||
|
p := &atxHeadingParser{}
|
||||||
|
for _, o := range opts {
|
||||||
|
o.SetHeadingOption(&p.HeadingConfig)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *atxHeadingParser) Trigger() []byte {
|
||||||
|
return []byte{'#'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *atxHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
pos := pc.BlockOffset()
|
||||||
|
if pos < 0 {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
i := pos
|
||||||
|
for ; i < len(line) && line[i] == '#'; i++ {
|
||||||
|
}
|
||||||
|
level := i - pos
|
||||||
|
if i == pos || level > 6 {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
l := util.TrimLeftSpaceLength(line[i:])
|
||||||
|
if l == 0 {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
start := i + l
|
||||||
|
if start >= len(line) {
|
||||||
|
start = len(line) - 1
|
||||||
|
}
|
||||||
|
origstart := start
|
||||||
|
stop := len(line) - util.TrimRightSpaceLength(line)
|
||||||
|
|
||||||
|
node := ast.NewHeading(level)
|
||||||
|
parsed := false
|
||||||
|
if b.Attribute { // handles special case like ### heading ### {#id}
|
||||||
|
start--
|
||||||
|
closureClose := -1
|
||||||
|
closureOpen := -1
|
||||||
|
for j := start; j < stop; {
|
||||||
|
c := line[j]
|
||||||
|
if util.IsEscapedPunctuation(line, j) {
|
||||||
|
j += 2
|
||||||
|
} else if util.IsSpace(c) && j < stop-1 && line[j+1] == '#' {
|
||||||
|
closureOpen = j + 1
|
||||||
|
k := j + 1
|
||||||
|
for ; k < stop && line[k] == '#'; k++ {
|
||||||
|
}
|
||||||
|
closureClose = k
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if closureClose > 0 {
|
||||||
|
reader.Advance(closureClose)
|
||||||
|
attrs, ok := ParseAttributes(reader)
|
||||||
|
parsed = ok
|
||||||
|
if parsed {
|
||||||
|
for _, attr := range attrs {
|
||||||
|
node.SetAttribute(attr.Name, attr.Value)
|
||||||
|
}
|
||||||
|
node.Lines().Append(text.NewSegment(segment.Start+start+1-segment.Padding, segment.Start+closureOpen-segment.Padding))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !parsed {
|
||||||
|
start = origstart
|
||||||
|
stop := len(line) - util.TrimRightSpaceLength(line)
|
||||||
|
if stop <= start { // empty headings like '##[space]'
|
||||||
|
stop = start
|
||||||
|
} else {
|
||||||
|
i = stop - 1
|
||||||
|
for ; line[i] == '#' && i >= start; i-- {
|
||||||
|
}
|
||||||
|
if i != stop-1 && !util.IsSpace(line[i]) {
|
||||||
|
i = stop - 1
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
stop = i
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(util.TrimRight(line[start:stop], []byte{'#'})) != 0 { // empty heading like '### ###'
|
||||||
|
node.Lines().Append(text.NewSegment(segment.Start+start-segment.Padding, segment.Start+stop-segment.Padding))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return node, NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *atxHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *atxHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
if b.Attribute {
|
||||||
|
_, ok := node.AttributeString("id")
|
||||||
|
if !ok {
|
||||||
|
parseLastLineAttributes(node, reader, pc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.AutoHeadingID {
|
||||||
|
id, ok := node.AttributeString("id")
|
||||||
|
if !ok {
|
||||||
|
generateAutoHeadingID(node.(*ast.Heading), reader, pc)
|
||||||
|
} else {
|
||||||
|
pc.IDs().Put(id.([]byte))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *atxHeadingParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *atxHeadingParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateAutoHeadingID(node *ast.Heading, reader text.Reader, pc Context) {
|
||||||
|
var line []byte
|
||||||
|
lastIndex := node.Lines().Len() - 1
|
||||||
|
if lastIndex > -1 {
|
||||||
|
lastLine := node.Lines().At(lastIndex)
|
||||||
|
line = lastLine.Value(reader.Source())
|
||||||
|
}
|
||||||
|
headingID := pc.IDs().Generate(line, ast.KindHeading)
|
||||||
|
node.SetAttribute(attrNameID, headingID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseLastLineAttributes(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
lastIndex := node.Lines().Len() - 1
|
||||||
|
if lastIndex < 0 { // empty headings
|
||||||
|
return
|
||||||
|
}
|
||||||
|
lastLine := node.Lines().At(lastIndex)
|
||||||
|
line := lastLine.Value(reader.Source())
|
||||||
|
lr := text.NewReader(line)
|
||||||
|
var attrs Attributes
|
||||||
|
var ok bool
|
||||||
|
var start text.Segment
|
||||||
|
var sl int
|
||||||
|
var end text.Segment
|
||||||
|
for {
|
||||||
|
c := lr.Peek()
|
||||||
|
if c == text.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if c == '\\' {
|
||||||
|
lr.Advance(1)
|
||||||
|
if lr.Peek() == '{' {
|
||||||
|
lr.Advance(1)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c == '{' {
|
||||||
|
sl, start = lr.Position()
|
||||||
|
attrs, ok = ParseAttributes(lr)
|
||||||
|
_, end = lr.Position()
|
||||||
|
lr.SetPosition(sl, start)
|
||||||
|
}
|
||||||
|
lr.Advance(1)
|
||||||
|
}
|
||||||
|
if ok && util.IsBlank(line[end.Stop:]) {
|
||||||
|
for _, attr := range attrs {
|
||||||
|
node.SetAttribute(attr.Name, attr.Value)
|
||||||
|
}
|
||||||
|
lastLine.Stop = lastLine.Start + start.Start
|
||||||
|
node.Lines().Set(lastIndex, lastLine)
|
||||||
|
}
|
||||||
|
}
|
42
vendor/github.com/yuin/goldmark/parser/auto_link.go
generated
vendored
Normal file
42
vendor/github.com/yuin/goldmark/parser/auto_link.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type autoLinkParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultAutoLinkParser = &autoLinkParser{}
|
||||||
|
|
||||||
|
// NewAutoLinkParser returns a new InlineParser that parses autolinks
|
||||||
|
// surrounded by '<' and '>' .
|
||||||
|
func NewAutoLinkParser() InlineParser {
|
||||||
|
return defaultAutoLinkParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *autoLinkParser) Trigger() []byte {
|
||||||
|
return []byte{'<'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *autoLinkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
stop := util.FindEmailIndex(line[1:])
|
||||||
|
typ := ast.AutoLinkType(ast.AutoLinkEmail)
|
||||||
|
if stop < 0 {
|
||||||
|
stop = util.FindURLIndex(line[1:])
|
||||||
|
typ = ast.AutoLinkURL
|
||||||
|
}
|
||||||
|
if stop < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
stop++
|
||||||
|
if stop >= len(line) || line[stop] != '>' {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
value := ast.NewTextSegment(text.NewSegment(segment.Start+1, segment.Start+stop))
|
||||||
|
block.Advance(stop + 1)
|
||||||
|
return ast.NewAutoLink(typ, value)
|
||||||
|
}
|
69
vendor/github.com/yuin/goldmark/parser/blockquote.go
generated
vendored
Normal file
69
vendor/github.com/yuin/goldmark/parser/blockquote.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type blockquoteParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultBlockquoteParser = &blockquoteParser{}
|
||||||
|
|
||||||
|
// NewBlockquoteParser returns a new BlockParser that
|
||||||
|
// parses blockquotes.
|
||||||
|
func NewBlockquoteParser() BlockParser {
|
||||||
|
return defaultBlockquoteParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blockquoteParser) process(reader text.Reader) bool {
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
w, pos := util.IndentWidth(line, reader.LineOffset())
|
||||||
|
if w > 3 || pos >= len(line) || line[pos] != '>' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
pos++
|
||||||
|
if pos >= len(line) || line[pos] == '\n' {
|
||||||
|
reader.Advance(pos)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if line[pos] == ' ' || line[pos] == '\t' {
|
||||||
|
pos++
|
||||||
|
}
|
||||||
|
reader.Advance(pos)
|
||||||
|
if line[pos-1] == '\t' {
|
||||||
|
reader.SetPadding(2)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blockquoteParser) Trigger() []byte {
|
||||||
|
return []byte{'>'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blockquoteParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
if b.process(reader) {
|
||||||
|
return ast.NewBlockquote(), HasChildren
|
||||||
|
}
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blockquoteParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
if b.process(reader) {
|
||||||
|
return Continue | HasChildren
|
||||||
|
}
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blockquoteParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blockquoteParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *blockquoteParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
79
vendor/github.com/yuin/goldmark/parser/code_block.go
generated
vendored
Normal file
79
vendor/github.com/yuin/goldmark/parser/code_block.go
generated
vendored
Normal file
|
@ -0,0 +1,79 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type codeBlockParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeBlockParser is a BlockParser implementation that parses indented code blocks.
|
||||||
|
var defaultCodeBlockParser = &codeBlockParser{}
|
||||||
|
|
||||||
|
// NewCodeBlockParser returns a new BlockParser that
|
||||||
|
// parses code blocks.
|
||||||
|
func NewCodeBlockParser() BlockParser {
|
||||||
|
return defaultCodeBlockParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *codeBlockParser) Trigger() []byte {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *codeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
|
||||||
|
if pos < 0 || util.IsBlank(line) {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
node := ast.NewCodeBlock()
|
||||||
|
reader.AdvanceAndSetPadding(pos, padding)
|
||||||
|
_, segment = reader.PeekLine()
|
||||||
|
node.Lines().Append(segment)
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return node, NoChildren
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *codeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
if util.IsBlank(line) {
|
||||||
|
node.Lines().Append(segment.TrimLeftSpaceWidth(4, reader.Source()))
|
||||||
|
return Continue | NoChildren
|
||||||
|
}
|
||||||
|
pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
|
||||||
|
if pos < 0 {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
reader.AdvanceAndSetPadding(pos, padding)
|
||||||
|
_, segment = reader.PeekLine()
|
||||||
|
node.Lines().Append(segment)
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return Continue | NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *codeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
// trim trailing blank lines
|
||||||
|
lines := node.Lines()
|
||||||
|
length := lines.Len() - 1
|
||||||
|
source := reader.Source()
|
||||||
|
for length >= 0 {
|
||||||
|
line := lines.At(length)
|
||||||
|
if util.IsBlank(line.Value(source)) {
|
||||||
|
length--
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lines.SetSliced(0, length+1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *codeBlockParser) CanInterruptParagraph() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *codeBlockParser) CanAcceptIndentedLine() bool {
|
||||||
|
return true
|
||||||
|
}
|
83
vendor/github.com/yuin/goldmark/parser/code_span.go
generated
vendored
Normal file
83
vendor/github.com/yuin/goldmark/parser/code_span.go
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type codeSpanParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultCodeSpanParser = &codeSpanParser{}
|
||||||
|
|
||||||
|
// NewCodeSpanParser return a new InlineParser that parses inline codes
|
||||||
|
// surrounded by '`' .
|
||||||
|
func NewCodeSpanParser() InlineParser {
|
||||||
|
return defaultCodeSpanParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *codeSpanParser) Trigger() []byte {
|
||||||
|
return []byte{'`'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *codeSpanParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||||
|
line, startSegment := block.PeekLine()
|
||||||
|
opener := 0
|
||||||
|
for ; opener < len(line) && line[opener] == '`'; opener++ {
|
||||||
|
}
|
||||||
|
block.Advance(opener)
|
||||||
|
l, pos := block.Position()
|
||||||
|
node := ast.NewCodeSpan()
|
||||||
|
for {
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
block.SetPosition(l, pos)
|
||||||
|
return ast.NewTextSegment(startSegment.WithStop(startSegment.Start + opener))
|
||||||
|
}
|
||||||
|
for i := 0; i < len(line); i++ {
|
||||||
|
c := line[i]
|
||||||
|
if c == '`' {
|
||||||
|
oldi := i
|
||||||
|
for ; i < len(line) && line[i] == '`'; i++ {
|
||||||
|
}
|
||||||
|
closure := i - oldi
|
||||||
|
if closure == opener && (i >= len(line) || line[i] != '`') {
|
||||||
|
segment = segment.WithStop(segment.Start + i - closure)
|
||||||
|
if !segment.IsEmpty() {
|
||||||
|
node.AppendChild(node, ast.NewRawTextSegment(segment))
|
||||||
|
}
|
||||||
|
block.Advance(i)
|
||||||
|
goto end
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !util.IsBlank(line) {
|
||||||
|
node.AppendChild(node, ast.NewRawTextSegment(segment))
|
||||||
|
}
|
||||||
|
block.AdvanceLine()
|
||||||
|
}
|
||||||
|
end:
|
||||||
|
if !node.IsBlank(block.Source()) {
|
||||||
|
// trim first halfspace and last halfspace
|
||||||
|
segment := node.FirstChild().(*ast.Text).Segment
|
||||||
|
shouldTrimmed := true
|
||||||
|
if !(!segment.IsEmpty() && block.Source()[segment.Start] == ' ') {
|
||||||
|
shouldTrimmed = false
|
||||||
|
}
|
||||||
|
segment = node.LastChild().(*ast.Text).Segment
|
||||||
|
if !(!segment.IsEmpty() && block.Source()[segment.Stop-1] == ' ') {
|
||||||
|
shouldTrimmed = false
|
||||||
|
}
|
||||||
|
if shouldTrimmed {
|
||||||
|
t := node.FirstChild().(*ast.Text)
|
||||||
|
segment := t.Segment
|
||||||
|
t.Segment = segment.WithStart(segment.Start + 1)
|
||||||
|
t = node.LastChild().(*ast.Text)
|
||||||
|
segment = node.LastChild().(*ast.Text).Segment
|
||||||
|
t.Segment = segment.WithStop(segment.Stop - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
242
vendor/github.com/yuin/goldmark/parser/delimiter.go
generated
vendored
Normal file
242
vendor/github.com/yuin/goldmark/parser/delimiter.go
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A DelimiterProcessor interface provides a set of functions about
|
||||||
|
// Deliiter nodes.
|
||||||
|
type DelimiterProcessor interface {
|
||||||
|
// IsDelimiter returns true if given character is a delimiter, otherwise false.
|
||||||
|
IsDelimiter(byte) bool
|
||||||
|
|
||||||
|
// CanOpenCloser returns true if given opener can close given closer, otherwise false.
|
||||||
|
CanOpenCloser(opener, closer *Delimiter) bool
|
||||||
|
|
||||||
|
// OnMatch will be called when new matched delimiter found.
|
||||||
|
// OnMatch should return a new Node correspond to the matched delimiter.
|
||||||
|
OnMatch(consumes int) ast.Node
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Delimiter struct represents a delimiter like '*' of the Markdown text.
|
||||||
|
type Delimiter struct {
|
||||||
|
ast.BaseInline
|
||||||
|
|
||||||
|
Segment text.Segment
|
||||||
|
|
||||||
|
// CanOpen is set true if this delimiter can open a span for a new node.
|
||||||
|
// See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
|
||||||
|
CanOpen bool
|
||||||
|
|
||||||
|
// CanClose is set true if this delimiter can close a span for a new node.
|
||||||
|
// See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
|
||||||
|
CanClose bool
|
||||||
|
|
||||||
|
// Length is a remaining length of this delmiter.
|
||||||
|
Length int
|
||||||
|
|
||||||
|
// OriginalLength is a original length of this delimiter.
|
||||||
|
OriginalLength int
|
||||||
|
|
||||||
|
// Char is a character of this delimiter.
|
||||||
|
Char byte
|
||||||
|
|
||||||
|
// PreviousDelimiter is a previous sibling delimiter node of this delimiter.
|
||||||
|
PreviousDelimiter *Delimiter
|
||||||
|
|
||||||
|
// NextDelimiter is a next sibling delimiter node of this delimiter.
|
||||||
|
NextDelimiter *Delimiter
|
||||||
|
|
||||||
|
// Processor is a DelimiterProcessor associated with this delimiter.
|
||||||
|
Processor DelimiterProcessor
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inline implements Inline.Inline.
|
||||||
|
func (d *Delimiter) Inline() {}
|
||||||
|
|
||||||
|
// Dump implements Node.Dump.
|
||||||
|
func (d *Delimiter) Dump(source []byte, level int) {
|
||||||
|
fmt.Printf("%sDelimiter: \"%s\"\n", strings.Repeat(" ", level), string(d.Text(source)))
|
||||||
|
}
|
||||||
|
|
||||||
|
var kindDelimiter = ast.NewNodeKind("Delimiter")
|
||||||
|
|
||||||
|
// Kind implements Node.Kind
|
||||||
|
func (d *Delimiter) Kind() ast.NodeKind {
|
||||||
|
return kindDelimiter
|
||||||
|
}
|
||||||
|
|
||||||
|
// Text implements Node.Text
|
||||||
|
func (d *Delimiter) Text(source []byte) []byte {
|
||||||
|
return d.Segment.Value(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeCharacters consumes delimiters.
|
||||||
|
func (d *Delimiter) ConsumeCharacters(n int) {
|
||||||
|
d.Length -= n
|
||||||
|
d.Segment = d.Segment.WithStop(d.Segment.Start + d.Length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CalcComsumption calculates how many characters should be used for opening
|
||||||
|
// a new span correspond to given closer.
|
||||||
|
func (d *Delimiter) CalcComsumption(closer *Delimiter) int {
|
||||||
|
if (d.CanClose || closer.CanOpen) && (d.OriginalLength+closer.OriginalLength)%3 == 0 && closer.OriginalLength%3 != 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
if d.Length >= 2 && closer.Length >= 2 {
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDelimiter returns a new Delimiter node.
|
||||||
|
func NewDelimiter(canOpen, canClose bool, length int, char byte, processor DelimiterProcessor) *Delimiter {
|
||||||
|
c := &Delimiter{
|
||||||
|
BaseInline: ast.BaseInline{},
|
||||||
|
CanOpen: canOpen,
|
||||||
|
CanClose: canClose,
|
||||||
|
Length: length,
|
||||||
|
OriginalLength: length,
|
||||||
|
Char: char,
|
||||||
|
PreviousDelimiter: nil,
|
||||||
|
NextDelimiter: nil,
|
||||||
|
Processor: processor,
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScanDelimiter scans a delimiter by given DelimiterProcessor.
|
||||||
|
func ScanDelimiter(line []byte, before rune, min int, processor DelimiterProcessor) *Delimiter {
|
||||||
|
i := 0
|
||||||
|
c := line[i]
|
||||||
|
j := i
|
||||||
|
if !processor.IsDelimiter(c) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for ; j < len(line) && c == line[j]; j++ {
|
||||||
|
}
|
||||||
|
if (j - i) >= min {
|
||||||
|
after := rune(' ')
|
||||||
|
if j != len(line) {
|
||||||
|
after = util.ToRune(line, j)
|
||||||
|
}
|
||||||
|
|
||||||
|
canOpen, canClose := false, false
|
||||||
|
beforeIsPunctuation := unicode.IsPunct(before)
|
||||||
|
beforeIsWhitespace := unicode.IsSpace(before)
|
||||||
|
afterIsPunctuation := unicode.IsPunct(after)
|
||||||
|
afterIsWhitespace := unicode.IsSpace(after)
|
||||||
|
|
||||||
|
isLeft := !afterIsWhitespace &&
|
||||||
|
(!afterIsPunctuation || beforeIsWhitespace || beforeIsPunctuation)
|
||||||
|
isRight := !beforeIsWhitespace &&
|
||||||
|
(!beforeIsPunctuation || afterIsWhitespace || afterIsPunctuation)
|
||||||
|
|
||||||
|
if line[i] == '_' {
|
||||||
|
canOpen = isLeft && (!isRight || beforeIsPunctuation)
|
||||||
|
canClose = isRight && (!isLeft || afterIsPunctuation)
|
||||||
|
} else {
|
||||||
|
canOpen = isLeft
|
||||||
|
canClose = isRight
|
||||||
|
}
|
||||||
|
return NewDelimiter(canOpen, canClose, j-i, c, processor)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessDelimiters processes the delimiter list in the context.
|
||||||
|
// Processing will be stop when reaching the bottom.
|
||||||
|
//
|
||||||
|
// If you implement an inline parser that can have other inline nodes as
|
||||||
|
// children, you should call this function when nesting span has closed.
|
||||||
|
func ProcessDelimiters(bottom ast.Node, pc Context) {
|
||||||
|
lastDelimiter := pc.LastDelimiter()
|
||||||
|
if lastDelimiter == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var closer *Delimiter
|
||||||
|
if bottom != nil {
|
||||||
|
if bottom != lastDelimiter {
|
||||||
|
for c := lastDelimiter.PreviousSibling(); c != nil; {
|
||||||
|
if d, ok := c.(*Delimiter); ok {
|
||||||
|
closer = d
|
||||||
|
}
|
||||||
|
prev := c.PreviousSibling()
|
||||||
|
if prev == bottom {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
c = prev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
closer = pc.FirstDelimiter()
|
||||||
|
}
|
||||||
|
if closer == nil {
|
||||||
|
pc.ClearDelimiters(bottom)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for closer != nil {
|
||||||
|
if !closer.CanClose {
|
||||||
|
closer = closer.NextDelimiter
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
consume := 0
|
||||||
|
found := false
|
||||||
|
maybeOpener := false
|
||||||
|
var opener *Delimiter
|
||||||
|
for opener = closer.PreviousDelimiter; opener != nil; opener = opener.PreviousDelimiter {
|
||||||
|
if opener.CanOpen && opener.Processor.CanOpenCloser(opener, closer) {
|
||||||
|
maybeOpener = true
|
||||||
|
consume = opener.CalcComsumption(closer)
|
||||||
|
if consume > 0 {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
if !maybeOpener && !closer.CanOpen {
|
||||||
|
pc.RemoveDelimiter(closer)
|
||||||
|
}
|
||||||
|
closer = closer.NextDelimiter
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
opener.ConsumeCharacters(consume)
|
||||||
|
closer.ConsumeCharacters(consume)
|
||||||
|
|
||||||
|
node := opener.Processor.OnMatch(consume)
|
||||||
|
|
||||||
|
parent := opener.Parent()
|
||||||
|
child := opener.NextSibling()
|
||||||
|
|
||||||
|
for child != nil && child != closer {
|
||||||
|
next := child.NextSibling()
|
||||||
|
node.AppendChild(node, child)
|
||||||
|
child = next
|
||||||
|
}
|
||||||
|
parent.InsertAfter(parent, opener, node)
|
||||||
|
|
||||||
|
for c := opener.NextDelimiter; c != nil && c != closer; {
|
||||||
|
next := c.NextDelimiter
|
||||||
|
pc.RemoveDelimiter(c)
|
||||||
|
c = next
|
||||||
|
}
|
||||||
|
|
||||||
|
if opener.Length == 0 {
|
||||||
|
pc.RemoveDelimiter(opener)
|
||||||
|
}
|
||||||
|
|
||||||
|
if closer.Length == 0 {
|
||||||
|
next := closer.NextDelimiter
|
||||||
|
pc.RemoveDelimiter(closer)
|
||||||
|
closer = next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pc.ClearDelimiters(bottom)
|
||||||
|
}
|
50
vendor/github.com/yuin/goldmark/parser/emphasis.go
generated
vendored
Normal file
50
vendor/github.com/yuin/goldmark/parser/emphasis.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
)
|
||||||
|
|
||||||
|
type emphasisDelimiterProcessor struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *emphasisDelimiterProcessor) IsDelimiter(b byte) bool {
|
||||||
|
return b == '*' || b == '_'
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *emphasisDelimiterProcessor) CanOpenCloser(opener, closer *Delimiter) bool {
|
||||||
|
return opener.Char == closer.Char
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *emphasisDelimiterProcessor) OnMatch(consumes int) ast.Node {
|
||||||
|
return ast.NewEmphasis(consumes)
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultEmphasisDelimiterProcessor = &emphasisDelimiterProcessor{}
|
||||||
|
|
||||||
|
type emphasisParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultEmphasisParser = &emphasisParser{}
|
||||||
|
|
||||||
|
// NewEmphasisParser return a new InlineParser that parses emphasises.
|
||||||
|
func NewEmphasisParser() InlineParser {
|
||||||
|
return defaultEmphasisParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *emphasisParser) Trigger() []byte {
|
||||||
|
return []byte{'*', '_'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *emphasisParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||||
|
before := block.PrecendingCharacter()
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
node := ScanDelimiter(line, before, 1, defaultEmphasisDelimiterProcessor)
|
||||||
|
if node == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
|
||||||
|
block.Advance(node.OriginalLength)
|
||||||
|
pc.PushDelimiter(node)
|
||||||
|
return node
|
||||||
|
}
|
110
vendor/github.com/yuin/goldmark/parser/fcode_block.go
generated
vendored
Normal file
110
vendor/github.com/yuin/goldmark/parser/fcode_block.go
generated
vendored
Normal file
|
@ -0,0 +1,110 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type fencedCodeBlockParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultFencedCodeBlockParser = &fencedCodeBlockParser{}
|
||||||
|
|
||||||
|
// NewFencedCodeBlockParser returns a new BlockParser that
|
||||||
|
// parses fenced code blocks.
|
||||||
|
func NewFencedCodeBlockParser() BlockParser {
|
||||||
|
return defaultFencedCodeBlockParser
|
||||||
|
}
|
||||||
|
|
||||||
|
type fenceData struct {
|
||||||
|
char byte
|
||||||
|
indent int
|
||||||
|
length int
|
||||||
|
node ast.Node
|
||||||
|
}
|
||||||
|
|
||||||
|
var fencedCodeBlockInfoKey = NewContextKey()
|
||||||
|
|
||||||
|
func (b *fencedCodeBlockParser) Trigger() []byte {
|
||||||
|
return []byte{'~', '`'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *fencedCodeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
pos := pc.BlockOffset()
|
||||||
|
if pos < 0 || (line[pos] != '`' && line[pos] != '~') {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
findent := pos
|
||||||
|
fenceChar := line[pos]
|
||||||
|
i := pos
|
||||||
|
for ; i < len(line) && line[i] == fenceChar; i++ {
|
||||||
|
}
|
||||||
|
oFenceLength := i - pos
|
||||||
|
if oFenceLength < 3 {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
var info *ast.Text
|
||||||
|
if i < len(line)-1 {
|
||||||
|
rest := line[i:]
|
||||||
|
left := util.TrimLeftSpaceLength(rest)
|
||||||
|
right := util.TrimRightSpaceLength(rest)
|
||||||
|
if left < len(rest)-right {
|
||||||
|
infoStart, infoStop := segment.Start-segment.Padding+i+left, segment.Stop-right
|
||||||
|
value := rest[left : len(rest)-right]
|
||||||
|
if fenceChar == '`' && bytes.IndexByte(value, '`') > -1 {
|
||||||
|
return nil, NoChildren
|
||||||
|
} else if infoStart != infoStop {
|
||||||
|
info = ast.NewTextSegment(text.NewSegment(infoStart, infoStop))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
node := ast.NewFencedCodeBlock(info)
|
||||||
|
pc.Set(fencedCodeBlockInfoKey, &fenceData{fenceChar, findent, oFenceLength, node})
|
||||||
|
return node, NoChildren
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *fencedCodeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
|
||||||
|
w, pos := util.IndentWidth(line, reader.LineOffset())
|
||||||
|
if w < 4 {
|
||||||
|
i := pos
|
||||||
|
for ; i < len(line) && line[i] == fdata.char; i++ {
|
||||||
|
}
|
||||||
|
length := i - pos
|
||||||
|
if length >= fdata.length && util.IsBlank(line[i:]) {
|
||||||
|
newline := 1
|
||||||
|
if line[len(line)-1] != '\n' {
|
||||||
|
newline = 0
|
||||||
|
}
|
||||||
|
reader.Advance(segment.Stop - segment.Start - newline - segment.Padding)
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pos, padding := util.DedentPositionPadding(line, reader.LineOffset(), segment.Padding, fdata.indent)
|
||||||
|
|
||||||
|
seg := text.NewSegmentPadding(segment.Start+pos, segment.Stop, padding)
|
||||||
|
node.Lines().Append(seg)
|
||||||
|
reader.AdvanceAndSetPadding(segment.Stop-segment.Start-pos-1, padding)
|
||||||
|
return Continue | NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *fencedCodeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
|
||||||
|
if fdata.node == node {
|
||||||
|
pc.Set(fencedCodeBlockInfoKey, nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *fencedCodeBlockParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *fencedCodeBlockParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
228
vendor/github.com/yuin/goldmark/parser/html_block.go
generated
vendored
Normal file
228
vendor/github.com/yuin/goldmark/parser/html_block.go
generated
vendored
Normal file
|
@ -0,0 +1,228 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
var allowedBlockTags = map[string]bool{
|
||||||
|
"address": true,
|
||||||
|
"article": true,
|
||||||
|
"aside": true,
|
||||||
|
"base": true,
|
||||||
|
"basefont": true,
|
||||||
|
"blockquote": true,
|
||||||
|
"body": true,
|
||||||
|
"caption": true,
|
||||||
|
"center": true,
|
||||||
|
"col": true,
|
||||||
|
"colgroup": true,
|
||||||
|
"dd": true,
|
||||||
|
"details": true,
|
||||||
|
"dialog": true,
|
||||||
|
"dir": true,
|
||||||
|
"div": true,
|
||||||
|
"dl": true,
|
||||||
|
"dt": true,
|
||||||
|
"fieldset": true,
|
||||||
|
"figcaption": true,
|
||||||
|
"figure": true,
|
||||||
|
"footer": true,
|
||||||
|
"form": true,
|
||||||
|
"frame": true,
|
||||||
|
"frameset": true,
|
||||||
|
"h1": true,
|
||||||
|
"h2": true,
|
||||||
|
"h3": true,
|
||||||
|
"h4": true,
|
||||||
|
"h5": true,
|
||||||
|
"h6": true,
|
||||||
|
"head": true,
|
||||||
|
"header": true,
|
||||||
|
"hr": true,
|
||||||
|
"html": true,
|
||||||
|
"iframe": true,
|
||||||
|
"legend": true,
|
||||||
|
"li": true,
|
||||||
|
"link": true,
|
||||||
|
"main": true,
|
||||||
|
"menu": true,
|
||||||
|
"menuitem": true,
|
||||||
|
"meta": true,
|
||||||
|
"nav": true,
|
||||||
|
"noframes": true,
|
||||||
|
"ol": true,
|
||||||
|
"optgroup": true,
|
||||||
|
"option": true,
|
||||||
|
"p": true,
|
||||||
|
"param": true,
|
||||||
|
"section": true,
|
||||||
|
"source": true,
|
||||||
|
"summary": true,
|
||||||
|
"table": true,
|
||||||
|
"tbody": true,
|
||||||
|
"td": true,
|
||||||
|
"tfoot": true,
|
||||||
|
"th": true,
|
||||||
|
"thead": true,
|
||||||
|
"title": true,
|
||||||
|
"tr": true,
|
||||||
|
"track": true,
|
||||||
|
"ul": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
var htmlBlockType1OpenRegexp = regexp.MustCompile(`(?i)^[ ]{0,3}<(script|pre|style)(?:\s.*|>.*|/>.*|)\n?$`)
|
||||||
|
var htmlBlockType1CloseRegexp = regexp.MustCompile(`(?i)^.*</(?:script|pre|style)>.*`)
|
||||||
|
|
||||||
|
var htmlBlockType2OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<!\-\-`)
|
||||||
|
var htmlBlockType2Close = []byte{'-', '-', '>'}
|
||||||
|
|
||||||
|
var htmlBlockType3OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\?`)
|
||||||
|
var htmlBlockType3Close = []byte{'?', '>'}
|
||||||
|
|
||||||
|
var htmlBlockType4OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<![A-Z]+.*\n?$`)
|
||||||
|
var htmlBlockType4Close = []byte{'>'}
|
||||||
|
|
||||||
|
var htmlBlockType5OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\!\[CDATA\[`)
|
||||||
|
var htmlBlockType5Close = []byte{']', ']', '>'}
|
||||||
|
|
||||||
|
var htmlBlockType6Regexp = regexp.MustCompile(`^[ ]{0,3}</?([a-zA-Z0-9]+)(?:\s.*|>.*|/>.*|)\n?$`)
|
||||||
|
|
||||||
|
var htmlBlockType7Regexp = regexp.MustCompile(`^[ ]{0,3}<(/)?([a-zA-Z0-9]+)(` + attributePattern + `*)(:?>|/>)\s*\n?$`)
|
||||||
|
|
||||||
|
type htmlBlockParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultHTMLBlockParser = &htmlBlockParser{}
|
||||||
|
|
||||||
|
// NewHTMLBlockParser return a new BlockParser that can parse html
|
||||||
|
// blocks.
|
||||||
|
func NewHTMLBlockParser() BlockParser {
|
||||||
|
return defaultHTMLBlockParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *htmlBlockParser) Trigger() []byte {
|
||||||
|
return []byte{'<'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *htmlBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
var node *ast.HTMLBlock
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
last := pc.LastOpenedBlock().Node
|
||||||
|
if pos := pc.BlockOffset(); pos < 0 || line[pos] != '<' {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
if m := htmlBlockType1OpenRegexp.FindSubmatchIndex(line); m != nil {
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType1)
|
||||||
|
} else if htmlBlockType2OpenRegexp.Match(line) {
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType2)
|
||||||
|
} else if htmlBlockType3OpenRegexp.Match(line) {
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType3)
|
||||||
|
} else if htmlBlockType4OpenRegexp.Match(line) {
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType4)
|
||||||
|
} else if htmlBlockType5OpenRegexp.Match(line) {
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType5)
|
||||||
|
} else if match := htmlBlockType7Regexp.FindSubmatchIndex(line); match != nil {
|
||||||
|
isCloseTag := match[2] > -1 && bytes.Equal(line[match[2]:match[3]], []byte("/"))
|
||||||
|
hasAttr := match[6] != match[7]
|
||||||
|
tagName := strings.ToLower(string(line[match[4]:match[5]]))
|
||||||
|
_, ok := allowedBlockTags[tagName]
|
||||||
|
if ok {
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType6)
|
||||||
|
} else if tagName != "script" && tagName != "style" && tagName != "pre" && !ast.IsParagraph(last) && !(isCloseTag && hasAttr) { // type 7 can not interrupt paragraph
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType7)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if node == nil {
|
||||||
|
if match := htmlBlockType6Regexp.FindSubmatchIndex(line); match != nil {
|
||||||
|
tagName := string(line[match[2]:match[3]])
|
||||||
|
_, ok := allowedBlockTags[strings.ToLower(tagName)]
|
||||||
|
if ok {
|
||||||
|
node = ast.NewHTMLBlock(ast.HTMLBlockType6)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if node != nil {
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
node.Lines().Append(segment)
|
||||||
|
return node, NoChildren
|
||||||
|
}
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *htmlBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
htmlBlock := node.(*ast.HTMLBlock)
|
||||||
|
lines := htmlBlock.Lines()
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
var closurePattern []byte
|
||||||
|
|
||||||
|
switch htmlBlock.HTMLBlockType {
|
||||||
|
case ast.HTMLBlockType1:
|
||||||
|
if lines.Len() == 1 {
|
||||||
|
firstLine := lines.At(0)
|
||||||
|
if htmlBlockType1CloseRegexp.Match(firstLine.Value(reader.Source())) {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if htmlBlockType1CloseRegexp.Match(line) {
|
||||||
|
htmlBlock.ClosureLine = segment
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
case ast.HTMLBlockType2:
|
||||||
|
closurePattern = htmlBlockType2Close
|
||||||
|
fallthrough
|
||||||
|
case ast.HTMLBlockType3:
|
||||||
|
if closurePattern == nil {
|
||||||
|
closurePattern = htmlBlockType3Close
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
case ast.HTMLBlockType4:
|
||||||
|
if closurePattern == nil {
|
||||||
|
closurePattern = htmlBlockType4Close
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
case ast.HTMLBlockType5:
|
||||||
|
if closurePattern == nil {
|
||||||
|
closurePattern = htmlBlockType5Close
|
||||||
|
}
|
||||||
|
|
||||||
|
if lines.Len() == 1 {
|
||||||
|
firstLine := lines.At(0)
|
||||||
|
if bytes.Contains(firstLine.Value(reader.Source()), closurePattern) {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if bytes.Contains(line, closurePattern) {
|
||||||
|
htmlBlock.ClosureLine = segment
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
|
||||||
|
case ast.HTMLBlockType6, ast.HTMLBlockType7:
|
||||||
|
if util.IsBlank(line) {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
}
|
||||||
|
node.Lines().Append(segment)
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return Continue | NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *htmlBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *htmlBlockParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *htmlBlockParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
375
vendor/github.com/yuin/goldmark/parser/link.go
generated
vendored
Normal file
375
vendor/github.com/yuin/goldmark/parser/link.go
generated
vendored
Normal file
|
@ -0,0 +1,375 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
var linkLabelStateKey = NewContextKey()
|
||||||
|
|
||||||
|
type linkLabelState struct {
|
||||||
|
ast.BaseInline
|
||||||
|
|
||||||
|
Segment text.Segment
|
||||||
|
|
||||||
|
IsImage bool
|
||||||
|
|
||||||
|
Prev *linkLabelState
|
||||||
|
|
||||||
|
Next *linkLabelState
|
||||||
|
|
||||||
|
First *linkLabelState
|
||||||
|
|
||||||
|
Last *linkLabelState
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLinkLabelState(segment text.Segment, isImage bool) *linkLabelState {
|
||||||
|
return &linkLabelState{
|
||||||
|
Segment: segment,
|
||||||
|
IsImage: isImage,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkLabelState) Text(source []byte) []byte {
|
||||||
|
return s.Segment.Value(source)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkLabelState) Dump(source []byte, level int) {
|
||||||
|
fmt.Printf("%slinkLabelState: \"%s\"\n", strings.Repeat(" ", level), s.Text(source))
|
||||||
|
}
|
||||||
|
|
||||||
|
var kindLinkLabelState = ast.NewNodeKind("LinkLabelState")
|
||||||
|
|
||||||
|
func (s *linkLabelState) Kind() ast.NodeKind {
|
||||||
|
return kindLinkLabelState
|
||||||
|
}
|
||||||
|
|
||||||
|
func pushLinkLabelState(pc Context, v *linkLabelState) {
|
||||||
|
tlist := pc.Get(linkLabelStateKey)
|
||||||
|
var list *linkLabelState
|
||||||
|
if tlist == nil {
|
||||||
|
list = v
|
||||||
|
v.First = v
|
||||||
|
v.Last = v
|
||||||
|
pc.Set(linkLabelStateKey, list)
|
||||||
|
} else {
|
||||||
|
list = tlist.(*linkLabelState)
|
||||||
|
l := list.Last
|
||||||
|
list.Last = v
|
||||||
|
l.Next = v
|
||||||
|
v.Prev = l
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func removeLinkLabelState(pc Context, d *linkLabelState) {
|
||||||
|
tlist := pc.Get(linkLabelStateKey)
|
||||||
|
var list *linkLabelState
|
||||||
|
if tlist == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
list = tlist.(*linkLabelState)
|
||||||
|
|
||||||
|
if d.Prev == nil {
|
||||||
|
list = d.Next
|
||||||
|
if list != nil {
|
||||||
|
list.First = d
|
||||||
|
list.Last = d.Last
|
||||||
|
list.Prev = nil
|
||||||
|
pc.Set(linkLabelStateKey, list)
|
||||||
|
} else {
|
||||||
|
pc.Set(linkLabelStateKey, nil)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
d.Prev.Next = d.Next
|
||||||
|
if d.Next != nil {
|
||||||
|
d.Next.Prev = d.Prev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if list != nil && d.Next == nil {
|
||||||
|
list.Last = d.Prev
|
||||||
|
}
|
||||||
|
d.Next = nil
|
||||||
|
d.Prev = nil
|
||||||
|
d.First = nil
|
||||||
|
d.Last = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type linkParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultLinkParser = &linkParser{}
|
||||||
|
|
||||||
|
// NewLinkParser return a new InlineParser that parses links.
|
||||||
|
func NewLinkParser() InlineParser {
|
||||||
|
return defaultLinkParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkParser) Trigger() []byte {
|
||||||
|
return []byte{'!', '[', ']'}
|
||||||
|
}
|
||||||
|
|
||||||
|
var linkDestinationRegexp = regexp.MustCompile(`\s*([^\s].+)`)
|
||||||
|
var linkTitleRegexp = regexp.MustCompile(`\s+(\)|["'\(].+)`)
|
||||||
|
var linkBottom = NewContextKey()
|
||||||
|
|
||||||
|
func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
if line[0] == '!' {
|
||||||
|
if len(line) > 1 && line[1] == '[' {
|
||||||
|
block.Advance(1)
|
||||||
|
pc.Set(linkBottom, pc.LastDelimiter())
|
||||||
|
return processLinkLabelOpen(block, segment.Start+1, true, pc)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if line[0] == '[' {
|
||||||
|
pc.Set(linkBottom, pc.LastDelimiter())
|
||||||
|
return processLinkLabelOpen(block, segment.Start, false, pc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// line[0] == ']'
|
||||||
|
tlist := pc.Get(linkLabelStateKey)
|
||||||
|
if tlist == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
last := tlist.(*linkLabelState).Last
|
||||||
|
if last == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
block.Advance(1)
|
||||||
|
removeLinkLabelState(pc, last)
|
||||||
|
if s.containsLink(last) { // a link in a link text is not allowed
|
||||||
|
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
labelValue := block.Value(text.NewSegment(last.Segment.Start+1, segment.Start))
|
||||||
|
if util.IsBlank(labelValue) && !last.IsImage {
|
||||||
|
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
c := block.Peek()
|
||||||
|
l, pos := block.Position()
|
||||||
|
var link *ast.Link
|
||||||
|
var hasValue bool
|
||||||
|
if c == '(' { // normal link
|
||||||
|
link = s.parseLink(parent, last, block, pc)
|
||||||
|
} else if c == '[' { // reference link
|
||||||
|
link, hasValue = s.parseReferenceLink(parent, last, block, pc)
|
||||||
|
if link == nil && hasValue {
|
||||||
|
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if link == nil {
|
||||||
|
// maybe shortcut reference link
|
||||||
|
block.SetPosition(l, pos)
|
||||||
|
ssegment := text.NewSegment(last.Segment.Stop, segment.Start)
|
||||||
|
maybeReference := block.Value(ssegment)
|
||||||
|
ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
|
||||||
|
if !ok {
|
||||||
|
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
link = ast.NewLink()
|
||||||
|
s.processLinkLabel(parent, link, last, pc)
|
||||||
|
link.Title = ref.Title()
|
||||||
|
link.Destination = ref.Destination()
|
||||||
|
}
|
||||||
|
if last.IsImage {
|
||||||
|
last.Parent().RemoveChild(last.Parent(), last)
|
||||||
|
return ast.NewImage(link)
|
||||||
|
}
|
||||||
|
last.Parent().RemoveChild(last.Parent(), last)
|
||||||
|
return link
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkParser) containsLink(last *linkLabelState) bool {
|
||||||
|
if last.IsImage {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var c ast.Node
|
||||||
|
for c = last; c != nil; c = c.NextSibling() {
|
||||||
|
if _, ok := c.(*ast.Link); ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func processLinkLabelOpen(block text.Reader, pos int, isImage bool, pc Context) *linkLabelState {
|
||||||
|
start := pos
|
||||||
|
if isImage {
|
||||||
|
start--
|
||||||
|
}
|
||||||
|
state := newLinkLabelState(text.NewSegment(start, pos+1), isImage)
|
||||||
|
pushLinkLabelState(pc, state)
|
||||||
|
block.Advance(1)
|
||||||
|
return state
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkParser) processLinkLabel(parent ast.Node, link *ast.Link, last *linkLabelState, pc Context) {
|
||||||
|
var bottom ast.Node
|
||||||
|
if v := pc.Get(linkBottom); v != nil {
|
||||||
|
bottom = v.(ast.Node)
|
||||||
|
}
|
||||||
|
pc.Set(linkBottom, nil)
|
||||||
|
ProcessDelimiters(bottom, pc)
|
||||||
|
for c := last.NextSibling(); c != nil; {
|
||||||
|
next := c.NextSibling()
|
||||||
|
parent.RemoveChild(parent, c)
|
||||||
|
link.AppendChild(link, c)
|
||||||
|
c = next
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkParser) parseReferenceLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) (*ast.Link, bool) {
|
||||||
|
_, orgpos := block.Position()
|
||||||
|
block.Advance(1) // skip '['
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
endIndex := util.FindClosure(line, '[', ']', false, true)
|
||||||
|
if endIndex < 0 {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
block.Advance(endIndex + 1)
|
||||||
|
ssegment := segment.WithStop(segment.Start + endIndex)
|
||||||
|
maybeReference := block.Value(ssegment)
|
||||||
|
if util.IsBlank(maybeReference) { // collapsed reference link
|
||||||
|
ssegment = text.NewSegment(last.Segment.Stop, orgpos.Start-1)
|
||||||
|
maybeReference = block.Value(ssegment)
|
||||||
|
}
|
||||||
|
|
||||||
|
ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
|
||||||
|
if !ok {
|
||||||
|
return nil, true
|
||||||
|
}
|
||||||
|
|
||||||
|
link := ast.NewLink()
|
||||||
|
s.processLinkLabel(parent, link, last, pc)
|
||||||
|
link.Title = ref.Title()
|
||||||
|
link.Destination = ref.Destination()
|
||||||
|
return link, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkParser) parseLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) *ast.Link {
|
||||||
|
block.Advance(1) // skip '('
|
||||||
|
block.SkipSpaces()
|
||||||
|
var title []byte
|
||||||
|
var destination []byte
|
||||||
|
var ok bool
|
||||||
|
if block.Peek() == ')' { // empty link like '[link]()'
|
||||||
|
block.Advance(1)
|
||||||
|
} else {
|
||||||
|
destination, ok = parseLinkDestination(block)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
block.SkipSpaces()
|
||||||
|
if block.Peek() == ')' {
|
||||||
|
block.Advance(1)
|
||||||
|
} else {
|
||||||
|
title, ok = parseLinkTitle(block)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
block.SkipSpaces()
|
||||||
|
if block.Peek() == ')' {
|
||||||
|
block.Advance(1)
|
||||||
|
} else {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
link := ast.NewLink()
|
||||||
|
s.processLinkLabel(parent, link, last, pc)
|
||||||
|
link.Destination = destination
|
||||||
|
link.Title = title
|
||||||
|
return link
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseLinkDestination(block text.Reader) ([]byte, bool) {
|
||||||
|
block.SkipSpaces()
|
||||||
|
line, _ := block.PeekLine()
|
||||||
|
buf := []byte{}
|
||||||
|
if block.Peek() == '<' {
|
||||||
|
i := 1
|
||||||
|
for i < len(line) {
|
||||||
|
c := line[i]
|
||||||
|
if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
|
||||||
|
buf = append(buf, '\\', line[i+1])
|
||||||
|
i += 2
|
||||||
|
continue
|
||||||
|
} else if c == '>' {
|
||||||
|
block.Advance(i + 1)
|
||||||
|
return line[1:i], true
|
||||||
|
}
|
||||||
|
buf = append(buf, c)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
opened := 0
|
||||||
|
i := 0
|
||||||
|
for i < len(line) {
|
||||||
|
c := line[i]
|
||||||
|
if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
|
||||||
|
buf = append(buf, '\\', line[i+1])
|
||||||
|
i += 2
|
||||||
|
continue
|
||||||
|
} else if c == '(' {
|
||||||
|
opened++
|
||||||
|
} else if c == ')' {
|
||||||
|
opened--
|
||||||
|
if opened < 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else if util.IsSpace(c) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
buf = append(buf, c)
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
block.Advance(i)
|
||||||
|
return line[:i], len(line[:i]) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseLinkTitle(block text.Reader) ([]byte, bool) {
|
||||||
|
block.SkipSpaces()
|
||||||
|
opener := block.Peek()
|
||||||
|
if opener != '"' && opener != '\'' && opener != '(' {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
closer := opener
|
||||||
|
if opener == '(' {
|
||||||
|
closer = ')'
|
||||||
|
}
|
||||||
|
line, _ := block.PeekLine()
|
||||||
|
pos := util.FindClosure(line[1:], opener, closer, false, true)
|
||||||
|
if pos < 0 {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
pos += 2 // opener + closer
|
||||||
|
block.Advance(pos)
|
||||||
|
return line[1 : pos-1], true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *linkParser) CloseBlock(parent ast.Node, block text.Reader, pc Context) {
|
||||||
|
tlist := pc.Get(linkLabelStateKey)
|
||||||
|
if tlist == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for s := tlist.(*linkLabelState); s != nil; {
|
||||||
|
next := s.Next
|
||||||
|
removeLinkLabelState(pc, s)
|
||||||
|
s.Parent().ReplaceChild(s.Parent(), s, ast.NewTextSegment(s.Segment))
|
||||||
|
s = next
|
||||||
|
}
|
||||||
|
}
|
163
vendor/github.com/yuin/goldmark/parser/link_ref.go
generated
vendored
Normal file
163
vendor/github.com/yuin/goldmark/parser/link_ref.go
generated
vendored
Normal file
|
@ -0,0 +1,163 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type linkReferenceParagraphTransformer struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// LinkReferenceParagraphTransformer is a ParagraphTransformer implementation
|
||||||
|
// that parses and extracts link reference from paragraphs.
|
||||||
|
var LinkReferenceParagraphTransformer = &linkReferenceParagraphTransformer{}
|
||||||
|
|
||||||
|
func (p *linkReferenceParagraphTransformer) Transform(node *ast.Paragraph, reader text.Reader, pc Context) {
|
||||||
|
lines := node.Lines()
|
||||||
|
block := text.NewBlockReader(reader.Source(), lines)
|
||||||
|
removes := [][2]int{}
|
||||||
|
for {
|
||||||
|
start, end := parseLinkReferenceDefinition(block, pc)
|
||||||
|
if start > -1 {
|
||||||
|
if start == end {
|
||||||
|
end++
|
||||||
|
}
|
||||||
|
removes = append(removes, [2]int{start, end})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
offset := 0
|
||||||
|
for _, remove := range removes {
|
||||||
|
if lines.Len() == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s := lines.Sliced(remove[1]-offset, lines.Len())
|
||||||
|
lines.SetSliced(0, remove[0]-offset)
|
||||||
|
lines.AppendAll(s)
|
||||||
|
offset = remove[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
if lines.Len() == 0 {
|
||||||
|
t := ast.NewTextBlock()
|
||||||
|
t.SetBlankPreviousLines(node.HasBlankPreviousLines())
|
||||||
|
node.Parent().ReplaceChild(node.Parent(), node, t)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
node.SetLines(lines)
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseLinkReferenceDefinition(block text.Reader, pc Context) (int, int) {
|
||||||
|
block.SkipSpaces()
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
startLine, _ := block.Position()
|
||||||
|
width, pos := util.IndentWidth(line, 0)
|
||||||
|
if width > 3 {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
if width != 0 {
|
||||||
|
pos++
|
||||||
|
}
|
||||||
|
if line[pos] != '[' {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
open := segment.Start + pos + 1
|
||||||
|
closes := -1
|
||||||
|
block.Advance(pos + 1)
|
||||||
|
for {
|
||||||
|
line, segment = block.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
closure := util.FindClosure(line, '[', ']', false, false)
|
||||||
|
if closure > -1 {
|
||||||
|
closes = segment.Start + closure
|
||||||
|
next := closure + 1
|
||||||
|
if next >= len(line) || line[next] != ':' {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
block.Advance(next + 1)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
block.AdvanceLine()
|
||||||
|
}
|
||||||
|
if closes < 0 {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
label := block.Value(text.NewSegment(open, closes))
|
||||||
|
if util.IsBlank(label) {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
block.SkipSpaces()
|
||||||
|
destination, ok := parseLinkDestination(block)
|
||||||
|
if !ok {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
line, segment = block.PeekLine()
|
||||||
|
isNewLine := line == nil || util.IsBlank(line)
|
||||||
|
|
||||||
|
endLine, _ := block.Position()
|
||||||
|
_, spaces, _ := block.SkipSpaces()
|
||||||
|
opener := block.Peek()
|
||||||
|
if opener != '"' && opener != '\'' && opener != '(' {
|
||||||
|
if !isNewLine {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
ref := NewReference(label, destination, nil)
|
||||||
|
pc.AddReference(ref)
|
||||||
|
return startLine, endLine + 1
|
||||||
|
}
|
||||||
|
if spaces == 0 {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
block.Advance(1)
|
||||||
|
open = -1
|
||||||
|
closes = -1
|
||||||
|
closer := opener
|
||||||
|
if opener == '(' {
|
||||||
|
closer = ')'
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
line, segment = block.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
if open < 0 {
|
||||||
|
open = segment.Start
|
||||||
|
}
|
||||||
|
closure := util.FindClosure(line, opener, closer, false, true)
|
||||||
|
if closure > -1 {
|
||||||
|
closes = segment.Start + closure
|
||||||
|
block.Advance(closure + 1)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
block.AdvanceLine()
|
||||||
|
}
|
||||||
|
if closes < 0 {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
|
||||||
|
line, segment = block.PeekLine()
|
||||||
|
if line != nil && !util.IsBlank(line) {
|
||||||
|
if !isNewLine {
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
title := block.Value(text.NewSegment(open, closes))
|
||||||
|
ref := NewReference(label, destination, title)
|
||||||
|
pc.AddReference(ref)
|
||||||
|
return startLine, endLine
|
||||||
|
}
|
||||||
|
|
||||||
|
title := block.Value(text.NewSegment(open, closes))
|
||||||
|
|
||||||
|
endLine, _ = block.Position()
|
||||||
|
ref := NewReference(label, destination, title)
|
||||||
|
pc.AddReference(ref)
|
||||||
|
return startLine, endLine + 1
|
||||||
|
}
|
250
vendor/github.com/yuin/goldmark/parser/list.go
generated
vendored
Normal file
250
vendor/github.com/yuin/goldmark/parser/list.go
generated
vendored
Normal file
|
@ -0,0 +1,250 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
type listItemType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
notList listItemType = iota
|
||||||
|
bulletList
|
||||||
|
orderedList
|
||||||
|
)
|
||||||
|
|
||||||
|
// Same as
|
||||||
|
// `^(([ ]*)([\-\*\+]))(\s+.*)?\n?$`.FindSubmatchIndex or
|
||||||
|
// `^(([ ]*)(\d{1,9}[\.\)]))(\s+.*)?\n?$`.FindSubmatchIndex
|
||||||
|
func parseListItem(line []byte) ([6]int, listItemType) {
|
||||||
|
i := 0
|
||||||
|
l := len(line)
|
||||||
|
ret := [6]int{}
|
||||||
|
for ; i < l && line[i] == ' '; i++ {
|
||||||
|
c := line[i]
|
||||||
|
if c == '\t' {
|
||||||
|
return ret, notList
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if i > 3 {
|
||||||
|
return ret, notList
|
||||||
|
}
|
||||||
|
ret[0] = 0
|
||||||
|
ret[1] = i
|
||||||
|
ret[2] = i
|
||||||
|
var typ listItemType
|
||||||
|
if i < l && (line[i] == '-' || line[i] == '*' || line[i] == '+') {
|
||||||
|
i++
|
||||||
|
ret[3] = i
|
||||||
|
typ = bulletList
|
||||||
|
} else if i < l {
|
||||||
|
for ; i < l && util.IsNumeric(line[i]); i++ {
|
||||||
|
}
|
||||||
|
ret[3] = i
|
||||||
|
if ret[3] == ret[2] || ret[3]-ret[2] > 9 {
|
||||||
|
return ret, notList
|
||||||
|
}
|
||||||
|
if i < l && (line[i] == '.' || line[i] == ')') {
|
||||||
|
i++
|
||||||
|
ret[3] = i
|
||||||
|
} else {
|
||||||
|
return ret, notList
|
||||||
|
}
|
||||||
|
typ = orderedList
|
||||||
|
} else {
|
||||||
|
return ret, notList
|
||||||
|
}
|
||||||
|
if i < l && line[i] != '\n' {
|
||||||
|
w, _ := util.IndentWidth(line[i:], 0)
|
||||||
|
if w == 0 {
|
||||||
|
return ret, notList
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if i >= l {
|
||||||
|
ret[4] = -1
|
||||||
|
ret[5] = -1
|
||||||
|
return ret, typ
|
||||||
|
}
|
||||||
|
ret[4] = i
|
||||||
|
ret[5] = len(line)
|
||||||
|
if line[ret[5]-1] == '\n' && line[i] != '\n' {
|
||||||
|
ret[5]--
|
||||||
|
}
|
||||||
|
return ret, typ
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchesListItem(source []byte, strict bool) ([6]int, listItemType) {
|
||||||
|
m, typ := parseListItem(source)
|
||||||
|
if typ != notList && (!strict || strict && m[1] < 4) {
|
||||||
|
return m, typ
|
||||||
|
}
|
||||||
|
return m, notList
|
||||||
|
}
|
||||||
|
|
||||||
|
func calcListOffset(source []byte, match [6]int) int {
|
||||||
|
offset := 0
|
||||||
|
if match[4] < 0 || util.IsBlank(source[match[4]:]) { // list item starts with a blank line
|
||||||
|
offset = 1
|
||||||
|
} else {
|
||||||
|
offset, _ = util.IndentWidth(source[match[4]:], match[4])
|
||||||
|
if offset > 4 { // offseted codeblock
|
||||||
|
offset = 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return offset
|
||||||
|
}
|
||||||
|
|
||||||
|
func lastOffset(node ast.Node) int {
|
||||||
|
lastChild := node.LastChild()
|
||||||
|
if lastChild != nil {
|
||||||
|
return lastChild.(*ast.ListItem).Offset
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type listParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultListParser = &listParser{}
|
||||||
|
|
||||||
|
// NewListParser returns a new BlockParser that
|
||||||
|
// parses lists.
|
||||||
|
// This parser must take precedence over the ListItemParser.
|
||||||
|
func NewListParser() BlockParser {
|
||||||
|
return defaultListParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listParser) Trigger() []byte {
|
||||||
|
return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
last := pc.LastOpenedBlock().Node
|
||||||
|
if _, lok := last.(*ast.List); lok || pc.Get(skipListParser) != nil {
|
||||||
|
pc.Set(skipListParser, nil)
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
match, typ := matchesListItem(line, true)
|
||||||
|
if typ == notList {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
start := -1
|
||||||
|
if typ == orderedList {
|
||||||
|
number := line[match[2] : match[3]-1]
|
||||||
|
start, _ = strconv.Atoi(string(number))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ast.IsParagraph(last) && last.Parent() == parent {
|
||||||
|
// we allow only lists starting with 1 to interrupt paragraphs.
|
||||||
|
if typ == orderedList && start != 1 {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
//an empty list item cannot interrupt a paragraph:
|
||||||
|
if match[5]-match[4] == 1 {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
marker := line[match[3]-1]
|
||||||
|
node := ast.NewList(marker)
|
||||||
|
if start > -1 {
|
||||||
|
node.Start = start
|
||||||
|
}
|
||||||
|
return node, HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
list := node.(*ast.List)
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
if util.IsBlank(line) {
|
||||||
|
// A list item can begin with at most one blank line
|
||||||
|
if node.ChildCount() == 1 && node.LastChild().ChildCount() == 0 {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
return Continue | HasChildren
|
||||||
|
}
|
||||||
|
// Thematic Breaks take precedence over lists
|
||||||
|
if isThematicBreak(line, reader.LineOffset()) {
|
||||||
|
isHeading := false
|
||||||
|
last := pc.LastOpenedBlock().Node
|
||||||
|
if ast.IsParagraph(last) {
|
||||||
|
c, ok := matchesSetextHeadingBar(line)
|
||||||
|
if ok && c == '-' {
|
||||||
|
isHeading = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !isHeading {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// "offset" means a width that bar indicates.
|
||||||
|
// - aaaaaaaa
|
||||||
|
// |----|
|
||||||
|
//
|
||||||
|
// If the indent is less than the last offset like
|
||||||
|
// - a
|
||||||
|
// - b <--- current line
|
||||||
|
// it maybe a new child of the list.
|
||||||
|
offset := lastOffset(node)
|
||||||
|
indent, _ := util.IndentWidth(line, reader.LineOffset())
|
||||||
|
|
||||||
|
if indent < offset {
|
||||||
|
if indent < 4 {
|
||||||
|
match, typ := matchesListItem(line, false) // may have a leading spaces more than 3
|
||||||
|
if typ != notList && match[1]-offset < 4 {
|
||||||
|
marker := line[match[3]-1]
|
||||||
|
if !list.CanContinue(marker, typ == orderedList) {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
return Continue | HasChildren
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
return Continue | HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
list := node.(*ast.List)
|
||||||
|
|
||||||
|
for c := node.FirstChild(); c != nil && list.IsTight; c = c.NextSibling() {
|
||||||
|
if c.FirstChild() != nil && c.FirstChild() != c.LastChild() {
|
||||||
|
for c1 := c.FirstChild().NextSibling(); c1 != nil; c1 = c1.NextSibling() {
|
||||||
|
if bl, ok := c1.(ast.Node); ok && bl.HasBlankPreviousLines() {
|
||||||
|
list.IsTight = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c != node.FirstChild() {
|
||||||
|
if bl, ok := c.(ast.Node); ok && bl.HasBlankPreviousLines() {
|
||||||
|
list.IsTight = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if list.IsTight {
|
||||||
|
for child := node.FirstChild(); child != nil; child = child.NextSibling() {
|
||||||
|
for gc := child.FirstChild(); gc != nil; gc = gc.NextSibling() {
|
||||||
|
paragraph, ok := gc.(*ast.Paragraph)
|
||||||
|
if ok {
|
||||||
|
textBlock := ast.NewTextBlock()
|
||||||
|
textBlock.SetLines(paragraph.Lines())
|
||||||
|
child.ReplaceChild(child, paragraph, textBlock)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
85
vendor/github.com/yuin/goldmark/parser/list_item.go
generated
vendored
Normal file
85
vendor/github.com/yuin/goldmark/parser/list_item.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type listItemParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultListItemParser = &listItemParser{}
|
||||||
|
|
||||||
|
// NewListItemParser returns a new BlockParser that
|
||||||
|
// parses list items.
|
||||||
|
func NewListItemParser() BlockParser {
|
||||||
|
return defaultListItemParser
|
||||||
|
}
|
||||||
|
|
||||||
|
var skipListParser = NewContextKey()
|
||||||
|
var skipListParserValue interface{} = true
|
||||||
|
|
||||||
|
func (b *listItemParser) Trigger() []byte {
|
||||||
|
return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listItemParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
list, lok := parent.(*ast.List)
|
||||||
|
if !lok { // list item must be a child of a list
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
offset := lastOffset(list)
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
match, typ := matchesListItem(line, false)
|
||||||
|
if typ == notList {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
if match[1]-offset > 3 {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
itemOffset := calcListOffset(line, match)
|
||||||
|
node := ast.NewListItem(match[3] + itemOffset)
|
||||||
|
if match[4] < 0 || match[5]-match[4] == 1 {
|
||||||
|
return node, NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
pos, padding := util.IndentPosition(line[match[4]:], match[4], itemOffset)
|
||||||
|
child := match[3] + pos
|
||||||
|
reader.AdvanceAndSetPadding(child, padding)
|
||||||
|
return node, HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listItemParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
line, _ := reader.PeekLine()
|
||||||
|
if util.IsBlank(line) {
|
||||||
|
return Continue | HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
indent, _ := util.IndentWidth(line, reader.LineOffset())
|
||||||
|
offset := lastOffset(node.Parent())
|
||||||
|
if indent < offset && indent < 4 {
|
||||||
|
_, typ := matchesListItem(line, true)
|
||||||
|
// new list item found
|
||||||
|
if typ != notList {
|
||||||
|
pc.Set(skipListParser, skipListParserValue)
|
||||||
|
}
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
pos, padding := util.IndentPosition(line, reader.LineOffset(), offset)
|
||||||
|
reader.AdvanceAndSetPadding(pos, padding)
|
||||||
|
|
||||||
|
return Continue | HasChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listItemParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listItemParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *listItemParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
71
vendor/github.com/yuin/goldmark/parser/paragraph.go
generated
vendored
Normal file
71
vendor/github.com/yuin/goldmark/parser/paragraph.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
)
|
||||||
|
|
||||||
|
type paragraphParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultParagraphParser = ¶graphParser{}
|
||||||
|
|
||||||
|
// NewParagraphParser returns a new BlockParser that
|
||||||
|
// parses paragraphs.
|
||||||
|
func NewParagraphParser() BlockParser {
|
||||||
|
return defaultParagraphParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *paragraphParser) Trigger() []byte {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *paragraphParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
_, segment := reader.PeekLine()
|
||||||
|
segment = segment.TrimLeftSpace(reader.Source())
|
||||||
|
if segment.IsEmpty() {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
node := ast.NewParagraph()
|
||||||
|
node.Lines().Append(segment)
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return node, NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
_, segment := reader.PeekLine()
|
||||||
|
segment = segment.TrimLeftSpace(reader.Source())
|
||||||
|
if segment.IsEmpty() {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
node.Lines().Append(segment)
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return Continue | NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
parent := node.Parent()
|
||||||
|
if parent == nil {
|
||||||
|
// paragraph has been transformed
|
||||||
|
return
|
||||||
|
}
|
||||||
|
lines := node.Lines()
|
||||||
|
if lines.Len() != 0 {
|
||||||
|
// trim trailing spaces
|
||||||
|
length := lines.Len()
|
||||||
|
lastLine := node.Lines().At(length - 1)
|
||||||
|
node.Lines().Set(length-1, lastLine.TrimRightSpace(reader.Source()))
|
||||||
|
}
|
||||||
|
if lines.Len() == 0 {
|
||||||
|
node.Parent().RemoveChild(node.Parent(), node)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *paragraphParser) CanInterruptParagraph() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *paragraphParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
1211
vendor/github.com/yuin/goldmark/parser/parser.go
generated
vendored
Normal file
1211
vendor/github.com/yuin/goldmark/parser/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
108
vendor/github.com/yuin/goldmark/parser/raw_html.go
generated
vendored
Normal file
108
vendor/github.com/yuin/goldmark/parser/raw_html.go
generated
vendored
Normal file
|
@ -0,0 +1,108 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
type rawHTMLParser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultRawHTMLParser = &rawHTMLParser{}
|
||||||
|
|
||||||
|
// NewRawHTMLParser return a new InlineParser that can parse
|
||||||
|
// inline htmls
|
||||||
|
func NewRawHTMLParser() InlineParser {
|
||||||
|
return defaultRawHTMLParser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *rawHTMLParser) Trigger() []byte {
|
||||||
|
return []byte{'<'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *rawHTMLParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
|
||||||
|
line, _ := block.PeekLine()
|
||||||
|
if len(line) > 1 && util.IsAlphaNumeric(line[1]) {
|
||||||
|
return s.parseMultiLineRegexp(openTagRegexp, block, pc)
|
||||||
|
}
|
||||||
|
if len(line) > 2 && line[1] == '/' && util.IsAlphaNumeric(line[2]) {
|
||||||
|
return s.parseMultiLineRegexp(closeTagRegexp, block, pc)
|
||||||
|
}
|
||||||
|
if bytes.HasPrefix(line, []byte("<!--")) {
|
||||||
|
return s.parseMultiLineRegexp(commentRegexp, block, pc)
|
||||||
|
}
|
||||||
|
if bytes.HasPrefix(line, []byte("<?")) {
|
||||||
|
return s.parseSingleLineRegexp(processingInstructionRegexp, block, pc)
|
||||||
|
}
|
||||||
|
if len(line) > 2 && line[1] == '!' && line[2] >= 'A' && line[2] <= 'Z' {
|
||||||
|
return s.parseSingleLineRegexp(declRegexp, block, pc)
|
||||||
|
}
|
||||||
|
if bytes.HasPrefix(line, []byte("<![CDATA[")) {
|
||||||
|
return s.parseMultiLineRegexp(cdataRegexp, block, pc)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var tagnamePattern = `([A-Za-z][A-Za-z0-9-]*)`
|
||||||
|
var attributePattern = `(?:\s+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:\s*=\s*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)`
|
||||||
|
var openTagRegexp = regexp.MustCompile("^<" + tagnamePattern + attributePattern + `*\s*/?>`)
|
||||||
|
var closeTagRegexp = regexp.MustCompile("^</" + tagnamePattern + `\s*>`)
|
||||||
|
var commentRegexp = regexp.MustCompile(`^<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->`)
|
||||||
|
var processingInstructionRegexp = regexp.MustCompile(`^(?:<\?).*?(?:\?>)`)
|
||||||
|
var declRegexp = regexp.MustCompile(`^<![A-Z]+\s+[^>]*>`)
|
||||||
|
var cdataRegexp = regexp.MustCompile(`<!\[CDATA\[[\s\S]*?\]\]>`)
|
||||||
|
|
||||||
|
func (s *rawHTMLParser) parseSingleLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
match := reg.FindSubmatchIndex(line)
|
||||||
|
if match == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
node := ast.NewRawHTML()
|
||||||
|
node.Segments.Append(segment.WithStop(segment.Start + match[1]))
|
||||||
|
block.Advance(match[1])
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
var dummyMatch = [][]byte{}
|
||||||
|
|
||||||
|
func (s *rawHTMLParser) parseMultiLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
|
||||||
|
sline, ssegment := block.Position()
|
||||||
|
if block.Match(reg) {
|
||||||
|
node := ast.NewRawHTML()
|
||||||
|
eline, esegment := block.Position()
|
||||||
|
block.SetPosition(sline, ssegment)
|
||||||
|
for {
|
||||||
|
line, segment := block.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
l, _ := block.Position()
|
||||||
|
start := segment.Start
|
||||||
|
if l == sline {
|
||||||
|
start = ssegment.Start
|
||||||
|
}
|
||||||
|
end := segment.Stop
|
||||||
|
if l == eline {
|
||||||
|
end = esegment.Start
|
||||||
|
}
|
||||||
|
|
||||||
|
node.Segments.Append(text.NewSegment(start, end))
|
||||||
|
if l == eline {
|
||||||
|
block.Advance(end - start)
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
block.AdvanceLine()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *rawHTMLParser) CloseBlock(parent ast.Node, pc Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
126
vendor/github.com/yuin/goldmark/parser/setext_headings.go
generated
vendored
Normal file
126
vendor/github.com/yuin/goldmark/parser/setext_headings.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
var temporaryParagraphKey = NewContextKey()
|
||||||
|
|
||||||
|
type setextHeadingParser struct {
|
||||||
|
HeadingConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchesSetextHeadingBar(line []byte) (byte, bool) {
|
||||||
|
start := 0
|
||||||
|
end := len(line)
|
||||||
|
space := util.TrimLeftLength(line, []byte{' '})
|
||||||
|
if space > 3 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
start += space
|
||||||
|
level1 := util.TrimLeftLength(line[start:end], []byte{'='})
|
||||||
|
c := byte('=')
|
||||||
|
var level2 int
|
||||||
|
if level1 == 0 {
|
||||||
|
level2 = util.TrimLeftLength(line[start:end], []byte{'-'})
|
||||||
|
c = '-'
|
||||||
|
}
|
||||||
|
if util.IsSpace(line[end-1]) {
|
||||||
|
end -= util.TrimRightSpaceLength(line[start:end])
|
||||||
|
}
|
||||||
|
if !((level1 > 0 && start+level1 == end) || (level2 > 0 && start+level2 == end)) {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return c, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSetextHeadingParser return a new BlockParser that can parse Setext headings.
|
||||||
|
func NewSetextHeadingParser(opts ...HeadingOption) BlockParser {
|
||||||
|
p := &setextHeadingParser{}
|
||||||
|
for _, o := range opts {
|
||||||
|
o.SetHeadingOption(&p.HeadingConfig)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *setextHeadingParser) Trigger() []byte {
|
||||||
|
return []byte{'-', '='}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *setextHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
last := pc.LastOpenedBlock().Node
|
||||||
|
if last == nil {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
paragraph, ok := last.(*ast.Paragraph)
|
||||||
|
if !ok || paragraph.Parent() != parent {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
c, ok := matchesSetextHeadingBar(line)
|
||||||
|
if !ok {
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
level := 1
|
||||||
|
if c == '-' {
|
||||||
|
level = 2
|
||||||
|
}
|
||||||
|
node := ast.NewHeading(level)
|
||||||
|
node.Lines().Append(segment)
|
||||||
|
pc.Set(temporaryParagraphKey, last)
|
||||||
|
return node, NoChildren | RequireParagraph
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *setextHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *setextHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
heading := node.(*ast.Heading)
|
||||||
|
segment := node.Lines().At(0)
|
||||||
|
heading.Lines().Clear()
|
||||||
|
tmp := pc.Get(temporaryParagraphKey).(*ast.Paragraph)
|
||||||
|
pc.Set(temporaryParagraphKey, nil)
|
||||||
|
if tmp.Lines().Len() == 0 {
|
||||||
|
next := heading.NextSibling()
|
||||||
|
segment = segment.TrimLeftSpace(reader.Source())
|
||||||
|
if next == nil || !ast.IsParagraph(next) {
|
||||||
|
para := ast.NewParagraph()
|
||||||
|
para.Lines().Append(segment)
|
||||||
|
heading.Parent().InsertAfter(heading.Parent(), heading, para)
|
||||||
|
} else {
|
||||||
|
next.(ast.Node).Lines().Unshift(segment)
|
||||||
|
}
|
||||||
|
heading.Parent().RemoveChild(heading.Parent(), heading)
|
||||||
|
} else {
|
||||||
|
heading.SetLines(tmp.Lines())
|
||||||
|
heading.SetBlankPreviousLines(tmp.HasBlankPreviousLines())
|
||||||
|
tp := tmp.Parent()
|
||||||
|
if tp != nil {
|
||||||
|
tp.RemoveChild(tp, tmp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Attribute {
|
||||||
|
parseLastLineAttributes(node, reader, pc)
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.AutoHeadingID {
|
||||||
|
id, ok := node.AttributeString("id")
|
||||||
|
if !ok {
|
||||||
|
generateAutoHeadingID(heading, reader, pc)
|
||||||
|
} else {
|
||||||
|
pc.IDs().Put(id.([]byte))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *setextHeadingParser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *setextHeadingParser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
75
vendor/github.com/yuin/goldmark/parser/thematic_break.go
generated
vendored
Normal file
75
vendor/github.com/yuin/goldmark/parser/thematic_break.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
||||||
|
package parser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/text"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
type thematicBreakPraser struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultThematicBreakPraser = &thematicBreakPraser{}
|
||||||
|
|
||||||
|
// NewThematicBreakParser returns a new BlockParser that
|
||||||
|
// parses thematic breaks.
|
||||||
|
func NewThematicBreakParser() BlockParser {
|
||||||
|
return defaultThematicBreakPraser
|
||||||
|
}
|
||||||
|
|
||||||
|
func isThematicBreak(line []byte, offset int) bool {
|
||||||
|
w, pos := util.IndentWidth(line, offset)
|
||||||
|
if w > 3 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
mark := byte(0)
|
||||||
|
count := 0
|
||||||
|
for i := pos; i < len(line); i++ {
|
||||||
|
c := line[i]
|
||||||
|
if util.IsSpace(c) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if mark == 0 {
|
||||||
|
mark = c
|
||||||
|
count = 1
|
||||||
|
if mark == '*' || mark == '-' || mark == '_' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if c != mark {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
return count > 2
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *thematicBreakPraser) Trigger() []byte {
|
||||||
|
return []byte{'-', '*', '_'}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *thematicBreakPraser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
|
||||||
|
line, segment := reader.PeekLine()
|
||||||
|
if isThematicBreak(line, reader.LineOffset()) {
|
||||||
|
reader.Advance(segment.Len() - 1)
|
||||||
|
return ast.NewThematicBreak(), NoChildren
|
||||||
|
}
|
||||||
|
return nil, NoChildren
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *thematicBreakPraser) Continue(node ast.Node, reader text.Reader, pc Context) State {
|
||||||
|
return Close
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *thematicBreakPraser) Close(node ast.Node, reader text.Reader, pc Context) {
|
||||||
|
// nothing to do
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *thematicBreakPraser) CanInterruptParagraph() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *thematicBreakPraser) CanAcceptIndentedLine() bool {
|
||||||
|
return false
|
||||||
|
}
|
804
vendor/github.com/yuin/goldmark/renderer/html/html.go
generated
vendored
Normal file
804
vendor/github.com/yuin/goldmark/renderer/html/html.go
generated
vendored
Normal file
|
@ -0,0 +1,804 @@
|
||||||
|
package html
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/renderer"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Config struct has configurations for the HTML based renderers.
|
||||||
|
type Config struct {
|
||||||
|
Writer Writer
|
||||||
|
HardWraps bool
|
||||||
|
XHTML bool
|
||||||
|
Unsafe bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConfig returns a new Config with defaults.
|
||||||
|
func NewConfig() Config {
|
||||||
|
return Config{
|
||||||
|
Writer: DefaultWriter,
|
||||||
|
HardWraps: false,
|
||||||
|
XHTML: false,
|
||||||
|
Unsafe: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetOption implements renderer.NodeRenderer.SetOption.
|
||||||
|
func (c *Config) SetOption(name renderer.OptionName, value interface{}) {
|
||||||
|
switch name {
|
||||||
|
case optHardWraps:
|
||||||
|
c.HardWraps = value.(bool)
|
||||||
|
case optXHTML:
|
||||||
|
c.XHTML = value.(bool)
|
||||||
|
case optUnsafe:
|
||||||
|
c.Unsafe = value.(bool)
|
||||||
|
case optTextWriter:
|
||||||
|
c.Writer = value.(Writer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Option interface sets options for HTML based renderers.
|
||||||
|
type Option interface {
|
||||||
|
SetHTMLOption(*Config)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextWriter is an option name used in WithWriter.
|
||||||
|
const optTextWriter renderer.OptionName = "Writer"
|
||||||
|
|
||||||
|
type withWriter struct {
|
||||||
|
value Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withWriter) SetConfig(c *renderer.Config) {
|
||||||
|
c.Options[optTextWriter] = o.value
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withWriter) SetHTMLOption(c *Config) {
|
||||||
|
c.Writer = o.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithWriter is a functional option that allow you to set the given writer to
|
||||||
|
// the renderer.
|
||||||
|
func WithWriter(writer Writer) interface {
|
||||||
|
renderer.Option
|
||||||
|
Option
|
||||||
|
} {
|
||||||
|
return &withWriter{writer}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HardWraps is an option name used in WithHardWraps.
|
||||||
|
const optHardWraps renderer.OptionName = "HardWraps"
|
||||||
|
|
||||||
|
type withHardWraps struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withHardWraps) SetConfig(c *renderer.Config) {
|
||||||
|
c.Options[optHardWraps] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withHardWraps) SetHTMLOption(c *Config) {
|
||||||
|
c.HardWraps = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHardWraps is a functional option that indicates whether softline breaks
|
||||||
|
// should be rendered as '<br>'.
|
||||||
|
func WithHardWraps() interface {
|
||||||
|
renderer.Option
|
||||||
|
Option
|
||||||
|
} {
|
||||||
|
return &withHardWraps{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// XHTML is an option name used in WithXHTML.
|
||||||
|
const optXHTML renderer.OptionName = "XHTML"
|
||||||
|
|
||||||
|
type withXHTML struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withXHTML) SetConfig(c *renderer.Config) {
|
||||||
|
c.Options[optXHTML] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withXHTML) SetHTMLOption(c *Config) {
|
||||||
|
c.XHTML = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithXHTML is a functional option indicates that nodes should be rendered in
|
||||||
|
// xhtml instead of HTML5.
|
||||||
|
func WithXHTML() interface {
|
||||||
|
Option
|
||||||
|
renderer.Option
|
||||||
|
} {
|
||||||
|
return &withXHTML{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unsafe is an option name used in WithUnsafe.
|
||||||
|
const optUnsafe renderer.OptionName = "Unsafe"
|
||||||
|
|
||||||
|
type withUnsafe struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withUnsafe) SetConfig(c *renderer.Config) {
|
||||||
|
c.Options[optUnsafe] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withUnsafe) SetHTMLOption(c *Config) {
|
||||||
|
c.Unsafe = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUnsafe is a functional option that renders dangerous contents
|
||||||
|
// (raw htmls and potentially dangerous links) as it is.
|
||||||
|
func WithUnsafe() interface {
|
||||||
|
renderer.Option
|
||||||
|
Option
|
||||||
|
} {
|
||||||
|
return &withUnsafe{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Renderer struct is an implementation of renderer.NodeRenderer that renders
|
||||||
|
// nodes as (X)HTML.
|
||||||
|
type Renderer struct {
|
||||||
|
Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRenderer returns a new Renderer with given options.
|
||||||
|
func NewRenderer(opts ...Option) renderer.NodeRenderer {
|
||||||
|
r := &Renderer{
|
||||||
|
Config: NewConfig(),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt.SetHTMLOption(&r.Config)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterFuncs implements NodeRenderer.RegisterFuncs .
|
||||||
|
func (r *Renderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
|
||||||
|
// blocks
|
||||||
|
|
||||||
|
reg.Register(ast.KindDocument, r.renderDocument)
|
||||||
|
reg.Register(ast.KindHeading, r.renderHeading)
|
||||||
|
reg.Register(ast.KindBlockquote, r.renderBlockquote)
|
||||||
|
reg.Register(ast.KindCodeBlock, r.renderCodeBlock)
|
||||||
|
reg.Register(ast.KindFencedCodeBlock, r.renderFencedCodeBlock)
|
||||||
|
reg.Register(ast.KindHTMLBlock, r.renderHTMLBlock)
|
||||||
|
reg.Register(ast.KindList, r.renderList)
|
||||||
|
reg.Register(ast.KindListItem, r.renderListItem)
|
||||||
|
reg.Register(ast.KindParagraph, r.renderParagraph)
|
||||||
|
reg.Register(ast.KindTextBlock, r.renderTextBlock)
|
||||||
|
reg.Register(ast.KindThematicBreak, r.renderThematicBreak)
|
||||||
|
|
||||||
|
// inlines
|
||||||
|
|
||||||
|
reg.Register(ast.KindAutoLink, r.renderAutoLink)
|
||||||
|
reg.Register(ast.KindCodeSpan, r.renderCodeSpan)
|
||||||
|
reg.Register(ast.KindEmphasis, r.renderEmphasis)
|
||||||
|
reg.Register(ast.KindImage, r.renderImage)
|
||||||
|
reg.Register(ast.KindLink, r.renderLink)
|
||||||
|
reg.Register(ast.KindRawHTML, r.renderRawHTML)
|
||||||
|
reg.Register(ast.KindText, r.renderText)
|
||||||
|
reg.Register(ast.KindString, r.renderString)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) writeLines(w util.BufWriter, source []byte, n ast.Node) {
|
||||||
|
l := n.Lines().Len()
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
line := n.Lines().At(i)
|
||||||
|
r.Writer.RawWrite(w, line.Value(source))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GlobalAttributeFilter defines attribute names which any elements can have.
|
||||||
|
var GlobalAttributeFilter = util.NewBytesFilter(
|
||||||
|
[]byte("accesskey"),
|
||||||
|
[]byte("autocapitalize"),
|
||||||
|
[]byte("class"),
|
||||||
|
[]byte("contenteditable"),
|
||||||
|
[]byte("contextmenu"),
|
||||||
|
[]byte("dir"),
|
||||||
|
[]byte("draggable"),
|
||||||
|
[]byte("dropzone"),
|
||||||
|
[]byte("hidden"),
|
||||||
|
[]byte("id"),
|
||||||
|
[]byte("itemprop"),
|
||||||
|
[]byte("lang"),
|
||||||
|
[]byte("slot"),
|
||||||
|
[]byte("spellcheck"),
|
||||||
|
[]byte("style"),
|
||||||
|
[]byte("tabindex"),
|
||||||
|
[]byte("title"),
|
||||||
|
[]byte("translate"),
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *Renderer) renderDocument(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
// nothing to do
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeadingAttributeFilter defines attribute names which heading elements can have
|
||||||
|
var HeadingAttributeFilter = GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *Renderer) renderHeading(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.Heading)
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<h")
|
||||||
|
_ = w.WriteByte("0123456"[n.Level])
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
RenderAttributes(w, node, HeadingAttributeFilter)
|
||||||
|
}
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</h")
|
||||||
|
_ = w.WriteByte("0123456"[n.Level])
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockquoteAttributeFilter defines attribute names which blockquote elements can have
|
||||||
|
var BlockquoteAttributeFilter = GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("cite"),
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *Renderer) renderBlockquote(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_, _ = w.WriteString("<blockquote")
|
||||||
|
RenderAttributes(w, n, BlockquoteAttributeFilter)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<blockquote>\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</blockquote>\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderCodeBlock(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<pre><code>")
|
||||||
|
r.writeLines(w, source, n)
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</code></pre>\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderFencedCodeBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.FencedCodeBlock)
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<pre><code")
|
||||||
|
language := n.Language(source)
|
||||||
|
if language != nil {
|
||||||
|
_, _ = w.WriteString(" class=\"language-")
|
||||||
|
r.Writer.Write(w, language)
|
||||||
|
_, _ = w.WriteString("\"")
|
||||||
|
}
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
r.writeLines(w, source, n)
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</code></pre>\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderHTMLBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.HTMLBlock)
|
||||||
|
if entering {
|
||||||
|
if r.Unsafe {
|
||||||
|
l := n.Lines().Len()
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
line := n.Lines().At(i)
|
||||||
|
_, _ = w.Write(line.Value(source))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<!-- raw HTML omitted -->\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if n.HasClosure() {
|
||||||
|
if r.Unsafe {
|
||||||
|
closure := n.ClosureLine
|
||||||
|
_, _ = w.Write(closure.Value(source))
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<!-- raw HTML omitted -->\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListAttributeFilter defines attribute names which list elements can have.
|
||||||
|
var ListAttributeFilter = GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("start"),
|
||||||
|
[]byte("reversed"),
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *Renderer) renderList(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.List)
|
||||||
|
tag := "ul"
|
||||||
|
if n.IsOrdered() {
|
||||||
|
tag = "ol"
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
_ = w.WriteByte('<')
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
if n.IsOrdered() && n.Start != 1 {
|
||||||
|
fmt.Fprintf(w, " start=\"%d\"", n.Start)
|
||||||
|
}
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
RenderAttributes(w, n, ListAttributeFilter)
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</")
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListItemAttributeFilter defines attribute names which list item elements can have.
|
||||||
|
var ListItemAttributeFilter = GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("value"),
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *Renderer) renderListItem(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_, _ = w.WriteString("<li")
|
||||||
|
RenderAttributes(w, n, ListItemAttributeFilter)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<li>")
|
||||||
|
}
|
||||||
|
fc := n.FirstChild()
|
||||||
|
if fc != nil {
|
||||||
|
if _, ok := fc.(*ast.TextBlock); !ok {
|
||||||
|
_ = w.WriteByte('\n')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</li>\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParagraphAttributeFilter defines attribute names which paragraph elements can have.
|
||||||
|
var ParagraphAttributeFilter = GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *Renderer) renderParagraph(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_, _ = w.WriteString("<p")
|
||||||
|
RenderAttributes(w, n, ParagraphAttributeFilter)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<p>")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</p>\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderTextBlock(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
if _, ok := n.NextSibling().(ast.Node); ok && n.FirstChild() != nil {
|
||||||
|
_ = w.WriteByte('\n')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ThematicAttributeFilter defines attribute names which hr elements can have.
|
||||||
|
var ThematicAttributeFilter = GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("align"), // [Deprecated]
|
||||||
|
[]byte("color"), // [Not Standardized]
|
||||||
|
[]byte("noshade"), // [Deprecated]
|
||||||
|
[]byte("size"), // [Deprecated]
|
||||||
|
[]byte("width"), // [Deprecated]
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *Renderer) renderThematicBreak(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString("<hr")
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
RenderAttributes(w, n, ThematicAttributeFilter)
|
||||||
|
}
|
||||||
|
if r.XHTML {
|
||||||
|
_, _ = w.WriteString(" />\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString(">\n")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LinkAttributeFilter defines attribute names which link elements can have.
|
||||||
|
var LinkAttributeFilter = GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("download"),
|
||||||
|
// []byte("href"),
|
||||||
|
[]byte("hreflang"),
|
||||||
|
[]byte("media"),
|
||||||
|
[]byte("ping"),
|
||||||
|
[]byte("referrerpolicy"),
|
||||||
|
[]byte("rel"),
|
||||||
|
[]byte("shape"),
|
||||||
|
[]byte("target"),
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *Renderer) renderAutoLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.AutoLink)
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(`<a href="`)
|
||||||
|
url := n.URL(source)
|
||||||
|
label := n.Label(source)
|
||||||
|
if n.AutoLinkType == ast.AutoLinkEmail && !bytes.HasPrefix(bytes.ToLower(url), []byte("mailto:")) {
|
||||||
|
_, _ = w.WriteString("mailto:")
|
||||||
|
}
|
||||||
|
_, _ = w.Write(util.EscapeHTML(util.URLEscape(url, false)))
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_ = w.WriteByte('"')
|
||||||
|
RenderAttributes(w, n, LinkAttributeFilter)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString(`">`)
|
||||||
|
}
|
||||||
|
_, _ = w.Write(util.EscapeHTML(label))
|
||||||
|
_, _ = w.WriteString(`</a>`)
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeAttributeFilter defines attribute names which code elements can have.
|
||||||
|
var CodeAttributeFilter = GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *Renderer) renderCodeSpan(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if entering {
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
_, _ = w.WriteString("<code")
|
||||||
|
RenderAttributes(w, n, CodeAttributeFilter)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<code>")
|
||||||
|
}
|
||||||
|
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
|
||||||
|
segment := c.(*ast.Text).Segment
|
||||||
|
value := segment.Value(source)
|
||||||
|
if bytes.HasSuffix(value, []byte("\n")) {
|
||||||
|
r.Writer.RawWrite(w, value[:len(value)-1])
|
||||||
|
if c != n.LastChild() {
|
||||||
|
r.Writer.RawWrite(w, []byte(" "))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
r.Writer.RawWrite(w, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ast.WalkSkipChildren, nil
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString("</code>")
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EmphasisAttributeFilter defines attribute names which emphasis elements can have.
|
||||||
|
var EmphasisAttributeFilter = GlobalAttributeFilter
|
||||||
|
|
||||||
|
func (r *Renderer) renderEmphasis(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.Emphasis)
|
||||||
|
tag := "em"
|
||||||
|
if n.Level == 2 {
|
||||||
|
tag = "strong"
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
_ = w.WriteByte('<')
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
RenderAttributes(w, n, EmphasisAttributeFilter)
|
||||||
|
}
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</")
|
||||||
|
_, _ = w.WriteString(tag)
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
n := node.(*ast.Link)
|
||||||
|
if entering {
|
||||||
|
_, _ = w.WriteString("<a href=\"")
|
||||||
|
if r.Unsafe || !IsDangerousURL(n.Destination) {
|
||||||
|
_, _ = w.Write(util.EscapeHTML(util.URLEscape(n.Destination, true)))
|
||||||
|
}
|
||||||
|
_ = w.WriteByte('"')
|
||||||
|
if n.Title != nil {
|
||||||
|
_, _ = w.WriteString(` title="`)
|
||||||
|
r.Writer.Write(w, n.Title)
|
||||||
|
_ = w.WriteByte('"')
|
||||||
|
}
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
RenderAttributes(w, n, LinkAttributeFilter)
|
||||||
|
}
|
||||||
|
_ = w.WriteByte('>')
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("</a>")
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImageAttributeFilter defines attribute names which image elements can have.
|
||||||
|
var ImageAttributeFilter = GlobalAttributeFilter.Extend(
|
||||||
|
[]byte("align"),
|
||||||
|
[]byte("border"),
|
||||||
|
[]byte("crossorigin"),
|
||||||
|
[]byte("decoding"),
|
||||||
|
[]byte("height"),
|
||||||
|
[]byte("importance"),
|
||||||
|
[]byte("intrinsicsize"),
|
||||||
|
[]byte("ismap"),
|
||||||
|
[]byte("loading"),
|
||||||
|
[]byte("referrerpolicy"),
|
||||||
|
[]byte("sizes"),
|
||||||
|
[]byte("srcset"),
|
||||||
|
[]byte("usemap"),
|
||||||
|
[]byte("width"),
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *Renderer) renderImage(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
n := node.(*ast.Image)
|
||||||
|
_, _ = w.WriteString("<img src=\"")
|
||||||
|
if r.Unsafe || !IsDangerousURL(n.Destination) {
|
||||||
|
_, _ = w.Write(util.EscapeHTML(util.URLEscape(n.Destination, true)))
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(`" alt="`)
|
||||||
|
_, _ = w.Write(n.Text(source))
|
||||||
|
_ = w.WriteByte('"')
|
||||||
|
if n.Title != nil {
|
||||||
|
_, _ = w.WriteString(` title="`)
|
||||||
|
r.Writer.Write(w, n.Title)
|
||||||
|
_ = w.WriteByte('"')
|
||||||
|
}
|
||||||
|
if n.Attributes() != nil {
|
||||||
|
RenderAttributes(w, n, ImageAttributeFilter)
|
||||||
|
}
|
||||||
|
if r.XHTML {
|
||||||
|
_, _ = w.WriteString(" />")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString(">")
|
||||||
|
}
|
||||||
|
return ast.WalkSkipChildren, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderRawHTML(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkSkipChildren, nil
|
||||||
|
}
|
||||||
|
if r.Unsafe {
|
||||||
|
n := node.(*ast.RawHTML)
|
||||||
|
l := n.Segments.Len()
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
segment := n.Segments.At(i)
|
||||||
|
_, _ = w.Write(segment.Value(source))
|
||||||
|
}
|
||||||
|
return ast.WalkSkipChildren, nil
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString("<!-- raw HTML omitted -->")
|
||||||
|
return ast.WalkSkipChildren, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderText(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
n := node.(*ast.Text)
|
||||||
|
segment := n.Segment
|
||||||
|
if n.IsRaw() {
|
||||||
|
r.Writer.RawWrite(w, segment.Value(source))
|
||||||
|
} else {
|
||||||
|
r.Writer.Write(w, segment.Value(source))
|
||||||
|
if n.HardLineBreak() || (n.SoftLineBreak() && r.HardWraps) {
|
||||||
|
if r.XHTML {
|
||||||
|
_, _ = w.WriteString("<br />\n")
|
||||||
|
} else {
|
||||||
|
_, _ = w.WriteString("<br>\n")
|
||||||
|
}
|
||||||
|
} else if n.SoftLineBreak() {
|
||||||
|
_ = w.WriteByte('\n')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Renderer) renderString(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
if !entering {
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
n := node.(*ast.String)
|
||||||
|
if n.IsCode() {
|
||||||
|
_, _ = w.Write(n.Value)
|
||||||
|
} else {
|
||||||
|
if n.IsRaw() {
|
||||||
|
r.Writer.RawWrite(w, n.Value)
|
||||||
|
} else {
|
||||||
|
r.Writer.Write(w, n.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ast.WalkContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var dataPrefix = []byte("data-")
|
||||||
|
|
||||||
|
// RenderAttributes renders given node's attributes.
|
||||||
|
// You can specify attribute names to render by the filter.
|
||||||
|
// If filter is nil, RenderAttributes renders all attributes.
|
||||||
|
func RenderAttributes(w util.BufWriter, node ast.Node, filter util.BytesFilter) {
|
||||||
|
for _, attr := range node.Attributes() {
|
||||||
|
if filter != nil && !filter.Contains(attr.Name) {
|
||||||
|
if !bytes.HasPrefix(attr.Name, dataPrefix) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_, _ = w.WriteString(" ")
|
||||||
|
_, _ = w.Write(attr.Name)
|
||||||
|
_, _ = w.WriteString(`="`)
|
||||||
|
// TODO: convert numeric values to strings
|
||||||
|
_, _ = w.Write(util.EscapeHTML(attr.Value.([]byte)))
|
||||||
|
_ = w.WriteByte('"')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Writer interface wirtes textual contents to a writer.
|
||||||
|
type Writer interface {
|
||||||
|
// Write writes the given source to writer with resolving references and unescaping
|
||||||
|
// backslash escaped characters.
|
||||||
|
Write(writer util.BufWriter, source []byte)
|
||||||
|
|
||||||
|
// RawWrite wirtes the given source to writer without resolving references and
|
||||||
|
// unescaping backslash escaped characters.
|
||||||
|
RawWrite(writer util.BufWriter, source []byte)
|
||||||
|
}
|
||||||
|
|
||||||
|
type defaultWriter struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func escapeRune(writer util.BufWriter, r rune) {
|
||||||
|
if r < 256 {
|
||||||
|
v := util.EscapeHTMLByte(byte(r))
|
||||||
|
if v != nil {
|
||||||
|
_, _ = writer.Write(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_, _ = writer.WriteRune(util.ToValidRune(r))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *defaultWriter) RawWrite(writer util.BufWriter, source []byte) {
|
||||||
|
n := 0
|
||||||
|
l := len(source)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
v := util.EscapeHTMLByte(source[i])
|
||||||
|
if v != nil {
|
||||||
|
_, _ = writer.Write(source[i-n : i])
|
||||||
|
n = 0
|
||||||
|
_, _ = writer.Write(v)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
if n != 0 {
|
||||||
|
_, _ = writer.Write(source[l-n:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *defaultWriter) Write(writer util.BufWriter, source []byte) {
|
||||||
|
escaped := false
|
||||||
|
var ok bool
|
||||||
|
limit := len(source)
|
||||||
|
n := 0
|
||||||
|
for i := 0; i < limit; i++ {
|
||||||
|
c := source[i]
|
||||||
|
if escaped {
|
||||||
|
if util.IsPunct(c) {
|
||||||
|
d.RawWrite(writer, source[n:i-1])
|
||||||
|
n = i
|
||||||
|
escaped = false
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c == '&' {
|
||||||
|
pos := i
|
||||||
|
next := i + 1
|
||||||
|
if next < limit && source[next] == '#' {
|
||||||
|
nnext := next + 1
|
||||||
|
if nnext < limit {
|
||||||
|
nc := source[nnext]
|
||||||
|
// code point like #x22;
|
||||||
|
if nnext < limit && nc == 'x' || nc == 'X' {
|
||||||
|
start := nnext + 1
|
||||||
|
i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsHexDecimal)
|
||||||
|
if ok && i < limit && source[i] == ';' {
|
||||||
|
v, _ := strconv.ParseUint(util.BytesToReadOnlyString(source[start:i]), 16, 32)
|
||||||
|
d.RawWrite(writer, source[n:pos])
|
||||||
|
n = i + 1
|
||||||
|
escapeRune(writer, rune(v))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// code point like #1234;
|
||||||
|
} else if nc >= '0' && nc <= '9' {
|
||||||
|
start := nnext
|
||||||
|
i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsNumeric)
|
||||||
|
if ok && i < limit && i-start < 8 && source[i] == ';' {
|
||||||
|
v, _ := strconv.ParseUint(util.BytesToReadOnlyString(source[start:i]), 0, 32)
|
||||||
|
d.RawWrite(writer, source[n:pos])
|
||||||
|
n = i + 1
|
||||||
|
escapeRune(writer, rune(v))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
start := next
|
||||||
|
i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsAlphaNumeric)
|
||||||
|
// entity reference
|
||||||
|
if ok && i < limit && source[i] == ';' {
|
||||||
|
name := util.BytesToReadOnlyString(source[start:i])
|
||||||
|
entity, ok := util.LookUpHTML5EntityByName(name)
|
||||||
|
if ok {
|
||||||
|
d.RawWrite(writer, source[n:pos])
|
||||||
|
n = i + 1
|
||||||
|
d.RawWrite(writer, entity.Characters)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i = next - 1
|
||||||
|
}
|
||||||
|
if c == '\\' {
|
||||||
|
escaped = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
escaped = false
|
||||||
|
}
|
||||||
|
d.RawWrite(writer, source[n:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultWriter is a default implementation of the Writer.
|
||||||
|
var DefaultWriter = &defaultWriter{}
|
||||||
|
|
||||||
|
// URL prefixes used by IsDangerousURL.
var (
	bDataImage = []byte("data:image/")
	bPng       = []byte("png;")
	bGif       = []byte("gif;")
	bJpeg      = []byte("jpeg;")
	bWebp      = []byte("webp;")
	bJs        = []byte("javascript:")
	bVb        = []byte("vbscript:")
	bFile      = []byte("file:")
	bData      = []byte("data:")
)

// IsDangerousURL returns true if the given url seems a potentially dangerous url,
// otherwise false.
//
// javascript:, vbscript:, file: and data: URLs are considered dangerous,
// except data: URLs carrying png, gif, jpeg or webp images.
func IsDangerousURL(url []byte) bool {
	if bytes.HasPrefix(url, bDataImage) && len(url) >= 11 {
		rest := url[11:]
		switch {
		case bytes.HasPrefix(rest, bPng),
			bytes.HasPrefix(rest, bGif),
			bytes.HasPrefix(rest, bJpeg),
			bytes.HasPrefix(rest, bWebp):
			return false
		}
		return true
	}
	return bytes.HasPrefix(url, bJs) ||
		bytes.HasPrefix(url, bVb) ||
		bytes.HasPrefix(url, bFile) ||
		bytes.HasPrefix(url, bData)
}
|
174
vendor/github.com/yuin/goldmark/renderer/renderer.go
generated
vendored
Normal file
174
vendor/github.com/yuin/goldmark/renderer/renderer.go
generated
vendored
Normal file
|
@ -0,0 +1,174 @@
|
||||||
|
// Package renderer renders the given AST to certain formats.
|
||||||
|
package renderer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/ast"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Config struct is a data structure that holds configuration of the Renderer.
|
||||||
|
type Config struct {
|
||||||
|
Options map[OptionName]interface{}
|
||||||
|
NodeRenderers util.PrioritizedSlice
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConfig returns a new Config
|
||||||
|
func NewConfig() *Config {
|
||||||
|
return &Config{
|
||||||
|
Options: map[OptionName]interface{}{},
|
||||||
|
NodeRenderers: util.PrioritizedSlice{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An OptionName is a name of the option.
|
||||||
|
type OptionName string
|
||||||
|
|
||||||
|
// An Option interface is a functional option type for the Renderer.
|
||||||
|
type Option interface {
|
||||||
|
SetConfig(*Config)
|
||||||
|
}
|
||||||
|
|
||||||
|
type withNodeRenderers struct {
|
||||||
|
value []util.PrioritizedValue
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withNodeRenderers) SetConfig(c *Config) {
|
||||||
|
c.NodeRenderers = append(c.NodeRenderers, o.value...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithNodeRenderers is a functional option that allow you to add
|
||||||
|
// NodeRenderers to the renderer.
|
||||||
|
func WithNodeRenderers(ps ...util.PrioritizedValue) Option {
|
||||||
|
return &withNodeRenderers{ps}
|
||||||
|
}
|
||||||
|
|
||||||
|
type withOption struct {
|
||||||
|
name OptionName
|
||||||
|
value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *withOption) SetConfig(c *Config) {
|
||||||
|
c.Options[o.name] = o.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOption is a functional option that allow you to set
|
||||||
|
// an arbitrary option to the parser.
|
||||||
|
func WithOption(name OptionName, value interface{}) Option {
|
||||||
|
return &withOption{name, value}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A SetOptioner interface sets given option to the object.
|
||||||
|
type SetOptioner interface {
|
||||||
|
// SetOption sets given option to the object.
|
||||||
|
// Unacceptable options may be passed.
|
||||||
|
// Thus implementations must ignore unacceptable options.
|
||||||
|
SetOption(name OptionName, value interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeRendererFunc is a function that renders a given node.
|
||||||
|
type NodeRendererFunc func(writer util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error)
|
||||||
|
|
||||||
|
// A NodeRenderer interface offers NodeRendererFuncs.
|
||||||
|
type NodeRenderer interface {
|
||||||
|
// RendererFuncs registers NodeRendererFuncs to given NodeRendererFuncRegisterer.
|
||||||
|
RegisterFuncs(NodeRendererFuncRegisterer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A NodeRendererFuncRegisterer registers
|
||||||
|
type NodeRendererFuncRegisterer interface {
|
||||||
|
// Register registers given NodeRendererFunc to this object.
|
||||||
|
Register(ast.NodeKind, NodeRendererFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Renderer interface renders given AST node to given
|
||||||
|
// writer with given Renderer.
|
||||||
|
type Renderer interface {
|
||||||
|
Render(w io.Writer, source []byte, n ast.Node) error
|
||||||
|
|
||||||
|
// AddOptions adds given option to this renderer.
|
||||||
|
AddOptions(...Option)
|
||||||
|
}
|
||||||
|
|
||||||
|
type renderer struct {
|
||||||
|
config *Config
|
||||||
|
options map[OptionName]interface{}
|
||||||
|
nodeRendererFuncsTmp map[ast.NodeKind]NodeRendererFunc
|
||||||
|
maxKind int
|
||||||
|
nodeRendererFuncs []NodeRendererFunc
|
||||||
|
initSync sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRenderer returns a new Renderer with given options.
|
||||||
|
func NewRenderer(options ...Option) Renderer {
|
||||||
|
config := NewConfig()
|
||||||
|
for _, opt := range options {
|
||||||
|
opt.SetConfig(config)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &renderer{
|
||||||
|
options: map[OptionName]interface{}{},
|
||||||
|
config: config,
|
||||||
|
nodeRendererFuncsTmp: map[ast.NodeKind]NodeRendererFunc{},
|
||||||
|
}
|
||||||
|
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *renderer) AddOptions(opts ...Option) {
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt.SetConfig(r.config)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *renderer) Register(kind ast.NodeKind, v NodeRendererFunc) {
|
||||||
|
r.nodeRendererFuncsTmp[kind] = v
|
||||||
|
if int(kind) > r.maxKind {
|
||||||
|
r.maxKind = int(kind)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render renders the given AST node to the given writer with the given Renderer.
|
||||||
|
func (r *renderer) Render(w io.Writer, source []byte, n ast.Node) error {
|
||||||
|
r.initSync.Do(func() {
|
||||||
|
r.options = r.config.Options
|
||||||
|
r.config.NodeRenderers.Sort()
|
||||||
|
l := len(r.config.NodeRenderers)
|
||||||
|
for i := l - 1; i >= 0; i-- {
|
||||||
|
v := r.config.NodeRenderers[i]
|
||||||
|
nr, _ := v.Value.(NodeRenderer)
|
||||||
|
if se, ok := v.Value.(SetOptioner); ok {
|
||||||
|
for oname, ovalue := range r.options {
|
||||||
|
se.SetOption(oname, ovalue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nr.RegisterFuncs(r)
|
||||||
|
}
|
||||||
|
r.nodeRendererFuncs = make([]NodeRendererFunc, r.maxKind+1)
|
||||||
|
for kind, nr := range r.nodeRendererFuncsTmp {
|
||||||
|
r.nodeRendererFuncs[kind] = nr
|
||||||
|
}
|
||||||
|
r.config = nil
|
||||||
|
r.nodeRendererFuncsTmp = nil
|
||||||
|
})
|
||||||
|
writer, ok := w.(util.BufWriter)
|
||||||
|
if !ok {
|
||||||
|
writer = bufio.NewWriter(w)
|
||||||
|
}
|
||||||
|
err := ast.Walk(n, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
|
||||||
|
s := ast.WalkStatus(ast.WalkContinue)
|
||||||
|
var err error
|
||||||
|
f := r.nodeRendererFuncs[n.Kind()]
|
||||||
|
if f != nil {
|
||||||
|
s, err = f(writer, source, n, entering)
|
||||||
|
}
|
||||||
|
return s, err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return writer.Flush()
|
||||||
|
}
|
539
vendor/github.com/yuin/goldmark/text/reader.go
generated
vendored
Normal file
539
vendor/github.com/yuin/goldmark/text/reader.go
generated
vendored
Normal file
|
@ -0,0 +1,539 @@
|
||||||
|
package text
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// invalidValue marks a lazily-computed field as not yet computed.
const invalidValue = -1

// EOF indicates the end of file.
const EOF = byte(0xff)
|
||||||
|
|
||||||
|
// A Reader interface provides abstracted method for reading text.
|
||||||
|
type Reader interface {
|
||||||
|
io.RuneReader
|
||||||
|
|
||||||
|
// Source returns a source of the reader.
|
||||||
|
Source() []byte
|
||||||
|
|
||||||
|
// ResetPosition resets positions.
|
||||||
|
ResetPosition()
|
||||||
|
|
||||||
|
// Peek returns a byte at current position without advancing the internal pointer.
|
||||||
|
Peek() byte
|
||||||
|
|
||||||
|
// PeekLine returns the current line without advancing the internal pointer.
|
||||||
|
PeekLine() ([]byte, Segment)
|
||||||
|
|
||||||
|
// PrecendingCharacter returns a character just before current internal pointer.
|
||||||
|
PrecendingCharacter() rune
|
||||||
|
|
||||||
|
// Value returns a value of the given segment.
|
||||||
|
Value(Segment) []byte
|
||||||
|
|
||||||
|
// LineOffset returns a distance from the line head to current position.
|
||||||
|
LineOffset() int
|
||||||
|
|
||||||
|
// Position returns current line number and position.
|
||||||
|
Position() (int, Segment)
|
||||||
|
|
||||||
|
// SetPosition sets current line number and position.
|
||||||
|
SetPosition(int, Segment)
|
||||||
|
|
||||||
|
// SetPadding sets padding to the reader.
|
||||||
|
SetPadding(int)
|
||||||
|
|
||||||
|
// Advance advances the internal pointer.
|
||||||
|
Advance(int)
|
||||||
|
|
||||||
|
// AdvanceAndSetPadding advances the internal pointer and add padding to the
|
||||||
|
// reader.
|
||||||
|
AdvanceAndSetPadding(int, int)
|
||||||
|
|
||||||
|
// AdvanceLine advances the internal pointer to the next line head.
|
||||||
|
AdvanceLine()
|
||||||
|
|
||||||
|
// SkipSpaces skips space characters and returns a non-blank line.
|
||||||
|
// If it reaches EOF, returns false.
|
||||||
|
SkipSpaces() (Segment, int, bool)
|
||||||
|
|
||||||
|
// SkipSpaces skips blank lines and returns a non-blank line.
|
||||||
|
// If it reaches EOF, returns false.
|
||||||
|
SkipBlankLines() (Segment, int, bool)
|
||||||
|
|
||||||
|
// Match performs regular expression matching to current line.
|
||||||
|
Match(reg *regexp.Regexp) bool
|
||||||
|
|
||||||
|
// Match performs regular expression searching to current line.
|
||||||
|
FindSubMatch(reg *regexp.Regexp) [][]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type reader struct {
|
||||||
|
source []byte
|
||||||
|
sourceLength int
|
||||||
|
line int
|
||||||
|
peekedLine []byte
|
||||||
|
pos Segment
|
||||||
|
head int
|
||||||
|
lineOffset int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader return a new Reader that can read UTF-8 bytes .
|
||||||
|
func NewReader(source []byte) Reader {
|
||||||
|
r := &reader{
|
||||||
|
source: source,
|
||||||
|
sourceLength: len(source),
|
||||||
|
}
|
||||||
|
r.ResetPosition()
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) ResetPosition() {
|
||||||
|
r.line = -1
|
||||||
|
r.head = 0
|
||||||
|
r.lineOffset = -1
|
||||||
|
r.AdvanceLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) Source() []byte {
|
||||||
|
return r.source
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) Value(seg Segment) []byte {
|
||||||
|
return seg.Value(r.source)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) Peek() byte {
|
||||||
|
if r.pos.Start >= 0 && r.pos.Start < r.sourceLength {
|
||||||
|
if r.pos.Padding != 0 {
|
||||||
|
return space[0]
|
||||||
|
}
|
||||||
|
return r.source[r.pos.Start]
|
||||||
|
}
|
||||||
|
return EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) PeekLine() ([]byte, Segment) {
|
||||||
|
if r.pos.Start >= 0 && r.pos.Start < r.sourceLength {
|
||||||
|
if r.peekedLine == nil {
|
||||||
|
r.peekedLine = r.pos.Value(r.Source())
|
||||||
|
}
|
||||||
|
return r.peekedLine, r.pos
|
||||||
|
}
|
||||||
|
return nil, r.pos
|
||||||
|
}
|
||||||
|
|
||||||
|
// io.RuneReader interface
|
||||||
|
func (r *reader) ReadRune() (rune, int, error) {
|
||||||
|
return readRuneReader(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) LineOffset() int {
|
||||||
|
if r.lineOffset < 0 {
|
||||||
|
v := 0
|
||||||
|
for i := r.head; i < r.pos.Start; i++ {
|
||||||
|
if r.source[i] == '\t' {
|
||||||
|
v += util.TabWidth(v)
|
||||||
|
} else {
|
||||||
|
v++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.lineOffset = v - r.pos.Padding
|
||||||
|
}
|
||||||
|
return r.lineOffset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) PrecendingCharacter() rune {
|
||||||
|
if r.pos.Start <= 0 {
|
||||||
|
if r.pos.Padding != 0 {
|
||||||
|
return rune(' ')
|
||||||
|
}
|
||||||
|
return rune('\n')
|
||||||
|
}
|
||||||
|
i := r.pos.Start - 1
|
||||||
|
for ; i >= 0; i-- {
|
||||||
|
if utf8.RuneStart(r.source[i]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rn, _ := utf8.DecodeRune(r.source[i:])
|
||||||
|
return rn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) Advance(n int) {
|
||||||
|
r.lineOffset = -1
|
||||||
|
if n < len(r.peekedLine) && r.pos.Padding == 0 {
|
||||||
|
r.pos.Start += n
|
||||||
|
r.peekedLine = nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.peekedLine = nil
|
||||||
|
l := r.sourceLength
|
||||||
|
for ; n > 0 && r.pos.Start < l; n-- {
|
||||||
|
if r.pos.Padding != 0 {
|
||||||
|
r.pos.Padding--
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if r.source[r.pos.Start] == '\n' {
|
||||||
|
r.AdvanceLine()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
r.pos.Start++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) AdvanceAndSetPadding(n, padding int) {
|
||||||
|
r.Advance(n)
|
||||||
|
if padding > r.pos.Padding {
|
||||||
|
r.SetPadding(padding)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) AdvanceLine() {
|
||||||
|
r.lineOffset = -1
|
||||||
|
r.peekedLine = nil
|
||||||
|
r.pos.Start = r.pos.Stop
|
||||||
|
r.head = r.pos.Start
|
||||||
|
if r.pos.Start < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
r.pos.Stop = r.sourceLength
|
||||||
|
for i := r.pos.Start; i < r.sourceLength; i++ {
|
||||||
|
c := r.source[i]
|
||||||
|
if c == '\n' {
|
||||||
|
r.pos.Stop = i + 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.line++
|
||||||
|
r.pos.Padding = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) Position() (int, Segment) {
|
||||||
|
return r.line, r.pos
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) SetPosition(line int, pos Segment) {
|
||||||
|
r.lineOffset = -1
|
||||||
|
r.line = line
|
||||||
|
r.pos = pos
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) SetPadding(v int) {
|
||||||
|
r.pos.Padding = v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) SkipSpaces() (Segment, int, bool) {
|
||||||
|
return skipSpacesReader(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) SkipBlankLines() (Segment, int, bool) {
|
||||||
|
return skipBlankLinesReader(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) Match(reg *regexp.Regexp) bool {
|
||||||
|
return matchReader(r, reg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *reader) FindSubMatch(reg *regexp.Regexp) [][]byte {
|
||||||
|
return findSubMatchReader(r, reg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// A BlockReader interface is a reader that is optimized for Blocks.
|
||||||
|
type BlockReader interface {
|
||||||
|
Reader
|
||||||
|
// Reset resets current state and sets new segments to the reader.
|
||||||
|
Reset(segment *Segments)
|
||||||
|
}
|
||||||
|
|
||||||
|
type blockReader struct {
|
||||||
|
source []byte
|
||||||
|
segments *Segments
|
||||||
|
segmentsLength int
|
||||||
|
line int
|
||||||
|
pos Segment
|
||||||
|
head int
|
||||||
|
last int
|
||||||
|
lineOffset int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBlockReader returns a new BlockReader.
|
||||||
|
func NewBlockReader(source []byte, segments *Segments) BlockReader {
|
||||||
|
r := &blockReader{
|
||||||
|
source: source,
|
||||||
|
}
|
||||||
|
if segments != nil {
|
||||||
|
r.Reset(segments)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) ResetPosition() {
|
||||||
|
r.line = -1
|
||||||
|
r.head = 0
|
||||||
|
r.last = 0
|
||||||
|
r.lineOffset = -1
|
||||||
|
r.pos.Start = -1
|
||||||
|
r.pos.Stop = -1
|
||||||
|
r.pos.Padding = 0
|
||||||
|
if r.segmentsLength > 0 {
|
||||||
|
last := r.segments.At(r.segmentsLength - 1)
|
||||||
|
r.last = last.Stop
|
||||||
|
}
|
||||||
|
r.AdvanceLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) Reset(segments *Segments) {
|
||||||
|
r.segments = segments
|
||||||
|
r.segmentsLength = segments.Len()
|
||||||
|
r.ResetPosition()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) Source() []byte {
|
||||||
|
return r.source
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) Value(seg Segment) []byte {
|
||||||
|
line := r.segmentsLength - 1
|
||||||
|
ret := make([]byte, 0, seg.Stop-seg.Start+1)
|
||||||
|
for ; line >= 0; line-- {
|
||||||
|
if seg.Start >= r.segments.At(line).Start {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i := seg.Start
|
||||||
|
for ; line < r.segmentsLength; line++ {
|
||||||
|
s := r.segments.At(line)
|
||||||
|
if i < 0 {
|
||||||
|
i = s.Start
|
||||||
|
}
|
||||||
|
ret = s.ConcatPadding(ret)
|
||||||
|
for ; i < seg.Stop && i < s.Stop; i++ {
|
||||||
|
ret = append(ret, r.source[i])
|
||||||
|
}
|
||||||
|
i = -1
|
||||||
|
if s.Stop > seg.Stop {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// io.RuneReader interface
|
||||||
|
func (r *blockReader) ReadRune() (rune, int, error) {
|
||||||
|
return readRuneReader(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) PrecendingCharacter() rune {
|
||||||
|
if r.pos.Padding != 0 {
|
||||||
|
return rune(' ')
|
||||||
|
}
|
||||||
|
if r.pos.Start <= 0 {
|
||||||
|
return rune('\n')
|
||||||
|
}
|
||||||
|
l := len(r.source)
|
||||||
|
i := r.pos.Start - 1
|
||||||
|
for ; i < l && i >= 0; i-- {
|
||||||
|
if utf8.RuneStart(r.source[i]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if i < 0 || i >= l {
|
||||||
|
return rune('\n')
|
||||||
|
}
|
||||||
|
rn, _ := utf8.DecodeRune(r.source[i:])
|
||||||
|
return rn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) LineOffset() int {
|
||||||
|
if r.lineOffset < 0 {
|
||||||
|
v := 0
|
||||||
|
for i := r.head; i < r.pos.Start; i++ {
|
||||||
|
if r.source[i] == '\t' {
|
||||||
|
v += util.TabWidth(v)
|
||||||
|
} else {
|
||||||
|
v++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.lineOffset = v - r.pos.Padding
|
||||||
|
}
|
||||||
|
return r.lineOffset
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) Peek() byte {
|
||||||
|
if r.line < r.segmentsLength && r.pos.Start >= 0 && r.pos.Start < r.last {
|
||||||
|
if r.pos.Padding != 0 {
|
||||||
|
return space[0]
|
||||||
|
}
|
||||||
|
return r.source[r.pos.Start]
|
||||||
|
}
|
||||||
|
return EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) PeekLine() ([]byte, Segment) {
|
||||||
|
if r.line < r.segmentsLength && r.pos.Start >= 0 && r.pos.Start < r.last {
|
||||||
|
return r.pos.Value(r.source), r.pos
|
||||||
|
}
|
||||||
|
return nil, r.pos
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) Advance(n int) {
|
||||||
|
r.lineOffset = -1
|
||||||
|
|
||||||
|
if n < r.pos.Stop-r.pos.Start && r.pos.Padding == 0 {
|
||||||
|
r.pos.Start += n
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for ; n > 0; n-- {
|
||||||
|
if r.pos.Padding != 0 {
|
||||||
|
r.pos.Padding--
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if r.pos.Start >= r.pos.Stop-1 && r.pos.Stop < r.last {
|
||||||
|
r.AdvanceLine()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
r.pos.Start++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) AdvanceAndSetPadding(n, padding int) {
|
||||||
|
r.Advance(n)
|
||||||
|
if padding > r.pos.Padding {
|
||||||
|
r.SetPadding(padding)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) AdvanceLine() {
|
||||||
|
r.SetPosition(r.line+1, NewSegment(invalidValue, invalidValue))
|
||||||
|
r.head = r.pos.Start
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) Position() (int, Segment) {
|
||||||
|
return r.line, r.pos
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) SetPosition(line int, pos Segment) {
|
||||||
|
r.lineOffset = -1
|
||||||
|
r.line = line
|
||||||
|
if pos.Start == invalidValue {
|
||||||
|
if r.line < r.segmentsLength {
|
||||||
|
s := r.segments.At(line)
|
||||||
|
r.head = s.Start
|
||||||
|
r.pos = s
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
r.pos = pos
|
||||||
|
if r.line < r.segmentsLength {
|
||||||
|
s := r.segments.At(line)
|
||||||
|
r.head = s.Start
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) SetPadding(v int) {
|
||||||
|
r.lineOffset = -1
|
||||||
|
r.pos.Padding = v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) SkipSpaces() (Segment, int, bool) {
|
||||||
|
return skipSpacesReader(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) SkipBlankLines() (Segment, int, bool) {
|
||||||
|
return skipBlankLinesReader(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) Match(reg *regexp.Regexp) bool {
|
||||||
|
return matchReader(r, reg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *blockReader) FindSubMatch(reg *regexp.Regexp) [][]byte {
|
||||||
|
return findSubMatchReader(r, reg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func skipBlankLinesReader(r Reader) (Segment, int, bool) {
|
||||||
|
lines := 0
|
||||||
|
for {
|
||||||
|
line, seg := r.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
return seg, lines, false
|
||||||
|
}
|
||||||
|
if util.IsBlank(line) {
|
||||||
|
lines++
|
||||||
|
r.AdvanceLine()
|
||||||
|
} else {
|
||||||
|
return seg, lines, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func skipSpacesReader(r Reader) (Segment, int, bool) {
|
||||||
|
chars := 0
|
||||||
|
for {
|
||||||
|
line, segment := r.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
return segment, chars, false
|
||||||
|
}
|
||||||
|
for i, c := range line {
|
||||||
|
if util.IsSpace(c) {
|
||||||
|
chars++
|
||||||
|
r.Advance(1)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return segment.WithStart(segment.Start + i + 1), chars, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchReader(r Reader, reg *regexp.Regexp) bool {
|
||||||
|
oldline, oldseg := r.Position()
|
||||||
|
match := reg.FindReaderSubmatchIndex(r)
|
||||||
|
r.SetPosition(oldline, oldseg)
|
||||||
|
if match == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
r.Advance(match[1] - match[0])
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func findSubMatchReader(r Reader, reg *regexp.Regexp) [][]byte {
|
||||||
|
oldline, oldseg := r.Position()
|
||||||
|
match := reg.FindReaderSubmatchIndex(r)
|
||||||
|
r.SetPosition(oldline, oldseg)
|
||||||
|
if match == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
runes := make([]rune, 0, match[1]-match[0])
|
||||||
|
for i := 0; i < match[1]; {
|
||||||
|
r, size, _ := readRuneReader(r)
|
||||||
|
i += size
|
||||||
|
runes = append(runes, r)
|
||||||
|
}
|
||||||
|
result := [][]byte{}
|
||||||
|
for i := 0; i < len(match); i += 2 {
|
||||||
|
result = append(result, []byte(string(runes[match[i]:match[i+1]])))
|
||||||
|
}
|
||||||
|
|
||||||
|
r.SetPosition(oldline, oldseg)
|
||||||
|
r.Advance(match[1] - match[0])
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func readRuneReader(r Reader) (rune, int, error) {
|
||||||
|
line, _ := r.PeekLine()
|
||||||
|
if line == nil {
|
||||||
|
return 0, 0, io.EOF
|
||||||
|
}
|
||||||
|
rn, size := utf8.DecodeRune(line)
|
||||||
|
if rn == utf8.RuneError {
|
||||||
|
return 0, 0, io.EOF
|
||||||
|
}
|
||||||
|
r.Advance(size)
|
||||||
|
return rn, size, nil
|
||||||
|
}
|
209
vendor/github.com/yuin/goldmark/text/segment.go
generated
vendored
Normal file
209
vendor/github.com/yuin/goldmark/text/segment.go
generated
vendored
Normal file
|
@ -0,0 +1,209 @@
|
||||||
|
package text
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"github.com/yuin/goldmark/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
var space = []byte(" ")
|
||||||
|
|
||||||
|
// A Segment struct holds information about source potisions.
|
||||||
|
type Segment struct {
|
||||||
|
// Start is a start position of the segment.
|
||||||
|
Start int
|
||||||
|
|
||||||
|
// Stop is a stop position of the segment.
|
||||||
|
// This value should be excluded.
|
||||||
|
Stop int
|
||||||
|
|
||||||
|
// Padding is a padding length of the segment.
|
||||||
|
Padding int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSegment return a new Segment.
|
||||||
|
func NewSegment(start, stop int) Segment {
|
||||||
|
return Segment{
|
||||||
|
Start: start,
|
||||||
|
Stop: stop,
|
||||||
|
Padding: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSegmentPadding returns a new Segment with the given padding.
|
||||||
|
func NewSegmentPadding(start, stop, n int) Segment {
|
||||||
|
return Segment{
|
||||||
|
Start: start,
|
||||||
|
Stop: stop,
|
||||||
|
Padding: n,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns a value of the segment.
|
||||||
|
func (t *Segment) Value(buffer []byte) []byte {
|
||||||
|
if t.Padding == 0 {
|
||||||
|
return buffer[t.Start:t.Stop]
|
||||||
|
}
|
||||||
|
result := make([]byte, 0, t.Padding+t.Stop-t.Start+1)
|
||||||
|
result = append(result, bytes.Repeat(space, t.Padding)...)
|
||||||
|
return append(result, buffer[t.Start:t.Stop]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns a length of the segment.
|
||||||
|
func (t *Segment) Len() int {
|
||||||
|
return t.Stop - t.Start + t.Padding
|
||||||
|
}
|
||||||
|
|
||||||
|
// Between returns a segment between this segment and the given segment.
|
||||||
|
func (t *Segment) Between(other Segment) Segment {
|
||||||
|
if t.Stop != other.Stop {
|
||||||
|
panic("invalid state")
|
||||||
|
}
|
||||||
|
return NewSegmentPadding(
|
||||||
|
t.Start,
|
||||||
|
other.Start,
|
||||||
|
t.Padding-other.Padding,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmpty returns true if this segment is empty, otherwise false.
|
||||||
|
func (t *Segment) IsEmpty() bool {
|
||||||
|
return t.Start >= t.Stop && t.Padding == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimRightSpace returns a new segment by slicing off all trailing
|
||||||
|
// space characters.
|
||||||
|
func (t *Segment) TrimRightSpace(buffer []byte) Segment {
|
||||||
|
v := buffer[t.Start:t.Stop]
|
||||||
|
l := util.TrimRightSpaceLength(v)
|
||||||
|
if l == len(v) {
|
||||||
|
return NewSegment(t.Start, t.Start)
|
||||||
|
}
|
||||||
|
return NewSegmentPadding(t.Start, t.Stop-l, t.Padding)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimLeftSpace returns a new segment by slicing off all leading
|
||||||
|
// space characters including padding.
|
||||||
|
func (t *Segment) TrimLeftSpace(buffer []byte) Segment {
|
||||||
|
v := buffer[t.Start:t.Stop]
|
||||||
|
l := util.TrimLeftSpaceLength(v)
|
||||||
|
return NewSegment(t.Start+l, t.Stop)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimLeftSpaceWidth returns a new segment by slicing off leading space
|
||||||
|
// characters until the given width.
|
||||||
|
func (t *Segment) TrimLeftSpaceWidth(width int, buffer []byte) Segment {
|
||||||
|
padding := t.Padding
|
||||||
|
for ; width > 0; width-- {
|
||||||
|
if padding == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
padding--
|
||||||
|
}
|
||||||
|
if width == 0 {
|
||||||
|
return NewSegmentPadding(t.Start, t.Stop, padding)
|
||||||
|
}
|
||||||
|
text := buffer[t.Start:t.Stop]
|
||||||
|
start := t.Start
|
||||||
|
for _, c := range text {
|
||||||
|
if start >= t.Stop-1 || width <= 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if c == ' ' {
|
||||||
|
width--
|
||||||
|
} else if c == '\t' {
|
||||||
|
width -= 4
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
start++
|
||||||
|
}
|
||||||
|
if width < 0 {
|
||||||
|
padding = width * -1
|
||||||
|
}
|
||||||
|
return NewSegmentPadding(start, t.Stop, padding)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStart returns a new Segment with same value except Start.
|
||||||
|
func (t *Segment) WithStart(v int) Segment {
|
||||||
|
return NewSegmentPadding(v, t.Stop, t.Padding)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStop returns a new Segment with same value except Stop.
|
||||||
|
func (t *Segment) WithStop(v int) Segment {
|
||||||
|
return NewSegmentPadding(t.Start, v, t.Padding)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConcatPadding concats the padding to the given slice.
|
||||||
|
func (t *Segment) ConcatPadding(v []byte) []byte {
|
||||||
|
if t.Padding > 0 {
|
||||||
|
return append(v, bytes.Repeat(space, t.Padding)...)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Segments is a collection of the Segment.
|
||||||
|
type Segments struct {
|
||||||
|
values []Segment
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSegments return a new Segments.
|
||||||
|
func NewSegments() *Segments {
|
||||||
|
return &Segments{
|
||||||
|
values: nil,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append appends the given segment after the tail of the collection.
|
||||||
|
func (s *Segments) Append(t Segment) {
|
||||||
|
if s.values == nil {
|
||||||
|
s.values = make([]Segment, 0, 20)
|
||||||
|
}
|
||||||
|
s.values = append(s.values, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendAll appends all elements of given segments after the tail of the collection.
|
||||||
|
func (s *Segments) AppendAll(t []Segment) {
|
||||||
|
if s.values == nil {
|
||||||
|
s.values = make([]Segment, 0, 20)
|
||||||
|
}
|
||||||
|
s.values = append(s.values, t...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the length of the collection.
|
||||||
|
func (s *Segments) Len() int {
|
||||||
|
if s.values == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return len(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// At returns a segment at the given index.
|
||||||
|
func (s *Segments) At(i int) Segment {
|
||||||
|
return s.values[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the given Segment.
|
||||||
|
func (s *Segments) Set(i int, v Segment) {
|
||||||
|
s.values[i] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSliced replace the collection with a subsliced value.
|
||||||
|
func (s *Segments) SetSliced(lo, hi int) {
|
||||||
|
s.values = s.values[lo:hi]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sliced returns a subslice of the collection.
|
||||||
|
func (s *Segments) Sliced(lo, hi int) []Segment {
|
||||||
|
return s.values[lo:hi]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear delete all element of the collction.
|
||||||
|
func (s *Segments) Clear() {
|
||||||
|
s.values = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unshift insert the given Segment to head of the collection.
|
||||||
|
func (s *Segments) Unshift(v Segment) {
|
||||||
|
s.values = append(s.values[0:1], s.values[0:]...)
|
||||||
|
s.values[0] = v
|
||||||
|
}
|
2142
vendor/github.com/yuin/goldmark/util/html5entities.go
generated
vendored
Normal file
2142
vendor/github.com/yuin/goldmark/util/html5entities.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
897
vendor/github.com/yuin/goldmark/util/util.go
generated
vendored
Normal file
897
vendor/github.com/yuin/goldmark/util/util.go
generated
vendored
Normal file
|
@ -0,0 +1,897 @@
|
||||||
|
// Package util provides utility functions for the goldmark.
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A CopyOnWriteBuffer is a byte buffer that copies buffer when
|
||||||
|
// it need to be changed.
|
||||||
|
type CopyOnWriteBuffer struct {
|
||||||
|
buffer []byte
|
||||||
|
copied bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCopyOnWriteBuffer returns a new CopyOnWriteBuffer.
|
||||||
|
func NewCopyOnWriteBuffer(buffer []byte) CopyOnWriteBuffer {
|
||||||
|
return CopyOnWriteBuffer{
|
||||||
|
buffer: buffer,
|
||||||
|
copied: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes given bytes to the buffer.
|
||||||
|
func (b *CopyOnWriteBuffer) Write(value []byte) {
|
||||||
|
if !b.copied {
|
||||||
|
b.buffer = make([]byte, 0, len(b.buffer)+20)
|
||||||
|
b.copied = true
|
||||||
|
}
|
||||||
|
b.buffer = append(b.buffer, value...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteByte writes the given byte to the buffer.
|
||||||
|
func (b *CopyOnWriteBuffer) WriteByte(c byte) {
|
||||||
|
if !b.copied {
|
||||||
|
b.buffer = make([]byte, 0, len(b.buffer)+20)
|
||||||
|
b.copied = true
|
||||||
|
}
|
||||||
|
b.buffer = append(b.buffer, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns bytes of this buffer.
|
||||||
|
func (b *CopyOnWriteBuffer) Bytes() []byte {
|
||||||
|
return b.buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCopied returns true if buffer has been copied, otherwise false.
|
||||||
|
func (b *CopyOnWriteBuffer) IsCopied() bool {
|
||||||
|
return b.copied
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEscapedPunctuation returns true if caracter at a given index i
|
||||||
|
// is an escaped punctuation, otherwise false.
|
||||||
|
func IsEscapedPunctuation(source []byte, i int) bool {
|
||||||
|
return source[i] == '\\' && i < len(source)-1 && IsPunct(source[i+1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadWhile read the given source while pred is true.
|
||||||
|
func ReadWhile(source []byte, index [2]int, pred func(byte) bool) (int, bool) {
|
||||||
|
j := index[0]
|
||||||
|
ok := false
|
||||||
|
for ; j < index[1]; j++ {
|
||||||
|
c1 := source[j]
|
||||||
|
if pred(c1) {
|
||||||
|
ok = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return j, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBlank returns true if the given string is all space characters.
|
||||||
|
func IsBlank(bs []byte) bool {
|
||||||
|
for _, b := range bs {
|
||||||
|
if !IsSpace(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// VisualizeSpaces visualize invisible space characters.
|
||||||
|
func VisualizeSpaces(bs []byte) []byte {
|
||||||
|
bs = bytes.Replace(bs, []byte(" "), []byte("[SPACE]"), -1)
|
||||||
|
bs = bytes.Replace(bs, []byte("\t"), []byte("[TAB]"), -1)
|
||||||
|
bs = bytes.Replace(bs, []byte("\n"), []byte("[NEWLINE]\n"), -1)
|
||||||
|
bs = bytes.Replace(bs, []byte("\r"), []byte("[CR]"), -1)
|
||||||
|
return bs
|
||||||
|
}
|
||||||
|
|
||||||
|
// TabWidth calculates actual width of a tab at the given position.
|
||||||
|
func TabWidth(currentPos int) int {
|
||||||
|
return 4 - currentPos%4
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndentPosition searches an indent position with the given width for the given line.
|
||||||
|
// If the line contains tab characters, paddings may be not zero.
|
||||||
|
// currentPos==0 and width==2:
|
||||||
|
//
|
||||||
|
// position: 0 1
|
||||||
|
// [TAB]aaaa
|
||||||
|
// width: 1234 5678
|
||||||
|
//
|
||||||
|
// width=2 is in the tab character. In this case, IndentPosition returns
|
||||||
|
// (pos=1, padding=2)
|
||||||
|
func IndentPosition(bs []byte, currentPos, width int) (pos, padding int) {
|
||||||
|
if width == 0 {
|
||||||
|
return 0, 0
|
||||||
|
}
|
||||||
|
w := 0
|
||||||
|
l := len(bs)
|
||||||
|
i := 0
|
||||||
|
hasTab := false
|
||||||
|
for ; i < l; i++ {
|
||||||
|
if bs[i] == '\t' {
|
||||||
|
w += TabWidth(currentPos + w)
|
||||||
|
hasTab = true
|
||||||
|
} else if bs[i] == ' ' {
|
||||||
|
w++
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if w >= width {
|
||||||
|
if !hasTab {
|
||||||
|
return width, 0
|
||||||
|
}
|
||||||
|
return i, w - width
|
||||||
|
}
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndentPositionPadding searches an indent position with the given width for the given line.
|
||||||
|
// This function is mostly same as IndentPosition except this function
|
||||||
|
// takes account into additional paddings.
|
||||||
|
func IndentPositionPadding(bs []byte, currentPos, paddingv, width int) (pos, padding int) {
|
||||||
|
if width == 0 {
|
||||||
|
return 0, paddingv
|
||||||
|
}
|
||||||
|
w := 0
|
||||||
|
i := 0
|
||||||
|
l := len(bs)
|
||||||
|
for ; i < l; i++ {
|
||||||
|
if bs[i] == '\t' {
|
||||||
|
w += TabWidth(currentPos + w)
|
||||||
|
} else if bs[i] == ' ' {
|
||||||
|
w++
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if w >= width {
|
||||||
|
return i - paddingv, w - width
|
||||||
|
}
|
||||||
|
return -1, -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// DedentPosition dedents lines by the given width.
|
||||||
|
func DedentPosition(bs []byte, currentPos, width int) (pos, padding int) {
|
||||||
|
if width == 0 {
|
||||||
|
return 0, 0
|
||||||
|
}
|
||||||
|
w := 0
|
||||||
|
l := len(bs)
|
||||||
|
i := 0
|
||||||
|
for ; i < l; i++ {
|
||||||
|
if bs[i] == '\t' {
|
||||||
|
w += TabWidth(currentPos + w)
|
||||||
|
} else if bs[i] == ' ' {
|
||||||
|
w++
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if w >= width {
|
||||||
|
return i, w - width
|
||||||
|
}
|
||||||
|
return i, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// DedentPositionPadding dedents lines by the given width.
|
||||||
|
// This function is mostly same as DedentPosition except this function
|
||||||
|
// takes account into additional paddings.
|
||||||
|
func DedentPositionPadding(bs []byte, currentPos, paddingv, width int) (pos, padding int) {
|
||||||
|
if width == 0 {
|
||||||
|
return 0, paddingv
|
||||||
|
}
|
||||||
|
|
||||||
|
w := 0
|
||||||
|
i := 0
|
||||||
|
l := len(bs)
|
||||||
|
for ; i < l; i++ {
|
||||||
|
if bs[i] == '\t' {
|
||||||
|
w += TabWidth(currentPos + w)
|
||||||
|
} else if bs[i] == ' ' {
|
||||||
|
w++
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if w >= width {
|
||||||
|
return i - paddingv, w - width
|
||||||
|
}
|
||||||
|
return i - paddingv, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// IndentWidth calculate an indent width for the given line.
|
||||||
|
func IndentWidth(bs []byte, currentPos int) (width, pos int) {
|
||||||
|
l := len(bs)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
b := bs[i]
|
||||||
|
if b == ' ' {
|
||||||
|
width++
|
||||||
|
pos++
|
||||||
|
} else if b == '\t' {
|
||||||
|
width += TabWidth(currentPos + width)
|
||||||
|
pos++
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstNonSpacePosition returns a potisoin line that is a first nonspace
|
||||||
|
// character.
|
||||||
|
func FirstNonSpacePosition(bs []byte) int {
|
||||||
|
i := 0
|
||||||
|
for ; i < len(bs); i++ {
|
||||||
|
c := bs[i]
|
||||||
|
if c == ' ' || c == '\t' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c == '\n' {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindClosure returns a position that closes the given opener.
|
||||||
|
// If codeSpan is set true, it ignores characters in code spans.
|
||||||
|
// If allowNesting is set true, closures correspond to nested opener will be
|
||||||
|
// ignored.
|
||||||
|
func FindClosure(bs []byte, opener, closure byte, codeSpan, allowNesting bool) int {
|
||||||
|
i := 0
|
||||||
|
opened := 1
|
||||||
|
codeSpanOpener := 0
|
||||||
|
for i < len(bs) {
|
||||||
|
c := bs[i]
|
||||||
|
if codeSpan && codeSpanOpener != 0 && c == '`' {
|
||||||
|
codeSpanCloser := 0
|
||||||
|
for ; i < len(bs); i++ {
|
||||||
|
if bs[i] == '`' {
|
||||||
|
codeSpanCloser++
|
||||||
|
} else {
|
||||||
|
i--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if codeSpanCloser == codeSpanOpener {
|
||||||
|
codeSpanOpener = 0
|
||||||
|
}
|
||||||
|
} else if c == '\\' && i < len(bs)-1 && IsPunct(bs[i+1]) {
|
||||||
|
i += 2
|
||||||
|
continue
|
||||||
|
} else if codeSpan && codeSpanOpener == 0 && c == '`' {
|
||||||
|
for ; i < len(bs); i++ {
|
||||||
|
if bs[i] == '`' {
|
||||||
|
codeSpanOpener++
|
||||||
|
} else {
|
||||||
|
i--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if (codeSpan && codeSpanOpener == 0) || !codeSpan {
|
||||||
|
if c == closure {
|
||||||
|
opened--
|
||||||
|
if opened == 0 {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
} else if c == opener {
|
||||||
|
if !allowNesting {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
opened++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimLeft trims characters in the given s from head of the source.
|
||||||
|
// bytes.TrimLeft offers same functionalities, but bytes.TrimLeft
|
||||||
|
// allocates new buffer for the result.
|
||||||
|
func TrimLeft(source, b []byte) []byte {
|
||||||
|
i := 0
|
||||||
|
for ; i < len(source); i++ {
|
||||||
|
c := source[i]
|
||||||
|
found := false
|
||||||
|
for j := 0; j < len(b); j++ {
|
||||||
|
if c == b[j] {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return source[i:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimRight trims characters in the given s from tail of the source.
|
||||||
|
func TrimRight(source, b []byte) []byte {
|
||||||
|
i := len(source) - 1
|
||||||
|
for ; i >= 0; i-- {
|
||||||
|
c := source[i]
|
||||||
|
found := false
|
||||||
|
for j := 0; j < len(b); j++ {
|
||||||
|
if c == b[j] {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return source[:i+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimLeftLength returns a length of leading specified characters.
|
||||||
|
func TrimLeftLength(source, s []byte) int {
|
||||||
|
return len(source) - len(TrimLeft(source, s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimRightLength returns a length of trailing specified characters.
|
||||||
|
func TrimRightLength(source, s []byte) int {
|
||||||
|
return len(source) - len(TrimRight(source, s))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimLeftSpaceLength returns a length of leading space characters.
|
||||||
|
func TrimLeftSpaceLength(source []byte) int {
|
||||||
|
i := 0
|
||||||
|
for ; i < len(source); i++ {
|
||||||
|
if !IsSpace(source[i]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimRightSpaceLength returns a length of trailing space characters.
|
||||||
|
func TrimRightSpaceLength(source []byte) int {
|
||||||
|
l := len(source)
|
||||||
|
i := l - 1
|
||||||
|
for ; i >= 0; i-- {
|
||||||
|
if !IsSpace(source[i]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if i < 0 {
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
return l - 1 - i
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimLeftSpace returns a subslice of the given string by slicing off all leading
// space characters (the package-level spaces set: space, tab, LF, VT, FF, CR).
func TrimLeftSpace(source []byte) []byte {
	return TrimLeft(source, spaces)
}

// TrimRightSpace returns a subslice of the given string by slicing off all trailing
// space characters (the package-level spaces set: space, tab, LF, VT, FF, CR).
func TrimRightSpace(source []byte) []byte {
	return TrimRight(source, spaces)
}
|
||||||
|
|
||||||
|
// ReplaceSpaces replaces sequence of spaces with the given repl.
// The result aliases source when no space run was found; a new buffer is
// allocated lazily only on the first replacement (copy-on-write).
func ReplaceSpaces(source []byte, repl byte) []byte {
	var ret []byte
	// start is the index where the current space run began, -1 when not in a run.
	start := -1
	for i, c := range source {
		iss := IsSpace(c)
		if start < 0 && iss {
			// entering a space run
			start = i
			continue
		} else if start >= 0 && iss {
			// still inside the run
			continue
		} else if start >= 0 {
			// run just ended: copy the untouched prefix once, then emit repl
			if ret == nil {
				ret = make([]byte, 0, len(source))
				ret = append(ret, source[:start]...)
			}
			ret = append(ret, repl)
			start = -1
		}
		if ret != nil {
			ret = append(ret, c)
		}
	}
	// trailing space run; only emitted when a copy was already made
	// (a source consisting solely of spaces is returned unchanged)
	if start >= 0 && ret != nil {
		ret = append(ret, repl)
	}
	if ret == nil {
		// no replacement happened; return the source untouched
		return source
	}
	return ret
}
|
||||||
|
|
||||||
|
// ToRune decode given bytes start at pos and returns a rune.
// pos may point into the middle of a multi-byte character; decoding
// backs up to the leading byte of that character first.
func ToRune(source []byte, pos int) rune {
	i := pos
	for i >= 0 && !utf8.RuneStart(source[i]) {
		i--
	}
	r, _ := utf8.DecodeRune(source[i:])
	return r
}
|
||||||
|
|
||||||
|
// ToValidRune returns 0xFFFD if the given rune is invalid, otherwise v.
func ToValidRune(v rune) rune {
	if v != 0 && utf8.ValidRune(v) {
		return v
	}
	return rune(0xFFFD)
}
|
||||||
|
|
||||||
|
// ToLinkReference convert given bytes into a valid link reference string.
|
||||||
|
// ToLinkReference trims leading and trailing spaces and convert into lower
|
||||||
|
// case and replace spaces with a single space character.
|
||||||
|
func ToLinkReference(v []byte) string {
|
||||||
|
v = TrimLeftSpace(v)
|
||||||
|
v = TrimRightSpace(v)
|
||||||
|
return strings.ToLower(string(ReplaceSpaces(v, ' ')))
|
||||||
|
}
|
||||||
|
|
||||||
|
var htmlEscapeTable = [256][]byte{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("""), nil, nil, nil, []byte("&"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("<"), nil, []byte(">"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}
|
||||||
|
|
||||||
|
// EscapeHTMLByte returns HTML escaped bytes if the given byte should be escaped,
// otherwise nil.
func EscapeHTMLByte(b byte) []byte {
	// htmlEscapeTable maps '"', '&', '<', '>' to their entities; all others map to nil.
	return htmlEscapeTable[b]
}
|
||||||
|
|
||||||
|
// EscapeHTML escapes characters that should be escaped in HTML text.
// The returned slice aliases v when no escaping was needed (copy-on-write).
func EscapeHTML(v []byte) []byte {
	cob := NewCopyOnWriteBuffer(v)
	// n is the start of the pending unescaped span.
	n := 0
	for i := 0; i < len(v); i++ {
		c := v[i]
		escaped := htmlEscapeTable[c]
		if escaped != nil {
			cob.Write(v[n:i])
			cob.Write(escaped)
			n = i + 1
		}
	}
	// flush the tail only if an escape forced a copy
	if cob.IsCopied() {
		cob.Write(v[n:])
	}
	return cob.Bytes()
}
|
||||||
|
|
||||||
|
// UnescapePunctuations unescapes backslash escaped punctuations.
// The returned slice aliases source when no escape was found (copy-on-write).
func UnescapePunctuations(source []byte) []byte {
	cob := NewCopyOnWriteBuffer(source)
	limit := len(source)
	// n is the start of the pending literal span.
	n := 0
	for i := 0; i < limit; {
		c := source[i]
		if i < limit-1 && c == '\\' && IsPunct(source[i+1]) {
			// drop the backslash, keep the punctuation character
			cob.Write(source[n:i])
			cob.WriteByte(source[i+1])
			i += 2
			n = i
			continue
		}
		i++
	}
	if cob.IsCopied() {
		cob.Write(source[n:])
	}
	return cob.Bytes()
}
|
||||||
|
|
||||||
|
// ResolveNumericReferences resolve numeric references like 'Ӓ" .
|
||||||
|
func ResolveNumericReferences(source []byte) []byte {
|
||||||
|
cob := NewCopyOnWriteBuffer(source)
|
||||||
|
buf := make([]byte, 6, 6)
|
||||||
|
limit := len(source)
|
||||||
|
ok := false
|
||||||
|
n := 0
|
||||||
|
for i := 0; i < limit; i++ {
|
||||||
|
if source[i] == '&' {
|
||||||
|
pos := i
|
||||||
|
next := i + 1
|
||||||
|
if next < limit && source[next] == '#' {
|
||||||
|
nnext := next + 1
|
||||||
|
if nnext < limit {
|
||||||
|
nc := source[nnext]
|
||||||
|
// code point like #x22;
|
||||||
|
if nnext < limit && nc == 'x' || nc == 'X' {
|
||||||
|
start := nnext + 1
|
||||||
|
i, ok = ReadWhile(source, [2]int{start, limit}, IsHexDecimal)
|
||||||
|
if ok && i < limit && source[i] == ';' {
|
||||||
|
v, _ := strconv.ParseUint(BytesToReadOnlyString(source[start:i]), 16, 32)
|
||||||
|
cob.Write(source[n:pos])
|
||||||
|
n = i + 1
|
||||||
|
runeSize := utf8.EncodeRune(buf, ToValidRune(rune(v)))
|
||||||
|
cob.Write(buf[:runeSize])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// code point like #1234;
|
||||||
|
} else if nc >= '0' && nc <= '9' {
|
||||||
|
start := nnext
|
||||||
|
i, ok = ReadWhile(source, [2]int{start, limit}, IsNumeric)
|
||||||
|
if ok && i < limit && i-start < 8 && source[i] == ';' {
|
||||||
|
v, _ := strconv.ParseUint(BytesToReadOnlyString(source[start:i]), 0, 32)
|
||||||
|
cob.Write(source[n:pos])
|
||||||
|
n = i + 1
|
||||||
|
runeSize := utf8.EncodeRune(buf, ToValidRune(rune(v)))
|
||||||
|
cob.Write(buf[:runeSize])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i = next - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if cob.IsCopied() {
|
||||||
|
cob.Write(source[n:])
|
||||||
|
}
|
||||||
|
return cob.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolveEntityNames resolves named entity references like "&ouml;" into
// their replacement characters. Unknown names are left untouched.
// The result aliases source when no entity is found (copy-on-write).
func ResolveEntityNames(source []byte) []byte {
	cob := NewCopyOnWriteBuffer(source)
	limit := len(source)
	ok := false
	// n is the start of the pending literal span.
	n := 0
	for i := 0; i < limit; i++ {
		if source[i] == '&' {
			pos := i
			next := i + 1
			// numeric references ("&#...") are not handled here
			if !(next < limit && source[next] == '#') {
				start := next
				i, ok = ReadWhile(source, [2]int{start, limit}, IsAlphaNumeric)
				if ok && i < limit && source[i] == ';' {
					name := BytesToReadOnlyString(source[start:i])
					// note: ok is deliberately shadowed inside this block
					entity, ok := LookUpHTML5EntityByName(name)
					if ok {
						cob.Write(source[n:pos])
						n = i + 1
						cob.Write(entity.Characters)
						continue
					}
				}
			}
			// not a known entity; resume scanning right after the '&'
			i = next - 1
		}
	}
	if cob.IsCopied() {
		cob.Write(source[n:])
	}
	return cob.Bytes()
}
|
||||||
|
|
||||||
|
var htmlSpace = []byte("%20")
|
||||||
|
|
||||||
|
// URLEscape escape the given URL.
|
||||||
|
// If resolveReference is set true:
|
||||||
|
// 1. unescape punctuations
|
||||||
|
// 2. resolve numeric references
|
||||||
|
// 3. resolve entity references
|
||||||
|
//
|
||||||
|
// URL encoded values (%xx) are keeped as is.
|
||||||
|
func URLEscape(v []byte, resolveReference bool) []byte {
|
||||||
|
if resolveReference {
|
||||||
|
v = UnescapePunctuations(v)
|
||||||
|
v = ResolveNumericReferences(v)
|
||||||
|
v = ResolveEntityNames(v)
|
||||||
|
}
|
||||||
|
cob := NewCopyOnWriteBuffer(v)
|
||||||
|
limit := len(v)
|
||||||
|
n := 0
|
||||||
|
|
||||||
|
for i := 0; i < limit; {
|
||||||
|
c := v[i]
|
||||||
|
if urlEscapeTable[c] == 1 {
|
||||||
|
i++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c == '%' && i+2 < limit && IsHexDecimal(v[i+1]) && IsHexDecimal(v[i+1]) {
|
||||||
|
i += 3
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
u8len := utf8lenTable[c]
|
||||||
|
if u8len == 99 { // invalid utf8 leading byte, skip it
|
||||||
|
i++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c == ' ' {
|
||||||
|
cob.Write(v[n:i])
|
||||||
|
cob.Write(htmlSpace)
|
||||||
|
i++
|
||||||
|
n = i
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if int(u8len) >= len(v) {
|
||||||
|
u8len = int8(len(v) - 1)
|
||||||
|
}
|
||||||
|
if u8len == 0 {
|
||||||
|
i++
|
||||||
|
n = i
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cob.Write(v[n:i])
|
||||||
|
stop := i + int(u8len)
|
||||||
|
if stop > len(v) {
|
||||||
|
i++
|
||||||
|
n = i
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cob.Write(StringToReadOnlyBytes(url.QueryEscape(string(v[i:stop]))))
|
||||||
|
i += int(u8len)
|
||||||
|
n = i
|
||||||
|
}
|
||||||
|
if cob.IsCopied() && n < limit {
|
||||||
|
cob.Write(v[n:])
|
||||||
|
}
|
||||||
|
return cob.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindURLIndex returns a stop index value if the given bytes seem an URL.
// This function is equivalent to [A-Za-z][A-Za-z0-9.+-]{1,31}:[^<>\x00-\x20]* .
func FindURLIndex(b []byte) int {
	i := 0
	// the scheme must start with a letter (urlTable bits 1|2|4 all set)
	if !(len(b) > 0 && urlTable[b[i]]&7 == 7) {
		return -1
	}
	i++
	// consume the rest of the scheme: letters, digits, '.', '+', '-' (bit 4)
	for ; i < len(b); i++ {
		c := b[i]
		if urlTable[c]&4 != 4 {
			break
		}
	}
	// the scheme must be 2..32 characters and cannot end the input
	if i == 1 || i > 33 || i >= len(b) {
		return -1
	}
	if b[i] != ':' {
		return -1
	}
	i++
	// consume the remainder: any byte except '<', '>' and \x00-\x20 (bit 1)
	for ; i < len(b); i++ {
		c := b[i]
		if urlTable[c]&1 != 1 {
			break
		}
	}
	return i
}
|
||||||
|
|
||||||
|
// emailDomainRegexp matches a DNS-style domain: dot-separated labels of up to
// 63 alphanumeric/hyphen characters, not starting or ending with a hyphen.
var emailDomainRegexp = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*`)

// FindEmailIndex returns a stop index value if the given bytes seem an email address.
func FindEmailIndex(b []byte) int {
	// TODO: eliminate regexps
	i := 0
	// local part: bytes permitted by emailTable
	for ; i < len(b); i++ {
		c := b[i]
		if emailTable[c]&1 != 1 {
			break
		}
	}
	if i == 0 {
		return -1
	}
	if i >= len(b) || b[i] != '@' {
		return -1
	}
	i++
	if i >= len(b) {
		return -1
	}
	// domain part validated by the regexp anchored just after the '@'
	match := emailDomainRegexp.FindSubmatchIndex(b[i:])
	if match == nil {
		return -1
	}
	return i + match[1]
}
|
||||||
|
|
||||||
|
var spaces = []byte(" \t\n\x0b\x0c\x0d")
|
||||||
|
|
||||||
|
var spaceTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||||
|
|
||||||
|
var punctTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||||
|
|
||||||
|
// a-zA-Z0-9, ;/?:@&=+$,-_.!~*'()#
|
||||||
|
var urlEscapeTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||||
|
|
||||||
|
var utf8lenTable = [256]int8{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 99, 99, 99, 99, 99, 99, 99, 99}
|
||||||
|
|
||||||
|
var urlTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 1, 0, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
|
||||||
|
|
||||||
|
var emailTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||||
|
|
||||||
|
// UTF8Len returns a byte length of the utf-8 character.
// For bytes that cannot start a valid UTF-8 sequence the table yields 99
// (treated as an invalid leading byte by callers).
func UTF8Len(b byte) int8 {
	return utf8lenTable[b]
}
|
||||||
|
|
||||||
|
// IsPunct returns true if the given character is a punctuation, otherwise false.
// Only ASCII punctuation is recognized (per punctTable).
func IsPunct(c byte) bool {
	return punctTable[c] == 1
}
|
||||||
|
|
||||||
|
// IsSpace returns true if the given character is a space, otherwise false.
// "Space" here means tab, LF, VT, FF, CR or ' ' (per spaceTable).
func IsSpace(c byte) bool {
	return spaceTable[c] == 1
}
|
||||||
|
|
||||||
|
// IsNumeric returns true if the given character is a numeric, otherwise false.
func IsNumeric(c byte) bool {
	return '0' <= c && c <= '9'
}
|
||||||
|
|
||||||
|
// IsHexDecimal returns true if the given character is a hexdecimal, otherwise false.
func IsHexDecimal(c byte) bool {
	switch {
	case '0' <= c && c <= '9', 'a' <= c && c <= 'f', 'A' <= c && c <= 'F':
		return true
	default:
		return false
	}
}
|
||||||
|
|
||||||
|
// IsAlphaNumeric returns true if the given character is a alphabet or a numeric, otherwise false.
func IsAlphaNumeric(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
		return true
	default:
		return false
	}
}
|
||||||
|
|
||||||
|
// A BufWriter is a subset of the bufio.Writer .
type BufWriter interface {
	io.Writer
	// Available returns how many bytes are unused in the buffer.
	Available() int
	// Buffered returns the number of bytes that have been written into the current buffer.
	Buffered() int
	// Flush writes any buffered data to the underlying writer.
	Flush() error
	// WriteByte writes a single byte.
	WriteByte(c byte) error
	// WriteRune writes a single rune, returning its encoded size.
	WriteRune(r rune) (size int, err error)
	// WriteString writes a string.
	WriteString(s string) (int, error)
}
|
||||||
|
|
||||||
|
// A PrioritizedValue struct holds pair of an arbitrary value and a priority.
type PrioritizedValue struct {
	// Value is an arbitrary value that you want to prioritize.
	Value interface{}
	// Priority is a priority of the value; lower sorts first (see Sort).
	Priority int
}

// PrioritizedSlice is a slice of the PrioritizedValues
type PrioritizedSlice []PrioritizedValue
|
||||||
|
|
||||||
|
// Sort sorts the PrioritizedSlice in ascending order.
// Note: sort.Slice is not stable, so equal priorities may be reordered.
func (s PrioritizedSlice) Sort() {
	sort.Slice(s, func(i, j int) bool {
		return s[i].Priority < s[j].Priority
	})
}
|
||||||
|
|
||||||
|
// Remove removes the given value from this slice.
|
||||||
|
func (s PrioritizedSlice) Remove(v interface{}) PrioritizedSlice {
|
||||||
|
i := 0
|
||||||
|
found := false
|
||||||
|
for ; i < len(s); i++ {
|
||||||
|
if s[i].Value == v {
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return append(s[:i], s[i+1:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prioritized returns a new PrioritizedValue holding v with the given priority.
func Prioritized(v interface{}, priority int) PrioritizedValue {
	return PrioritizedValue{v, priority}
}
|
||||||
|
|
||||||
|
// bytesHash computes a djb2 hash of b.
func bytesHash(b []byte) uint64 {
	var hash uint64 = 5381
	for _, c := range b {
		hash = hash*33 + uint64(c)
	}
	return hash
}
|
||||||
|
|
||||||
|
// BytesFilter is a efficient data structure for checking whether bytes exist or not.
// BytesFilter is thread-safe.
// NOTE(review): the bytesFilter implementation below takes no locks; the
// thread-safety claim appears to rely on all Adds happening before any
// concurrent Contains — confirm before mutating a shared filter.
type BytesFilter interface {
	// Add adds given bytes to this set.
	Add([]byte)

	// Contains return true if this set contains given bytes, otherwise false.
	Contains([]byte) bool

	// Extend copies this filter and adds given bytes to new filter.
	Extend(...[]byte) BytesFilter
}

type bytesFilter struct {
	// chars is a bitmap: bit i of chars[b] is set when some stored element
	// has byte b at position i (i < threshold); used as a fast negative check.
	chars [256]uint8
	// threshold is how many leading bytes are mirrored into chars.
	threshold int
	// slots is a fixed-size hash table (indexed by bytesHash) of stored slices.
	slots [][][]byte
}
|
||||||
|
|
||||||
|
// NewBytesFilter returns a new BytesFilter pre-populated with elements.
func NewBytesFilter(elements ...[]byte) BytesFilter {
	s := &bytesFilter{
		// mirror up to 3 leading bytes into the chars bitmap
		threshold: 3,
		// 64 hash buckets for the exact-match table
		slots: make([][][]byte, 64),
	}
	for _, element := range elements {
		s.Add(element)
	}
	return s
}
|
||||||
|
|
||||||
|
// Add adds given bytes to this set.
func (s *bytesFilter) Add(b []byte) {
	l := len(b)
	m := s.threshold
	if l < s.threshold {
		m = l
	}
	// record the first (up to threshold) bytes in the per-position bitmap
	// that Contains uses as a fast negative check
	for i := 0; i < m; i++ {
		s.chars[b[i]] |= 1 << uint8(i)
	}
	// then store b in its hash bucket for the exact membership test
	h := bytesHash(b) % uint64(len(s.slots))
	slot := s.slots[h]
	if slot == nil {
		slot = [][]byte{}
	}
	s.slots[h] = append(slot, b)
}
|
||||||
|
|
||||||
|
func (s *bytesFilter) Extend(bs ...[]byte) BytesFilter {
|
||||||
|
newFilter := NewBytesFilter().(*bytesFilter)
|
||||||
|
newFilter.chars = s.chars
|
||||||
|
newFilter.threshold = s.threshold
|
||||||
|
for k, v := range s.slots {
|
||||||
|
newSlot := make([][]byte, len(v))
|
||||||
|
copy(newSlot, v)
|
||||||
|
newFilter.slots[k] = v
|
||||||
|
}
|
||||||
|
for _, b := range bs {
|
||||||
|
newFilter.Add(b)
|
||||||
|
}
|
||||||
|
return newFilter
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *bytesFilter) Contains(b []byte) bool {
|
||||||
|
l := len(b)
|
||||||
|
m := s.threshold
|
||||||
|
if l < s.threshold {
|
||||||
|
m = l
|
||||||
|
}
|
||||||
|
for i := 0; i < m; i++ {
|
||||||
|
if (s.chars[b[i]] & (1 << uint8(i))) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h := bytesHash(b) % uint64(len(s.slots))
|
||||||
|
slot := s.slots[h]
|
||||||
|
if slot == nil || len(slot) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, element := range slot {
|
||||||
|
if bytes.Equal(element, b) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
13
vendor/github.com/yuin/goldmark/util/util_safe.go
generated
vendored
Normal file
13
vendor/github.com/yuin/goldmark/util/util_safe.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
// +build appengine,js
|
||||||
|
|
||||||
|
package util
|
||||||
|
|
||||||
|
// BytesToReadOnlyString returns a string converted from given bytes.
// Copying implementation selected by the "appengine,js" build tag.
func BytesToReadOnlyString(b []byte) string {
	return string(b)
}

// StringToReadOnlyBytes returns bytes converted from given string.
// Copying implementation selected by the "appengine,js" build tag.
func StringToReadOnlyBytes(s string) []byte {
	return []byte(s)
}
|
20
vendor/github.com/yuin/goldmark/util/util_unsafe.go
generated
vendored
Normal file
20
vendor/github.com/yuin/goldmark/util/util_unsafe.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// +build !appengine,!js
|
||||||
|
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BytesToReadOnlyString returns a string converted from given bytes.
// Zero-copy: the string shares b's backing memory, so callers must not
// mutate b afterwards.
func BytesToReadOnlyString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// StringToReadOnlyBytes returns bytes converted from given string.
// Zero-copy: the slice aliases the string's memory and must be treated
// as read-only; writing to it is undefined behavior.
func StringToReadOnlyBytes(s string) []byte {
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	bh := reflect.SliceHeader{Data: sh.Data, Len: sh.Len, Cap: sh.Len}
	return *(*[]byte)(unsafe.Pointer(&bh))
}
|
14
vendor/modules.txt
vendored
14
vendor/modules.txt
vendored
|
@ -361,16 +361,12 @@ github.com/prometheus/procfs/internal/util
|
||||||
# github.com/quasoft/websspi v1.0.0
|
# github.com/quasoft/websspi v1.0.0
|
||||||
github.com/quasoft/websspi
|
github.com/quasoft/websspi
|
||||||
github.com/quasoft/websspi/secctx
|
github.com/quasoft/websspi/secctx
|
||||||
# github.com/russross/blackfriday/v2 v2.0.1
|
|
||||||
github.com/russross/blackfriday/v2
|
|
||||||
# github.com/satori/go.uuid v1.2.0
|
# github.com/satori/go.uuid v1.2.0
|
||||||
github.com/satori/go.uuid
|
github.com/satori/go.uuid
|
||||||
# github.com/sergi/go-diff v1.0.0
|
# github.com/sergi/go-diff v1.0.0
|
||||||
github.com/sergi/go-diff/diffmatchpatch
|
github.com/sergi/go-diff/diffmatchpatch
|
||||||
# github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b
|
# github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b
|
||||||
github.com/shurcooL/httpfs/vfsutil
|
github.com/shurcooL/httpfs/vfsutil
|
||||||
# github.com/shurcooL/sanitized_anchor_name v1.0.0
|
|
||||||
github.com/shurcooL/sanitized_anchor_name
|
|
||||||
# github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
|
# github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
|
||||||
github.com/shurcooL/vfsgen
|
github.com/shurcooL/vfsgen
|
||||||
# github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d
|
# github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d
|
||||||
|
@ -432,6 +428,16 @@ github.com/willf/bitset
|
||||||
github.com/xanzy/ssh-agent
|
github.com/xanzy/ssh-agent
|
||||||
# github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
|
# github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
|
||||||
github.com/yohcop/openid-go
|
github.com/yohcop/openid-go
|
||||||
|
# github.com/yuin/goldmark v1.1.19
|
||||||
|
github.com/yuin/goldmark
|
||||||
|
github.com/yuin/goldmark/ast
|
||||||
|
github.com/yuin/goldmark/extension
|
||||||
|
github.com/yuin/goldmark/extension/ast
|
||||||
|
github.com/yuin/goldmark/parser
|
||||||
|
github.com/yuin/goldmark/renderer
|
||||||
|
github.com/yuin/goldmark/renderer/html
|
||||||
|
github.com/yuin/goldmark/text
|
||||||
|
github.com/yuin/goldmark/util
|
||||||
# go.mongodb.org/mongo-driver v1.1.1
|
# go.mongodb.org/mongo-driver v1.1.1
|
||||||
go.mongodb.org/mongo-driver/bson
|
go.mongodb.org/mongo-driver/bson
|
||||||
go.mongodb.org/mongo-driver/bson/bsoncodec
|
go.mongodb.org/mongo-driver/bson/bsoncodec
|
||||||
|
|
Loading…
Reference in a new issue