diff --git a/Gopkg.lock b/Gopkg.lock
index e4305b123..f5da50a3d 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -7,6 +7,12 @@
packages = [".","fs","fuseutil"]
revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
+[[projects]]
+ name = "github.com/cpuguy83/go-md2man"
+ packages = ["md2man"]
+ revision = "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
+ version = "v1.0.6"
+
[[projects]]
name = "github.com/elithrar/simple-scrypt"
packages = ["."]
@@ -85,10 +91,22 @@
revision = "bb2ecf9a98e35a0b336ffc23fc515fb6e7961577"
version = "v0.1.0"
+[[projects]]
+ name = "github.com/russross/blackfriday"
+ packages = ["."]
+ revision = "0b647d0506a698cca42caca173e55559b12a69f2"
+ version = "v1.4"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/shurcooL/sanitized_anchor_name"
+ packages = ["."]
+ revision = "541ff5ee47f1dddf6a5281af78307d921524bcb5"
+
[[projects]]
branch = "master"
name = "github.com/spf13/cobra"
- packages = ["."]
+ packages = [".","doc"]
revision = "f20b4e9c32bb3e9d44773ca208db814f24dcd21b"
[[projects]]
@@ -115,9 +133,15 @@
packages = ["unix"]
revision = "c4489faa6e5ab84c0ef40d6ee878f7a030281f0f"
+[[projects]]
+ branch = "v2"
+ name = "gopkg.in/yaml.v2"
+ packages = ["."]
+ revision = "25c4ec802a7d637f88d584ab26798e94ad14c13b"
+
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "b4de858790fd06b6d1df19baa8a3246f23b2fded9ab6eb85325ddb3ee91003b8"
+ inputs-digest = "10287830033309dd91c5e7e381e1a56cd3bf135fd4e776954232c404c39be210"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/vendor/github.com/cpuguy83/go-md2man/.gitignore b/vendor/github.com/cpuguy83/go-md2man/.gitignore
new file mode 100644
index 000000000..b651fbfb1
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/.gitignore
@@ -0,0 +1 @@
+go-md2man
diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
new file mode 100644
index 000000000..1cade6cef
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cpuguy83/go-md2man/README.md b/vendor/github.com/cpuguy83/go-md2man/README.md
new file mode 100644
index 000000000..b7aae65d2
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/README.md
@@ -0,0 +1,18 @@
+go-md2man
+=========
+
+** Work in Progress **
+This still needs a lot of help to be complete, or even usable!
+
+Uses blackfriday to process markdown into man pages.
+
+### Usage
+
+./md2man -in /path/to/markdownfile.md -out /manfile/output/path
+
+### How to contribute
+
+We use [govend](https://github.com/govend/govend) for vendoring Go packages.
+
+How to update dependencies: `govend -v -u --prune`
+
diff --git a/vendor/github.com/cpuguy83/go-md2man/go-md2man.1.md b/vendor/github.com/cpuguy83/go-md2man/go-md2man.1.md
new file mode 100644
index 000000000..e1ae104e3
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/go-md2man.1.md
@@ -0,0 +1,23 @@
+go-md2man 1 "January 2015" go-md2man "User Manual"
+==================================================
+
+# NAME
+ go-md2man - Convert markdown files into manpages
+
+# SYNOPSIS
+ go-md2man -in=[/path/to/md/file] -out=[/path/to/output]
+
+# Description
+ go-md2man converts standard markdown formatted documents into manpages. It is
+ written purely in Go so as to reduce dependencies on 3rd party libs.
+
+ By default, the input is stdin and the output is stdout.
+
+# Example
+ Convert the markdown file "go-md2man.1.md" into a manpage.
+
+ go-md2man -in=README.md -out=go-md2man.1.out
+
+# HISTORY
+ January 2015, Originally compiled by Brian Goff( cpuguy83@gmail.com )
+
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man.go
new file mode 100644
index 000000000..8f6dcdaed
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/cpuguy83/go-md2man/md2man"
+)
+
+var inFilePath = flag.String("in", "", "Path to file to be processed (default: stdin)")
+var outFilePath = flag.String("out", "", "Path to output processed file (default: stdout)")
+
+func main() {
+ var err error
+ flag.Parse()
+
+ inFile := os.Stdin
+ if *inFilePath != "" {
+ inFile, err = os.Open(*inFilePath)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ }
+ defer inFile.Close()
+
+ doc, err := ioutil.ReadAll(inFile)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ out := md2man.Render(doc)
+
+ outFile := os.Stdout
+ if *outFilePath != "" {
+ outFile, err = os.Create(*outFilePath)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ defer outFile.Close()
+ }
+ _, err = outFile.Write(out)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
new file mode 100644
index 000000000..8f44fa155
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
@@ -0,0 +1,19 @@
+package md2man
+
+import (
+ "github.com/russross/blackfriday"
+)
+
+func Render(doc []byte) []byte {
+ renderer := RoffRenderer(0)
+ extensions := 0
+ extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+ extensions |= blackfriday.EXTENSION_TABLES
+ extensions |= blackfriday.EXTENSION_FENCED_CODE
+ extensions |= blackfriday.EXTENSION_AUTOLINK
+ extensions |= blackfriday.EXTENSION_SPACE_HEADERS
+ extensions |= blackfriday.EXTENSION_FOOTNOTES
+ extensions |= blackfriday.EXTENSION_TITLEBLOCK
+
+ return blackfriday.Markdown(doc, renderer, extensions)
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
new file mode 100644
index 000000000..b8cea1c73
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
@@ -0,0 +1,282 @@
+package md2man
+
+import (
+ "bytes"
+ "fmt"
+ "html"
+ "strings"
+
+ "github.com/russross/blackfriday"
+)
+
+type roffRenderer struct{}
+
+var listCounter int
+
+func RoffRenderer(flags int) blackfriday.Renderer {
+ return &roffRenderer{}
+}
+
+func (r *roffRenderer) GetFlags() int {
+ return 0
+}
+
+func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
+ out.WriteString(".TH ")
+
+ splitText := bytes.Split(text, []byte("\n"))
+ for i, line := range splitText {
+ line = bytes.TrimPrefix(line, []byte("% "))
+ if i == 0 {
+ line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
+ line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
+ }
+ line = append([]byte("\""), line...)
+ line = append(line, []byte("\" ")...)
+ out.Write(line)
+ }
+ out.WriteString("\n")
+
+ // disable hyphenation
+ out.WriteString(".nh\n")
+ // disable justification (adjust text to left margin only)
+ out.WriteString(".ad l\n")
+}
+
+func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+ out.WriteString("\n.PP\n.RS\n\n.nf\n")
+ escapeSpecialChars(out, text)
+ out.WriteString("\n.fi\n.RE\n")
+}
+
+func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
+ out.WriteString("\n.PP\n.RS\n")
+ out.Write(text)
+ out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) {
+ out.Write(text)
+}
+
+func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+ marker := out.Len()
+
+ switch {
+ case marker == 0:
+ // This is the doc header
+ out.WriteString(".TH ")
+ case level == 1:
+ out.WriteString("\n\n.SH ")
+ case level == 2:
+ out.WriteString("\n.SH ")
+ default:
+ out.WriteString("\n.SS ")
+ }
+
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+}
+
+func (r *roffRenderer) HRule(out *bytes.Buffer) {
+ out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
+}
+
+func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+ marker := out.Len()
+ if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+ listCounter = 1
+ }
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+}
+
+func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+ if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+ out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", listCounter))
+ listCounter += 1
+ } else {
+ out.WriteString(".IP \\(bu 2\n")
+ }
+ out.Write(text)
+ out.WriteString("\n")
+}
+
+func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+ marker := out.Len()
+ out.WriteString("\n.PP\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ if marker != 0 {
+ out.WriteString("\n")
+ }
+}
+
+// TODO: This might not work
+func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+ out.WriteString(".TS\nallbox;\n")
+
+ out.Write(header)
+ out.Write(body)
+ out.WriteString("\n.TE\n")
+}
+
+func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
+ if out.Len() > 0 {
+ out.WriteString("\n")
+ }
+ out.Write(text)
+ out.WriteString("\n")
+}
+
+func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString(" ")
+ }
+ out.Write(text)
+ out.WriteString(" ")
+}
+
+// TODO: This is probably broken
+func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString("\t")
+ }
+ out.Write(text)
+ out.WriteString("\t")
+}
+
+func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
+
+}
+
+func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+
+}
+
+func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+ out.WriteString("\n\\[la]")
+ out.Write(link)
+ out.WriteString("\\[ra]")
+}
+
+func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB\\fC")
+ escapeSpecialChars(out, text)
+ out.WriteString("\\fR")
+}
+
+func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fI")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+}
+
+func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
+ out.WriteString("\n.br\n")
+}
+
+func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+ out.Write(content)
+ r.AutoLink(out, link, 0)
+}
+
+func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) {
+ out.Write(tag)
+}
+
+func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\s+2")
+ out.Write(text)
+ out.WriteString("\\s-2")
+}
+
+func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
+}
+
+func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+
+}
+
+func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
+ out.WriteString(html.UnescapeString(string(entity)))
+}
+
+func processFooterText(text []byte) []byte {
+ text = bytes.TrimPrefix(text, []byte("% "))
+ newText := []byte{}
+ textArr := strings.Split(string(text), ") ")
+
+ for i, w := range textArr {
+ if i == 0 {
+ w = strings.Replace(w, "(", "\" \"", 1)
+ w = fmt.Sprintf("\"%s\"", w)
+ } else {
+ w = fmt.Sprintf(" \"%s\"", w)
+ }
+ newText = append(newText, []byte(w)...)
+ }
+ newText = append(newText, []byte(" \"\"")...)
+
+ return newText
+}
+
+func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
+ escapeSpecialChars(out, text)
+}
+
+func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
+}
+
+func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
+}
+
+func needsBackslash(c byte) bool {
+ for _, r := range []byte("-_&\\~") {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
+func escapeSpecialChars(out *bytes.Buffer, text []byte) {
+ for i := 0; i < len(text); i++ {
+ // escape initial apostrophe or period
+ if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
+ out.WriteString("\\&")
+ }
+
+ // directly copy normal characters
+ org := i
+
+ for i < len(text) && !needsBackslash(text[i]) {
+ i++
+ }
+ if i > org {
+ out.Write(text[org:i])
+ }
+
+ // escape a character
+ if i >= len(text) {
+ break
+ }
+ out.WriteByte('\\')
+ out.WriteByte(text[i])
+ }
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/vendor.yml b/vendor/github.com/cpuguy83/go-md2man/vendor.yml
new file mode 100644
index 000000000..1b85d9ae5
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/vendor.yml
@@ -0,0 +1,5 @@
+vendors:
+- path: github.com/russross/blackfriday
+ rev: 93622da34e54fb6529bfb7c57e710f37a8d9cbd8
+- path: github.com/shurcooL/sanitized_anchor_name
+ rev: 10ef21a441db47d8b13ebcc5fd2310f636973c77
diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore
new file mode 100644
index 000000000..75623dccc
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml
new file mode 100644
index 000000000..208fd25bc
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.travis.yml
@@ -0,0 +1,18 @@
+# Travis CI (http://travis-ci.org/) is a continuous integration service for
+# open source projects. This file configures it to run unit tests for
+# blackfriday.
+
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+
+install:
+ - go get -d -t -v ./...
+ - go build -v ./...
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 000000000..2885af360
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md
new file mode 100644
index 000000000..7b979700b
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/README.md
@@ -0,0 +1,265 @@
+Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday)
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions. An experimental LaTeX output engine is also included.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with Go 1. If you are using an older
+release of Go, consider using v1.1 of blackfriday, which was based
+on the last stable release of Go prior to Go 1. You can find it as a
+tagged commit on github.
+
+With Go 1 and git installed:
+
+ go get github.com/russross/blackfriday
+
+will download, compile, and install the package into your `$GOPATH`
+directory hierarchy. Alternatively, you can achieve the same if you
+import it into a project:
+
+ import "github.com/russross/blackfriday"
+
+and `go get` without parameters.
+
+Usage
+-----
+
+For basic usage, it is as simple as getting your input into a byte
+slice and calling:
+
+ output := blackfriday.MarkdownBasic(input)
+
+This renders it with no extensions enabled. To get a more useful
+feature set, use this instead:
+
+ output := blackfriday.MarkdownCommon(input)
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running blackfriday's output
+through HTML sanitizer such as
+[Bluemonday](https://github.com/microcosm-cc/bluemonday).
+
+Here's an example of simple usage of blackfriday together with bluemonday:
+
+``` go
+import (
+ "github.com/microcosm-cc/bluemonday"
+ "github.com/russross/blackfriday"
+)
+
+// ...
+unsafe := blackfriday.MarkdownCommon(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options
+
+If you want to customize the set of options, first get a renderer
+(currently either the HTML or LaTeX output engines), then use it to
+call the more general `Markdown` function. For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`.
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+ go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* Hello Goodbye List List List List List Nested list ####### Header 7 #Header 1 ##Header 2 ###Header 3 ####Header 4 #####Header 5 ######Header 6 #######Header 7 Hello Goodbye List List List Nested list } Hello Goodbye List List List List List Nested list List List List List List Nested list Hello Goodbye List List List List List Nested list Hello Goodbye List List List List List Nested list Paragraph Paragraph ======== Paragraph ===== Paragraph Paragraph Paragraph ===== - -- * ** _ __ -*- -----* Hello Yin Yang Ting Bong Goo Yin Yang Ting Bong Goo Yin Yang Ting Bong Goo *Hello Paragraph\n* No linebreak Paragraph List List\nSecond line List Continued List List List normal text Yin Yang Ting Bong Goo 1 Hello 1.Hello Paragraph\n1. No linebreak Paragraph List List\nSecond line List Continued List List Definition a Definition b Definition a Definition b Definition c Term 1\n:Definition a Definition a Definition b Text 1 Definition a Text 1 Definition b Text 2
tag
+ if size := p.htmlHr(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // no special case recognized
+ return 0
+ }
+
+ // look for an unindented matching closing tag
+ // followed by a blank line
+ found := false
+ /*
+ closetag := []byte("\n" + curtag + ">")
+ j = len(curtag) + 1
+ for !found {
+ // scan for a closing tag at the beginning of a line
+ if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+ j += skip + len(closetag)
+ } else {
+ break
+ }
+
+ // see if it is the only thing on the line
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ // see if it is followed by a blank line/eof
+ j += skip
+ if j >= len(data) {
+ found = true
+ i = j
+ } else {
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ j += skip
+ found = true
+ i = j
+ }
+ }
+ }
+ }
+ */
+
+ // if not found, try a second pass looking for indented match
+ // but not if tag is "ins" or "del" (following original Markdown.pl)
+ if !found && curtag != "ins" && curtag != "del" {
+ i = 1
+ for i < len(data) {
+ i++
+ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+ i++
+ }
+
+ if i+2+len(curtag) >= len(data) {
+ break
+ }
+
+ j = p.htmlFindEnd(curtag, data[i-1:])
+
+ if j > 0 {
+ i += j - 1
+ found = true
+ break
+ }
+ }
+ }
+
+ if !found {
+ return 0
+ }
+
+ // the end of the block has been found
+ if doRender {
+ // trim newlines
+ end := i
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+
+ return i
+}
+
+// HTML comment, lax form
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+ i := p.inlineHtmlComment(out, data)
+ // needs to end with a blank line
+ if j := p.isEmpty(data[i:]); j > 0 {
+ size := i + j
+ if doRender {
+ // trim trailing newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+ return size
+ }
+ return 0
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+ return 0
+ }
+ if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+ // not an
tag after all; at least not a valid one
+ return 0
+ }
+
+ i := 3
+ for data[i] != '>' && data[i] != '\n' {
+ i++
+ }
+
+ if data[i] == '>' {
+ i++
+ if j := p.isEmpty(data[i:]); j > 0 {
+ size := i + j
+ if doRender {
+ // trim newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+ return size
+ }
+ }
+
+ return 0
+}
+
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+ i := 0
+ for isalnum(data[i]) {
+ i++
+ }
+ key := string(data[:i])
+ if _, ok := blockTags[key]; ok {
+ return key, true
+ }
+ return "", false
+}
+
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+ // assume data[0] == '<' && data[1] == '/' already tested
+
+ // check if tag is a match
+ closetag := []byte("" + tag + ">")
+ if !bytes.HasPrefix(data, closetag) {
+ return 0
+ }
+ i := len(closetag)
+
+ // check that the rest of the line is blank
+ skip := 0
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ return 0
+ }
+ i += skip
+ skip = 0
+
+ if i >= len(data) {
+ return i
+ }
+
+ if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+ return i
+ }
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ // following line must be blank
+ return 0
+ }
+
+ return i + skip
+}
+
+func (p *parser) isEmpty(data []byte) int {
+ // it is okay to call isEmpty on an empty buffer
+ if len(data) == 0 {
+ return 0
+ }
+
+ var i int
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] != ' ' && data[i] != '\t' {
+ return 0
+ }
+ }
+ return i + 1
+}
+
+func (p *parser) isHRule(data []byte) bool {
+ i := 0
+
+ // skip up to three spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // look at the hrule char
+ if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+ return false
+ }
+ c := data[i]
+
+ // the whole line must be the char or whitespace
+ n := 0
+ for data[i] != '\n' {
+ switch {
+ case data[i] == c:
+ n++
+ case data[i] != ' ':
+ return false
+ }
+ i++
+ }
+
+ return n >= 3
+}
+
+func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (skip int, marker string) {
+ i, size := 0, 0
+ skip = 0
+
+ // skip up to three spaces
+ for i < len(data) && i < 3 && data[i] == ' ' {
+ i++
+ }
+ if i >= len(data) {
+ return
+ }
+
+ // check for the marker characters: ~ or `
+ if data[i] != '~' && data[i] != '`' {
+ return
+ }
+
+ c := data[i]
+
+ // the whole line must be the same char or whitespace
+ for i < len(data) && data[i] == c {
+ size++
+ i++
+ }
+
+ if i >= len(data) {
+ return
+ }
+
+ // the marker char must occur at least 3 times
+ if size < 3 {
+ return
+ }
+ marker = string(data[i-size : i])
+
+ // if this is the end marker, it must match the beginning marker
+ if oldmarker != "" && marker != oldmarker {
+ return
+ }
+
+ if syntax != nil {
+ syn := 0
+ i = skipChar(data, i, ' ')
+
+ if i >= len(data) {
+ return
+ }
+
+ syntaxStart := i
+
+ if data[i] == '{' {
+ i++
+ syntaxStart++
+
+ for i < len(data) && data[i] != '}' && data[i] != '\n' {
+ syn++
+ i++
+ }
+
+ if i >= len(data) || data[i] != '}' {
+ return
+ }
+
+ // strip all whitespace at the beginning and the end
+ // of the {} block
+ for syn > 0 && isspace(data[syntaxStart]) {
+ syntaxStart++
+ syn--
+ }
+
+ for syn > 0 && isspace(data[syntaxStart+syn-1]) {
+ syn--
+ }
+
+ i++
+ } else {
+ for i < len(data) && !isspace(data[i]) {
+ syn++
+ i++
+ }
+ }
+
+ language := string(data[syntaxStart : syntaxStart+syn])
+ *syntax = &language
+ }
+
+ i = skipChar(data, i, ' ')
+ if i >= len(data) || data[i] != '\n' {
+ return
+ }
+
+ skip = i + 1
+ return
+}
+
+func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
+ var lang *string
+ beg, marker := p.isFencedCode(data, &lang, "")
+ if beg == 0 || beg >= len(data) {
+ return 0
+ }
+
+ var work bytes.Buffer
+
+ for {
+ // safe to assume beg < len(data)
+
+ // check for the end of the code block
+ fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker)
+ if fenceEnd != 0 {
+ beg += fenceEnd
+ break
+ }
+
+ // copy the current line
+ end := skipUntilChar(data, beg, '\n') + 1
+
+ // did we reach the end of the buffer without a closing marker?
+ if end >= len(data) {
+ return 0
+ }
+
+ // verbatim copy to the working buffer
+ if doRender {
+ work.Write(data[beg:end])
+ }
+ beg = end
+ }
+
+ syntax := ""
+ if lang != nil {
+ syntax = *lang
+ }
+
+ if doRender {
+ p.r.BlockCode(out, work.Bytes(), syntax)
+ }
+
+ return beg
+}
+
+func (p *parser) table(out *bytes.Buffer, data []byte) int {
+ var header bytes.Buffer
+ i, columns := p.tableHeader(&header, data)
+ if i == 0 {
+ return 0
+ }
+
+ var body bytes.Buffer
+
+ for i < len(data) {
+ pipes, rowStart := 0, i
+ for ; data[i] != '\n'; i++ {
+ if data[i] == '|' {
+ pipes++
+ }
+ }
+
+ if pipes == 0 {
+ i = rowStart
+ break
+ }
+
+ // include the newline in data sent to tableRow
+ i++
+ p.tableRow(&body, data[rowStart:i], columns, false)
+ }
+
+ p.r.Table(out, header.Bytes(), body.Bytes(), columns)
+
+ return i
+}
+
+// check if the specified position is preceded by an odd number of backslashes
+func isBackslashEscaped(data []byte, i int) bool {
+ backslashes := 0
+ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+ backslashes++
+ }
+ return backslashes&1 == 1
+}
+
+func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+ i := 0
+ colCount := 1
+ for i = 0; data[i] != '\n'; i++ {
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ colCount++
+ }
+ }
+
+ // doesn't look like a table header
+ if colCount == 1 {
+ return
+ }
+
+ // include the newline in the data sent to tableRow
+ header := data[:i+1]
+
+ // column count ignores pipes at beginning or end of line
+ if data[0] == '|' {
+ colCount--
+ }
+ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+ colCount--
+ }
+
+ columns = make([]int, colCount)
+
+ // move on to the header underline
+ i++
+ if i >= len(data) {
+ return
+ }
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+ i = skipChar(data, i, ' ')
+
+ // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+ // and trailing | optional on last column
+ col := 0
+ for data[i] != '\n' {
+ dashes := 0
+
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_LEFT
+ dashes++
+ }
+ for data[i] == '-' {
+ i++
+ dashes++
+ }
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_RIGHT
+ dashes++
+ }
+ for data[i] == ' ' {
+ i++
+ }
+
+ // end of column test is messy
+ switch {
+ case dashes < 3:
+ // not a valid column
+ return
+
+ case data[i] == '|' && !isBackslashEscaped(data, i):
+ // marker found, now skip past trailing whitespace
+ col++
+ i++
+ for data[i] == ' ' {
+ i++
+ }
+
+ // trailing junk found after last column
+ if col >= colCount && data[i] != '\n' {
+ return
+ }
+
+ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+ // something else found where marker was required
+ return
+
+ case data[i] == '\n':
+ // marker is optional for the last column
+ col++
+
+ default:
+ // trailing junk found after last column
+ return
+ }
+ }
+ if col != colCount {
+ return
+ }
+
+ p.tableRow(out, header, columns, true)
+ size = i + 1
+ return
+}
+
+func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
+ i, col := 0, 0
+ var rowWork bytes.Buffer
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+
+ for col = 0; col < len(columns) && i < len(data); col++ {
+ for data[i] == ' ' {
+ i++
+ }
+
+ cellStart := i
+
+ for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+ i++
+ }
+
+ cellEnd := i
+
+ // skip the end-of-cell marker, possibly taking us past end of buffer
+ i++
+
+ for cellEnd > cellStart && data[cellEnd-1] == ' ' {
+ cellEnd--
+ }
+
+ var cellWork bytes.Buffer
+ p.inline(&cellWork, data[cellStart:cellEnd])
+
+ if header {
+ p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
+ } else {
+ p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
+ }
+ }
+
+ // pad it out with empty columns to get the right number
+ for ; col < len(columns); col++ {
+ if header {
+ p.r.TableHeaderCell(&rowWork, nil, columns[col])
+ } else {
+ p.r.TableCell(&rowWork, nil, columns[col])
+ }
+ }
+
+ // silently ignore rows with too many cells
+
+ p.r.TableRow(out, rowWork.Bytes())
+}
+
+// returns blockquote prefix length
+func (p *parser) quotePrefix(data []byte) int {
+ i := 0
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+ if data[i] == '>' {
+ if data[i+1] == ' ' {
+ return i + 2
+ }
+ return i + 1
+ }
+ return 0
+}
+
+// blockquote ends with at least one blank line
+// followed by something without a blockquote prefix
+func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
+ if p.isEmpty(data[beg:]) <= 0 {
+ return false
+ }
+ if end >= len(data) {
+ return true
+ }
+ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+}
+
+// parse a blockquote fragment
+func (p *parser) quote(out *bytes.Buffer, data []byte) int {
+ var raw bytes.Buffer
+ beg, end := 0, 0
+ for beg < len(data) {
+ end = beg
+ // Step over whole lines, collecting them. While doing that, check for
+ // fenced code and if one's found, incorporate it altogether,
+ // regardless of any contents inside it
+ for data[end] != '\n' {
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if i := p.fencedCode(out, data[end:], false); i > 0 {
+ // -1 to compensate for the extra end++ after the loop:
+ end += i - 1
+ break
+ }
+ }
+ end++
+ }
+ end++
+
+ if pre := p.quotePrefix(data[beg:]); pre > 0 {
+ // skip the prefix
+ beg += pre
+ } else if p.terminateBlockquote(data, beg, end) {
+ break
+ }
+
+ // this line is part of the blockquote
+ raw.Write(data[beg:end])
+ beg = end
+ }
+
+ var cooked bytes.Buffer
+ p.block(&cooked, raw.Bytes())
+ p.r.BlockQuote(out, cooked.Bytes())
+ return end
+}
+
+// codePrefix returns 4 if data begins with the four-space indent that
+// marks an indented code block, 0 otherwise.
+// NOTE(review): reads data[0..3] unconditionally; assumes callers pass
+// lines of at least four bytes (newline-terminated) — confirm.
+func (p *parser) codePrefix(data []byte) int {
+	if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+		return 4
+	}
+	return 0
+}
+
+// code parses an indented code block at the start of data, renders it via
+// BlockCode into out, and returns the number of bytes consumed. Blank
+// lines inside the block are kept; the first non-blank, non-indented line
+// ends the block.
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+	var work bytes.Buffer
+
+	i := 0
+	for i < len(data) {
+		beg := i
+		// advance to the end of the current line (past the newline)
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+
+		blankline := p.isEmpty(data[beg:i]) > 0
+		if pre := p.codePrefix(data[beg:i]); pre > 0 {
+			// drop the four-space prefix from the copied text
+			beg += pre
+		} else if !blankline {
+			// non-empty, non-prefixed line breaks the pre
+			i = beg
+			break
+		}
+
+		// verbatim copy to the working buffer
+		if blankline {
+			work.WriteByte('\n')
+		} else {
+			work.Write(data[beg:i])
+		}
+	}
+
+	// trim all the \n off the end of work
+	workbytes := work.Bytes()
+	eol := len(workbytes)
+	for eol > 0 && workbytes[eol-1] == '\n' {
+		eol--
+	}
+	if eol != len(workbytes) {
+		work.Truncate(eol)
+	}
+
+	// always emit exactly one trailing newline
+	work.WriteByte('\n')
+
+	p.r.BlockCode(out, work.Bytes(), "")
+
+	return i
+}
+
+// uliPrefix returns the length of an unordered list item prefix — up to
+// three leading spaces, then one of '*', '+' or '-' followed by a space —
+// or 0 if data does not start an unordered list item.
+func (p *parser) uliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// need a *, +, or - followed by a space
+	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+		data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// oliPrefix returns the length of an ordered list item prefix — up to
+// three leading spaces, one or more digits, then ". " — or 0 if data does
+// not start an ordered list item.
+func (p *parser) oliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// count the digits
+	start := i
+	for data[i] >= '0' && data[i] <= '9' {
+		i++
+	}
+
+	// we need >= 1 digits followed by a dot and a space
+	if start == i || data[i] != '.' || data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// dliPrefix returns the length of a definition list item prefix (": "
+// at the very start of data), or 0 if data does not start one.
+func (p *parser) dliPrefix(data []byte) int {
+	i := 0
+
+	// need a ':' followed by a space
+	if data[i] != ':' || data[i+1] != ' ' {
+		return 0
+	}
+	// NOTE(review): this loop is a no-op — at this point data[i] is known to
+	// be ':', never ' ', so i stays 0 and the function always returns 2.
+	// Presumably it was meant to skip extra spaces after the colon; confirm
+	// against upstream blackfriday before changing vendored behavior.
+	for data[i] == ' ' {
+		i++
+	}
+	return i + 2
+}
+
+// list parses an ordered, unordered, or definition list block at the start
+// of data and returns the number of bytes consumed. flags carries the
+// LIST_TYPE_* kind; items are consumed one at a time via listItem, and the
+// whole list is rendered through the renderer's List callback, which
+// invokes the work closure to emit the items.
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+	i := 0
+	flags |= LIST_ITEM_BEGINNING_OF_LIST
+	work := func() bool {
+		// consume items until one fails to parse or marks the end of the list
+		for i < len(data) {
+			skip := p.listItem(out, data[i:], &flags)
+			i += skip
+
+			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+				break
+			}
+			// only the first item carries the beginning-of-list flag
+			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+		}
+		return true
+	}
+
+	p.r.List(out, work, flags)
+	return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+// Returns the number of bytes consumed (0 if data does not begin a list
+// item). flags is updated in place with LIST_ITEM_* / LIST_TYPE_* bits
+// describing what was found.
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+	// keep track of the indentation of the first line
+	itemIndent := 0
+	for itemIndent < 3 && data[itemIndent] == ' ' {
+		itemIndent++
+	}
+
+	// try each marker flavor in turn: unordered, ordered, definition
+	i := p.uliPrefix(data)
+	if i == 0 {
+		i = p.oliPrefix(data)
+	}
+	if i == 0 {
+		i = p.dliPrefix(data)
+		// reset definition term flag
+		if i > 0 {
+			*flags &= ^LIST_TYPE_TERM
+		}
+	}
+	if i == 0 {
+		// if in definition list, set term flag and continue
+		if *flags&LIST_TYPE_DEFINITION != 0 {
+			*flags |= LIST_TYPE_TERM
+		} else {
+			return 0
+		}
+	}
+
+	// skip leading whitespace on first line
+	for data[i] == ' ' {
+		i++
+	}
+
+	// find the end of the line
+	line := i
+	for i > 0 && data[i-1] != '\n' {
+		i++
+	}
+
+	// get working buffer
+	var raw bytes.Buffer
+
+	// put the first line into the working buffer
+	raw.Write(data[line:i])
+	line = i
+
+	// process the following lines
+	containsBlankLine := false
+	// sublist records the raw-buffer offset where a nested sublist begins
+	// (0 means no sublist was seen)
+	sublist := 0
+
+gatherlines:
+	for line < len(data) {
+		i++
+
+		// find the end of this line
+		for data[i-1] != '\n' {
+			i++
+		}
+
+		// if it is an empty line, guess that it is part of this item
+		// and move on to the next line
+		if p.isEmpty(data[line:i]) > 0 {
+			containsBlankLine = true
+			line = i
+			continue
+		}
+
+		// calculate the indentation
+		indent := 0
+		for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+			indent++
+		}
+
+		chunk := data[line+indent : i]
+
+		// evaluate how this line fits in
+		switch {
+		// is this a nested list item?
+		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+			p.oliPrefix(chunk) > 0 ||
+			p.dliPrefix(chunk) > 0:
+
+			if containsBlankLine {
+				*flags |= LIST_ITEM_CONTAINS_BLOCK
+			}
+
+			// to be a nested list, it must be indented more
+			// if not, it is the next item in the same list
+			if indent <= itemIndent {
+				break gatherlines
+			}
+
+			// is this the first item in the nested list?
+			if sublist == 0 {
+				sublist = raw.Len()
+			}
+
+		// is this a nested prefix header?
+		case p.isPrefixHeader(chunk):
+			// if the header is not indented, it is not nested in the list
+			// and thus ends the list
+			if containsBlankLine && indent < 4 {
+				*flags |= LIST_ITEM_END_OF_LIST
+				break gatherlines
+			}
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+
+		// anything following an empty line is only part
+		// of this item if it is indented 4 spaces
+		// (regardless of the indentation of the beginning of the item)
+		case containsBlankLine && indent < 4:
+			if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+				// is the next item still a part of this list?
+				next := i
+				for data[next] != '\n' {
+					next++
+				}
+				for next < len(data)-1 && data[next] == '\n' {
+					next++
+				}
+				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+					*flags |= LIST_ITEM_END_OF_LIST
+				}
+			} else {
+				*flags |= LIST_ITEM_END_OF_LIST
+			}
+			break gatherlines
+
+		// a blank line means this should be parsed as a block
+		case containsBlankLine:
+			raw.WriteByte('\n')
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+		}
+
+		// if this line was preceded by one or more blanks,
+		// re-introduce the blank into the buffer
+		if containsBlankLine {
+			containsBlankLine = false
+			raw.WriteByte('\n')
+
+		}
+
+		// add the line into the working buffer without prefix
+		raw.Write(data[line+indent : i])
+
+		line = i
+	}
+
+	rawBytes := raw.Bytes()
+
+	// render the contents of the list item
+	var cooked bytes.Buffer
+	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+		// intermediate render of block item, except for definition term
+		if sublist > 0 {
+			// render the pre-sublist text and the sublist separately so the
+			// sublist is not absorbed into the preceding paragraph
+			p.block(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.block(&cooked, rawBytes)
+		}
+	} else {
+		// intermediate render of inline item
+		if sublist > 0 {
+			p.inline(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.inline(&cooked, rawBytes)
+		}
+	}
+
+	// render the actual list item
+	cookedBytes := cooked.Bytes()
+	parsedEnd := len(cookedBytes)
+
+	// strip trailing newlines
+	for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+		parsedEnd--
+	}
+	p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+	return line
+}
+
+// renderParagraph renders a single paragraph that has already been parsed
+// out: it trims leading/trailing spaces and the trailing newline, then
+// hands the inline content to the renderer's Paragraph callback via the
+// work closure. Empty input renders nothing.
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 {
+		return
+	}
+
+	// trim leading spaces
+	beg := 0
+	for data[beg] == ' ' {
+		beg++
+	}
+
+	// trim trailing newline
+	end := len(data) - 1
+
+	// trim trailing spaces
+	for end > beg && data[end-1] == ' ' {
+		end--
+	}
+
+	// inline-render the trimmed span lazily, when the renderer asks for it
+	work := func() bool {
+		p.inline(out, data[beg:end])
+		return true
+	}
+	p.r.Paragraph(out, work)
+}
+
+// paragraph parses a paragraph at the start of data and returns the number
+// of bytes consumed. It scans line by line until something ends the
+// paragraph: a blank line, an underlined (setext) header, a prefixed
+// header, a horizontal rule, or — depending on enabled extensions — an
+// HTML block, fenced code, a definition list item, or a list marker.
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+	// prev: index of 1st char of previous line
+	// line: index of 1st char of current line
+	// i: index of cursor/end of current line
+	var prev, line, i int
+
+	// keep going until we find something to mark the end of the paragraph
+	for i < len(data) {
+		// mark the beginning of the current line
+		prev = line
+		current := data[i:]
+		line = i
+
+		// did we find a blank line marking the end of the paragraph?
+		if n := p.isEmpty(current); n > 0 {
+			// is this blank line followed by a definition list item?
+			if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+				if i < len(data)-1 && data[i+1] == ':' {
+					return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+				}
+			}
+
+			p.renderParagraph(out, data[:i])
+			return i + n
+		}
+
+		// an underline under some text marks a header, so our paragraph ended on prev line
+		if i > 0 {
+			if level := p.isUnderlinedHeader(current); level > 0 {
+				// render the paragraph
+				p.renderParagraph(out, data[:prev])
+
+				// ignore leading and trailing whitespace
+				eol := i - 1
+				for prev < eol && data[prev] == ' ' {
+					prev++
+				}
+				for eol > prev && data[eol-1] == ' ' {
+					eol--
+				}
+
+				// render the header
+				// this ugly double closure avoids forcing variables onto the heap
+				work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+					return func() bool {
+						pp.inline(o, d)
+						return true
+					}
+				}(out, p, data[prev:eol])
+
+				id := ""
+				if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+					id = sanitized_anchor_name.Create(string(data[prev:eol]))
+				}
+
+				p.r.Header(out, work, level, id)
+
+				// find the end of the underline
+				for data[i] != '\n' {
+					i++
+				}
+				return i
+			}
+		}
+
+		// if the next line starts a block of HTML, then the paragraph ends here
+		if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+			if data[i] == '<' && p.html(out, current, false) > 0 {
+				// rewind to before the HTML block
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a prefixed header or a horizontal rule after this, paragraph is over
+		if p.isPrefixHeader(current) || p.isHRule(current) {
+			p.renderParagraph(out, data[:i])
+			return i
+		}
+
+		// if there's a fenced code block, paragraph is over
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			if p.fencedCode(out, current, false) > 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a definition list item, prev line is a definition term
+		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+			if p.dliPrefix(current) != 0 {
+				return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+			}
+		}
+
+		// if there's a list after this, paragraph is over
+		if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+			if p.uliPrefix(current) != 0 ||
+				p.oliPrefix(current) != 0 ||
+				p.quotePrefix(current) != 0 ||
+				p.codePrefix(current) != 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// otherwise, scan to the beginning of the next line
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+	}
+
+	// ran out of input: render whatever we collected as the final paragraph
+	p.renderParagraph(out, data[:i])
+	return i
+}
diff --git a/vendor/github.com/russross/blackfriday/block_test.go b/vendor/github.com/russross/blackfriday/block_test.go
new file mode 100644
index 000000000..b33c2578b
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/block_test.go
@@ -0,0 +1,1532 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "# Header 7
\n",
+
+ "#Header 1\n",
+ "Header 1
\n",
+
+ "##Header 2\n",
+ "Header 2
\n",
+
+ "###Header 3\n",
+ "Header 3
\n",
+
+ "####Header 4\n",
+ "Header 4
\n",
+
+ "#####Header 5\n",
+ "Header 5
\n",
+
+ "######Header 6\n",
+ "Header 6
\n",
+
+ "#######Header 7\n",
+ "#Header 7
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "Header
\n
\n",
+
+ "#Header 1 \\#\n",
+ "\n
Nested header
Header 1 #
\n",
+
+ "#Header 1 \\# foo\n",
+ "Header 1 # foo
\n",
+
+ "#Header 1 #\\##\n",
+ "Header 1 ##
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestPrefixHeaderSpaceExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_SPACE_HEADERS)
+}
+
+func TestPrefixHeaderIdExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1 {#someid}\n",
+ "\n
Nested header
Header 1
\n",
+
+ "# Header 1 {#someid} \n",
+ "Header 1
\n",
+
+ "# Header 1 {#someid}\n",
+ "Header 1
\n",
+
+ "# Header 1 {#someid\n",
+ "Header 1 {#someid
\n",
+
+ "# Header 1 {#someid\n",
+ "Header 1 {#someid
\n",
+
+ "# Header 1 {#someid}}\n",
+ "Header 1
\n\nHeader 2
\n",
+
+ "### Header 3 {#someid}\n",
+ "Header 3
\n",
+
+ "#### Header 4 {#someid}\n",
+ "Header 4
\n",
+
+ "##### Header 5 {#someid}\n",
+ "Header 5
\n",
+
+ "###### Header 6 {#someid}\n",
+ "Header 6
\n",
+
+ "####### Header 7 {#someid}\n",
+ "# Header 7
\n",
+
+ "# Header 1 # {#someid}\n",
+ "Header 1
\n",
+
+ "## Header 2 ## {#someid}\n",
+ "Header 2
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header {#someid}\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header {#someid}\n",
+ "Header
\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_HEADER_IDS)
+}
+
+func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
+ var tests = []string{
+ "# header 1 {#someid}\n",
+ "\n
Nested header
header 1
\n",
+
+ "## header 2 {#someid}\n",
+ "header 2
\n",
+
+ "### header 3 {#someid}\n",
+ "header 3
\n",
+
+ "#### header 4 {#someid}\n",
+ "header 4
\n",
+
+ "##### header 5 {#someid}\n",
+ "header 5
\n",
+
+ "###### header 6 {#someid}\n",
+ "header 6
\n",
+
+ "####### header 7 {#someid}\n",
+ "# header 7
\n",
+
+ "# header 1 # {#someid}\n",
+ "header 1
\n",
+
+ "## header 2 ## {#someid}\n",
+ "header 2
\n",
+
+ "* List\n# Header {#someid}\n* List\n",
+ "\n
\n",
+
+ "* List\n#Header {#someid}\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header {#someid}\n",
+ "Header
\n
\n",
+ }
+
+ parameters := HtmlRendererParameters{
+ HeaderIDPrefix: "PRE:",
+ HeaderIDSuffix: ":POST",
+ }
+
+ doTestsBlockWithRunner(t, tests, EXTENSION_HEADER_IDS, runnerWithRendererParameters(parameters))
+}
+
+func TestPrefixAutoHeaderIdExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "\n
Nested header
Header 1
\n",
+
+ "# Header 1 \n",
+ "Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "# Header 7
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "Header
\n
\n",
+
+ "# Header\n\n# Header\n",
+ "\n
Nested header
Header
\n\nHeader
\n",
+
+ "# Header 1\n\n# Header 1",
+ "Header 1
\n\nHeader 1
\n",
+
+ "# Header\n\n# Header 1\n\n# Header\n\n# Header",
+ "Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
+}
+
+func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "Header 1
\n",
+
+ "# Header 1 \n",
+ "Header 1
\n",
+
+ "## Header 2\n",
+ "Header 2
\n",
+
+ "### Header 3\n",
+ "Header 3
\n",
+
+ "#### Header 4\n",
+ "Header 4
\n",
+
+ "##### Header 5\n",
+ "Header 5
\n",
+
+ "###### Header 6\n",
+ "Header 6
\n",
+
+ "####### Header 7\n",
+ "# Header 7
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Header 1
\n\n\n
\n",
+
+ "* List\n#Header\n* List\n",
+ "Header
\n
\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "Header
\n
\n",
+
+ "# Header\n\n# Header\n",
+ "\n
Nested header
Header
\n\nHeader
\n",
+
+ "# Header 1\n\n# Header 1",
+ "Header 1
\n\nHeader 1
\n",
+
+ "# Header\n\n# Header 1\n\n# Header\n\n# Header",
+ "Header
\n\nHeader 1
\n\nHeader
\n\nHeader
\n",
+ }
+
+ parameters := HtmlRendererParameters{
+ HeaderIDPrefix: "PRE:",
+ HeaderIDSuffix: ":POST",
+ }
+
+ doTestsBlockWithRunner(t, tests, EXTENSION_AUTO_HEADER_IDS, runnerWithRendererParameters(parameters))
+}
+
+func TestPrefixMultipleHeaderExtensions(t *testing.T) {
+ var tests = []string{
+ "# Header\n\n# Header {#header}\n\n# Header 1",
+ "Header
\n\nHeader
\n\nHeader 1
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS|EXTENSION_HEADER_IDS)
+}
+
+func TestUnderlineHeaders(t *testing.T) {
+ var tests = []string{
+ "Header 1\n========\n",
+ "Header 1
\n",
+
+ "Header 2\n--------\n",
+ "Header 2
\n",
+
+ "A\n=\n",
+ "A
\n",
+
+ "B\n-\n",
+ "B
\n",
+
+ "Paragraph\nHeader\n=\n",
+ "Header
\n",
+
+ "Header\n===\nParagraph\n",
+ "Header
\n\nHeader
\n\nAnother header
\n",
+
+ " Header\n======\n",
+ "Header
\n",
+
+ " Code\n========\n",
+ "
\n\nCode\n
Header with inline
\n",
+
+ "* List\n * Sublist\n Not a header\n ------\n",
+ "\n
\n",
+
+ "Paragraph\n\n\n\n\nHeader\n===\n",
+ "\n
Header
\n",
+
+ "Trailing space \n==== \n\n",
+ "Trailing space
\n",
+
+ "Trailing spaces\n==== \n\n",
+ "Trailing spaces
\n",
+
+ "Double underline\n=====\n=====\n",
+ "Double underline
\n\nHeader 1
\n",
+
+ "Header 2\n--------\n",
+ "Header 2
\n",
+
+ "A\n=\n",
+ "A
\n",
+
+ "B\n-\n",
+ "B
\n",
+
+ "Paragraph\nHeader\n=\n",
+ "Header
\n",
+
+ "Header\n===\nParagraph\n",
+ "Header
\n\nHeader
\n\nAnother header
\n",
+
+ " Header\n======\n",
+ "Header
\n",
+
+ "Header with *inline*\n=====\n",
+ "Header with inline
\n",
+
+ "Paragraph\n\n\n\n\nHeader\n===\n",
+ "Header
\n",
+
+ "Trailing space \n==== \n\n",
+ "Trailing space
\n",
+
+ "Trailing spaces\n==== \n\n",
+ "Trailing spaces
\n",
+
+ "Double underline\n=====\n=====\n",
+ "Double underline
\n\nHeader
\n\nHeader
\n",
+
+ "Header 1\n========\n\nHeader 1\n========\n",
+ "Header 1
\n\nHeader 1
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
+}
+
+func TestHorizontalRule(t *testing.T) {
+ var tests = []string{
+ "-\n",
+ "
\n",
+
+ "----\n",
+ "
\n",
+
+ "*\n",
+ "
\n",
+
+ "****\n",
+ "
\n",
+
+ "_\n",
+ "
\n",
+
+ "____\n",
+ "
\n",
+
+ "-*-\n",
+ "
\n",
+
+ "* * *\n",
+ "
\n",
+
+ "_ _ _\n",
+ "
\n",
+
+ "-----*\n",
+ "
\n",
+
+ "Hello\n***\n",
+ "
\n",
+
+ "---\n***\n___\n",
+ "
\n\n
\n\n
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestUnorderedList(t *testing.T) {
+ var tests = []string{
+ "* Hello\n",
+ "\n
\n",
+
+ "* Yin\n* Yang\n",
+ "\n
\n",
+
+ "* Ting\n* Bong\n* Goo\n",
+ "\n
\n",
+
+ "* Yin\n\n* Yang\n",
+ "\n
\n",
+
+ "* Ting\n\n* Bong\n* Goo\n",
+ "\n
\n",
+
+ "+ Hello\n",
+ "\n
\n",
+
+ "+ Yin\n+ Yang\n",
+ "\n
\n",
+
+ "+ Ting\n+ Bong\n+ Goo\n",
+ "\n
\n",
+
+ "+ Yin\n\n+ Yang\n",
+ "\n
\n",
+
+ "+ Ting\n\n+ Bong\n+ Goo\n",
+ "\n
\n",
+
+ "- Hello\n",
+ "\n
\n",
+
+ "- Yin\n- Yang\n",
+ "\n
\n",
+
+ "- Ting\n- Bong\n- Goo\n",
+ "\n
\n",
+
+ "- Yin\n\n- Yang\n",
+ "\n
\n",
+
+ "- Ting\n\n- Bong\n- Goo\n",
+ "\n
\n",
+
+ "*Hello\n",
+ "\n
\n",
+
+ "* Hello \n Next line \n",
+ "\n
\n",
+
+ "Paragraph\n* No linebreak\n",
+ "\n
\n",
+
+ "* List\n * Nested list\n",
+ "\n
\n",
+
+ "* List\n\n * Nested list\n",
+ "\n
\n
\n",
+
+ "* List\n Second line\n\n + Nested\n",
+ "\n
\n
\n",
+
+ "* List\n + Nested\n\n Continued\n",
+ "\n
\n
\n",
+
+ "* List\n * shallow indent\n",
+ "\n
\n\n\n
\n",
+
+ "* List\n" +
+ " * shallow indent\n" +
+ " * part of second list\n" +
+ " * still second\n" +
+ " * almost there\n" +
+ " * third level\n",
+ "\n
\n" +
+ "
\n",
+
+ "* List\n extra indent, same paragraph\n",
+ "\n" +
+ "
\n" +
+ "
\n
\n",
+
+ "* List\n\n code block\n",
+ "\n
\n",
+
+ "* List\n\n code block with spaces\n",
+ "code block\n
\n
\n",
+
+ "* List\n\n * sublist\n\n normal text\n\n * another sublist\n",
+ " code block with spaces\n
\n
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestOrderedList(t *testing.T) {
+ var tests = []string{
+ "1. Hello\n",
+ "\n
\n\n\n
\n
\n",
+
+ "1. Yin\n2. Yang\n",
+ "\n
\n",
+
+ "1. Ting\n2. Bong\n3. Goo\n",
+ "\n
\n",
+
+ "1. Yin\n\n2. Yang\n",
+ "\n
\n",
+
+ "1. Ting\n\n2. Bong\n3. Goo\n",
+ "\n
\n",
+
+ "1 Hello\n",
+ "\n
\n",
+
+ "1. Hello \n Next line \n",
+ "\n
\n",
+
+ "Paragraph\n1. No linebreak\n",
+ "\n
\n",
+
+ "1. List\n 1. Nested list\n",
+ "\n
\n",
+
+ "1. List\n\n 1. Nested list\n",
+ "\n
\n
\n",
+
+ "1. List\n Second line\n\n 1. Nested\n",
+ "\n
\n
\n",
+
+ "1. List\n 1. Nested\n\n Continued\n",
+ "\n
\n
\n",
+
+ "1. List\n 1. shallow indent\n",
+ "\n
\n\n\n
\n",
+
+ "1. List\n" +
+ " 1. shallow indent\n" +
+ " 2. part of second list\n" +
+ " 3. still second\n" +
+ " 4. almost there\n" +
+ " 1. third level\n",
+ "\n
\n" +
+ "
\n",
+
+ "1. List\n extra indent, same paragraph\n",
+ "\n" +
+ "
\n" +
+ "
\n
\n",
+
+ "1. List\n\n code block\n",
+ "\n
\n",
+
+ "1. List\n\n code block with spaces\n",
+ "code block\n
\n
\n",
+
+ "1. List\n * Mixted list\n",
+ " code block with spaces\n
\n
\n",
+
+ "1. List\n * Mixed list\n",
+ "\n
\n
\n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "\n
\n
\n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "\n
\n
\n",
+
+ "1. numbers\n1. are ignored\n",
+ "\n
\n
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestDefinitionList(t *testing.T) {
+ var tests = []string{
+ "Term 1\n: Definition a\n",
+ "\n
\n",
+
+ "Term 1\n: Definition a \n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\n: Definition b\n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n\nTerm 3\n: Definition c\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\n: Definition b\n\nTerm 2\n: Definition c\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n\n: Definition a\n\n: Definition b\n\nTerm 2\n\n: Definition c\n",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\nNext line\n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\n Next line\n",
+ "\n
\n",
+
+ "Term 1\n: Definition a \n Next line \n",
+ "\n
\n",
+
+ "Term 1\n: Definition a\nNext line\n\nTerm 2\n: Definition b",
+ "\n" +
+ "
\n",
+
+ "Term 1\n: Definition a\n",
+ "\n
\n",
+
+ "Term 1\n:Definition a\n",
+ "\n" +
+ "
\n" +
+ "\n\n" +
+ "
\n" +
+ "\n\n" +
+ "
\n" +
+ "\n
Paragraph\n
Paragraph
\n\nParagraph\n
Paragraph
\n\nParagraph\n
And here?
\n", + + "Paragraph\n\nParagraph
\n\nAnd here?
\n", + } + doTestsBlock(t, tests, 0) +} + +func TestPreformattedHtmlLax(t *testing.T) { + var tests = []string{ + "Paragraph\nParagraph
\n\nParagraph
\n\nParagraph
\n\nAnd here?
\n", + + "Paragraph\n\nParagraph
\n\nAnd here?
\n", + + "Paragraph\nParagraph
\n\nAnd here?
\n", + + "Paragraph\n\nParagraph
\n\nAnd here?
\n", + } + doTestsBlock(t, tests, EXTENSION_LAX_HTML_BLOCKS) +} + +func TestFencedCodeBlock(t *testing.T) { + var tests = []string{ + "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n", + "func foo() bool {\n\treturn true;\n}\n
\n",
+
+ "``` c\n/* special & char < > \" escaping */\n```\n",
+ "/* special & char < > " escaping */\n
\n",
+
+ "``` c\nno *inline* processing ~~of text~~\n```\n",
+ "no *inline* processing ~~of text~~\n
\n",
+
+ "```\nNo language\n```\n",
+ "No language\n
\n",
+
+ "``` {ocaml}\nlanguage in braces\n```\n",
+ "language in braces\n
\n",
+
+ "``` {ocaml} \nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "```{ ocaml }\nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "~ ~~ java\nWith whitespace\n~~~\n",
+ "~ ~~ java\nWith whitespace\n~~~
\n", + + "~~\nonly two\n~~\n", + "~~\nonly two\n~~
\n", + + "```` python\nextra\n````\n", + "extra\n
\n",
+
+ "~~~ perl\nthree to start, four to end\n~~~~\n",
+ "~~~ perl\nthree to start, four to end\n~~~~
\n", + + "~~~~ perl\nfour to start, three to end\n~~~\n", + "~~~~ perl\nfour to start, three to end\n~~~
\n", + + "~~~ bash\ntildes\n~~~\n", + "tildes\n
\n",
+
+ "``` lisp\nno ending\n",
+ "``` lisp\nno ending
\n", + + "~~~ lisp\nend with language\n~~~ lisp\n", + "~~~ lisp\nend with language\n~~~ lisp
\n", + + "```\nmismatched begin and end\n~~~\n", + "```\nmismatched begin and end\n~~~
\n", + + "~~~\nmismatched begin and end\n```\n", + "~~~\nmismatched begin and end\n```
\n", + + " ``` oz\nleading spaces\n```\n", + "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ "``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "``` oz\n
\n\nleading spaces\n ```
\n", + + "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n", + "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n", + + "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nAnd some text after a fenced code block", + "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nAnd some text after a fenced code block
\n", + + "`", + "`
\n", + + "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n\n``` oz\nmultiple code blocks work okay\n```\n\nBla Bla\n", + "Bla bla
\n\ncode blocks breakup paragraphs\n
\n\nBla Bla
\n\nmultiple code blocks work okay\n
\n\nBla Bla
\n", + + "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nSome text in between\n``` oz\nmultiple code blocks work okay\n```\nAnd some text after a fenced code block", + "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nSome text in between
\n\nmultiple code blocks work okay\n
\n\nAnd some text after a fenced code block
\n", + } + doTestsBlock(t, tests, EXTENSION_FENCED_CODE) +} + +func TestFencedCodeInsideBlockquotes(t *testing.T) { + cat := func(s ...string) string { return strings.Join(s, "\n") } + var tests = []string{ + cat("> ```go", + "package moo", + "", + "```", + ""), + `++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> ```go", + "package moo", + "```", + "> ", + "> goo.", + ""), + `+package moo + +
++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> quote", + "continues", + "```", + ""), + `foo
+ ++ +package moo +
goo.
+
++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> ```go", + "package moo", + "```", + "> ", + "> goo.", + "> ", + "> ```go", + "package zoo", + "```", + "> ", + "> woo.", + ""), + `foo
+ +quote +continues +` + "```" + `
+
++`, + } + + // These 2 alternative forms of blockquoted fenced code blocks should produce same output. + forms := [2]string{ + cat("> plain quoted text", + "> ```fenced", + "code", + " with leading single space correctly preserved", + "okay", + "```", + "> rest of quoted text"), + cat("> plain quoted text", + "> ```fenced", + "> code", + "> with leading single space correctly preserved", + "> okay", + "> ```", + "> rest of quoted text"), + } + want := `foo
+ ++ +package moo +
goo.
+ ++ +package zoo +
woo.
+
++` + tests = append(tests, forms[0], want) + tests = append(tests, forms[1], want) + + doTestsBlock(t, tests, EXTENSION_FENCED_CODE) +} + +func TestTable(t *testing.T) { + var tests = []string{ + "a | b\n---|---\nc | d\n", + "plain quoted text
+ ++ +code + with leading single space correctly preserved +okay +
rest of quoted text
+
a | \nb | \n
---|---|
c | \nd | \n
a | b\n---|--\nc | d
\n", + + "|a|b|c|d|\n|----|----|----|---|\n|e|f|g|h|\n", + "a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \nd | \n
---|---|---|---|
e | \nf | \ng | \nh | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
g | \nh | \n\n |
i | \nj | \nk | \n
n | \no | \np | \n
a | \nb | \nc | \n
---|---|---|
d | \ne | \nf | \n
a | \nb | \n" + + "c | \nd | \n
---|---|---|---|
e | \nf | \n" + + "g | \nh | \n
a | \nb | \nc | \n
---|
a | \nb | \nc | \nd | \ne | \n
---|---|---|---|---|
f | \ng | \nh | \ni | \nj | \n
a | \nb|c | \nd | \n
---|---|---|
f | \ng|h | \ni | \n
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
Yin
Yang
Ting
Bong
Goo
*Hello
\n", + + "* Hello \n", + "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
List
\n\nnormal text
\n\nYin
Yang
Ting
Bong
Goo
1 Hello
\n", + + "1.Hello\n", + "1.Hello
\n", + + "1. Hello \n", + "Paragraph
\n\nParagraph
\n\nList
\n\nList\nSecond line
\n\nList
\n\nContinued
List
\n\ncode block\n
List
\n\n code block with spaces\n
func foo() bool {\n\treturn true;\n}\n
\n",
+
+ "``` c\n/* special & char < > \" escaping */\n```\n",
+ "/* special & char < > " escaping */\n
\n",
+
+ "``` c\nno *inline* processing ~~of text~~\n```\n",
+ "no *inline* processing ~~of text~~\n
\n",
+
+ "```\nNo language\n```\n",
+ "No language\n
\n",
+
+ "``` {ocaml}\nlanguage in braces\n```\n",
+ "language in braces\n
\n",
+
+ "``` {ocaml} \nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "```{ ocaml }\nwith extra whitespace\n```\n",
+ "with extra whitespace\n
\n",
+
+ "~ ~~ java\nWith whitespace\n~~~\n",
+ "~ ~~ java\nWith whitespace\n~~~
\n", + + "~~\nonly two\n~~\n", + "~~\nonly two\n~~
\n", + + "```` python\nextra\n````\n", + "extra\n
\n",
+
+ "~~~ perl\nthree to start, four to end\n~~~~\n",
+ "~~~ perl\nthree to start, four to end\n~~~~
\n", + + "~~~~ perl\nfour to start, three to end\n~~~\n", + "~~~~ perl\nfour to start, three to end\n~~~
\n", + + "~~~ bash\ntildes\n~~~\n", + "tildes\n
\n",
+
+ "``` lisp\nno ending\n",
+ "``` lisp\nno ending
\n", + + "~~~ lisp\nend with language\n~~~ lisp\n", + "~~~ lisp\nend with language\n~~~ lisp
\n", + + "```\nmismatched begin and end\n~~~\n", + "```\nmismatched begin and end\n~~~
\n", + + "~~~\nmismatched begin and end\n```\n", + "~~~\nmismatched begin and end\n```
\n", + + " ``` oz\nleading spaces\n```\n", + "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ "``` oz\nleading spaces\n ```\n",
+ "leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "``` oz\n
\n\nleading spaces
\n\n```\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_FENCED_CODE|EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK)
+}
+
+func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) {
+ var tests = []string{
+ "% Some title\n" +
+ "% Another title line\n" +
+ "% Yep, more here too\n",
+ "Some text
\n\n\n", + + "Some text\n\n\n", + "Some text
\n\n\n", + + "Some text\n\n\n", + "Some text
\n\n\n", + } + doTestsBlock(t, tests, 0) +} diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 000000000..74e67ee82 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,949 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross