diff --git a/custom/conf/app.ini.sample b/custom/conf/app.ini.sample
index e6ccab95d9..33cd0506ed 100644
--- a/custom/conf/app.ini.sample
+++ b/custom/conf/app.ini.sample
@@ -479,6 +479,8 @@ DEFAULT_ORG_MEMBER_VISIBLE = false
 ; Default value for EnableDependencies
 ; Repositories will use dependencies by default depending on this setting
 DEFAULT_ENABLE_DEPENDENCIES = true
+; Whether dependencies can be added from any repository where the user is granted access, or only from the current repository.
+ALLOW_CROSS_REPOSITORY_DEPENDENCIES = true
 ; Enable heatmap on users profiles.
 ENABLE_USER_HEATMAP = true
 ; Enable Timetracking
diff --git a/docs/content/doc/advanced/config-cheat-sheet.en-us.md b/docs/content/doc/advanced/config-cheat-sheet.en-us.md
index bcf871a3a4..1e24255d8d 100644
--- a/docs/content/doc/advanced/config-cheat-sheet.en-us.md
+++ b/docs/content/doc/advanced/config-cheat-sheet.en-us.md
@@ -297,6 +297,7 @@ relation to port exhaustion.
 - `RECAPTCHA_SITEKEY`: **""**: Go to https://www.google.com/recaptcha/admin to get a sitekey for recaptcha.
 - `RECAPTCHA_URL`: **https://www.google.com/recaptcha/**: Set the recaptcha url - allows the use of recaptcha net.
 - `DEFAULT_ENABLE_DEPENDENCIES`: **true**: Enable this to have dependencies enabled by default.
+- `ALLOW_CROSS_REPOSITORY_DEPENDENCIES`: **true**: Enable this to allow dependencies on issues from any repository where the user is granted access.
 - `ENABLE_USER_HEATMAP`: **true**: Enable this to display the heatmap on users profiles.
 - `EMAIL_DOMAIN_WHITELIST`: **\**: If non-empty, list of domain names that can only be used to register on this instance.
diff --git a/go.mod b/go.mod
index e1bbd9ac89..02f0c46f22 100644
--- a/go.mod
+++ b/go.mod
@@ -22,7 +22,6 @@ require (
 	github.com/blevesearch/go-porterstemmer v0.0.0-20141230013033-23a2c8e5cf1f // indirect
 	github.com/blevesearch/segment v0.0.0-20160105220820-db70c57796cc // indirect
 	github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 // indirect
-	github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f
 	github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe // indirect
 	github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect
 	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
@@ -73,6 +72,7 @@ require (
 	github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect
 	github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc
 	github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5
+	github.com/niklasfasching/go-org v0.1.7
 	github.com/oliamb/cutter v0.2.2
 	github.com/philhofer/fwd v1.0.0 // indirect
 	github.com/pkg/errors v0.8.1
@@ -80,12 +80,13 @@ require (
 	github.com/prometheus/client_golang v1.1.0
 	github.com/prometheus/procfs v0.0.4 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
-	github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff
+	github.com/russross/blackfriday v2.0.0+incompatible // indirect
+	github.com/russross/blackfriday/v2 v2.0.1
 	github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect
 	github.com/satori/go.uuid v1.2.0
 	github.com/sergi/go-diff v1.0.0
 	github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
-	github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc // indirect
+	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
 	github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
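Note that the `russross/blackfriday` entry above is a major-version module bump, so every import in the tree changes to the `/v2` path as well. A minimal, self-contained sketch of the v2 entry point this patch switches to (standalone example with sample input, not code from the patch):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2" // the v2 major version lives under /v2
)

func main() {
	md := []byte("A [link](https://example.com) and some `code`.\n")
	// v2 drops the v1 Markdown(input, renderer, flags) call in favour of
	// Run plus functional options; modules/markup/markdown below passes
	// blackfriday.WithRenderer and blackfriday.WithExtensions explicitly.
	html := blackfriday.Run(md)
	fmt.Println(string(html))
}
```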
github.com/stretchr/testify v1.4.0 @@ -100,7 +101,7 @@ require ( github.com/willf/bitset v0.0.0-20180426185212-8ce1146b8621 // indirect github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad - golang.org/x/net v0.0.0-20190909003024-a7b16738d86b + golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b golang.org/x/text v0.3.2 diff --git a/go.sum b/go.sum index 2eeaa79810..7445469d7e 100644 --- a/go.sum +++ b/go.sum @@ -86,8 +86,6 @@ github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26/go.mod h1:paBWMc github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA= github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f h1:REH9VH5ubNR0skLaOxK7TRJeRbE2dDfvaouQo8FsRcA= -github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f/go.mod h1:6QaC0vFoKWYDth94dHFNgRT2YkT5FHdQp/Yx15aAAi0= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/corbym/gocrest v1.0.3 h1:gwEdq6RkTmq+09CTuM29DfKOCtZ7G7bcyxs3IZ6EVdU= github.com/corbym/gocrest v1.0.3/go.mod h1:maVFL5lbdS2PgfOQgGRWDYTeunSWQeiEgoNdTABShCs= @@ -425,6 +423,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/RzCAL/191HHwJA5b13R6diVlY= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= +github.com/niklasfasching/go-org v0.1.6 h1:F521WcqRNl8OJumlgAnekZgERaTA2HpfOYYfVEKOeI8= +github.com/niklasfasching/go-org v0.1.6/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUvGZCCH1Nz0VdrU= +github.com/niklasfasching/go-org v0.1.7 h1:t3V+3XnS/7BhKv/7SlMUa8FvAiq577/a1T3D7mLIRXE= +github.com/niklasfasching/go-org v0.1.7/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUvGZCCH1Nz0VdrU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oliamb/cutter v0.2.2 h1:Lfwkya0HHNU1YLnGv2hTkzHfasrSMkgv4Dn+5rmlk3k= github.com/oliamb/cutter v0.2.2/go.mod h1:4BenG2/4GuRBDbVm/OPahDVqbrOemzpPiG5mi1iryBU= @@ -487,8 +489,10 @@ github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qq github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff h1:g9ZlAHmkc/h5So+OjNCkZWh+FjuKEOOOoyRkqlGA8+c= -github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk= +github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 
v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI= github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= @@ -499,6 +503,8 @@ github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnP github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc h1:3wIrJvFb3Pf6B/2mDBnN1G5IfUVev4X5apadQlWOczE= github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= @@ -650,6 +656,8 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 h1:N66aaryRB3Ax92gH0v3hp1QYZ3zWWCCUR/j8Ifh45Ss= +golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180620175406-ef147856a6dd/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= diff --git a/models/issue.go b/models/issue.go index 17205cc2fa..78413468b2 100644 --- a/models/issue.go +++ b/models/issue.go @@ -9,6 +9,7 @@ import ( "path" "regexp" "sort" + "strconv" "strings" "code.gitea.io/gitea/modules/base" @@ -378,6 +379,12 @@ func (issue *Issue) apiFormat(e Engine) *api.Issue { Updated: issue.UpdatedUnix.AsTime(), } + apiIssue.Repo = &api.RepositoryMeta{ + ID: issue.Repo.ID, + Name: issue.Repo.Name, + FullName: issue.Repo.FullName(), + } + if issue.ClosedUnix != 0 { apiIssue.Closed = issue.ClosedUnix.AsTimePtr() } @@ -1047,11 +1054,13 @@ type IssuesOptions struct { LabelIDs []int64 SortType string IssueIDs []int64 + // prioritize issues from this repo + PriorityRepoID int64 } // sortIssuesSession sort an issues-related session based on the provided // sortType string -func sortIssuesSession(sess *xorm.Session, sortType string) { +func sortIssuesSession(sess *xorm.Session, sortType string, priorityRepoID int64) { switch sortType { case "oldest": sess.Asc("issue.created_unix") @@ -1069,6 +1078,8 @@ func sortIssuesSession(sess *xorm.Session, sortType string) { sess.Asc("issue.deadline_unix") case "farduedate": sess.Desc("issue.deadline_unix") + case "priorityrepo": + 
sess.OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(priorityRepoID, 10) + " THEN 1 ELSE 2 END, issue.created_unix DESC") default: sess.Desc("issue.created_unix") } @@ -1170,7 +1181,7 @@ func Issues(opts *IssuesOptions) ([]*Issue, error) { defer sess.Close() opts.setupSession(sess) - sortIssuesSession(sess, opts.SortType) + sortIssuesSession(sess, opts.SortType, opts.PriorityRepoID) issues := make([]*Issue, 0, setting.UI.IssuePagingNum) if err := sess.Find(&issues); err != nil { @@ -1476,8 +1487,8 @@ func GetRepoIssueStats(repoID, uid int64, filterMode int, isPull bool) (numOpen } // SearchIssueIDsByKeyword search issues on database -func SearchIssueIDsByKeyword(kw string, repoID int64, limit, start int) (int64, []int64, error) { - var repoCond = builder.Eq{"repo_id": repoID} +func SearchIssueIDsByKeyword(kw string, repoIDs []int64, limit, start int) (int64, []int64, error) { + var repoCond = builder.In("repo_id", repoIDs) var subQuery = builder.Select("id").From("issue").Where(repoCond) var cond = builder.And( repoCond, @@ -1566,33 +1577,43 @@ func UpdateIssueDeadline(issue *Issue, deadlineUnix timeutil.TimeStamp, doer *Us return sess.Commit() } +// DependencyInfo represents high level information about an issue which is a dependency of another issue. +type DependencyInfo struct { + Issue `xorm:"extends"` + Repository `xorm:"extends"` +} + // Get Blocked By Dependencies, aka all issues this issue is blocked by. -func (issue *Issue) getBlockedByDependencies(e Engine) (issueDeps []*Issue, err error) { +func (issue *Issue) getBlockedByDependencies(e Engine) (issueDeps []*DependencyInfo, err error) { return issueDeps, e. - Table("issue_dependency"). - Select("issue.*"). - Join("INNER", "issue", "issue.id = issue_dependency.dependency_id"). + Table("issue"). + Join("INNER", "repository", "repository.id = issue.repo_id"). + Join("INNER", "issue_dependency", "issue_dependency.dependency_id = issue.id"). Where("issue_id = ?", issue.ID). + //sort by repo id then created date, with the issues of the same repo at the beginning of the list + OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC"). Find(&issueDeps) } // Get Blocking Dependencies, aka all issues this issue blocks. -func (issue *Issue) getBlockingDependencies(e Engine) (issueDeps []*Issue, err error) { +func (issue *Issue) getBlockingDependencies(e Engine) (issueDeps []*DependencyInfo, err error) { return issueDeps, e. - Table("issue_dependency"). - Select("issue.*"). - Join("INNER", "issue", "issue.id = issue_dependency.issue_id"). + Table("issue"). + Join("INNER", "repository", "repository.id = issue.repo_id"). + Join("INNER", "issue_dependency", "issue_dependency.issue_id = issue.id"). Where("dependency_id = ?", issue.ID). + //sort by repo id then created date, with the issues of the same repo at the beginning of the list + OrderBy("CASE WHEN issue.repo_id = " + strconv.FormatInt(issue.RepoID, 10) + " THEN 0 ELSE issue.repo_id END, issue.created_unix DESC"). 
Find(&issueDeps) } // BlockedByDependencies finds all Dependencies an issue is blocked by -func (issue *Issue) BlockedByDependencies() ([]*Issue, error) { +func (issue *Issue) BlockedByDependencies() ([]*DependencyInfo, error) { return issue.getBlockedByDependencies(x) } // BlockingDependencies returns all blocking dependencies, aka all other issues a given issue blocks -func (issue *Issue) BlockingDependencies() ([]*Issue, error) { +func (issue *Issue) BlockingDependencies() ([]*DependencyInfo, error) { return issue.getBlockingDependencies(x) } diff --git a/models/issue_label.go b/models/issue_label.go index 1fc873cfd4..497756af5b 100644 --- a/models/issue_label.go +++ b/models/issue_label.go @@ -250,6 +250,19 @@ func GetLabelIDsInRepoByNames(repoID int64, labelNames []string) ([]int64, error Find(&labelIDs) } +// GetLabelIDsInReposByNames returns a list of labelIDs by names in one of the given +// repositories. +// it silently ignores label names that do not belong to the repository. +func GetLabelIDsInReposByNames(repoIDs []int64, labelNames []string) ([]int64, error) { + labelIDs := make([]int64, 0, len(labelNames)) + return labelIDs, x.Table("label"). + In("repo_id", repoIDs). + In("name", labelNames). + Asc("name"). + Cols("id"). + Find(&labelIDs) +} + // GetLabelInRepoByID returns a label by ID in given repository. func GetLabelInRepoByID(repoID, labelID int64) (*Label, error) { return getLabelInRepoByID(x, repoID, labelID) diff --git a/models/issue_test.go b/models/issue_test.go index 592a0e3d77..d16c1de25d 100644 --- a/models/issue_test.go +++ b/models/issue_test.go @@ -264,24 +264,23 @@ func TestIssue_loadTotalTimes(t *testing.T) { func TestIssue_SearchIssueIDsByKeyword(t *testing.T) { assert.NoError(t, PrepareTestDatabase()) - - total, ids, err := SearchIssueIDsByKeyword("issue2", 1, 10, 0) + total, ids, err := SearchIssueIDsByKeyword("issue2", []int64{1}, 10, 0) assert.NoError(t, err) assert.EqualValues(t, 1, total) assert.EqualValues(t, []int64{2}, ids) - total, ids, err = SearchIssueIDsByKeyword("first", 1, 10, 0) + total, ids, err = SearchIssueIDsByKeyword("first", []int64{1}, 10, 0) assert.NoError(t, err) assert.EqualValues(t, 1, total) assert.EqualValues(t, []int64{1}, ids) - total, ids, err = SearchIssueIDsByKeyword("for", 1, 10, 0) + total, ids, err = SearchIssueIDsByKeyword("for", []int64{1}, 10, 0) assert.NoError(t, err) assert.EqualValues(t, 4, total) assert.EqualValues(t, []int64{1, 2, 3, 5}, ids) // issue1's comment id 2 - total, ids, err = SearchIssueIDsByKeyword("good", 1, 10, 0) + total, ids, err = SearchIssueIDsByKeyword("good", []int64{1}, 10, 0) assert.NoError(t, err) assert.EqualValues(t, 1, total) assert.EqualValues(t, []int64{1}, ids) diff --git a/models/pull_list.go b/models/pull_list.go index 4ec6fdde3b..2c2f53f4a1 100644 --- a/models/pull_list.go +++ b/models/pull_list.go @@ -87,7 +87,7 @@ func PullRequests(baseRepoID int64, opts *PullRequestsOptions) ([]*PullRequest, prs := make([]*PullRequest, 0, ItemsPerPage) findSession, err := listPullRequestStatement(baseRepoID, opts) - sortIssuesSession(findSession, opts.SortType) + sortIssuesSession(findSession, opts.SortType, 0) if err != nil { log.Error("listPullRequestStatement: %v", err) return nil, maxResults, err diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go index 36279198b8..24443e54a3 100644 --- a/modules/indexer/issues/bleve.go +++ b/modules/indexer/issues/bleve.go @@ -218,9 +218,18 @@ func (b *BleveIndexer) Delete(ids ...int64) error { // Search searches for issues by 
given conditions. // Returns the matching issue IDs -func (b *BleveIndexer) Search(keyword string, repoID int64, limit, start int) (*SearchResult, error) { +func (b *BleveIndexer) Search(keyword string, repoIDs []int64, limit, start int) (*SearchResult, error) { + var repoQueriesP []*query.NumericRangeQuery + for _, repoID := range repoIDs { + repoQueriesP = append(repoQueriesP, numericEqualityQuery(repoID, "RepoID")) + } + repoQueries := make([]query.Query, len(repoQueriesP)) + for i, v := range repoQueriesP { + repoQueries[i] = query.Query(v) + } + indexerQuery := bleve.NewConjunctionQuery( - numericEqualityQuery(repoID, "RepoID"), + bleve.NewDisjunctionQuery(repoQueries...), bleve.NewDisjunctionQuery( newMatchPhraseQuery(keyword, "Title", issueIndexerAnalyzer), newMatchPhraseQuery(keyword, "Content", issueIndexerAnalyzer), @@ -242,8 +251,7 @@ func (b *BleveIndexer) Search(keyword string, repoID int64, limit, start int) (* return nil, err } ret.Hits = append(ret.Hits, Match{ - ID: id, - RepoID: repoID, + ID: id, }) } return &ret, nil diff --git a/modules/indexer/issues/bleve_test.go b/modules/indexer/issues/bleve_test.go index 8ec274566f..94d935d89d 100644 --- a/modules/indexer/issues/bleve_test.go +++ b/modules/indexer/issues/bleve_test.go @@ -76,7 +76,7 @@ func TestBleveIndexAndSearch(t *testing.T) { ) for _, kw := range keywords { - res, err := indexer.Search(kw.Keyword, 2, 10, 0) + res, err := indexer.Search(kw.Keyword, []int64{2}, 10, 0) assert.NoError(t, err) var ids = make([]int64, 0, len(res.Hits)) diff --git a/modules/indexer/issues/db.go b/modules/indexer/issues/db.go index 6e7f0c1a6e..7d4e389471 100644 --- a/modules/indexer/issues/db.go +++ b/modules/indexer/issues/db.go @@ -26,8 +26,8 @@ func (db *DBIndexer) Delete(ids ...int64) error { } // Search dummy function -func (db *DBIndexer) Search(kw string, repoID int64, limit, start int) (*SearchResult, error) { - total, ids, err := models.SearchIssueIDsByKeyword(kw, repoID, limit, start) +func (db *DBIndexer) Search(kw string, repoIDs []int64, limit, start int) (*SearchResult, error) { + total, ids, err := models.SearchIssueIDsByKeyword(kw, repoIDs, limit, start) if err != nil { return nil, err } @@ -37,8 +37,7 @@ func (db *DBIndexer) Search(kw string, repoID int64, limit, start int) (*SearchR } for _, id := range ids { result.Hits = append(result.Hits, Match{ - ID: id, - RepoID: repoID, + ID: id, }) } return &result, nil diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go index 4f410daf4c..76da46d759 100644 --- a/modules/indexer/issues/indexer.go +++ b/modules/indexer/issues/indexer.go @@ -28,9 +28,8 @@ type IndexerData struct { // Match represents on search result type Match struct { - ID int64 `json:"id"` - RepoID int64 `json:"repo_id"` - Score float64 `json:"score"` + ID int64 `json:"id"` + Score float64 `json:"score"` } // SearchResult represents search results @@ -44,7 +43,7 @@ type Indexer interface { Init() (bool, error) Index(issue []*IndexerData) error Delete(ids ...int64) error - Search(kw string, repoID int64, limit, start int) (*SearchResult, error) + Search(kw string, repoIDs []int64, limit, start int) (*SearchResult, error) } type indexerHolder struct { @@ -262,9 +261,9 @@ func DeleteRepoIssueIndexer(repo *models.Repository) { } // SearchIssuesByKeyword search issue ids by keywords and repo id -func SearchIssuesByKeyword(repoID int64, keyword string) ([]int64, error) { +func SearchIssuesByKeyword(repoIDs []int64, keyword string) ([]int64, error) { var issueIDs []int64 - res, err := 
holder.get().Search(keyword, repoID, 1000, 0) + res, err := holder.get().Search(keyword, repoIDs, 1000, 0) if err != nil { return nil, err } diff --git a/modules/indexer/issues/indexer_test.go b/modules/indexer/issues/indexer_test.go index 212c2edfbe..a45fede9ac 100644 --- a/modules/indexer/issues/indexer_test.go +++ b/modules/indexer/issues/indexer_test.go @@ -30,19 +30,19 @@ func TestBleveSearchIssues(t *testing.T) { time.Sleep(5 * time.Second) - ids, err := SearchIssuesByKeyword(1, "issue2") + ids, err := SearchIssuesByKeyword([]int64{1}, "issue2") assert.NoError(t, err) assert.EqualValues(t, []int64{2}, ids) - ids, err = SearchIssuesByKeyword(1, "first") + ids, err = SearchIssuesByKeyword([]int64{1}, "first") assert.NoError(t, err) assert.EqualValues(t, []int64{1}, ids) - ids, err = SearchIssuesByKeyword(1, "for") + ids, err = SearchIssuesByKeyword([]int64{1}, "for") assert.NoError(t, err) assert.EqualValues(t, []int64{1, 2, 3, 5}, ids) - ids, err = SearchIssuesByKeyword(1, "good") + ids, err = SearchIssuesByKeyword([]int64{1}, "good") assert.NoError(t, err) assert.EqualValues(t, []int64{1}, ids) } @@ -53,19 +53,19 @@ func TestDBSearchIssues(t *testing.T) { setting.Indexer.IssueType = "db" InitIssueIndexer(true) - ids, err := SearchIssuesByKeyword(1, "issue2") + ids, err := SearchIssuesByKeyword([]int64{1}, "issue2") assert.NoError(t, err) assert.EqualValues(t, []int64{2}, ids) - ids, err = SearchIssuesByKeyword(1, "first") + ids, err = SearchIssuesByKeyword([]int64{1}, "first") assert.NoError(t, err) assert.EqualValues(t, []int64{1}, ids) - ids, err = SearchIssuesByKeyword(1, "for") + ids, err = SearchIssuesByKeyword([]int64{1}, "for") assert.NoError(t, err) assert.EqualValues(t, []int64{1, 2, 3, 5}, ids) - ids, err = SearchIssuesByKeyword(1, "good") + ids, err = SearchIssuesByKeyword([]int64{1}, "good") assert.NoError(t, err) assert.EqualValues(t, []int64{1}, ids) } diff --git a/modules/markup/html_test.go b/modules/markup/html_test.go index 91ef320b40..07747e97e1 100644 --- a/modules/markup/html_test.go +++ b/modules/markup/html_test.go @@ -323,6 +323,6 @@ func TestRender_ShortLinks(t *testing.T) { `

`) test( "

[[foobar]]

", - `

[[foobar]]

`, - `

[[foobar]]

`) + `

[[foobar]]

`, + `

[[foobar]]

`) } diff --git a/modules/markup/markdown/markdown.go b/modules/markup/markdown/markdown.go index d9fc768891..ff78d7ea3a 100644 --- a/modules/markup/markdown/markdown.go +++ b/modules/markup/markdown/markdown.go @@ -7,13 +7,14 @@ package markdown import ( "bytes" + "io" "strings" "code.gitea.io/gitea/modules/markup" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" - "github.com/russross/blackfriday" + "github.com/russross/blackfriday/v2" ) // Renderer is a extended version of underlying render object. @@ -25,134 +26,138 @@ type Renderer struct { var byteMailto = []byte("mailto:") -// Link defines how formal links should be processed to produce corresponding HTML elements. -func (r *Renderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { - // special case: this is not a link, a hash link or a mailto:, so it's a - // relative URL - if len(link) > 0 && !markup.IsLink(link) && - link[0] != '#' && !bytes.HasPrefix(link, byteMailto) { - lnk := string(link) +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + _, _ = w.Write(s[start:end]) + _, _ = w.Write(escSeq) + start = end + 1 + } + end++ + } + if start < len(s) && end <= len(s) { + _, _ = w.Write(s[start:end]) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. +func (r *Renderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + switch node.Type { + case blackfriday.Image: + prefix := r.URLPrefix if r.IsWiki { - lnk = util.URLJoin("wiki", lnk) + prefix = util.URLJoin(prefix, "wiki", "raw") } - mLink := util.URLJoin(r.URLPrefix, lnk) - link = []byte(mLink) - } - - if len(content) > 10 && string(content[0:9]) == " 0 { - out.WriteByte('\n') - } - - if flags&blackfriday.LIST_TYPE_DEFINITION != 0 { - out.WriteString("
") - } else if flags&blackfriday.LIST_TYPE_ORDERED != 0 { - out.WriteString("
    ") - } else { - out.WriteString("
      ") - } - if !text() { - out.Truncate(marker) - return - } - if flags&blackfriday.LIST_TYPE_DEFINITION != 0 { - out.WriteString("
\n") - } else if flags&blackfriday.LIST_TYPE_ORDERED != 0 { - out.WriteString("\n") - } else { - out.WriteString("\n") - } -} - -// ListItem defines how list items should be processed to produce corresponding HTML elements. -func (r *Renderer) ListItem(out *bytes.Buffer, text []byte, flags int) { - // Detect procedures to draw checkboxes. - prefix := "" - if bytes.HasPrefix(text, []byte("

")) { - prefix = "

" - } - switch { - case bytes.HasPrefix(text, []byte(prefix+"[ ] ")): - text = append([]byte(``), text[3+len(prefix):]...) - if prefix != "" { - text = bytes.Replace(text, []byte(prefix), []byte{}, 1) + prefix = strings.Replace(prefix, "/src/", "/media/", 1) + link := node.LinkData.Destination + if len(link) > 0 && !markup.IsLink(link) { + lnk := string(link) + lnk = util.URLJoin(prefix, lnk) + lnk = strings.Replace(lnk, " ", "+", -1) + link = []byte(lnk) } - case bytes.HasPrefix(text, []byte(prefix+"[x] ")): - text = append([]byte(``), text[3+len(prefix):]...) - if prefix != "" { - text = bytes.Replace(text, []byte(prefix), []byte{}, 1) + node.LinkData.Destination = link + // Render link around image only if parent is not link already + if node.Parent != nil && node.Parent.Type != blackfriday.Link { + if entering { + _, _ = w.Write([]byte(``)) + return r.Renderer.RenderNode(w, node, entering) + } + s := r.Renderer.RenderNode(w, node, entering) + _, _ = w.Write([]byte(``)) + return s + } + return r.Renderer.RenderNode(w, node, entering) + case blackfriday.Link: + // special case: this is not a link, a hash link or a mailto:, so it's a + // relative URL + link := node.LinkData.Destination + if len(link) > 0 && !markup.IsLink(link) && + link[0] != '#' && !bytes.HasPrefix(link, byteMailto) && + node.LinkData.Footnote == nil { + lnk := string(link) + if r.IsWiki { + lnk = util.URLJoin("wiki", lnk) + } + link = []byte(util.URLJoin(r.URLPrefix, lnk)) + } + node.LinkData.Destination = link + return r.Renderer.RenderNode(w, node, entering) + case blackfriday.Text: + isListItem := false + for n := node.Parent; n != nil; n = n.Parent { + if n.Type == blackfriday.Item { + isListItem = true + break + } + } + if isListItem { + text := node.Literal + switch { + case bytes.HasPrefix(text, []byte("[ ] ")): + _, _ = w.Write([]byte(``)) + text = text[3:] + case bytes.HasPrefix(text, []byte("[x] ")): + _, _ = w.Write([]byte(``)) + text = text[3:] + } + node.Literal = text } } - r.Renderer.ListItem(out, text, flags) -} - -// Image defines how images should be processed to produce corresponding HTML elements. 
-func (r *Renderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { - prefix := r.URLPrefix - if r.IsWiki { - prefix = util.URLJoin(prefix, "wiki", "raw") - } - prefix = strings.Replace(prefix, "/src/", "/media/", 1) - if len(link) > 0 && !markup.IsLink(link) { - lnk := string(link) - lnk = util.URLJoin(prefix, lnk) - lnk = strings.Replace(lnk, " ", "+", -1) - link = []byte(lnk) - } - - // Put a link around it pointing to itself by default - out.WriteString(``) - r.Renderer.Image(out, link, title, alt) - out.WriteString("") + return r.Renderer.RenderNode(w, node, entering) } const ( blackfridayExtensions = 0 | - blackfriday.EXTENSION_NO_INTRA_EMPHASIS | - blackfriday.EXTENSION_TABLES | - blackfriday.EXTENSION_FENCED_CODE | - blackfriday.EXTENSION_STRIKETHROUGH | - blackfriday.EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK | - blackfriday.EXTENSION_DEFINITION_LISTS | - blackfriday.EXTENSION_FOOTNOTES | - blackfriday.EXTENSION_HEADER_IDS | - blackfriday.EXTENSION_AUTO_HEADER_IDS + blackfriday.NoIntraEmphasis | + blackfriday.Tables | + blackfriday.FencedCode | + blackfriday.Strikethrough | + blackfriday.NoEmptyLineBeforeBlock | + blackfriday.DefinitionLists | + blackfriday.Footnotes | + blackfriday.HeadingIDs | + blackfriday.AutoHeadingIDs blackfridayHTMLFlags = 0 | - blackfriday.HTML_SKIP_STYLE | - blackfriday.HTML_OMIT_CONTENTS | - blackfriday.HTML_USE_SMARTYPANTS + blackfriday.Smartypants ) // RenderRaw renders Markdown to HTML without handling special links. func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte { renderer := &Renderer{ - Renderer: blackfriday.HtmlRenderer(blackfridayHTMLFlags, "", ""), + Renderer: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{ + Flags: blackfridayHTMLFlags, + }), URLPrefix: urlPrefix, IsWiki: wikiMarkdown, } exts := blackfridayExtensions if setting.Markdown.EnableHardLineBreak { - exts |= blackfriday.EXTENSION_HARD_LINE_BREAK + exts |= blackfriday.HardLineBreak } - body = blackfriday.Markdown(body, renderer, exts) + body = blackfriday.Run(body, blackfriday.WithRenderer(renderer), blackfriday.WithExtensions(exts)) return markup.SanitizeBytes(body) } diff --git a/modules/markup/markdown/markdown_test.go b/modules/markup/markdown/markdown_test.go index 669b49367e..b29f870ce5 100644 --- a/modules/markup/markdown/markdown_test.go +++ b/modules/markup/markdown/markdown_test.go @@ -166,13 +166,13 @@ func testAnswers(baseURLContent, baseURLImages string) []string {

<h2 id="footnotes">Footnotes</h2>

<p>Here is a simple footnote,<sup id="fnref:1"><a href="#fn:1" rel="nofollow">1</a></sup> and here is a longer one.<sup id="fnref:bignote"><a href="#fn:bignote" rel="nofollow">2</a></sup></p>

+<div class="footnotes">

-<hr />
-<ol>
-<li>This is the first footnote.</li>
-<li>Here is one with multiple paragraphs and code.
+<hr />
+
+<ol>
+<li id="fn:1">This is the first footnote.</li>
+<li id="fn:bignote"><p>Here is one with multiple paragraphs and code.</p>

 <p><code>{ my code }</code></p>

-<p>Add as many paragraphs as you like.</p>
-</li>
-</ol>
+<p>Add as many paragraphs as you like.</p></li>
+</ol>
+
+</div>
`, } diff --git a/modules/markup/mdstripper/mdstripper.go b/modules/markup/mdstripper/mdstripper.go index 7a901b17a9..d248944b68 100644 --- a/modules/markup/mdstripper/mdstripper.go +++ b/modules/markup/mdstripper/mdstripper.go @@ -6,43 +6,39 @@ package mdstripper import ( "bytes" + "io" - "github.com/russross/blackfriday" + "github.com/russross/blackfriday/v2" ) // MarkdownStripper extends blackfriday.Renderer type MarkdownStripper struct { - blackfriday.Renderer links []string coallesce bool + empty bool } const ( blackfridayExtensions = 0 | - blackfriday.EXTENSION_NO_INTRA_EMPHASIS | - blackfriday.EXTENSION_TABLES | - blackfriday.EXTENSION_FENCED_CODE | - blackfriday.EXTENSION_STRIKETHROUGH | - blackfriday.EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK | - blackfriday.EXTENSION_DEFINITION_LISTS | - blackfriday.EXTENSION_FOOTNOTES | - blackfriday.EXTENSION_HEADER_IDS | - blackfriday.EXTENSION_AUTO_HEADER_IDS | + blackfriday.NoIntraEmphasis | + blackfriday.Tables | + blackfriday.FencedCode | + blackfriday.Strikethrough | + blackfriday.NoEmptyLineBeforeBlock | + blackfriday.DefinitionLists | + blackfriday.Footnotes | + blackfriday.HeadingIDs | + blackfriday.AutoHeadingIDs | // Not included in modules/markup/markdown/markdown.go; // required here to process inline links - blackfriday.EXTENSION_AUTOLINK + blackfriday.Autolink ) -//revive:disable:var-naming Implementing the Rendering interface requires breaking some linting rules - // StripMarkdown parses markdown content by removing all markup and code blocks // in order to extract links and other references func StripMarkdown(rawBytes []byte) (string, []string) { - stripper := &MarkdownStripper{ - links: make([]string, 0, 10), - } - body := blackfriday.Markdown(rawBytes, stripper, blackfridayExtensions) - return string(body), stripper.GetLinks() + buf, links := StripMarkdownBytes(rawBytes) + return string(buf), links } // StripMarkdownBytes parses markdown content by removing all markup and code blocks @@ -50,205 +46,67 @@ func StripMarkdown(rawBytes []byte) (string, []string) { func StripMarkdownBytes(rawBytes []byte) ([]byte, []string) { stripper := &MarkdownStripper{ links: make([]string, 0, 10), + empty: true, } - body := blackfriday.Markdown(rawBytes, stripper, blackfridayExtensions) - return body, stripper.GetLinks() + + parser := blackfriday.New(blackfriday.WithRenderer(stripper), blackfriday.WithExtensions(blackfridayExtensions)) + ast := parser.Parse(rawBytes) + var buf bytes.Buffer + stripper.RenderHeader(&buf, ast) + ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + return stripper.RenderNode(&buf, node, entering) + }) + stripper.RenderFooter(&buf, ast) + return buf.Bytes(), stripper.GetLinks() } -// block-level callbacks - -// BlockCode dummy function to proceed with rendering -func (r *MarkdownStripper) BlockCode(out *bytes.Buffer, text []byte, infoString string) { - // Not rendered +// RenderNode is the main rendering method. It will be called once for +// every leaf node and twice for every non-leaf node (first with +// entering=true, then with entering=false). The method should write its +// rendition of the node to the supplied writer w. 
+func (r *MarkdownStripper) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if !entering { + return blackfriday.GoToNext + } + switch node.Type { + case blackfriday.Text: + r.processString(w, node.Literal, node.Parent == nil) + return blackfriday.GoToNext + case blackfriday.Link: + r.processLink(w, node.LinkData.Destination) + r.coallesce = false + return blackfriday.SkipChildren + } r.coallesce = false + return blackfriday.GoToNext } -// BlockQuote dummy function to proceed with rendering -func (r *MarkdownStripper) BlockQuote(out *bytes.Buffer, text []byte) { - // FIXME: perhaps it's better to leave out block quote for this? - r.processString(out, text, false) +// RenderHeader is a method that allows the renderer to produce some +// content preceding the main body of the output document. +func (r *MarkdownStripper) RenderHeader(w io.Writer, ast *blackfriday.Node) { } -// BlockHtml dummy function to proceed with rendering -func (r *MarkdownStripper) BlockHtml(out *bytes.Buffer, text []byte) { //nolint - // Not rendered - r.coallesce = false +// RenderFooter is a symmetric counterpart of RenderHeader. +func (r *MarkdownStripper) RenderFooter(w io.Writer, ast *blackfriday.Node) { } -// Header dummy function to proceed with rendering -func (r *MarkdownStripper) Header(out *bytes.Buffer, text func() bool, level int, id string) { - text() - r.coallesce = false -} - -// HRule dummy function to proceed with rendering -func (r *MarkdownStripper) HRule(out *bytes.Buffer) { - // Not rendered - r.coallesce = false -} - -// List dummy function to proceed with rendering -func (r *MarkdownStripper) List(out *bytes.Buffer, text func() bool, flags int) { - text() - r.coallesce = false -} - -// ListItem dummy function to proceed with rendering -func (r *MarkdownStripper) ListItem(out *bytes.Buffer, text []byte, flags int) { - r.processString(out, text, false) -} - -// Paragraph dummy function to proceed with rendering -func (r *MarkdownStripper) Paragraph(out *bytes.Buffer, text func() bool) { - text() - r.coallesce = false -} - -// Table dummy function to proceed with rendering -func (r *MarkdownStripper) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { - r.processString(out, header, false) - r.processString(out, body, false) -} - -// TableRow dummy function to proceed with rendering -func (r *MarkdownStripper) TableRow(out *bytes.Buffer, text []byte) { - r.processString(out, text, false) -} - -// TableHeaderCell dummy function to proceed with rendering -func (r *MarkdownStripper) TableHeaderCell(out *bytes.Buffer, text []byte, flags int) { - r.processString(out, text, false) -} - -// TableCell dummy function to proceed with rendering -func (r *MarkdownStripper) TableCell(out *bytes.Buffer, text []byte, flags int) { - r.processString(out, text, false) -} - -// Footnotes dummy function to proceed with rendering -func (r *MarkdownStripper) Footnotes(out *bytes.Buffer, text func() bool) { - text() -} - -// FootnoteItem dummy function to proceed with rendering -func (r *MarkdownStripper) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { - r.processString(out, text, false) -} - -// TitleBlock dummy function to proceed with rendering -func (r *MarkdownStripper) TitleBlock(out *bytes.Buffer, text []byte) { - r.processString(out, text, false) -} - -// Span-level callbacks - -// AutoLink dummy function to proceed with rendering -func (r *MarkdownStripper) AutoLink(out *bytes.Buffer, link []byte, kind int) { - r.processLink(out, link, 
[]byte{}) -} - -// CodeSpan dummy function to proceed with rendering -func (r *MarkdownStripper) CodeSpan(out *bytes.Buffer, text []byte) { - // Not rendered - r.coallesce = false -} - -// DoubleEmphasis dummy function to proceed with rendering -func (r *MarkdownStripper) DoubleEmphasis(out *bytes.Buffer, text []byte) { - r.processString(out, text, false) -} - -// Emphasis dummy function to proceed with rendering -func (r *MarkdownStripper) Emphasis(out *bytes.Buffer, text []byte) { - r.processString(out, text, false) -} - -// Image dummy function to proceed with rendering -func (r *MarkdownStripper) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { - // Not rendered - r.coallesce = false -} - -// LineBreak dummy function to proceed with rendering -func (r *MarkdownStripper) LineBreak(out *bytes.Buffer) { - // Not rendered - r.coallesce = false -} - -// Link dummy function to proceed with rendering -func (r *MarkdownStripper) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { - r.processLink(out, link, content) -} - -// RawHtmlTag dummy function to proceed with rendering -func (r *MarkdownStripper) RawHtmlTag(out *bytes.Buffer, tag []byte) { //nolint - // Not rendered - r.coallesce = false -} - -// TripleEmphasis dummy function to proceed with rendering -func (r *MarkdownStripper) TripleEmphasis(out *bytes.Buffer, text []byte) { - r.processString(out, text, false) -} - -// StrikeThrough dummy function to proceed with rendering -func (r *MarkdownStripper) StrikeThrough(out *bytes.Buffer, text []byte) { - r.processString(out, text, false) -} - -// FootnoteRef dummy function to proceed with rendering -func (r *MarkdownStripper) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { - // Not rendered - r.coallesce = false -} - -// Low-level callbacks - -// Entity dummy function to proceed with rendering -func (r *MarkdownStripper) Entity(out *bytes.Buffer, entity []byte) { - // FIXME: literal entities are not parsed; perhaps they should - r.coallesce = false -} - -// NormalText dummy function to proceed with rendering -func (r *MarkdownStripper) NormalText(out *bytes.Buffer, text []byte) { - r.processString(out, text, true) -} - -// Header and footer - -// DocumentHeader dummy function to proceed with rendering -func (r *MarkdownStripper) DocumentHeader(out *bytes.Buffer) { - r.coallesce = false -} - -// DocumentFooter dummy function to proceed with rendering -func (r *MarkdownStripper) DocumentFooter(out *bytes.Buffer) { - r.coallesce = false -} - -// GetFlags returns rendering flags -func (r *MarkdownStripper) GetFlags() int { - return 0 -} - -//revive:enable:var-naming - -func doubleSpace(out *bytes.Buffer) { - if out.Len() > 0 { - out.WriteByte('\n') +func (r *MarkdownStripper) doubleSpace(w io.Writer) { + if !r.empty { + _, _ = w.Write([]byte{'\n'}) } } -func (r *MarkdownStripper) processString(out *bytes.Buffer, text []byte, coallesce bool) { +func (r *MarkdownStripper) processString(w io.Writer, text []byte, coallesce bool) { // Always break-up words if !coallesce || !r.coallesce { - doubleSpace(out) + r.doubleSpace(w) } - out.Write(text) + _, _ = w.Write(text) r.coallesce = coallesce + r.empty = false } -func (r *MarkdownStripper) processLink(out *bytes.Buffer, link []byte, content []byte) { + +func (r *MarkdownStripper) processLink(w io.Writer, link []byte) { // Links are processed out of band r.links = append(r.links, string(link)) r.coallesce = false diff --git a/modules/markup/orgmode/orgmode.go b/modules/markup/orgmode/orgmode.go index 
f63155201e..54188d2734 100644 --- a/modules/markup/orgmode/orgmode.go +++ b/modules/markup/orgmode/orgmode.go @@ -5,12 +5,16 @@ package markup import ( + "bytes" + "fmt" + "html" + "strings" + "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/markup" - "code.gitea.io/gitea/modules/markup/markdown" + "code.gitea.io/gitea/modules/util" - "github.com/chaseadamsio/goorgeous" - "github.com/russross/blackfriday" + "github.com/niklasfasching/go-org/org" ) func init() { @@ -32,23 +36,23 @@ func (Parser) Extensions() []string { } // Render renders orgmode rawbytes to HTML -func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) (result []byte) { - defer func() { - if err := recover(); err != nil { - log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err) - result = rawBytes - } - }() - htmlFlags := blackfriday.HTML_USE_XHTML - htmlFlags |= blackfriday.HTML_SKIP_STYLE - htmlFlags |= blackfriday.HTML_OMIT_CONTENTS - renderer := &markdown.Renderer{ - Renderer: blackfriday.HtmlRenderer(htmlFlags, "", ""), - URLPrefix: urlPrefix, - IsWiki: isWiki, +func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte { + htmlWriter := org.NewHTMLWriter() + + renderer := &Renderer{ + HTMLWriter: htmlWriter, + URLPrefix: urlPrefix, + IsWiki: isWiki, } - result = goorgeous.Org(rawBytes, renderer) - return + + htmlWriter.ExtendingWriter = renderer + + res, err := org.New().Silent().Parse(bytes.NewReader(rawBytes), "").Write(renderer) + if err != nil { + log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err) + return rawBytes + } + return []byte(res) } // RenderString reners orgmode string to HTML string @@ -56,7 +60,63 @@ func RenderString(rawContent string, urlPrefix string, metas map[string]string, return string(Render([]byte(rawContent), urlPrefix, metas, isWiki)) } -// Render implements markup.Parser +// Render reners orgmode string to HTML string func (Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte { return Render(rawBytes, urlPrefix, metas, isWiki) } + +// Renderer implements org.Writer +type Renderer struct { + *org.HTMLWriter + URLPrefix string + IsWiki bool +} + +var byteMailto = []byte("mailto:") + +// WriteRegularLink renders images, links or videos +func (r *Renderer) WriteRegularLink(l org.RegularLink) { + link := []byte(html.EscapeString(l.URL)) + if l.Protocol == "file" { + link = link[len("file:"):] + } + if len(link) > 0 && !markup.IsLink(link) && + link[0] != '#' && !bytes.HasPrefix(link, byteMailto) { + lnk := string(link) + if r.IsWiki { + lnk = util.URLJoin("wiki", lnk) + } + link = []byte(util.URLJoin(r.URLPrefix, lnk)) + } + + description := string(link) + if l.Description != nil { + description = r.nodesAsString(l.Description...) + } + switch l.Kind() { + case "image": + r.WriteString(fmt.Sprintf(`%s`, link, description, description)) + case "video": + r.WriteString(fmt.Sprintf(``, link, description, description)) + default: + r.WriteString(fmt.Sprintf(`%s`, link, description, description)) + } +} + +func (r *Renderer) emptyClone() *Renderer { + wcopy := *(r.HTMLWriter) + wcopy.Builder = strings.Builder{} + + rcopy := *r + rcopy.HTMLWriter = &wcopy + + wcopy.ExtendingWriter = &rcopy + + return &rcopy +} + +func (r *Renderer) nodesAsString(nodes ...org.Node) string { + tmp := r.emptyClone() + org.WriteNodes(tmp, nodes...) 
+ return tmp.String() +} diff --git a/modules/markup/orgmode/orgmode_test.go b/modules/markup/orgmode/orgmode_test.go index 3846922c25..40323912b4 100644 --- a/modules/markup/orgmode/orgmode_test.go +++ b/modules/markup/orgmode/orgmode_test.go @@ -27,12 +27,12 @@ func TestRender_StandardLinks(t *testing.T) { assert.Equal(t, strings.TrimSpace(expected), strings.TrimSpace(buffer)) } - googleRendered := `

<p><a href="https://google.com/" title="https://google.com/">https://google.com/</a></p>`
+	googleRendered := "<p>\n<a href=\"https://google.com/\" title=\"https://google.com/\">https://google.com/</a>\n</p>"
 	test("[[https://google.com/]]", googleRendered)

 	lnk := util.URLJoin(AppSubURL, "WikiPage")
 	test("[[WikiPage][WikiPage]]",
-		`<p><a href="`+lnk+`" title="WikiPage">WikiPage</a></p>`)
+		"<p>\n<a href=\""+lnk+"\" title=\"WikiPage\">WikiPage</a>\n</p>")
 }

 func TestRender_Images(t *testing.T) {
@@ -45,10 +45,8 @@ func TestRender_Images(t *testing.T) {
 	}

 	url := "../../.images/src/02/train.jpg"
-	title := "Train"
 	result := util.URLJoin(AppSubURL, url)
-	test(
-		"[[file:"+url+"]["+title+"]]",
-		`<p><a href="`+result+`" rel="nofollow"><img src="`+result+`" title="`+title+`" alt="`+title+`"/></a></p>`)
+	test("[[file:"+url+"]]",
+		"<p>\n<img src=\""+result+"\" alt=\""+result+"\" title=\""+result+"\" />\n</p>
") } diff --git a/modules/setting/service.go b/modules/setting/service.go index dea4081ee8..93629100a2 100644 --- a/modules/setting/service.go +++ b/modules/setting/service.go @@ -39,6 +39,7 @@ var Service struct { EnableTimetracking bool DefaultEnableTimetracking bool DefaultEnableDependencies bool + AllowCrossRepositoryDependencies bool DefaultAllowOnlyContributorsToTrackTime bool NoReplyAddress string EnableUserHeatmap bool @@ -79,6 +80,7 @@ func newService() { Service.DefaultEnableTimetracking = sec.Key("DEFAULT_ENABLE_TIMETRACKING").MustBool(true) } Service.DefaultEnableDependencies = sec.Key("DEFAULT_ENABLE_DEPENDENCIES").MustBool(true) + Service.AllowCrossRepositoryDependencies = sec.Key("ALLOW_CROSS_REPOSITORY_DEPENDENCIES").MustBool(true) Service.DefaultAllowOnlyContributorsToTrackTime = sec.Key("DEFAULT_ALLOW_ONLY_CONTRIBUTORS_TO_TRACK_TIME").MustBool(true) Service.NoReplyAddress = sec.Key("NO_REPLY_ADDRESS").MustString("noreply.example.org") Service.EnableUserHeatmap = sec.Key("ENABLE_USER_HEATMAP").MustBool(true) diff --git a/modules/structs/issue.go b/modules/structs/issue.go index 58fd7344b4..bd39f9ea44 100644 --- a/modules/structs/issue.go +++ b/modules/structs/issue.go @@ -26,6 +26,13 @@ type PullRequestMeta struct { Merged *time.Time `json:"merged_at"` } +// RepositoryMeta basic repository information +type RepositoryMeta struct { + ID int64 `json:"id"` + Name string `json:"name"` + FullName string `json:"full_name"` +} + // Issue represents an issue in a repository // swagger:model type Issue struct { @@ -57,6 +64,7 @@ type Issue struct { Deadline *time.Time `json:"due_date"` PullRequest *PullRequestMeta `json:"pull_request"` + Repo *RepositoryMeta `json:"repository"` } // ListIssueOption list issue options diff --git a/options/locale/locale_ja-JP.ini b/options/locale/locale_ja-JP.ini index f903f543e1..3e25b16eee 100644 --- a/options/locale/locale_ja-JP.ini +++ b/options/locale/locale_ja-JP.ini @@ -259,6 +259,7 @@ openid_signin_desc=あなたのOpenID URIを入力してください。 例: htt disable_forgot_password_mail=アカウント回復機能は無効になっています。 サイト管理者にお問い合わせください。 email_domain_blacklisted=あなたのメールアドレスでは登録することはできません。 authorize_application=アプリケーションを許可 +authorize_redirect_notice=このアプリケーションを許可すると %s にリダイレクトします。 authorize_application_created_by=このアプリケーションは %s が作成しました。 authorize_application_description=アクセスを許可すると、このアプリケーションは、プライベート リポジトリや組織を含むあなたのすべてのアカウント情報に対して、アクセスと書き込みができるようになります。 authorize_title=%s"にあなたのアカウントへのアクセスを許可しますか? 
@@ -701,6 +702,7 @@ editor.preview_changes=変更をプレビュー editor.cannot_edit_lfs_files=LFSのファイルはWebインターフェースで編集できません。 editor.cannot_edit_non_text_files=バイナリファイルはWebインターフェースで編集できません。 editor.edit_this_file=ファイルを編集 +editor.this_file_locked=ファイルはロックされています editor.must_be_on_a_branch=このファイルを変更したり変更の提案をするには、ブランチ上にいる必要があります。 editor.fork_before_edit=このファイルを変更したり変更の提案をするには、リポジトリをフォークする必要があります。 editor.delete_this_file=ファイルを削除 @@ -800,6 +802,7 @@ issues.delete_branch_at=`がブランチ %[1]s を削除 %[2]s` issues.open_tab=%d件 オープン中 issues.close_tab=%d件 クローズ済 issues.filter_label=ラベル +issues.filter_label_exclude=`ラベルで除外するには alt + click/enter` issues.filter_label_no_select=すべてのラベル issues.filter_milestone=マイルストーン issues.filter_milestone_no_select=すべてのマイルストーン @@ -974,6 +977,7 @@ issues.review.review=レビュー issues.review.reviewers=レビューア issues.review.show_outdated=古い内容を表示 issues.review.hide_outdated=古い内容を隠す +issues.assignee.error=予期しないエラーにより、一部の担当者を追加できませんでした。 pulls.desc=プルリクエストとコードレビューの有効化。 pulls.new=新しいプルリクエスト @@ -1374,6 +1378,9 @@ settings.unarchive.text=リポジトリのアーカイブを解除すると、 settings.unarchive.success=リポジトリのアーカイブを解除しました。 settings.unarchive.error=リポジトリのアーカイブ解除でエラーが発生しました。 詳細はログを確認してください。 settings.update_avatar_success=リポジトリのアバターを更新しました。 +settings.lfs=LFS +settings.lfs_delete=LFSファイル(OID %s)の削除 +settings.lfs_delete_warning=LFSファイルを削除すると、チェックアウトのときに 'object does not exist' エラーが発生するかもしれません。 よろしいですか? diff.browse_source=ソースを参照 diff.parent=親 diff --git a/public/css/index.css b/public/css/index.css index dca2d6f0b6..f7eb02b296 100644 --- a/public/css/index.css +++ b/public/css/index.css @@ -78,6 +78,7 @@ a{cursor:pointer} .ui.form .ui.button{font-weight:400} .ui.floating.label{z-index:10} .ui.transparent.label{background-color:transparent} +.ui.nopadding{padding:0} .ui.menu,.ui.segment,.ui.vertical.menu{box-shadow:none} .ui .menu:not(.vertical) .item>.button.compact{padding:.58928571em 1.125em} .ui .menu:not(.vertical) .item>.button.small{font-size:.92857143rem} @@ -109,6 +110,8 @@ a{cursor:pointer} .ui .text.truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap;display:inline-block} .ui .text.thin{font-weight:400} .ui .text.middle{vertical-align:middle} +.ui .text.nopadding{padding:0} +.ui .text.nomargin{margin:0} .ui .message{text-align:center} .ui.bottom.attached.message{font-weight:700;text-align:left;color:#000} .ui.bottom.attached.message .pull-right{color:#000} diff --git a/public/js/index.js b/public/js/index.js index e76e993a1d..bfcf36f528 100644 --- a/public/js/index.js +++ b/public/js/index.js @@ -3254,10 +3254,16 @@ function deleteDependencyModal(id, type) { function initIssueList() { const repolink = $('#repolink').val(); + const repoId = $('#repoId').val(); + const crossRepoSearch = $('#crossRepoSearch').val(); + let issueSearchUrl = suburl + '/api/v1/repos/' + repolink + '/issues?q={query}'; + if (crossRepoSearch === 'true') { + issueSearchUrl = suburl + '/api/v1/repos/issues/search?q={query}&priority_repo_id=' + repoId; + } $('#new-dependency-drop-list') .dropdown({ apiSettings: { - url: suburl + '/api/v1/repos/' + repolink + '/issues?q={query}', + url: issueSearchUrl, onResponse: function(response) { const filteredResponse = {'success': true, 'results': []}; const currIssueId = $('#new-dependency-drop-list').data('issue-id'); @@ -3268,7 +3274,8 @@ function initIssueList() { return; } filteredResponse.results.push({ - 'name' : '#' + issue.number + ' ' + htmlEncode(issue.title), + 'name' : '#' + issue.number + ' ' + htmlEncode(issue.title) + + '
<div class="text small dont-break-out">' + htmlEncode(issue.repository.full_name) + '</div>
', 'value' : issue.id }); }); diff --git a/public/less/_base.less b/public/less/_base.less index 7fcfaf82ea..8bf49b1ef9 100644 --- a/public/less/_base.less +++ b/public/less/_base.less @@ -321,6 +321,10 @@ code, background-color: transparent; } + &.nopadding { + padding: 0; + } + &.menu, &.vertical.menu, &.segment { @@ -453,6 +457,14 @@ code, &.middle { vertical-align: middle; } + + &.nopadding { + padding: 0; + } + + &.nomargin { + margin: 0; + } } .message { diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go index 1ef99fdc95..7238b4aad6 100644 --- a/routers/api/v1/api.go +++ b/routers/api/v1/api.go @@ -596,6 +596,8 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("/search", repo.Search) }) + m.Get("/repos/issues/search", repo.SearchIssues) + m.Combo("/repositories/:id", reqToken()).Get(repo.GetByID) m.Group("/repos", func() { diff --git a/routers/api/v1/repo/issue.go b/routers/api/v1/repo/issue.go index 6fcb2abfb6..a349aa8c1d 100644 --- a/routers/api/v1/repo/issue.go +++ b/routers/api/v1/repo/issue.go @@ -14,6 +14,7 @@ import ( "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" issue_indexer "code.gitea.io/gitea/modules/indexer/issues" + "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/timeutil" @@ -22,6 +23,137 @@ import ( milestone_service "code.gitea.io/gitea/services/milestone" ) +// SearchIssues searches for issues across the repositories that the user has access to +func SearchIssues(ctx *context.APIContext) { + // swagger:operation GET /repos/issues/search issue issueSearchIssues + // --- + // summary: Search for issues across the repositories that the user has access to + // produces: + // - application/json + // parameters: + // - name: state + // in: query + // description: whether issue is open or closed + // type: string + // - name: labels + // in: query + // description: comma separated list of labels. Fetch only issues that have any of this labels. 
Non existent labels are discarded + // type: string + // - name: page + // in: query + // description: page number of requested issues + // type: integer + // - name: q + // in: query + // description: search string + // type: string + // - name: priority_repo_id + // in: query + // description: repository to prioritize in the results + // type: integer + // format: int64 + // responses: + // "200": + // "$ref": "#/responses/IssueList" + var isClosed util.OptionalBool + switch ctx.Query("state") { + case "closed": + isClosed = util.OptionalBoolTrue + case "all": + isClosed = util.OptionalBoolNone + default: + isClosed = util.OptionalBoolFalse + } + + // find repos user can access (for issue search) + repoIDs := make([]int64, 0) + issueCount := 0 + for page := 1; ; page++ { + repos, count, err := models.SearchRepositoryByName(&models.SearchRepoOptions{ + Page: page, + PageSize: 15, + Private: true, + Keyword: "", + OwnerID: ctx.User.ID, + TopicOnly: false, + Collaborate: util.OptionalBoolNone, + UserIsAdmin: ctx.IsUserSiteAdmin(), + UserID: ctx.User.ID, + OrderBy: models.SearchOrderByRecentUpdated, + }) + if err != nil { + ctx.Error(500, "SearchRepositoryByName", err) + return + } + + if len(repos) == 0 { + break + } + log.Trace("Processing next %d repos of %d", len(repos), count) + for _, repo := range repos { + switch isClosed { + case util.OptionalBoolTrue: + issueCount += repo.NumClosedIssues + case util.OptionalBoolFalse: + issueCount += repo.NumOpenIssues + case util.OptionalBoolNone: + issueCount += repo.NumIssues + } + repoIDs = append(repoIDs, repo.ID) + } + } + + var issues []*models.Issue + + keyword := strings.Trim(ctx.Query("q"), " ") + if strings.IndexByte(keyword, 0) >= 0 { + keyword = "" + } + var issueIDs []int64 + var labelIDs []int64 + var err error + if len(keyword) > 0 && len(repoIDs) > 0 { + issueIDs, err = issue_indexer.SearchIssuesByKeyword(repoIDs, keyword) + } + + labels := ctx.Query("labels") + if splitted := strings.Split(labels, ","); labels != "" && len(splitted) > 0 { + labelIDs, err = models.GetLabelIDsInReposByNames(repoIDs, splitted) + if err != nil { + ctx.Error(500, "GetLabelIDsInRepoByNames", err) + return + } + } + + // Only fetch the issues if we either don't have a keyword or the search returned issues + // This would otherwise return all issues if no issues were found by the search. 
+ if len(keyword) == 0 || len(issueIDs) > 0 || len(labelIDs) > 0 { + issues, err = models.Issues(&models.IssuesOptions{ + RepoIDs: repoIDs, + Page: ctx.QueryInt("page"), + PageSize: setting.UI.IssuePagingNum, + IsClosed: isClosed, + IssueIDs: issueIDs, + LabelIDs: labelIDs, + SortType: "priorityrepo", + PriorityRepoID: ctx.QueryInt64("priority_repo_id"), + }) + } + + if err != nil { + ctx.Error(500, "Issues", err) + return + } + + apiIssues := make([]*api.Issue, len(issues)) + for i := range issues { + apiIssues[i] = issues[i].APIFormat() + } + + ctx.SetLinkHeader(issueCount, setting.UI.IssuePagingNum) + ctx.JSON(200, &apiIssues) +} + // ListIssues list the issues of a repository func ListIssues(ctx *context.APIContext) { // swagger:operation GET /repos/{owner}/{repo}/issues issue issueListIssues @@ -79,7 +211,7 @@ func ListIssues(ctx *context.APIContext) { var labelIDs []int64 var err error if len(keyword) > 0 { - issueIDs, err = issue_indexer.SearchIssuesByKeyword(ctx.Repo.Repository.ID, keyword) + issueIDs, err = issue_indexer.SearchIssuesByKeyword([]int64{ctx.Repo.Repository.ID}, keyword) } if splitted := strings.Split(ctx.Query("labels"), ","); len(splitted) > 0 { diff --git a/routers/repo/issue.go b/routers/repo/issue.go index 04c718d5b9..9a691471d5 100644 --- a/routers/repo/issue.go +++ b/routers/repo/issue.go @@ -149,7 +149,7 @@ func issues(ctx *context.Context, milestoneID int64, isPullOption util.OptionalB var issueIDs []int64 if len(keyword) > 0 { - issueIDs, err = issue_indexer.SearchIssuesByKeyword(repo.ID, keyword) + issueIDs, err = issue_indexer.SearchIssuesByKeyword([]int64{repo.ID}, keyword) if err != nil { ctx.ServerError("issueIndexer.Search", err) return @@ -778,6 +778,9 @@ func ViewIssue(ctx *context.Context) { // Check if the user can use the dependencies ctx.Data["CanCreateIssueDependencies"] = ctx.Repo.CanCreateIssueDependencies(ctx.User) + // check if dependencies can be created across repositories + ctx.Data["AllowCrossRepositoryDependencies"] = setting.Service.AllowCrossRepositoryDependencies + // Render comments and and fetch participants. participants[0] = issue.Poster for _, comment = range issue.Comments { diff --git a/routers/repo/issue_dependency.go b/routers/repo/issue_dependency.go index 730271126d..6b11f0cdf1 100644 --- a/routers/repo/issue_dependency.go +++ b/routers/repo/issue_dependency.go @@ -10,6 +10,7 @@ import ( "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" + "code.gitea.io/gitea/modules/setting" ) // AddDependency adds new dependencies @@ -39,14 +40,14 @@ func AddDependency(ctx *context.Context) { return } - // Check if both issues are in the same repo - if issue.RepoID != dep.RepoID { + // Check if both issues are in the same repo if cross repository dependencies is not enabled + if issue.RepoID != dep.RepoID && !setting.Service.AllowCrossRepositoryDependencies { ctx.Flash.Error(ctx.Tr("repo.issues.dependency.add_error_dep_not_same_repo")) return } // Check if issue and dependency is the same - if dep.Index == issueIndex { + if dep.ID == issue.ID { ctx.Flash.Error(ctx.Tr("repo.issues.dependency.add_error_same_issue")) return } diff --git a/templates/repo/issue/view_content/sidebar.tmpl b/templates/repo/issue/view_content/sidebar.tmpl index c42d8aff7f..637d4ad04a 100644 --- a/templates/repo/issue/view_content/sidebar.tmpl +++ b/templates/repo/issue/view_content/sidebar.tmpl @@ -274,14 +274,15 @@
{{range .BlockingDependencies}} -
-
#{{.Index}}
- {{.Title}} -
+
+ #{{.Issue.Index}} + {{.Issue.Title}} +
{{.Repository.OwnerName}}/{{.Repository.Name}}
+
{{if and $.CanCreateIssueDependencies (not $.Repository.IsArchived)}} - - + + {{end}}
@@ -300,14 +301,15 @@
{{range .BlockedByDependencies}} -
-
#{{.Index}}
- {{.Title}} -
- {{if and $.CanCreateIssueDependencies (not $.IsArchived)}} - - +
+ #{{.Issue.Index}} + {{.Issue.Title}} +
{{.Repository.OwnerName}}/{{.Repository.Name}}
+
+ {{if and $.CanCreateIssueDependencies (not $.Repository.IsArchived)}} + + {{end}}
@@ -424,6 +426,8 @@
 {{if and .CanCreateIssueDependencies (not .Repository.IsArchived)}}
+
+
diff --git a/templates/swagger/v1_json.tmpl b/templates/swagger/v1_json.tmpl
index 2c817c029a..15f43bf268 100644
--- a/templates/swagger/v1_json.tmpl
+++ b/templates/swagger/v1_json.tmpl
@@ -1111,6 +1111,56 @@
       }
     }
   },
+    "/repos/issues/search": {
+      "get": {
+        "produces": [
+          "application/json"
+        ],
+        "tags": [
+          "issue"
+        ],
+        "summary": "Search for issues across the repositories that the user has access to",
+        "operationId": "issueSearchIssues",
+        "parameters": [
+          {
+            "type": "string",
+            "description": "whether issue is open or closed",
+            "name": "state",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "comma-separated list of labels. Fetch only issues that have any of these labels. Nonexistent labels are discarded",
+            "name": "labels",
+            "in": "query"
+          },
+          {
+            "type": "integer",
+            "description": "page number of requested issues",
+            "name": "page",
+            "in": "query"
+          },
+          {
+            "type": "string",
+            "description": "search string",
+            "name": "q",
+            "in": "query"
+          },
+          {
+            "type": "integer",
+            "format": "int64",
+            "description": "repository to prioritize in the results",
+            "name": "priority_repo_id",
+            "in": "query"
+          }
+        ],
+        "responses": {
+          "200": {
+            "$ref": "#/responses/IssueList"
+          }
+        }
+      }
+    },
     "/repos/migrate": {
       "post": {
         "consumes": [
@@ -9358,6 +9408,9 @@
         "pull_request": {
           "$ref": "#/definitions/PullRequestMeta"
         },
+        "repository": {
+          "$ref": "#/definitions/RepositoryMeta"
+        },
         "state": {
           "$ref": "#/definitions/StateType"
         },
@@ -10254,6 +10307,26 @@
       },
       "x-go-package": "code.gitea.io/gitea/modules/structs"
     },
+    "RepositoryMeta": {
+      "description": "RepositoryMeta basic repository information",
+      "type": "object",
+      "properties": {
+        "full_name": {
+          "type": "string",
+          "x-go-name": "FullName"
+        },
+        "id": {
+          "type": "integer",
+          "format": "int64",
+          "x-go-name": "ID"
+        },
+        "name": {
+          "type": "string",
+          "x-go-name": "Name"
+        }
+      },
+      "x-go-package": "code.gitea.io/gitea/modules/structs"
+    },
     "SearchResults": {
       "description": "SearchResults results of a successful search",
       "type": "object",
diff --git a/vendor/github.com/chaseadamsio/goorgeous/.gitignore b/vendor/github.com/chaseadamsio/goorgeous/.gitignore
deleted file mode 100644
index 496ee2ca6a..0000000000
--- a/vendor/github.com/chaseadamsio/goorgeous/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.DS_Store
\ No newline at end of file
diff --git a/vendor/github.com/chaseadamsio/goorgeous/.travis.yml b/vendor/github.com/chaseadamsio/goorgeous/.travis.yml
deleted file mode 100644
index 31dca02817..0000000000
--- a/vendor/github.com/chaseadamsio/goorgeous/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-go:
-  - 1.7
-
-before_install:
-  - go get golang.org/x/tools/cmd/cover
-  - go get github.com/mattn/goveralls
-
-script:
-  - go test -v -covermode=count -coverprofile=coverage.out
-  - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/chaseadamsio/goorgeous/README.org b/vendor/github.com/chaseadamsio/goorgeous/README.org
deleted file mode 100644
index 37e0f2ec73..0000000000
--- a/vendor/github.com/chaseadamsio/goorgeous/README.org
+++ /dev/null
@@ -1,66 +0,0 @@
-#+TITLE: chaseadamsio/goorgeous
-
-[[https://travis-ci.org/chaseadamsio/goorgeous.svg?branch=master]]
-[[https://coveralls.io/repos/github/chaseadamsio/goorgeous/badge.svg?branch=master]]
-
-/goorgeous is a Go Org to HTML Parser./
-
-[[file:gopher_small.gif]]
-
-*Pronounced: Go? Org? 
Yes!* - -#+BEGIN_QUOTE -"Org mode is for keeping notes, maintaining TODO lists, planning projects, and authoring documents with a fast and effective plain-text system." - -- [[orgmode.org]] -#+END_QUOTE - -The purpose of this package is to come as close as possible as parsing an =*.org= document into HTML, the same way one might publish [[http://orgmode.org/worg/org-tutorials/org-publish-html-tutorial.html][with org-publish-html from Emacs]]. - -* Installation - -#+BEGIN_SRC sh - go get -u github.com/chaseadamsio/goorgeous -#+END_SRC - -* Usage - -** Org Headers - -To retrieve the headers from a =[]byte=, call =OrgHeaders= and it will return a =map[string]interface{}=: - -#+BEGIN_SRC go - input := "#+title: goorgeous\n* Some Headline\n" - out := goorgeous.OrgHeaders(input) -#+END_SRC - -#+BEGIN_SRC go - map[string]interface{}{ - "title": "goorgeous" - } -#+END_SRC - -** Org Content - -After importing =github.com/chaseadamsio/goorgeous=, you can call =Org= with a =[]byte= and it will return an =html= version of the content as a =[]byte= - -#+BEGIN_SRC go - input := "#+TITLE: goorgeous\n* Some Headline\n" - out := goorgeous.Org(input) -#+END_SRC - -=out= will be: - -#+BEGIN_SRC html -

Some Headline

/n -#+END_SRC - -* Why? - -First off, I've become an unapologetic user of Emacs & ever since finding =org-mode= I use it for anything having to do with writing content, organizing my life and keeping documentation of my days/weeks/months. - -Although I like Emacs & =emacs-lisp=, I publish all of my html sites with [[https://gohugo.io][Hugo Static Site Generator]] and wanted to be able to write my content in =org-mode= in Emacs rather than markdown. - -Hugo's implementation of templating and speed are unmatched, so the only way I knew for sure I could continue to use Hugo and write in =org-mode= seamlessly was to write a golang parser for org content and submit a PR for Hugo to use it. -* Acknowledgements -I leaned heavily on russross' [[https://github.com/russross/blackfriday][blackfriday markdown renderer]] as both an example of how to write a parser (with some updates to leverage the go we know today) and reusing the blackfriday HTML Renderer so I didn't have to write my own! diff --git a/vendor/github.com/chaseadamsio/goorgeous/goorgeous.go b/vendor/github.com/chaseadamsio/goorgeous/goorgeous.go deleted file mode 100644 index f1b2671d65..0000000000 --- a/vendor/github.com/chaseadamsio/goorgeous/goorgeous.go +++ /dev/null @@ -1,803 +0,0 @@ -package goorgeous - -import ( - "bufio" - "bytes" - "regexp" - - "github.com/russross/blackfriday" - "github.com/shurcooL/sanitized_anchor_name" -) - -type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int - -type footnotes struct { - id string - def string -} - -type parser struct { - r blackfriday.Renderer - inlineCallback [256]inlineParser - notes []footnotes -} - -// NewParser returns a new parser with the inlineCallbacks required for org content -func NewParser(renderer blackfriday.Renderer) *parser { - p := new(parser) - p.r = renderer - - p.inlineCallback['='] = generateVerbatim - p.inlineCallback['~'] = generateCode - p.inlineCallback['/'] = generateEmphasis - p.inlineCallback['_'] = generateUnderline - p.inlineCallback['*'] = generateBold - p.inlineCallback['+'] = generateStrikethrough - p.inlineCallback['['] = generateLinkOrImg - - return p -} - -// OrgCommon is the easiest way to parse a byte slice of org content and makes assumptions -// that the caller wants to use blackfriday's HTMLRenderer with XHTML -func OrgCommon(input []byte) []byte { - renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "", "") - return OrgOptions(input, renderer) -} - -// Org is a convenience name for OrgOptions -func Org(input []byte, renderer blackfriday.Renderer) []byte { - return OrgOptions(input, renderer) -} - -// OrgOptions takes an org content byte slice and a renderer to use -func OrgOptions(input []byte, renderer blackfriday.Renderer) []byte { - // in the case that we need to render something in isEmpty but there isn't a new line char - input = append(input, '\n') - var output bytes.Buffer - - p := NewParser(renderer) - - scanner := bufio.NewScanner(bytes.NewReader(input)) - // used to capture code blocks - marker := "" - syntax := "" - listType := "" - inParagraph := false - inList := false - inTable := false - inFixedWidthArea := false - var tmpBlock bytes.Buffer - - for scanner.Scan() { - data := scanner.Bytes() - - if !isEmpty(data) && isComment(data) || IsKeyword(data) { - switch { - case inList: - if tmpBlock.Len() > 0 { - p.generateList(&output, tmpBlock.Bytes(), listType) - } - inList = false - listType = "" - tmpBlock.Reset() - case inTable: - if tmpBlock.Len() > 0 { - p.generateTable(&output, 
tmpBlock.Bytes()) - } - inTable = false - tmpBlock.Reset() - case inParagraph: - if tmpBlock.Len() > 0 { - p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) - } - inParagraph = false - tmpBlock.Reset() - case inFixedWidthArea: - if tmpBlock.Len() > 0 { - tmpBlock.WriteString("\n") - output.Write(tmpBlock.Bytes()) - } - inFixedWidthArea = false - tmpBlock.Reset() - } - - } - - switch { - case isEmpty(data): - switch { - case inList: - if tmpBlock.Len() > 0 { - p.generateList(&output, tmpBlock.Bytes(), listType) - } - inList = false - listType = "" - tmpBlock.Reset() - case inTable: - if tmpBlock.Len() > 0 { - p.generateTable(&output, tmpBlock.Bytes()) - } - inTable = false - tmpBlock.Reset() - case inParagraph: - if tmpBlock.Len() > 0 { - p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) - } - inParagraph = false - tmpBlock.Reset() - case inFixedWidthArea: - if tmpBlock.Len() > 0 { - tmpBlock.WriteString("\n") - output.Write(tmpBlock.Bytes()) - } - inFixedWidthArea = false - tmpBlock.Reset() - case marker != "": - tmpBlock.WriteByte('\n') - default: - continue - } - case isPropertyDrawer(data) || marker == "PROPERTIES": - if marker == "" { - marker = "PROPERTIES" - } - if bytes.Equal(data, []byte(":END:")) { - marker = "" - } - continue - case isBlock(data) || marker != "": - matches := reBlock.FindSubmatch(data) - if len(matches) > 0 { - if string(matches[1]) == "END" { - switch marker { - case "QUOTE": - var tmpBuf bytes.Buffer - p.inline(&tmpBuf, tmpBlock.Bytes()) - p.r.BlockQuote(&output, tmpBuf.Bytes()) - case "CENTER": - var tmpBuf bytes.Buffer - output.WriteString("
\n") - p.inline(&tmpBuf, tmpBlock.Bytes()) - output.Write(tmpBuf.Bytes()) - output.WriteString("
\n") - default: - tmpBlock.WriteByte('\n') - p.r.BlockCode(&output, tmpBlock.Bytes(), syntax) - } - marker = "" - tmpBlock.Reset() - continue - } - - } - if marker != "" { - if marker != "SRC" && marker != "EXAMPLE" { - var tmpBuf bytes.Buffer - tmpBuf.Write([]byte("

\n")) - p.inline(&tmpBuf, data) - tmpBuf.WriteByte('\n') - tmpBuf.Write([]byte("

\n")) - tmpBlock.Write(tmpBuf.Bytes()) - - } else { - tmpBlock.WriteByte('\n') - tmpBlock.Write(data) - } - - } else { - marker = string(matches[2]) - syntax = string(matches[3]) - } - case isFootnoteDef(data): - matches := reFootnoteDef.FindSubmatch(data) - for i := range p.notes { - if p.notes[i].id == string(matches[1]) { - p.notes[i].def = string(matches[2]) - } - } - case isTable(data): - if inTable != true { - inTable = true - } - tmpBlock.Write(data) - tmpBlock.WriteByte('\n') - case IsKeyword(data): - continue - case isComment(data): - p.generateComment(&output, data) - case isHeadline(data): - p.generateHeadline(&output, data) - case isDefinitionList(data): - if inList != true { - listType = "dl" - inList = true - } - var work bytes.Buffer - flags := blackfriday.LIST_TYPE_DEFINITION - matches := reDefinitionList.FindSubmatch(data) - flags |= blackfriday.LIST_TYPE_TERM - p.inline(&work, matches[1]) - p.r.ListItem(&tmpBlock, work.Bytes(), flags) - work.Reset() - flags &= ^blackfriday.LIST_TYPE_TERM - p.inline(&work, matches[2]) - p.r.ListItem(&tmpBlock, work.Bytes(), flags) - case isUnorderedList(data): - if inList != true { - listType = "ul" - inList = true - } - matches := reUnorderedList.FindSubmatch(data) - var work bytes.Buffer - p.inline(&work, matches[2]) - p.r.ListItem(&tmpBlock, work.Bytes(), 0) - case isOrderedList(data): - if inList != true { - listType = "ol" - inList = true - } - matches := reOrderedList.FindSubmatch(data) - var work bytes.Buffer - tmpBlock.WriteString(" 0 { - tmpBlock.WriteString(" value=\"") - tmpBlock.Write(matches[2]) - tmpBlock.WriteString("\"") - matches[3] = matches[3][1:] - } - p.inline(&work, matches[3]) - tmpBlock.WriteString(">") - tmpBlock.Write(work.Bytes()) - tmpBlock.WriteString("\n") - case isHorizontalRule(data): - p.r.HRule(&output) - case isExampleLine(data): - if inParagraph == true { - if len(tmpBlock.Bytes()) > 0 { - p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) - inParagraph = false - } - tmpBlock.Reset() - } - if inFixedWidthArea != true { - tmpBlock.WriteString("
\n")
-				inFixedWidthArea = true
-			}
-			matches := reExampleLine.FindSubmatch(data)
-			tmpBlock.Write(matches[1])
-			tmpBlock.WriteString("\n")
-			break
-		default:
-			if inParagraph == false {
-				inParagraph = true
-				if inFixedWidthArea == true {
-					if tmpBlock.Len() > 0 {
-						tmpBlock.WriteString("
") - output.Write(tmpBlock.Bytes()) - } - inFixedWidthArea = false - tmpBlock.Reset() - } - } - tmpBlock.Write(data) - tmpBlock.WriteByte('\n') - } - } - - if len(tmpBlock.Bytes()) > 0 { - if inParagraph == true { - p.generateParagraph(&output, tmpBlock.Bytes()[:len(tmpBlock.Bytes())-1]) - } else if inFixedWidthArea == true { - tmpBlock.WriteString("\n") - output.Write(tmpBlock.Bytes()) - } - } - - // Writing footnote def. list - if len(p.notes) > 0 { - flags := blackfriday.LIST_ITEM_BEGINNING_OF_LIST - p.r.Footnotes(&output, func() bool { - for i := range p.notes { - p.r.FootnoteItem(&output, []byte(p.notes[i].id), []byte(p.notes[i].def), flags) - } - return true - }) - } - - return output.Bytes() -} - -// Org Syntax has been broken up into 4 distinct sections based on -// the org-syntax draft (http://orgmode.org/worg/dev/org-syntax.html): -// - Headlines -// - Greater Elements -// - Elements -// - Objects - -// Headlines -func isHeadline(data []byte) bool { - if !charMatches(data[0], '*') { - return false - } - level := 0 - for level < 6 && charMatches(data[level], '*') { - level++ - } - return charMatches(data[level], ' ') -} - -func (p *parser) generateHeadline(out *bytes.Buffer, data []byte) { - level := 1 - status := "" - priority := "" - - for level < 6 && data[level] == '*' { - level++ - } - - start := skipChar(data, level, ' ') - - data = data[start:] - i := 0 - - // Check if has a status so it can be rendered as a separate span that can be hidden or - // modified with CSS classes - if hasStatus(data[i:4]) { - status = string(data[i:4]) - i += 5 // one extra character for the next whitespace - } - - // Check if the next byte is a priority marker - if data[i] == '[' && hasPriority(data[i+1]) { - priority = string(data[i+1]) - i += 4 // for "[c]" + ' ' - } - - tags, tagsFound := findTags(data, i) - - headlineID := sanitized_anchor_name.Create(string(data[i:])) - - generate := func() bool { - dataEnd := len(data) - if tagsFound > 0 { - dataEnd = tagsFound - } - - headline := bytes.TrimRight(data[i:dataEnd], " \t") - - if status != "" { - out.WriteString("" + status + "") - out.WriteByte(' ') - } - - if priority != "" { - out.WriteString("[" + priority + "]") - out.WriteByte(' ') - } - - p.inline(out, headline) - - if tagsFound > 0 { - for _, tag := range tags { - out.WriteByte(' ') - out.WriteString("" + tag + "") - out.WriteByte(' ') - } - } - return true - } - - p.r.Header(out, generate, level, headlineID) -} - -func hasStatus(data []byte) bool { - return bytes.Contains(data, []byte("TODO")) || bytes.Contains(data, []byte("DONE")) -} - -func hasPriority(char byte) bool { - return (charMatches(char, 'A') || charMatches(char, 'B') || charMatches(char, 'C')) -} - -func findTags(data []byte, start int) ([]string, int) { - tags := []string{} - tagOpener := 0 - tagMarker := tagOpener - for tIdx := start; tIdx < len(data); tIdx++ { - if tagMarker > 0 && data[tIdx] == ':' { - tags = append(tags, string(data[tagMarker+1:tIdx])) - tagMarker = tIdx - } - if data[tIdx] == ':' && tagOpener == 0 && data[tIdx-1] == ' ' { - tagMarker = tIdx - tagOpener = tIdx - } - } - return tags, tagOpener -} - -// Greater Elements -// ~~ Definition Lists -var reDefinitionList = regexp.MustCompile(`^\s*-\s+(.+?)\s+::\s+(.*)`) - -func isDefinitionList(data []byte) bool { - return reDefinitionList.Match(data) -} - -// ~~ Example lines -var reExampleLine = regexp.MustCompile(`^\s*:\s(\s*.*)|^\s*:$`) - -func isExampleLine(data []byte) bool { - return reExampleLine.Match(data) -} - -// ~~ Ordered Lists -var 
reOrderedList = regexp.MustCompile(`^(\s*)\d+\.\s+\[?@?(\d*)\]?(.+)`) - -func isOrderedList(data []byte) bool { - return reOrderedList.Match(data) -} - -// ~~ Unordered Lists -var reUnorderedList = regexp.MustCompile(`^(\s*)[-\+]\s+(.+)`) - -func isUnorderedList(data []byte) bool { - return reUnorderedList.Match(data) -} - -// ~~ Tables -var reTableHeaders = regexp.MustCompile(`^[|+-]*$`) - -func isTable(data []byte) bool { - return charMatches(data[0], '|') -} - -func (p *parser) generateTable(output *bytes.Buffer, data []byte) { - var table bytes.Buffer - rows := bytes.Split(bytes.Trim(data, "\n"), []byte("\n")) - hasTableHeaders := len(rows) > 1 - if len(rows) > 1 { - hasTableHeaders = reTableHeaders.Match(rows[1]) - } - tbodySet := false - - for idx, row := range rows { - var rowBuff bytes.Buffer - if hasTableHeaders && idx == 0 { - table.WriteString("") - for _, cell := range bytes.Split(row[1:len(row)-1], []byte("|")) { - p.r.TableHeaderCell(&rowBuff, bytes.Trim(cell, " \t"), 0) - } - p.r.TableRow(&table, rowBuff.Bytes()) - table.WriteString("\n") - } else if hasTableHeaders && idx == 1 { - continue - } else { - if !tbodySet { - table.WriteString("") - tbodySet = true - } - if !reTableHeaders.Match(row) { - for _, cell := range bytes.Split(row[1:len(row)-1], []byte("|")) { - var cellBuff bytes.Buffer - p.inline(&cellBuff, bytes.Trim(cell, " \t")) - p.r.TableCell(&rowBuff, cellBuff.Bytes(), 0) - } - p.r.TableRow(&table, rowBuff.Bytes()) - } - if tbodySet && idx == len(rows)-1 { - table.WriteString("\n") - tbodySet = false - } - } - } - - output.WriteString("\n\n") - output.Write(table.Bytes()) - output.WriteString("
\n") -} - -// ~~ Property Drawers - -func isPropertyDrawer(data []byte) bool { - return bytes.Equal(data, []byte(":PROPERTIES:")) -} - -// ~~ Dynamic Blocks -var reBlock = regexp.MustCompile(`^#\+(BEGIN|END)_(\w+)\s*([0-9A-Za-z_\-]*)?`) - -func isBlock(data []byte) bool { - return reBlock.Match(data) -} - -// ~~ Footnotes -var reFootnoteDef = regexp.MustCompile(`^\[fn:([\w]+)\] +(.+)`) - -func isFootnoteDef(data []byte) bool { - return reFootnoteDef.Match(data) -} - -// Elements -// ~~ Keywords -func IsKeyword(data []byte) bool { - return len(data) > 2 && charMatches(data[0], '#') && charMatches(data[1], '+') && !charMatches(data[2], ' ') -} - -// ~~ Comments -func isComment(data []byte) bool { - return charMatches(data[0], '#') && charMatches(data[1], ' ') -} - -func (p *parser) generateComment(out *bytes.Buffer, data []byte) { - var work bytes.Buffer - work.WriteString("") - work.WriteByte('\n') - out.Write(work.Bytes()) -} - -// ~~ Horizontal Rules -var reHorizontalRule = regexp.MustCompile(`^\s*?-----\s?$`) - -func isHorizontalRule(data []byte) bool { - return reHorizontalRule.Match(data) -} - -// ~~ Paragraphs -func (p *parser) generateParagraph(out *bytes.Buffer, data []byte) { - generate := func() bool { - p.inline(out, bytes.Trim(data, " ")) - return true - } - p.r.Paragraph(out, generate) -} - -func (p *parser) generateList(output *bytes.Buffer, data []byte, listType string) { - generateList := func() bool { - output.WriteByte('\n') - p.inline(output, bytes.Trim(data, " ")) - return true - } - switch listType { - case "ul": - p.r.List(output, generateList, 0) - case "ol": - p.r.List(output, generateList, blackfriday.LIST_TYPE_ORDERED) - case "dl": - p.r.List(output, generateList, blackfriday.LIST_TYPE_DEFINITION) - } -} - -// Objects - -func (p *parser) inline(out *bytes.Buffer, data []byte) { - i, end := 0, 0 - - for i < len(data) { - for end < len(data) && p.inlineCallback[data[end]] == nil { - end++ - } - - p.r.Entity(out, data[i:end]) - - if end >= len(data) { - break - } - i = end - - handler := p.inlineCallback[data[i]] - - if consumed := handler(p, out, data, i); consumed > 0 { - i += consumed - end = i - continue - } - - end = i + 1 - } -} - -func isAcceptablePreOpeningChar(dataIn, data []byte, offset int) bool { - if len(dataIn) == len(data) { - return true - } - - char := dataIn[offset-1] - return charMatches(char, ' ') || isPreChar(char) -} - -func isPreChar(char byte) bool { - return charMatches(char, '>') || charMatches(char, '(') || charMatches(char, '{') || charMatches(char, '[') -} - -func isAcceptablePostClosingChar(char byte) bool { - return charMatches(char, ' ') || isTerminatingChar(char) -} - -func isTerminatingChar(char byte) bool { - return charMatches(char, '.') || charMatches(char, ',') || charMatches(char, '?') || charMatches(char, '!') || charMatches(char, ')') || charMatches(char, '}') || charMatches(char, ']') -} - -func findLastCharInInline(data []byte, char byte) int { - timesFound := 0 - last := 0 - // Start from character after the inline indicator - for i := 1; i < len(data); i++ { - if timesFound == 1 { - break - } - if data[i] == char { - if len(data) == i+1 || (len(data) > i+1 && isAcceptablePostClosingChar(data[i+1])) { - last = i - timesFound += 1 - } - } - } - return last -} - -func generator(p *parser, out *bytes.Buffer, dataIn []byte, offset int, char byte, doInline bool, renderer func(*bytes.Buffer, []byte)) int { - data := dataIn[offset:] - c := byte(char) - start := 1 - i := start - if len(data) <= 1 { - return 0 - } - - lastCharInside 
:= findLastCharInInline(data, c) - - // Org mode spec says a non-whitespace character must immediately follow. - // if the current char is the marker, then there's no text between, not a candidate - if isSpace(data[i]) || lastCharInside == i || !isAcceptablePreOpeningChar(dataIn, data, offset) { - return 0 - } - - if lastCharInside > 0 { - var work bytes.Buffer - if doInline { - p.inline(&work, data[start:lastCharInside]) - renderer(out, work.Bytes()) - } else { - renderer(out, data[start:lastCharInside]) - } - next := lastCharInside + 1 - return next - } - - return 0 -} - -// ~~ Text Markup -func generateVerbatim(p *parser, out *bytes.Buffer, data []byte, offset int) int { - return generator(p, out, data, offset, '=', false, p.r.CodeSpan) -} - -func generateCode(p *parser, out *bytes.Buffer, data []byte, offset int) int { - return generator(p, out, data, offset, '~', false, p.r.CodeSpan) -} - -func generateEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int { - return generator(p, out, data, offset, '/', true, p.r.Emphasis) -} - -func generateUnderline(p *parser, out *bytes.Buffer, data []byte, offset int) int { - underline := func(out *bytes.Buffer, text []byte) { - out.WriteString("") - out.Write(text) - out.WriteString("") - } - - return generator(p, out, data, offset, '_', true, underline) -} - -func generateBold(p *parser, out *bytes.Buffer, data []byte, offset int) int { - return generator(p, out, data, offset, '*', true, p.r.DoubleEmphasis) -} - -func generateStrikethrough(p *parser, out *bytes.Buffer, data []byte, offset int) int { - return generator(p, out, data, offset, '+', true, p.r.StrikeThrough) -} - -// ~~ Images and Links (inc. Footnote) -var reLinkOrImg = regexp.MustCompile(`\[\[(.+?)\]\[?(.*?)\]?\]`) - -func generateLinkOrImg(p *parser, out *bytes.Buffer, data []byte, offset int) int { - data = data[offset+1:] - start := 1 - i := start - var hyperlink []byte - isImage := false - isFootnote := false - closedLink := false - hasContent := false - - if bytes.Equal(data[0:3], []byte("fn:")) { - isFootnote = true - } else if data[0] != '[' { - return 0 - } - - if bytes.Equal(data[1:6], []byte("file:")) { - isImage = true - } - - for i < len(data) { - currChar := data[i] - switch { - case charMatches(currChar, ']') && closedLink == false: - if isImage { - hyperlink = data[start+5 : i] - } else if isFootnote { - refid := data[start+2 : i] - if bytes.Equal(refid, bytes.Trim(refid, " ")) { - p.notes = append(p.notes, footnotes{string(refid), "DEFINITION NOT FOUND"}) - p.r.FootnoteRef(out, refid, len(p.notes)) - return i + 2 - } else { - return 0 - } - } else if bytes.Equal(data[i-4:i], []byte(".org")) { - orgStart := start - if bytes.Equal(data[orgStart:orgStart+2], []byte("./")) { - orgStart = orgStart + 1 - } - hyperlink = data[orgStart : i-4] - } else { - hyperlink = data[start:i] - } - closedLink = true - case charMatches(currChar, '['): - start = i + 1 - hasContent = true - case charMatches(currChar, ']') && closedLink == true && hasContent == true && isImage == true: - p.r.Image(out, hyperlink, data[start:i], data[start:i]) - return i + 3 - case charMatches(currChar, ']') && closedLink == true && hasContent == true: - var tmpBuf bytes.Buffer - p.inline(&tmpBuf, data[start:i]) - p.r.Link(out, hyperlink, tmpBuf.Bytes(), tmpBuf.Bytes()) - return i + 3 - case charMatches(currChar, ']') && closedLink == true && hasContent == false && isImage == true: - p.r.Image(out, hyperlink, hyperlink, hyperlink) - return i + 2 - case charMatches(currChar, ']') && closedLink == 
true && hasContent == false: - p.r.Link(out, hyperlink, hyperlink, hyperlink) - return i + 2 - } - i++ - } - - return 0 -} - -// Helpers -func skipChar(data []byte, start int, char byte) int { - i := start - for i < len(data) && charMatches(data[i], char) { - i++ - } - return i -} - -func isSpace(char byte) bool { - return charMatches(char, ' ') -} - -func isEmpty(data []byte) bool { - if len(data) == 0 { - return true - } - - for i := 0; i < len(data) && !charMatches(data[i], '\n'); i++ { - if !charMatches(data[i], ' ') && !charMatches(data[i], '\t') { - return false - } - } - return true -} - -func charMatches(a byte, b byte) bool { - return a == b -} diff --git a/vendor/github.com/chaseadamsio/goorgeous/gopher.gif b/vendor/github.com/chaseadamsio/goorgeous/gopher.gif deleted file mode 100644 index be7567e3cf..0000000000 Binary files a/vendor/github.com/chaseadamsio/goorgeous/gopher.gif and /dev/null differ diff --git a/vendor/github.com/chaseadamsio/goorgeous/gopher_small.gif b/vendor/github.com/chaseadamsio/goorgeous/gopher_small.gif deleted file mode 100644 index 1cd31fdd0c..0000000000 Binary files a/vendor/github.com/chaseadamsio/goorgeous/gopher_small.gif and /dev/null differ diff --git a/vendor/github.com/chaseadamsio/goorgeous/header.go b/vendor/github.com/chaseadamsio/goorgeous/header.go deleted file mode 100644 index 66e8b99321..0000000000 --- a/vendor/github.com/chaseadamsio/goorgeous/header.go +++ /dev/null @@ -1,70 +0,0 @@ -package goorgeous - -import ( - "bufio" - "bytes" - "regexp" - "strings" -) - -// ExtractOrgHeaders finds and returns all of the headers -// from a bufio.Reader and returns them as their own byte slice -func ExtractOrgHeaders(r *bufio.Reader) (fm []byte, err error) { - var out bytes.Buffer - endOfHeaders := true - for endOfHeaders { - p, err := r.Peek(2) - if err != nil { - return nil, err - } - if !charMatches(p[0], '#') && !charMatches(p[1], '+') { - endOfHeaders = false - break - } - line, _, err := r.ReadLine() - if err != nil { - return nil, err - } - out.Write(line) - out.WriteByte('\n') - } - return out.Bytes(), nil -} - -var reHeader = regexp.MustCompile(`^#\+(\w+?): (.*)`) - -// OrgHeaders find all of the headers from a byte slice and returns -// them as a map of string interface -func OrgHeaders(input []byte) (map[string]interface{}, error) { - out := make(map[string]interface{}) - scanner := bufio.NewScanner(bytes.NewReader(input)) - - for scanner.Scan() { - data := scanner.Bytes() - if !charMatches(data[0], '#') && !charMatches(data[1], '+') { - return out, nil - } - matches := reHeader.FindSubmatch(data) - - if len(matches) < 3 { - continue - } - - key := string(matches[1]) - val := matches[2] - switch { - case strings.ToLower(key) == "tags" || strings.ToLower(key) == "categories" || strings.ToLower(key) == "aliases": - bTags := bytes.Split(val, []byte(" ")) - tags := make([]string, len(bTags)) - for idx, tag := range bTags { - tags[idx] = string(tag) - } - out[key] = tags - default: - out[key] = string(val) - } - - } - return out, nil - -} diff --git a/vendor/github.com/chaseadamsio/goorgeous/LICENSE b/vendor/github.com/niklasfasching/go-org/LICENSE similarity index 94% rename from vendor/github.com/chaseadamsio/goorgeous/LICENSE rename to vendor/github.com/niklasfasching/go-org/LICENSE index d7a37c6a3b..22986cae14 100644 --- a/vendor/github.com/chaseadamsio/goorgeous/LICENSE +++ b/vendor/github.com/niklasfasching/go-org/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2017 Chase Adams +Copyright (c) 2018 Niklas Fasching Permission is 
hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/niklasfasching/go-org/org/block.go b/vendor/github.com/niklasfasching/go-org/org/block.go new file mode 100644 index 0000000000..0e7a526243 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/block.go @@ -0,0 +1,84 @@ +package org + +import ( + "regexp" + "strings" + "unicode" +) + +type Block struct { + Name string + Parameters []string + Children []Node +} + +type Example struct { + Children []Node +} + +var exampleLineRegexp = regexp.MustCompile(`^(\s*):(\s(.*)|\s*$)`) +var beginBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+BEGIN_(\w+)(.*)`) +var endBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+END_(\w+)`) + +func lexBlock(line string) (token, bool) { + if m := beginBlockRegexp.FindStringSubmatch(line); m != nil { + return token{"beginBlock", len(m[1]), strings.ToUpper(m[2]), m}, true + } else if m := endBlockRegexp.FindStringSubmatch(line); m != nil { + return token{"endBlock", len(m[1]), strings.ToUpper(m[2]), m}, true + } + return nilToken, false +} + +func lexExample(line string) (token, bool) { + if m := exampleLineRegexp.FindStringSubmatch(line); m != nil { + return token{"example", len(m[1]), m[3], m}, true + } + return nilToken, false +} + +func isRawTextBlock(name string) bool { return name == "SRC" || name == "EXAMPLE" || name == "EXPORT" } + +func (d *Document) parseBlock(i int, parentStop stopFn) (int, Node) { + t, start := d.tokens[i], i + name, parameters := t.content, strings.Fields(t.matches[3]) + trim := trimIndentUpTo(d.tokens[i].lvl) + stop := func(d *Document, i int) bool { + return i >= len(d.tokens) || (d.tokens[i].kind == "endBlock" && d.tokens[i].content == name) + } + block, i := Block{name, parameters, nil}, i+1 + if isRawTextBlock(name) { + rawText := "" + for ; !stop(d, i); i++ { + rawText += trim(d.tokens[i].matches[0]) + "\n" + } + block.Children = d.parseRawInline(rawText) + } else { + consumed, nodes := d.parseMany(i, stop) + block.Children = nodes + i += consumed + } + if i < len(d.tokens) && d.tokens[i].kind == "endBlock" && d.tokens[i].content == name { + return i + 1 - start, block + } + return 0, nil +} + +func (d *Document) parseExample(i int, parentStop stopFn) (int, Node) { + example, start := Example{}, i + for ; !parentStop(d, i) && d.tokens[i].kind == "example"; i++ { + example.Children = append(example.Children, Text{d.tokens[i].content, true}) + } + return i - start, example +} + +func trimIndentUpTo(max int) func(string) string { + return func(line string) string { + i := 0 + for ; i < len(line) && i < max && unicode.IsSpace(rune(line[i])); i++ { + } + return line[i:] + } +} + +func (n Example) String() string { return orgWriter.nodesAsString(n) } +func (n Block) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/document.go b/vendor/github.com/niklasfasching/go-org/org/document.go new file mode 100644 index 0000000000..e43eb626db --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/document.go @@ -0,0 +1,260 @@ +// Package org is an Org mode syntax processor. +// +// It parses plain text into an AST and can export it as HTML or pretty printed Org mode syntax. +// Further export formats can be defined using the Writer interface. 
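+// See NewHTMLWriter and NewOrgWriter for the Writer implementations provided by this package.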
+//
+// You probably want to start with something like this:
+//   input := strings.NewReader("Your Org mode input")
+//   html, err := org.New().Parse(input, "./").Write(org.NewHTMLWriter())
+//   if err != nil {
+//       log.Fatalf("Something went wrong: %s", err)
+//   }
+//   log.Print(html)
+package org
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+)
+
+type Configuration struct {
+	MaxEmphasisNewLines int                                   // Maximum number of newlines inside an emphasis. See org-emphasis-regexp-components newline.
+	AutoLink            bool                                  // Try to convert text passages that look like hyperlinks into hyperlinks.
+	DefaultSettings     map[string]string                     // Default values for settings that are overridden by setting the same key in BufferSettings.
+	Log                 *log.Logger                           // Log is used to print warnings during parsing.
+	ReadFile            func(filename string) ([]byte, error) // ReadFile is used to read e.g. #+INCLUDE files.
+}
+
+// Document contains the parsing results and a pointer to the Configuration.
+type Document struct {
+	*Configuration
+	Path           string // Path of the file containing the parse input - used to resolve relative paths during parsing (e.g. INCLUDE).
+	tokens         []token
+	Nodes          []Node
+	NamedNodes     map[string]Node
+	Outline        Outline           // Outline is a Table Of Contents for the document and contains all sections (headline + content).
+	BufferSettings map[string]string // BufferSettings contains all settings that were parsed from keywords.
+	Error          error
+}
+
+// Node represents a parsed node of the document.
+type Node interface {
+	String() string // String returns the pretty printed Org mode string for the node (see OrgWriter).
+}
+
+type lexFn = func(line string) (t token, ok bool)
+type parseFn = func(*Document, int, stopFn) (int, Node)
+type stopFn = func(*Document, int) bool
+
+type token struct {
+	kind    string
+	lvl     int
+	content string
+	matches []string
+}
+
+var lexFns = []lexFn{
+	lexHeadline,
+	lexDrawer,
+	lexBlock,
+	lexList,
+	lexTable,
+	lexHorizontalRule,
+	lexKeywordOrComment,
+	lexFootnoteDefinition,
+	lexExample,
+	lexText,
+}
+
+var nilToken = token{"nil", -1, "", nil}
+var orgWriter = NewOrgWriter()
+
+// New returns a new Configuration with (hopefully) sane defaults.
+func New() *Configuration {
+	return &Configuration{
+		AutoLink:            true,
+		MaxEmphasisNewLines: 1,
+		DefaultSettings: map[string]string{
+			"TODO":         "TODO | DONE",
+			"EXCLUDE_TAGS": "noexport",
+			"OPTIONS":      "toc:t <:t e:t f:t pri:t todo:t tags:t",
+		},
+		Log:      log.New(os.Stderr, "go-org: ", 0),
+		ReadFile: ioutil.ReadFile,
+	}
+}
+
+// String returns the pretty printed Org mode string for the given nodes (see OrgWriter).
+func String(nodes []Node) string { return orgWriter.nodesAsString(nodes...) }
+
+// Write is called with an instance of the Writer interface to export a parsed Document into another format.
+func (d *Document) Write(w Writer) (out string, err error) {
+	defer func() {
+		if recovered := recover(); recovered != nil {
+			err = fmt.Errorf("could not write output: %s", recovered)
+		}
+	}()
+	if d.Error != nil {
+		return "", d.Error
+	} else if d.Nodes == nil {
+		return "", fmt.Errorf("could not write output: parse was not called")
+	}
+	w.Before(d)
+	WriteNodes(w, d.Nodes...)
+	w.After(d)
+	return w.String(), err
+}
+
+// Parse parses the input into an AST (and some other helpful fields like Outline).
+// To allow method chaining, errors are stored in document.Error rather than being returned.
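+// For example (a sketch):
+//   d := org.New().Parse(strings.NewReader("* Hello"), "./")
+//   if d.Error != nil { /* handle the parse error */ }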
+func (c *Configuration) Parse(input io.Reader, path string) (d *Document) { + outlineSection := &Section{} + d = &Document{ + Configuration: c, + Outline: Outline{outlineSection, outlineSection, 0}, + BufferSettings: map[string]string{}, + NamedNodes: map[string]Node{}, + Path: path, + } + defer func() { + if recovered := recover(); recovered != nil { + d.Error = fmt.Errorf("could not parse input: %v", recovered) + } + }() + if d.tokens != nil { + d.Error = fmt.Errorf("parse was called multiple times") + } + d.tokenize(input) + _, nodes := d.parseMany(0, func(d *Document, i int) bool { return i >= len(d.tokens) }) + d.Nodes = nodes + return d +} + +// Silent disables all logging of warnings during parsing. +func (c *Configuration) Silent() *Configuration { + c.Log = log.New(ioutil.Discard, "", 0) + return c +} + +func (d *Document) tokenize(input io.Reader) { + d.tokens = []token{} + scanner := bufio.NewScanner(input) + for scanner.Scan() { + d.tokens = append(d.tokens, tokenize(scanner.Text())) + } + if err := scanner.Err(); err != nil { + d.Error = fmt.Errorf("could not tokenize input: %s", err) + } +} + +// Get returns the value for key in BufferSettings or DefaultSettings if key does not exist in the former +func (d *Document) Get(key string) string { + if v, ok := d.BufferSettings[key]; ok { + return v + } + if v, ok := d.DefaultSettings[key]; ok { + return v + } + return "" +} + +// GetOption returns the value associated to the export option key +// Currently supported options: +// - < (export timestamps) +// - e (export org entities) +// - f (export footnotes) +// - toc (export table of content) +// - todo (export headline todo status) +// - pri (export headline priority) +// - tags (export headline tags) +// see https://orgmode.org/manual/Export-settings.html for more information +func (d *Document) GetOption(key string) bool { + get := func(settings map[string]string) string { + for _, field := range strings.Fields(settings["OPTIONS"]) { + if strings.HasPrefix(field, key+":") { + return field[len(key)+1:] + } + } + return "" + } + value := get(d.BufferSettings) + if value == "" { + value = get(d.DefaultSettings) + } + switch value { + case "t": + return true + case "nil": + return false + default: + d.Log.Printf("Bad value for export option %s (%s)", key, value) + return false + } +} + +func (d *Document) parseOne(i int, stop stopFn) (consumed int, node Node) { + switch d.tokens[i].kind { + case "unorderedList", "orderedList": + consumed, node = d.parseList(i, stop) + case "tableRow", "tableSeparator": + consumed, node = d.parseTable(i, stop) + case "beginBlock": + consumed, node = d.parseBlock(i, stop) + case "beginDrawer": + consumed, node = d.parseDrawer(i, stop) + case "text": + consumed, node = d.parseParagraph(i, stop) + case "example": + consumed, node = d.parseExample(i, stop) + case "horizontalRule": + consumed, node = d.parseHorizontalRule(i, stop) + case "comment": + consumed, node = d.parseComment(i, stop) + case "keyword": + consumed, node = d.parseKeyword(i, stop) + case "headline": + consumed, node = d.parseHeadline(i, stop) + case "footnoteDefinition": + consumed, node = d.parseFootnoteDefinition(i, stop) + } + + if consumed != 0 { + return consumed, node + } + d.Log.Printf("Could not parse token %#v: Falling back to treating it as plain text.", d.tokens[i]) + m := plainTextRegexp.FindStringSubmatch(d.tokens[i].matches[0]) + d.tokens[i] = token{"text", len(m[1]), m[2], m} + return d.parseOne(i, stop) +} + +func (d *Document) parseMany(i int, stop stopFn) (int, 
[]Node) { + start, nodes := i, []Node{} + for i < len(d.tokens) && !stop(d, i) { + consumed, node := d.parseOne(i, stop) + i += consumed + nodes = append(nodes, node) + } + return i - start, nodes +} + +func (d *Document) addHeadline(headline *Headline) int { + current := &Section{Headline: headline} + d.Outline.last.add(current) + d.Outline.count++ + d.Outline.last = current + return d.Outline.count +} + +func tokenize(line string) token { + for _, lexFn := range lexFns { + if token, ok := lexFn(line); ok { + return token + } + } + panic(fmt.Sprintf("could not lex line: %s", line)) +} diff --git a/vendor/github.com/niklasfasching/go-org/org/drawer.go b/vendor/github.com/niklasfasching/go-org/org/drawer.go new file mode 100644 index 0000000000..8bb9974380 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/drawer.go @@ -0,0 +1,97 @@ +package org + +import ( + "regexp" + "strings" +) + +type Drawer struct { + Name string + Children []Node +} + +type PropertyDrawer struct { + Properties [][]string +} + +var beginDrawerRegexp = regexp.MustCompile(`^(\s*):(\S+):\s*$`) +var endDrawerRegexp = regexp.MustCompile(`^(\s*):END:\s*$`) +var propertyRegexp = regexp.MustCompile(`^(\s*):(\S+):(\s+(.*)$|$)`) + +func lexDrawer(line string) (token, bool) { + if m := endDrawerRegexp.FindStringSubmatch(line); m != nil { + return token{"endDrawer", len(m[1]), "", m}, true + } else if m := beginDrawerRegexp.FindStringSubmatch(line); m != nil { + return token{"beginDrawer", len(m[1]), strings.ToUpper(m[2]), m}, true + } + return nilToken, false +} + +func (d *Document) parseDrawer(i int, parentStop stopFn) (int, Node) { + name := strings.ToUpper(d.tokens[i].content) + if name == "PROPERTIES" { + return d.parsePropertyDrawer(i, parentStop) + } + drawer, start := Drawer{Name: name}, i + i++ + stop := func(d *Document, i int) bool { + if parentStop(d, i) { + return true + } + kind := d.tokens[i].kind + return kind == "beginDrawer" || kind == "endDrawer" || kind == "headline" + } + for { + consumed, nodes := d.parseMany(i, stop) + i += consumed + drawer.Children = append(drawer.Children, nodes...) 
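+		// Drawers do not nest: a further :NAME: line inside this drawer is
+		// kept as literal paragraph text by the branch below rather than
+		// opening a new drawer.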
+ if i < len(d.tokens) && d.tokens[i].kind == "beginDrawer" { + p := Paragraph{[]Node{Text{":" + d.tokens[i].content + ":", false}}} + drawer.Children = append(drawer.Children, p) + i++ + } else { + break + } + } + if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" { + i++ + } + return i - start, drawer +} + +func (d *Document) parsePropertyDrawer(i int, parentStop stopFn) (int, Node) { + drawer, start := PropertyDrawer{}, i + i++ + stop := func(d *Document, i int) bool { + return parentStop(d, i) || (d.tokens[i].kind != "text" && d.tokens[i].kind != "beginDrawer") + } + for ; !stop(d, i); i++ { + m := propertyRegexp.FindStringSubmatch(d.tokens[i].matches[0]) + if m == nil { + return 0, nil + } + k, v := strings.ToUpper(m[2]), strings.TrimSpace(m[4]) + drawer.Properties = append(drawer.Properties, []string{k, v}) + } + if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" { + i++ + } else { + return 0, nil + } + return i - start, drawer +} + +func (d *PropertyDrawer) Get(key string) (string, bool) { + if d == nil { + return "", false + } + for _, kvPair := range d.Properties { + if kvPair[0] == key { + return kvPair[1], true + } + } + return "", false +} + +func (n Drawer) String() string { return orgWriter.nodesAsString(n) } +func (n PropertyDrawer) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/footnote.go b/vendor/github.com/niklasfasching/go-org/org/footnote.go new file mode 100644 index 0000000000..660e244386 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/footnote.go @@ -0,0 +1,35 @@ +package org + +import ( + "regexp" +) + +type FootnoteDefinition struct { + Name string + Children []Node + Inline bool +} + +var footnoteDefinitionRegexp = regexp.MustCompile(`^\[fn:([\w-]+)\](\s+(.+)|\s*$)`) + +func lexFootnoteDefinition(line string) (token, bool) { + if m := footnoteDefinitionRegexp.FindStringSubmatch(line); m != nil { + return token{"footnoteDefinition", 0, m[1], m}, true + } + return nilToken, false +} + +func (d *Document) parseFootnoteDefinition(i int, parentStop stopFn) (int, Node) { + start, name := i, d.tokens[i].content + d.tokens[i] = tokenize(d.tokens[i].matches[2]) + stop := func(d *Document, i int) bool { + return parentStop(d, i) || + (isSecondBlankLine(d, i) && i > start+1) || + d.tokens[i].kind == "headline" || d.tokens[i].kind == "footnoteDefinition" + } + consumed, nodes := d.parseMany(i, stop) + definition := FootnoteDefinition{name, nodes, false} + return consumed, definition +} + +func (n FootnoteDefinition) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/fuzz.go b/vendor/github.com/niklasfasching/go-org/org/fuzz.go new file mode 100644 index 0000000000..1e72b5ad92 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/fuzz.go @@ -0,0 +1,27 @@ +// +build gofuzz + +package org + +import ( + "bytes" + "strings" +) + +// Fuzz function to be used by https://github.com/dvyukov/go-fuzz +func Fuzz(input []byte) int { + conf := New().Silent() + d := conf.Parse(bytes.NewReader(input), "") + orgOutput, err := d.Write(NewOrgWriter()) + if err != nil { + panic(err) + } + htmlOutputA, err := d.Write(NewHTMLWriter()) + if err != nil { + panic(err) + } + htmlOutputB, err := conf.Parse(strings.NewReader(orgOutput), "").Write(NewHTMLWriter()) + if htmlOutputA != htmlOutputB { + panic("rendered org results in different html than original input") + } + return 0 +} diff --git 
a/vendor/github.com/niklasfasching/go-org/org/headline.go b/vendor/github.com/niklasfasching/go-org/org/headline.go new file mode 100644 index 0000000000..23b986fbc8 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/headline.go @@ -0,0 +1,101 @@ +package org + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +type Outline struct { + *Section + last *Section + count int +} + +type Section struct { + Headline *Headline + Parent *Section + Children []*Section +} + +type Headline struct { + Index int + Lvl int + Status string + Priority string + Properties *PropertyDrawer + Title []Node + Tags []string + Children []Node +} + +var headlineRegexp = regexp.MustCompile(`^([*]+)\s+(.*)`) +var tagRegexp = regexp.MustCompile(`(.*?)\s+(:[A-Za-z0-9_@#%:]+:\s*$)`) + +func lexHeadline(line string) (token, bool) { + if m := headlineRegexp.FindStringSubmatch(line); m != nil { + return token{"headline", len(m[1]), m[2], m}, true + } + return nilToken, false +} + +func (d *Document) parseHeadline(i int, parentStop stopFn) (int, Node) { + t, headline := d.tokens[i], Headline{} + headline.Lvl = t.lvl + + headline.Index = d.addHeadline(&headline) + + text := t.content + todoKeywords := strings.FieldsFunc(d.Get("TODO"), func(r rune) bool { return unicode.IsSpace(r) || r == '|' }) + for _, k := range todoKeywords { + if strings.HasPrefix(text, k) && len(text) > len(k) && unicode.IsSpace(rune(text[len(k)])) { + headline.Status = k + text = text[len(k)+1:] + break + } + } + + if len(text) >= 4 && text[0:2] == "[#" && strings.Contains("ABC", text[2:3]) && text[3] == ']' { + headline.Priority = text[2:3] + text = strings.TrimSpace(text[4:]) + } + + if m := tagRegexp.FindStringSubmatch(text); m != nil { + text = m[1] + headline.Tags = strings.FieldsFunc(m[2], func(r rune) bool { return r == ':' }) + } + + headline.Title = d.parseInline(text) + + stop := func(d *Document, i int) bool { + return parentStop(d, i) || d.tokens[i].kind == "headline" && d.tokens[i].lvl <= headline.Lvl + } + consumed, nodes := d.parseMany(i+1, stop) + if len(nodes) > 0 { + if d, ok := nodes[0].(PropertyDrawer); ok { + headline.Properties = &d + nodes = nodes[1:] + } + } + headline.Children = nodes + return consumed + 1, headline +} + +func (h Headline) ID() string { + if customID, ok := h.Properties.Get("CUSTOM_ID"); ok { + return customID + } + return fmt.Sprintf("headline-%d", h.Index) +} + +func (parent *Section) add(current *Section) { + if parent.Headline == nil || parent.Headline.Lvl < current.Headline.Lvl { + parent.Children = append(parent.Children, current) + current.Parent = parent + } else { + parent.Parent.add(current) + } +} + +func (n Headline) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/html_entity.go b/vendor/github.com/niklasfasching/go-org/org/html_entity.go new file mode 100644 index 0000000000..484059b28d --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/html_entity.go @@ -0,0 +1,437 @@ +package org + +import "strings" + +var htmlEntityReplacer *strings.Replacer + +func init() { + htmlEntities = append(htmlEntities, + "---", "—", + "--", "–", + "...", "…", + ) + htmlEntityReplacer = strings.NewReplacer(htmlEntities...) 
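+	// Note: strings.NewReplacer matches patterns in argument order, so the
+	// "---" entry appended above takes precedence over "--".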
+} + +/* +Generated & copied over using the following elisp +(Setting up go generate seems like a waste for now - I call YAGNI on that one) + +(insert (mapconcat + (lambda (entity) (concat "`\\" (car entity) "`, `" (nth 6 entity) "`")) ; entity -> utf8 + (remove-if-not 'listp org-entities) + ",\n")) +*/ +var htmlEntities = []string{ + `\Agrave`, `À`, + `\agrave`, `à`, + `\Aacute`, `Á`, + `\aacute`, `á`, + `\Acirc`, `Â`, + `\acirc`, `â`, + `\Amacr`, `Ã`, + `\amacr`, `ã`, + `\Atilde`, `Ã`, + `\atilde`, `ã`, + `\Auml`, `Ä`, + `\auml`, `ä`, + `\Aring`, `Å`, + `\AA`, `Å`, + `\aring`, `å`, + `\AElig`, `Æ`, + `\aelig`, `æ`, + `\Ccedil`, `Ç`, + `\ccedil`, `ç`, + `\Egrave`, `È`, + `\egrave`, `è`, + `\Eacute`, `É`, + `\eacute`, `é`, + `\Ecirc`, `Ê`, + `\ecirc`, `ê`, + `\Euml`, `Ë`, + `\euml`, `ë`, + `\Igrave`, `Ì`, + `\igrave`, `ì`, + `\Iacute`, `Í`, + `\iacute`, `í`, + `\Icirc`, `Î`, + `\icirc`, `î`, + `\Iuml`, `Ï`, + `\iuml`, `ï`, + `\Ntilde`, `Ñ`, + `\ntilde`, `ñ`, + `\Ograve`, `Ò`, + `\ograve`, `ò`, + `\Oacute`, `Ó`, + `\oacute`, `ó`, + `\Ocirc`, `Ô`, + `\ocirc`, `ô`, + `\Otilde`, `Õ`, + `\otilde`, `õ`, + `\Ouml`, `Ö`, + `\ouml`, `ö`, + `\Oslash`, `Ø`, + `\oslash`, `ø`, + `\OElig`, `Œ`, + `\oelig`, `œ`, + `\Scaron`, `Š`, + `\scaron`, `š`, + `\szlig`, `ß`, + `\Ugrave`, `Ù`, + `\ugrave`, `ù`, + `\Uacute`, `Ú`, + `\uacute`, `ú`, + `\Ucirc`, `Û`, + `\ucirc`, `û`, + `\Uuml`, `Ü`, + `\uuml`, `ü`, + `\Yacute`, `Ý`, + `\yacute`, `ý`, + `\Yuml`, `Ÿ`, + `\yuml`, `ÿ`, + `\fnof`, `ƒ`, + `\real`, `ℜ`, + `\image`, `ℑ`, + `\weierp`, `℘`, + `\ell`, `ℓ`, + `\imath`, `ı`, + `\jmath`, `ȷ`, + `\Alpha`, `Α`, + `\alpha`, `α`, + `\Beta`, `Β`, + `\beta`, `β`, + `\Gamma`, `Γ`, + `\gamma`, `γ`, + `\Delta`, `Δ`, + `\delta`, `δ`, + `\Epsilon`, `Ε`, + `\epsilon`, `ε`, + `\varepsilon`, `ε`, + `\Zeta`, `Ζ`, + `\zeta`, `ζ`, + `\Eta`, `Η`, + `\eta`, `η`, + `\Theta`, `Θ`, + `\theta`, `θ`, + `\thetasym`, `ϑ`, + `\vartheta`, `ϑ`, + `\Iota`, `Ι`, + `\iota`, `ι`, + `\Kappa`, `Κ`, + `\kappa`, `κ`, + `\Lambda`, `Λ`, + `\lambda`, `λ`, + `\Mu`, `Μ`, + `\mu`, `μ`, + `\nu`, `ν`, + `\Nu`, `Ν`, + `\Xi`, `Ξ`, + `\xi`, `ξ`, + `\Omicron`, `Ο`, + `\omicron`, `ο`, + `\Pi`, `Π`, + `\pi`, `π`, + `\Rho`, `Ρ`, + `\rho`, `ρ`, + `\Sigma`, `Σ`, + `\sigma`, `σ`, + `\sigmaf`, `ς`, + `\varsigma`, `ς`, + `\Tau`, `Τ`, + `\Upsilon`, `Υ`, + `\upsih`, `ϒ`, + `\upsilon`, `υ`, + `\Phi`, `Φ`, + `\phi`, `ɸ`, + `\varphi`, `φ`, + `\Chi`, `Χ`, + `\chi`, `χ`, + `\acutex`, `𝑥́`, + `\Psi`, `Ψ`, + `\psi`, `ψ`, + `\tau`, `τ`, + `\Omega`, `Ω`, + `\omega`, `ω`, + `\piv`, `ϖ`, + `\varpi`, `ϖ`, + `\partial`, `∂`, + `\alefsym`, `ℵ`, + `\aleph`, `ℵ`, + `\gimel`, `ℷ`, + `\beth`, `ב`, + `\dalet`, `ד`, + `\ETH`, `Ð`, + `\eth`, `ð`, + `\THORN`, `Þ`, + `\thorn`, `þ`, + `\dots`, `…`, + `\cdots`, `⋯`, + `\hellip`, `…`, + `\middot`, `·`, + `\iexcl`, `¡`, + `\iquest`, `¿`, + `\shy`, ``, + `\ndash`, `–`, + `\mdash`, `—`, + `\quot`, `"`, + `\acute`, `´`, + `\ldquo`, `“`, + `\rdquo`, `”`, + `\bdquo`, `„`, + `\lsquo`, `‘`, + `\rsquo`, `’`, + `\sbquo`, `‚`, + `\laquo`, `«`, + `\raquo`, `»`, + `\lsaquo`, `‹`, + `\rsaquo`, `›`, + `\circ`, `∘`, + `\vert`, `|`, + `\vbar`, `|`, + `\brvbar`, `¦`, + `\S`, `§`, + `\sect`, `§`, + `\amp`, `&`, + `\lt`, `<`, + `\gt`, `>`, + `\tilde`, `~`, + `\slash`, `/`, + `\plus`, `+`, + `\under`, `_`, + `\equal`, `=`, + `\asciicirc`, `^`, + `\dagger`, `†`, + `\dag`, `†`, + `\Dagger`, `‡`, + `\ddag`, `‡`, + `\nbsp`, ` `, + `\ensp`, ` `, + `\emsp`, ` `, + `\thinsp`, ` `, + `\curren`, `¤`, + `\cent`, `¢`, + `\pound`, `£`, + `\yen`, `¥`, + `\euro`, `€`, + `\EUR`, `€`, + 
`\dollar`, `$`, + `\USD`, `$`, + `\copy`, `©`, + `\reg`, `®`, + `\trade`, `™`, + `\minus`, `−`, + `\pm`, `±`, + `\plusmn`, `±`, + `\times`, `×`, + `\frasl`, `⁄`, + `\colon`, `:`, + `\div`, `÷`, + `\frac12`, `½`, + `\frac14`, `¼`, + `\frac34`, `¾`, + `\permil`, `‰`, + `\sup1`, `¹`, + `\sup2`, `²`, + `\sup3`, `³`, + `\radic`, `√`, + `\sum`, `∑`, + `\prod`, `∏`, + `\micro`, `µ`, + `\macr`, `¯`, + `\deg`, `°`, + `\prime`, `′`, + `\Prime`, `″`, + `\infin`, `∞`, + `\infty`, `∞`, + `\prop`, `∝`, + `\propto`, `∝`, + `\not`, `¬`, + `\neg`, `¬`, + `\land`, `∧`, + `\wedge`, `∧`, + `\lor`, `∨`, + `\vee`, `∨`, + `\cap`, `∩`, + `\cup`, `∪`, + `\smile`, `⌣`, + `\frown`, `⌢`, + `\int`, `∫`, + `\therefore`, `∴`, + `\there4`, `∴`, + `\because`, `∵`, + `\sim`, `∼`, + `\cong`, `≅`, + `\simeq`, `≅`, + `\asymp`, `≈`, + `\approx`, `≈`, + `\ne`, `≠`, + `\neq`, `≠`, + `\equiv`, `≡`, + `\triangleq`, `≜`, + `\le`, `≤`, + `\leq`, `≤`, + `\ge`, `≥`, + `\geq`, `≥`, + `\lessgtr`, `≶`, + `\lesseqgtr`, `⋚`, + `\ll`, `≪`, + `\Ll`, `⋘`, + `\lll`, `⋘`, + `\gg`, `≫`, + `\Gg`, `⋙`, + `\ggg`, `⋙`, + `\prec`, `≺`, + `\preceq`, `≼`, + `\preccurlyeq`, `≼`, + `\succ`, `≻`, + `\succeq`, `≽`, + `\succcurlyeq`, `≽`, + `\sub`, `⊂`, + `\subset`, `⊂`, + `\sup`, `⊃`, + `\supset`, `⊃`, + `\nsub`, `⊄`, + `\sube`, `⊆`, + `\nsup`, `⊅`, + `\supe`, `⊇`, + `\setminus`, `⧵`, + `\forall`, `∀`, + `\exist`, `∃`, + `\exists`, `∃`, + `\nexist`, `∄`, + `\nexists`, `∄`, + `\empty`, `∅`, + `\emptyset`, `∅`, + `\isin`, `∈`, + `\in`, `∈`, + `\notin`, `∉`, + `\ni`, `∋`, + `\nabla`, `∇`, + `\ang`, `∠`, + `\angle`, `∠`, + `\perp`, `⊥`, + `\parallel`, `∥`, + `\sdot`, `⋅`, + `\cdot`, `⋅`, + `\lceil`, `⌈`, + `\rceil`, `⌉`, + `\lfloor`, `⌊`, + `\rfloor`, `⌋`, + `\lang`, `⟨`, + `\rang`, `⟩`, + `\langle`, `⟨`, + `\rangle`, `⟩`, + `\hbar`, `ℏ`, + `\mho`, `℧`, + `\larr`, `←`, + `\leftarrow`, `←`, + `\gets`, `←`, + `\lArr`, `⇐`, + `\Leftarrow`, `⇐`, + `\uarr`, `↑`, + `\uparrow`, `↑`, + `\uArr`, `⇑`, + `\Uparrow`, `⇑`, + `\rarr`, `→`, + `\to`, `→`, + `\rightarrow`, `→`, + `\rArr`, `⇒`, + `\Rightarrow`, `⇒`, + `\darr`, `↓`, + `\downarrow`, `↓`, + `\dArr`, `⇓`, + `\Downarrow`, `⇓`, + `\harr`, `↔`, + `\leftrightarrow`, `↔`, + `\hArr`, `⇔`, + `\Leftrightarrow`, `⇔`, + `\crarr`, `↵`, + `\hookleftarrow`, `↵`, + `\arccos`, `arccos`, + `\arcsin`, `arcsin`, + `\arctan`, `arctan`, + `\arg`, `arg`, + `\cos`, `cos`, + `\cosh`, `cosh`, + `\cot`, `cot`, + `\coth`, `coth`, + `\csc`, `csc`, + `\deg`, `deg`, + `\det`, `det`, + `\dim`, `dim`, + `\exp`, `exp`, + `\gcd`, `gcd`, + `\hom`, `hom`, + `\inf`, `inf`, + `\ker`, `ker`, + `\lg`, `lg`, + `\lim`, `lim`, + `\liminf`, `liminf`, + `\limsup`, `limsup`, + `\ln`, `ln`, + `\log`, `log`, + `\max`, `max`, + `\min`, `min`, + `\Pr`, `Pr`, + `\sec`, `sec`, + `\sin`, `sin`, + `\sinh`, `sinh`, + `\sup`, `sup`, + `\tan`, `tan`, + `\tanh`, `tanh`, + `\bull`, `•`, + `\bullet`, `•`, + `\star`, `⋆`, + `\lowast`, `∗`, + `\ast`, `*`, + `\odot`, `ʘ`, + `\oplus`, `⊕`, + `\otimes`, `⊗`, + `\check`, `✓`, + `\checkmark`, `✓`, + `\para`, `¶`, + `\ordf`, `ª`, + `\ordm`, `º`, + `\cedil`, `¸`, + `\oline`, `‾`, + `\uml`, `¨`, + `\zwnj`, `‌`, + `\zwj`, `‍`, + `\lrm`, `‎`, + `\rlm`, `‏`, + `\smiley`, `☺`, + `\blacksmile`, `☻`, + `\sad`, `☹`, + `\frowny`, `☹`, + `\clubs`, `♣`, + `\clubsuit`, `♣`, + `\spades`, `♠`, + `\spadesuit`, `♠`, + `\hearts`, `♥`, + `\heartsuit`, `♥`, + `\diams`, `◆`, + `\diamondsuit`, `◆`, + `\diamond`, `◆`, + `\Diamond`, `◆`, + `\loz`, `⧫`, + `\_ `, ` `, + `\_ `, `  `, + `\_ `, `   `, + `\_ `, `    `, + `\_ `, `     `, + `\_ `, `      `, + 
`\_ `, `       `, + `\_ `, `        `, + `\_ `, `         `, + `\_ `, `          `, + `\_ `, `           `, + `\_ `, `            `, + `\_ `, `             `, + `\_ `, `              `, + `\_ `, `               `, + `\_ `, `                `, + `\_ `, `                 `, + `\_ `, `                  `, + `\_ `, `                   `, + `\_ `, `                    `, +} diff --git a/vendor/github.com/niklasfasching/go-org/org/html_writer.go b/vendor/github.com/niklasfasching/go-org/org/html_writer.go new file mode 100644 index 0000000000..90a48c6b4b --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/html_writer.go @@ -0,0 +1,504 @@ +package org + +import ( + "fmt" + "html" + "log" + "regexp" + "strings" + "unicode" + + h "golang.org/x/net/html" + "golang.org/x/net/html/atom" +) + +// HTMLWriter exports an org document into a html document. +type HTMLWriter struct { + ExtendingWriter Writer + HighlightCodeBlock func(source, lang string) string + + strings.Builder + document *Document + htmlEscape bool + log *log.Logger + footnotes *footnotes +} + +type footnotes struct { + mapping map[string]int + list []*FootnoteDefinition +} + +var emphasisTags = map[string][]string{ + "/": []string{"", ""}, + "*": []string{"", ""}, + "+": []string{"", ""}, + "~": []string{"", ""}, + "=": []string{``, ""}, + "_": []string{``, ""}, + "_{}": []string{"", ""}, + "^{}": []string{"", ""}, +} + +var listTags = map[string][]string{ + "unordered": []string{"
    ", "
"}, + "ordered": []string{"
    ", "
"}, + "descriptive": []string{"
", "
"}, +} + +var listItemStatuses = map[string]string{ + " ": "unchecked", + "-": "indeterminate", + "X": "checked", +} + +var cleanHeadlineTitleForHTMLAnchorRegexp = regexp.MustCompile(`]*>`) // nested a tags are not valid HTML + +func NewHTMLWriter() *HTMLWriter { + defaultConfig := New() + return &HTMLWriter{ + document: &Document{Configuration: defaultConfig}, + log: defaultConfig.Log, + htmlEscape: true, + HighlightCodeBlock: func(source, lang string) string { + return fmt.Sprintf("
<div class=\"highlight\">\n<pre>
\n%s\n
</pre>\n</div>
", html.EscapeString(source)) + }, + footnotes: &footnotes{ + mapping: map[string]int{}, + }, + } +} + +func (w *HTMLWriter) emptyClone() *HTMLWriter { + wcopy := *w + wcopy.Builder = strings.Builder{} + return &wcopy +} + +func (w *HTMLWriter) nodesAsString(nodes ...Node) string { + tmp := w.emptyClone() + WriteNodes(tmp, nodes...) + return tmp.String() +} + +func (w *HTMLWriter) WriterWithExtensions() Writer { + if w.ExtendingWriter != nil { + return w.ExtendingWriter + } + return w +} + +func (w *HTMLWriter) Before(d *Document) { + w.document = d + w.log = d.Log + w.WriteOutline(d) +} + +func (w *HTMLWriter) After(d *Document) { + w.WriteFootnotes(d) +} + +func (w *HTMLWriter) WriteComment(Comment) {} +func (w *HTMLWriter) WritePropertyDrawer(PropertyDrawer) {} + +func (w *HTMLWriter) WriteBlock(b Block) { + content := "" + if isRawTextBlock(b.Name) { + exportWriter := w.emptyClone() + exportWriter.htmlEscape = false + WriteNodes(exportWriter, b.Children...) + content = strings.TrimRightFunc(exportWriter.String(), unicode.IsSpace) + } else { + content = w.nodesAsString(b.Children...) + } + switch name := b.Name; { + case name == "SRC": + lang := "text" + if len(b.Parameters) >= 1 { + lang = strings.ToLower(b.Parameters[0]) + } + content = w.HighlightCodeBlock(content, lang) + w.WriteString(fmt.Sprintf("
<div class=\"src src-%s\">\n%s\n</div>
\n", lang, content)) + case name == "EXAMPLE": + w.WriteString(`
` + "\n" + content + "\n
\n") + case name == "EXPORT" && len(b.Parameters) >= 1 && strings.ToLower(b.Parameters[0]) == "html": + w.WriteString(content + "\n") + case name == "QUOTE": + w.WriteString("
\n" + content + "
\n") + case name == "CENTER": + w.WriteString(`
` + "\n") + w.WriteString(content + "
\n") + default: + w.WriteString(fmt.Sprintf(`
<div class="%s-block">`, strings.ToLower(b.Name)) + "\n") + w.WriteString(content + "</div>
\n") + } +} + +func (w *HTMLWriter) WriteDrawer(d Drawer) { + WriteNodes(w, d.Children...) +} + +func (w *HTMLWriter) WriteKeyword(k Keyword) { + if k.Key == "HTML" { + w.WriteString(k.Value + "\n") + } +} + +func (w *HTMLWriter) WriteInclude(i Include) { + WriteNodes(w, i.Resolve()) +} + +func (w *HTMLWriter) WriteFootnoteDefinition(f FootnoteDefinition) { + w.footnotes.updateDefinition(f) +} + +func (w *HTMLWriter) WriteFootnotes(d *Document) { + if !w.document.GetOption("f") || len(w.footnotes.list) == 0 { + return + } + w.WriteString(`
` + "\n") + w.WriteString(`
` + "\n") + w.WriteString(`
` + "\n") + for i, definition := range w.footnotes.list { + id := i + 1 + if definition == nil { + name := "" + for k, v := range w.footnotes.mapping { + if v == i { + name = k + } + } + w.log.Printf("Missing footnote definition for [fn:%s] (#%d)", name, id) + continue + } + w.WriteString(`
` + "\n") + w.WriteString(fmt.Sprintf(`%d`, id, id, id) + "\n") + w.WriteString(`
` + "\n") + WriteNodes(w, definition.Children...) + w.WriteString("
\n
\n") + } + w.WriteString("
\n
\n") +} + +func (w *HTMLWriter) WriteOutline(d *Document) { + if w.document.GetOption("toc") && len(d.Outline.Children) != 0 { + w.WriteString("\n") + } +} + +func (w *HTMLWriter) writeSection(section *Section) { + // NOTE: To satisfy hugo ExtractTOC() check we cannot use `
\n` here. Doesn't really matter, just a note. + w.WriteString("
  • ") + h := section.Headline + title := cleanHeadlineTitleForHTMLAnchorRegexp.ReplaceAllString(w.nodesAsString(h.Title...), "") + w.WriteString(fmt.Sprintf("%s\n", h.ID(), title)) + if len(section.Children) != 0 { + w.WriteString("
      \n") + for _, section := range section.Children { + w.writeSection(section) + } + w.WriteString("
    \n") + } + w.WriteString("
  • \n") +} + +func (w *HTMLWriter) WriteHeadline(h Headline) { + for _, excludeTag := range strings.Fields(w.document.Get("EXCLUDE_TAGS")) { + for _, tag := range h.Tags { + if excludeTag == tag { + return + } + } + } + + w.WriteString(fmt.Sprintf(``, h.Lvl, h.ID()) + "\n") + if w.document.GetOption("todo") && h.Status != "" { + w.WriteString(fmt.Sprintf(`%s`, h.Status) + "\n") + } + if w.document.GetOption("pri") && h.Priority != "" { + w.WriteString(fmt.Sprintf(`[%s]`, h.Priority) + "\n") + } + + WriteNodes(w, h.Title...) + if w.document.GetOption("tags") && len(h.Tags) != 0 { + tags := make([]string, len(h.Tags)) + for i, tag := range h.Tags { + tags[i] = fmt.Sprintf(`%s`, tag) + } + w.WriteString("   ") + w.WriteString(fmt.Sprintf(`%s`, strings.Join(tags, " "))) + } + w.WriteString(fmt.Sprintf("\n\n", h.Lvl)) + WriteNodes(w, h.Children...) +} + +func (w *HTMLWriter) WriteText(t Text) { + if !w.htmlEscape { + w.WriteString(t.Content) + } else if !w.document.GetOption("e") || t.IsRaw { + w.WriteString(html.EscapeString(t.Content)) + } else { + w.WriteString(html.EscapeString(htmlEntityReplacer.Replace(t.Content))) + } +} + +func (w *HTMLWriter) WriteEmphasis(e Emphasis) { + tags, ok := emphasisTags[e.Kind] + if !ok { + panic(fmt.Sprintf("bad emphasis %#v", e)) + } + w.WriteString(tags[0]) + WriteNodes(w, e.Content...) + w.WriteString(tags[1]) +} + +func (w *HTMLWriter) WriteLatexFragment(l LatexFragment) { + w.WriteString(l.OpeningPair) + WriteNodes(w, l.Content...) + w.WriteString(l.ClosingPair) +} + +func (w *HTMLWriter) WriteStatisticToken(s StatisticToken) { + w.WriteString(fmt.Sprintf(`[%s]`, s.Content)) +} + +func (w *HTMLWriter) WriteLineBreak(l LineBreak) { + w.WriteString(strings.Repeat("\n", l.Count)) +} + +func (w *HTMLWriter) WriteExplicitLineBreak(l ExplicitLineBreak) { + w.WriteString("
    \n") +} + +func (w *HTMLWriter) WriteFootnoteLink(l FootnoteLink) { + if !w.document.GetOption("f") { + return + } + i := w.footnotes.add(l) + id := i + 1 + w.WriteString(fmt.Sprintf(`%d`, id, id, id)) +} + +func (w *HTMLWriter) WriteTimestamp(t Timestamp) { + if !w.document.GetOption("<") { + return + } + w.WriteString(`<`) + if t.IsDate { + w.WriteString(t.Time.Format(datestampFormat)) + } else { + w.WriteString(t.Time.Format(timestampFormat)) + } + if t.Interval != "" { + w.WriteString(" " + t.Interval) + } + w.WriteString(`>`) +} + +func (w *HTMLWriter) WriteRegularLink(l RegularLink) { + url := html.EscapeString(l.URL) + if l.Protocol == "file" { + url = url[len("file:"):] + } + description := url + if l.Description != nil { + description = w.nodesAsString(l.Description...) + } + switch l.Kind() { + case "image": + w.WriteString(fmt.Sprintf(`%s`, url, description, description)) + case "video": + w.WriteString(fmt.Sprintf(``, url, description, description)) + default: + w.WriteString(fmt.Sprintf(`%s`, url, description)) + } +} + +func (w *HTMLWriter) WriteList(l List) { + tags, ok := listTags[l.Kind] + if !ok { + panic(fmt.Sprintf("bad list kind %#v", l)) + } + w.WriteString(tags[0] + "\n") + WriteNodes(w, l.Items...) + w.WriteString(tags[1] + "\n") +} + +func (w *HTMLWriter) WriteListItem(li ListItem) { + if li.Status != "" { + w.WriteString(fmt.Sprintf("
  • \n", listItemStatuses[li.Status])) + } else { + w.WriteString("
  • \n") + } + WriteNodes(w, li.Children...) + w.WriteString("
  • \n") +} + +func (w *HTMLWriter) WriteDescriptiveListItem(di DescriptiveListItem) { + if di.Status != "" { + w.WriteString(fmt.Sprintf("
    \n", listItemStatuses[di.Status])) + } else { + w.WriteString("
    \n") + } + + if len(di.Term) != 0 { + WriteNodes(w, di.Term...) + } else { + w.WriteString("?") + } + w.WriteString("\n
    \n") + w.WriteString("
    \n") + WriteNodes(w, di.Details...) + w.WriteString("
    \n") +} + +func (w *HTMLWriter) WriteParagraph(p Paragraph) { + if len(p.Children) == 0 { + return + } + w.WriteString("

    ") + if _, ok := p.Children[0].(LineBreak); !ok { + w.WriteString("\n") + } + WriteNodes(w, p.Children...) + w.WriteString("\n

    \n") +} + +func (w *HTMLWriter) WriteExample(e Example) { + w.WriteString(`
    ` + "\n")
    +	if len(e.Children) != 0 {
    +		for _, n := range e.Children {
    +			WriteNodes(w, n)
    +			w.WriteString("\n")
    +		}
    +	}
+	w.WriteString("</pre>
    \n") +} + +func (w *HTMLWriter) WriteHorizontalRule(h HorizontalRule) { + w.WriteString("
    \n") +} + +func (w *HTMLWriter) WriteNodeWithMeta(n NodeWithMeta) { + out := w.nodesAsString(n.Node) + if p, ok := n.Node.(Paragraph); ok { + if len(p.Children) == 1 && isImageOrVideoLink(p.Children[0]) { + out = w.nodesAsString(p.Children[0]) + } + } + for _, attributes := range n.Meta.HTMLAttributes { + out = w.withHTMLAttributes(out, attributes...) + "\n" + } + if len(n.Meta.Caption) != 0 { + caption := "" + for i, ns := range n.Meta.Caption { + if i != 0 { + caption += " " + } + caption += w.nodesAsString(ns...) + } + out = fmt.Sprintf("
<figure>\n%s<figcaption>
    \n%s\n
</figcaption>\n</figure>
    \n", out, caption) + } + w.WriteString(out) +} + +func (w *HTMLWriter) WriteNodeWithName(n NodeWithName) { + WriteNodes(w, n.Node) +} + +func (w *HTMLWriter) WriteTable(t Table) { + w.WriteString("\n") + beforeFirstContentRow := true + for i, row := range t.Rows { + if row.IsSpecial || len(row.Columns) == 0 { + continue + } + if beforeFirstContentRow { + beforeFirstContentRow = false + if i+1 < len(t.Rows) && len(t.Rows[i+1].Columns) == 0 { + w.WriteString("\n") + w.writeTableColumns(row.Columns, "th") + w.WriteString("\n\n") + continue + } else { + w.WriteString("\n") + } + } + w.writeTableColumns(row.Columns, "td") + } + w.WriteString("\n
    \n") +} + +func (w *HTMLWriter) writeTableColumns(columns []Column, tag string) { + w.WriteString("\n") + for _, column := range columns { + if column.Align == "" { + w.WriteString(fmt.Sprintf("<%s>", tag)) + } else { + w.WriteString(fmt.Sprintf(`<%s class="align-%s">`, tag, column.Align)) + } + WriteNodes(w, column.Children...) + w.WriteString(fmt.Sprintf("\n", tag)) + } + w.WriteString("\n") +} + +func (w *HTMLWriter) withHTMLAttributes(input string, kvs ...string) string { + if len(kvs)%2 != 0 { + w.log.Printf("withHTMLAttributes: Len of kvs must be even: %#v", kvs) + return input + } + context := &h.Node{Type: h.ElementNode, Data: "body", DataAtom: atom.Body} + nodes, err := h.ParseFragment(strings.NewReader(strings.TrimSpace(input)), context) + if err != nil || len(nodes) != 1 { + w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, nodes, err) + return input + } + out, node := strings.Builder{}, nodes[0] + for i := 0; i < len(kvs)-1; i += 2 { + node.Attr = setHTMLAttribute(node.Attr, strings.TrimPrefix(kvs[i], ":"), kvs[i+1]) + } + err = h.Render(&out, nodes[0]) + if err != nil { + w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, node, err) + return input + } + return out.String() +} + +func setHTMLAttribute(attributes []h.Attribute, k, v string) []h.Attribute { + for i, a := range attributes { + if strings.ToLower(a.Key) == strings.ToLower(k) { + switch strings.ToLower(k) { + case "class", "style": + attributes[i].Val += " " + v + default: + attributes[i].Val = v + } + return attributes + } + } + return append(attributes, h.Attribute{Namespace: "", Key: k, Val: v}) +} + +func (fs *footnotes) add(f FootnoteLink) int { + if i, ok := fs.mapping[f.Name]; ok && f.Name != "" { + return i + } + fs.list = append(fs.list, f.Definition) + i := len(fs.list) - 1 + if f.Name != "" { + fs.mapping[f.Name] = i + } + return i +} + +func (fs *footnotes) updateDefinition(f FootnoteDefinition) { + if i, ok := fs.mapping[f.Name]; ok { + fs.list[i] = &f + } +} diff --git a/vendor/github.com/niklasfasching/go-org/org/inline.go b/vendor/github.com/niklasfasching/go-org/org/inline.go new file mode 100644 index 0000000000..02d5a15341 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/inline.go @@ -0,0 +1,357 @@ +package org + +import ( + "fmt" + "path" + "regexp" + "strings" + "time" + "unicode" +) + +type Text struct { + Content string + IsRaw bool +} + +type LineBreak struct{ Count int } +type ExplicitLineBreak struct{} + +type StatisticToken struct{ Content string } + +type Timestamp struct { + Time time.Time + IsDate bool + Interval string +} + +type Emphasis struct { + Kind string + Content []Node +} + +type LatexFragment struct { + OpeningPair string + ClosingPair string + Content []Node +} + +type FootnoteLink struct { + Name string + Definition *FootnoteDefinition +} + +type RegularLink struct { + Protocol string + Description []Node + URL string + AutoLink bool +} + +var validURLCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~:/?#[]@!$&'()*+,;=" +var autolinkProtocols = regexp.MustCompile(`^(https?|ftp|file)$`) +var imageExtensionRegexp = regexp.MustCompile(`^[.](png|gif|jpe?g|svg|tiff?)$`) +var videoExtensionRegexp = regexp.MustCompile(`^[.](webm|mp4)$`) + +var subScriptSuperScriptRegexp = regexp.MustCompile(`^([_^]){([^{}]+?)}`) +var timestampRegexp = regexp.MustCompile(`^<(\d{4}-\d{2}-\d{2})( [A-Za-z]+)?( \d{2}:\d{2})?( \+\d+[dwmy])?>`) +var footnoteRegexp = 
regexp.MustCompile(`^\[fn:([\w-]*?)(:(.*?))?\]`) +var statisticsTokenRegexp = regexp.MustCompile(`^\[(\d+/\d+|\d+%)\]`) +var latexFragmentRegexp = regexp.MustCompile(`(?s)^\\begin{(\w+)}(.*)\\end{(\w+)}`) + +var timestampFormat = "2006-01-02 Mon 15:04" +var datestampFormat = "2006-01-02 Mon" + +var latexFragmentPairs = map[string]string{ + `\(`: `\)`, + `\[`: `\]`, + `$$`: `$$`, +} + +func (d *Document) parseInline(input string) (nodes []Node) { + previous, current := 0, 0 + for current < len(input) { + rewind, consumed, node := 0, 0, (Node)(nil) + switch input[current] { + case '^': + consumed, node = d.parseSubOrSuperScript(input, current) + case '_': + consumed, node = d.parseSubScriptOrEmphasis(input, current) + case '*', '/', '+': + consumed, node = d.parseEmphasis(input, current, false) + case '=', '~': + consumed, node = d.parseEmphasis(input, current, true) + case '[': + consumed, node = d.parseOpeningBracket(input, current) + case '<': + consumed, node = d.parseTimestamp(input, current) + case '\\': + consumed, node = d.parseExplicitLineBreakOrLatexFragment(input, current) + case '$': + consumed, node = d.parseLatexFragment(input, current) + case '\n': + consumed, node = d.parseLineBreak(input, current) + case ':': + rewind, consumed, node = d.parseAutoLink(input, current) + current -= rewind + } + if consumed != 0 { + if current > previous { + nodes = append(nodes, Text{input[previous:current], false}) + } + if node != nil { + nodes = append(nodes, node) + } + current += consumed + previous = current + } else { + current++ + } + } + + if previous < len(input) { + nodes = append(nodes, Text{input[previous:], false}) + } + return nodes +} + +func (d *Document) parseRawInline(input string) (nodes []Node) { + previous, current := 0, 0 + for current < len(input) { + if input[current] == '\n' { + consumed, node := d.parseLineBreak(input, current) + if current > previous { + nodes = append(nodes, Text{input[previous:current], true}) + } + nodes = append(nodes, node) + current += consumed + previous = current + } else { + current++ + } + } + if previous < len(input) { + nodes = append(nodes, Text{input[previous:], true}) + } + return nodes +} + +func (d *Document) parseLineBreak(input string, start int) (int, Node) { + i := start + for ; i < len(input) && input[i] == '\n'; i++ { + } + return i - start, LineBreak{i - start} +} + +func (d *Document) parseExplicitLineBreakOrLatexFragment(input string, start int) (int, Node) { + switch { + case start+2 >= len(input): + case input[start+1] == '\\' && start != 0 && input[start-1] != '\n': + for i := start + 2; unicode.IsSpace(rune(input[i])); i++ { + if i >= len(input) || input[i] == '\n' { + return i + 1 - start, ExplicitLineBreak{} + } + } + case input[start+1] == '(' || input[start+1] == '[': + return d.parseLatexFragment(input, start) + case strings.Index(input[start:], `\begin{`) == 0: + if m := latexFragmentRegexp.FindStringSubmatch(input[start:]); m != nil { + if open, content, close := m[1], m[2], m[3]; open == close { + openingPair, closingPair := `\begin{`+open+`}`, `\end{`+close+`}` + i := strings.Index(input[start:], closingPair) + return i + len(closingPair), LatexFragment{openingPair, closingPair, d.parseRawInline(content)} + } + } + } + return 0, nil +} + +func (d *Document) parseLatexFragment(input string, start int) (int, Node) { + if start+2 >= len(input) { + return 0, nil + } + openingPair := input[start : start+2] + closingPair := latexFragmentPairs[openingPair] + if i := strings.Index(input[start+2:], closingPair); i != -1 
{ + content := d.parseRawInline(input[start+2 : start+2+i]) + return i + 2 + 2, LatexFragment{openingPair, closingPair, content} + } + return 0, nil +} + +func (d *Document) parseSubOrSuperScript(input string, start int) (int, Node) { + if m := subScriptSuperScriptRegexp.FindStringSubmatch(input[start:]); m != nil { + return len(m[2]) + 3, Emphasis{m[1] + "{}", []Node{Text{m[2], false}}} + } + return 0, nil +} + +func (d *Document) parseSubScriptOrEmphasis(input string, start int) (int, Node) { + if consumed, node := d.parseSubOrSuperScript(input, start); consumed != 0 { + return consumed, node + } + return d.parseEmphasis(input, start, false) +} + +func (d *Document) parseOpeningBracket(input string, start int) (int, Node) { + if len(input[start:]) >= 2 && input[start] == '[' && input[start+1] == '[' { + return d.parseRegularLink(input, start) + } else if footnoteRegexp.MatchString(input[start:]) { + return d.parseFootnoteReference(input, start) + } else if statisticsTokenRegexp.MatchString(input[start:]) { + return d.parseStatisticToken(input, start) + } + return 0, nil +} + +func (d *Document) parseFootnoteReference(input string, start int) (int, Node) { + if m := footnoteRegexp.FindStringSubmatch(input[start:]); m != nil { + name, definition := m[1], m[3] + if name == "" && definition == "" { + return 0, nil + } + link := FootnoteLink{name, nil} + if definition != "" { + link.Definition = &FootnoteDefinition{name, []Node{Paragraph{d.parseInline(definition)}}, true} + } + return len(m[0]), link + } + return 0, nil +} + +func (d *Document) parseStatisticToken(input string, start int) (int, Node) { + if m := statisticsTokenRegexp.FindStringSubmatch(input[start:]); m != nil { + return len(m[1]) + 2, StatisticToken{m[1]} + } + return 0, nil +} + +func (d *Document) parseAutoLink(input string, start int) (int, int, Node) { + if !d.AutoLink || start == 0 || len(input[start:]) < 3 || input[start:start+3] != "://" { + return 0, 0, nil + } + protocolStart, protocol := start-1, "" + for ; protocolStart > 0; protocolStart-- { + if !unicode.IsLetter(rune(input[protocolStart])) { + protocolStart++ + break + } + } + if m := autolinkProtocols.FindStringSubmatch(input[protocolStart:start]); m != nil { + protocol = m[1] + } else { + return 0, 0, nil + } + end := start + for ; end < len(input) && strings.ContainsRune(validURLCharacters, rune(input[end])); end++ { + } + path := input[start:end] + if path == "://" { + return 0, 0, nil + } + return len(protocol), len(path + protocol), RegularLink{protocol, nil, protocol + path, true} +} + +func (d *Document) parseRegularLink(input string, start int) (int, Node) { + input = input[start:] + if len(input) < 3 || input[:2] != "[[" || input[2] == '[' { + return 0, nil + } + end := strings.Index(input, "]]") + if end == -1 { + return 0, nil + } + rawLinkParts := strings.Split(input[2:end], "][") + description, link := ([]Node)(nil), rawLinkParts[0] + if len(rawLinkParts) == 2 { + link, description = rawLinkParts[0], d.parseInline(rawLinkParts[1]) + } + if strings.ContainsRune(link, '\n') { + return 0, nil + } + consumed := end + 2 + protocol, linkParts := "", strings.SplitN(link, ":", 2) + if len(linkParts) == 2 { + protocol = linkParts[0] + } + return consumed, RegularLink{protocol, description, link, false} +} + +func (d *Document) parseTimestamp(input string, start int) (int, Node) { + if m := timestampRegexp.FindStringSubmatch(input[start:]); m != nil { + ddmmyy, hhmm, interval, isDate := m[1], m[3], strings.TrimSpace(m[4]), false + if hhmm == "" { + hhmm, 
isDate = "00:00", true + } + t, err := time.Parse(timestampFormat, fmt.Sprintf("%s Mon %s", ddmmyy, hhmm)) + if err != nil { + return 0, nil + } + timestamp := Timestamp{t, isDate, interval} + return len(m[0]), timestamp + } + return 0, nil +} + +func (d *Document) parseEmphasis(input string, start int, isRaw bool) (int, Node) { + marker, i := input[start], start + if !hasValidPreAndBorderChars(input, i) { + return 0, nil + } + for i, consumedNewLines := i+1, 0; i < len(input) && consumedNewLines <= d.MaxEmphasisNewLines; i++ { + if input[i] == '\n' { + consumedNewLines++ + } + + if input[i] == marker && i != start+1 && hasValidPostAndBorderChars(input, i) { + if isRaw { + return i + 1 - start, Emphasis{input[start : start+1], d.parseRawInline(input[start+1 : i])} + } + return i + 1 - start, Emphasis{input[start : start+1], d.parseInline(input[start+1 : i])} + } + } + return 0, nil +} + +// see org-emphasis-regexp-components (emacs elisp variable) + +func hasValidPreAndBorderChars(input string, i int) bool { + return (i+1 >= len(input) || isValidBorderChar(rune(input[i+1]))) && (i == 0 || isValidPreChar(rune(input[i-1]))) +} + +func hasValidPostAndBorderChars(input string, i int) bool { + return (i == 0 || isValidBorderChar(rune(input[i-1]))) && (i+1 >= len(input) || isValidPostChar(rune(input[i+1]))) +} + +func isValidPreChar(r rune) bool { + return unicode.IsSpace(r) || strings.ContainsRune(`-({'"`, r) +} + +func isValidPostChar(r rune) bool { + return unicode.IsSpace(r) || strings.ContainsRune(`-.,:!?;'")}[`, r) +} + +func isValidBorderChar(r rune) bool { return !unicode.IsSpace(r) } + +func (l RegularLink) Kind() string { + if p := l.Protocol; l.Description != nil || (p != "" && p != "file" && p != "http" && p != "https") { + return "regular" + } + if imageExtensionRegexp.MatchString(path.Ext(l.URL)) { + return "image" + } + if videoExtensionRegexp.MatchString(path.Ext(l.URL)) { + return "video" + } + return "regular" +} + +func (n Text) String() string { return orgWriter.nodesAsString(n) } +func (n LineBreak) String() string { return orgWriter.nodesAsString(n) } +func (n ExplicitLineBreak) String() string { return orgWriter.nodesAsString(n) } +func (n StatisticToken) String() string { return orgWriter.nodesAsString(n) } +func (n Emphasis) String() string { return orgWriter.nodesAsString(n) } +func (n LatexFragment) String() string { return orgWriter.nodesAsString(n) } +func (n FootnoteLink) String() string { return orgWriter.nodesAsString(n) } +func (n RegularLink) String() string { return orgWriter.nodesAsString(n) } +func (n Timestamp) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/keyword.go b/vendor/github.com/niklasfasching/go-org/org/keyword.go new file mode 100644 index 0000000000..776241797b --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/keyword.go @@ -0,0 +1,184 @@ +package org + +import ( + "bytes" + "path/filepath" + "regexp" + "strings" +) + +type Comment struct{ Content string } + +type Keyword struct { + Key string + Value string +} + +type NodeWithName struct { + Name string + Node Node +} + +type NodeWithMeta struct { + Node Node + Meta Metadata +} + +type Metadata struct { + Caption [][]Node + HTMLAttributes [][]string +} + +type Include struct { + Keyword + Resolve func() Node +} + +var keywordRegexp = regexp.MustCompile(`^(\s*)#\+([^:]+):(\s+(.*)|$)`) +var commentRegexp = regexp.MustCompile(`^(\s*)#(.*)`) + +var includeFileRegexp = regexp.MustCompile(`(?i)^"([^"]+)" (src|example|export) 
(\w+)$`) +var attributeRegexp = regexp.MustCompile(`(?:^|\s+)(:[-\w]+)\s+(.*)$`) + +func lexKeywordOrComment(line string) (token, bool) { + if m := keywordRegexp.FindStringSubmatch(line); m != nil { + return token{"keyword", len(m[1]), m[2], m}, true + } else if m := commentRegexp.FindStringSubmatch(line); m != nil { + return token{"comment", len(m[1]), m[2], m}, true + } + return nilToken, false +} + +func (d *Document) parseComment(i int, stop stopFn) (int, Node) { + return 1, Comment{d.tokens[i].content} +} + +func (d *Document) parseKeyword(i int, stop stopFn) (int, Node) { + k := parseKeyword(d.tokens[i]) + switch k.Key { + case "NAME": + return d.parseNodeWithName(k, i, stop) + case "SETUPFILE": + return d.loadSetupFile(k) + case "INCLUDE": + return d.parseInclude(k) + case "CAPTION", "ATTR_HTML": + consumed, node := d.parseAffiliated(i, stop) + if consumed != 0 { + return consumed, node + } + fallthrough + default: + if _, ok := d.BufferSettings[k.Key]; ok { + d.BufferSettings[k.Key] = strings.Join([]string{d.BufferSettings[k.Key], k.Value}, "\n") + } else { + d.BufferSettings[k.Key] = k.Value + } + return 1, k + } +} + +func (d *Document) parseNodeWithName(k Keyword, i int, stop stopFn) (int, Node) { + if stop(d, i+1) { + return 0, nil + } + consumed, node := d.parseOne(i+1, stop) + if consumed == 0 || node == nil { + return 0, nil + } + d.NamedNodes[k.Value] = node + return consumed + 1, NodeWithName{k.Value, node} +} + +func (d *Document) parseAffiliated(i int, stop stopFn) (int, Node) { + start, meta := i, Metadata{} + for ; !stop(d, i) && d.tokens[i].kind == "keyword"; i++ { + switch k := parseKeyword(d.tokens[i]); k.Key { + case "CAPTION": + meta.Caption = append(meta.Caption, d.parseInline(k.Value)) + case "ATTR_HTML": + attributes, rest := []string{}, k.Value + for { + if k, m := "", attributeRegexp.FindStringSubmatch(rest); m != nil { + k, rest = m[1], m[2] + attributes = append(attributes, k) + if v, m := "", attributeRegexp.FindStringSubmatchIndex(rest); m != nil { + v, rest = rest[:m[0]], rest[m[0]:] + attributes = append(attributes, v) + } else { + attributes = append(attributes, strings.TrimSpace(rest)) + break + } + } else { + break + } + } + meta.HTMLAttributes = append(meta.HTMLAttributes, attributes) + default: + return 0, nil + } + } + if stop(d, i) { + return 0, nil + } + consumed, node := d.parseOne(i, stop) + if consumed == 0 || node == nil { + return 0, nil + } + i += consumed + return i - start, NodeWithMeta{node, meta} +} + +func parseKeyword(t token) Keyword { + k, v := t.matches[2], t.matches[4] + return Keyword{strings.ToUpper(k), strings.TrimSpace(v)} +} + +func (d *Document) parseInclude(k Keyword) (int, Node) { + resolve := func() Node { + d.Log.Printf("Bad include %#v", k) + return k + } + if m := includeFileRegexp.FindStringSubmatch(k.Value); m != nil { + path, kind, lang := m[1], m[2], m[3] + if !filepath.IsAbs(path) { + path = filepath.Join(filepath.Dir(d.Path), path) + } + resolve = func() Node { + bs, err := d.ReadFile(path) + if err != nil { + d.Log.Printf("Bad include %#v: %s", k, err) + return k + } + return Block{strings.ToUpper(kind), []string{lang}, d.parseRawInline(string(bs))} + } + } + return 1, Include{k, resolve} +} + +func (d *Document) loadSetupFile(k Keyword) (int, Node) { + path := k.Value + if !filepath.IsAbs(path) { + path = filepath.Join(filepath.Dir(d.Path), path) + } + bs, err := d.ReadFile(path) + if err != nil { + d.Log.Printf("Bad setup file: %#v: %s", k, err) + return 1, k + } + setupDocument := 
d.Configuration.Parse(bytes.NewReader(bs), path) + if err := setupDocument.Error; err != nil { + d.Log.Printf("Bad setup file: %#v: %s", k, err) + return 1, k + } + for k, v := range setupDocument.BufferSettings { + d.BufferSettings[k] = v + } + return 1, k +} + +func (n Comment) String() string { return orgWriter.nodesAsString(n) } +func (n Keyword) String() string { return orgWriter.nodesAsString(n) } +func (n NodeWithMeta) String() string { return orgWriter.nodesAsString(n) } +func (n NodeWithName) String() string { return orgWriter.nodesAsString(n) } +func (n Include) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/list.go b/vendor/github.com/niklasfasching/go-org/org/list.go new file mode 100644 index 0000000000..6ba28f6fe4 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/list.go @@ -0,0 +1,114 @@ +package org + +import ( + "fmt" + "regexp" + "strings" + "unicode" +) + +type List struct { + Kind string + Items []Node +} + +type ListItem struct { + Bullet string + Status string + Children []Node +} + +type DescriptiveListItem struct { + Bullet string + Status string + Term []Node + Details []Node +} + +var unorderedListRegexp = regexp.MustCompile(`^(\s*)([+*-])(\s+(.*)|$)`) +var orderedListRegexp = regexp.MustCompile(`^(\s*)(([0-9]+|[a-zA-Z])[.)])(\s+(.*)|$)`) +var descriptiveListItemRegexp = regexp.MustCompile(`\s::(\s|$)`) +var listItemStatusRegexp = regexp.MustCompile(`\[( |X|-)\]\s`) + +func lexList(line string) (token, bool) { + if m := unorderedListRegexp.FindStringSubmatch(line); m != nil { + return token{"unorderedList", len(m[1]), m[4], m}, true + } else if m := orderedListRegexp.FindStringSubmatch(line); m != nil { + return token{"orderedList", len(m[1]), m[5], m}, true + } + return nilToken, false +} + +func isListToken(t token) bool { + return t.kind == "unorderedList" || t.kind == "orderedList" +} + +func listKind(t token) (string, string) { + kind := "" + switch bullet := t.matches[2]; { + case bullet == "*" || bullet == "+" || bullet == "-": + kind = "unordered" + case unicode.IsLetter(rune(bullet[0])), unicode.IsDigit(rune(bullet[0])): + kind = "ordered" + default: + panic(fmt.Sprintf("bad list bullet '%s': %#v", bullet, t)) + } + if descriptiveListItemRegexp.MatchString(t.content) { + return kind, "descriptive" + } + return kind, kind +} + +func (d *Document) parseList(i int, parentStop stopFn) (int, Node) { + start, lvl := i, d.tokens[i].lvl + listMainKind, kind := listKind(d.tokens[i]) + list := List{Kind: kind} + stop := func(*Document, int) bool { + if parentStop(d, i) || d.tokens[i].lvl != lvl || !isListToken(d.tokens[i]) { + return true + } + itemMainKind, _ := listKind(d.tokens[i]) + return itemMainKind != listMainKind + } + for !stop(d, i) { + consumed, node := d.parseListItem(list, i, parentStop) + i += consumed + list.Items = append(list.Items, node) + } + return i - start, list +} + +func (d *Document) parseListItem(l List, i int, parentStop stopFn) (int, Node) { + start, nodes, bullet := i, []Node{}, d.tokens[i].matches[2] + minIndent, dterm, content, status := d.tokens[i].lvl+len(bullet), "", d.tokens[i].content, "" + if m := listItemStatusRegexp.FindStringSubmatch(content); m != nil { + status, content = m[1], content[len("[ ] "):] + } + if l.Kind == "descriptive" { + if m := descriptiveListItemRegexp.FindStringIndex(content); m != nil { + dterm, content = content[:m[0]], content[m[1]:] + } + } + + d.tokens[i] = tokenize(strings.Repeat(" ", minIndent) + content) + stop := func(d 
*Document, i int) bool { + if parentStop(d, i) { + return true + } + t := d.tokens[i] + return t.lvl < minIndent && !(t.kind == "text" && t.content == "") + } + for !stop(d, i) && (i <= start+1 || !isSecondBlankLine(d, i)) { + consumed, node := d.parseOne(i, stop) + i += consumed + nodes = append(nodes, node) + } + if l.Kind == "descriptive" { + return i - start, DescriptiveListItem{bullet, status, d.parseInline(dterm), nodes} + } + return i - start, ListItem{bullet, status, nodes} +} + +func (n List) String() string { return orgWriter.nodesAsString(n) } +func (n ListItem) String() string { return orgWriter.nodesAsString(n) } +func (n DescriptiveListItem) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/org_writer.go b/vendor/github.com/niklasfasching/go-org/org/org_writer.go new file mode 100644 index 0000000000..d574cda527 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/org_writer.go @@ -0,0 +1,334 @@ +package org + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// OrgWriter export an org document into pretty printed org document. +type OrgWriter struct { + ExtendingWriter Writer + TagsColumn int + + strings.Builder + indent string +} + +var emphasisOrgBorders = map[string][]string{ + "_": []string{"_", "_"}, + "*": []string{"*", "*"}, + "/": []string{"/", "/"}, + "+": []string{"+", "+"}, + "~": []string{"~", "~"}, + "=": []string{"=", "="}, + "_{}": []string{"_{", "}"}, + "^{}": []string{"^{", "}"}, +} + +func NewOrgWriter() *OrgWriter { + return &OrgWriter{ + TagsColumn: 77, + } +} + +func (w *OrgWriter) WriterWithExtensions() Writer { + if w.ExtendingWriter != nil { + return w.ExtendingWriter + } + return w +} + +func (w *OrgWriter) Before(d *Document) {} +func (w *OrgWriter) After(d *Document) {} + +func (w *OrgWriter) emptyClone() *OrgWriter { + wcopy := *w + wcopy.Builder = strings.Builder{} + return &wcopy +} + +func (w *OrgWriter) nodesAsString(nodes ...Node) string { + tmp := w.emptyClone() + WriteNodes(tmp, nodes...) + return tmp.String() +} + +func (w *OrgWriter) WriteHeadline(h Headline) { + tmp := w.emptyClone() + tmp.WriteString(strings.Repeat("*", h.Lvl)) + if h.Status != "" { + tmp.WriteString(" " + h.Status) + } + if h.Priority != "" { + tmp.WriteString(" [#" + h.Priority + "]") + } + tmp.WriteString(" ") + WriteNodes(tmp, h.Title...) + hString := tmp.String() + if len(h.Tags) != 0 { + tString := ":" + strings.Join(h.Tags, ":") + ":" + if n := w.TagsColumn - len(tString) - len(hString); n > 0 { + w.WriteString(hString + strings.Repeat(" ", n) + tString) + } else { + w.WriteString(hString + " " + tString) + } + } else { + w.WriteString(hString) + } + w.WriteString("\n") + if len(h.Children) != 0 { + w.WriteString(w.indent) + } + if h.Properties != nil { + WriteNodes(w, *h.Properties) + } + WriteNodes(w, h.Children...) +} + +func (w *OrgWriter) WriteBlock(b Block) { + w.WriteString(w.indent + "#+BEGIN_" + b.Name) + if len(b.Parameters) != 0 { + w.WriteString(" " + strings.Join(b.Parameters, " ")) + } + w.WriteString("\n") + if isRawTextBlock(b.Name) { + w.WriteString(w.indent) + } + WriteNodes(w, b.Children...) + if !isRawTextBlock(b.Name) { + w.WriteString(w.indent) + } + w.WriteString("#+END_" + b.Name + "\n") +} + +func (w *OrgWriter) WriteDrawer(d Drawer) { + w.WriteString(w.indent + ":" + d.Name + ":\n") + WriteNodes(w, d.Children...) 
+ w.WriteString(w.indent + ":END:\n") +} + +func (w *OrgWriter) WritePropertyDrawer(d PropertyDrawer) { + w.WriteString(":PROPERTIES:\n") + for _, kvPair := range d.Properties { + k, v := kvPair[0], kvPair[1] + if v != "" { + v = " " + v + } + w.WriteString(fmt.Sprintf(":%s:%s\n", k, v)) + } + w.WriteString(":END:\n") +} + +func (w *OrgWriter) WriteFootnoteDefinition(f FootnoteDefinition) { + w.WriteString(fmt.Sprintf("[fn:%s]", f.Name)) + content := w.nodesAsString(f.Children...) + if content != "" && !unicode.IsSpace(rune(content[0])) { + w.WriteString(" ") + } + w.WriteString(content) +} + +func (w *OrgWriter) WriteParagraph(p Paragraph) { + content := w.nodesAsString(p.Children...) + if len(content) > 0 && content[0] != '\n' { + w.WriteString(w.indent) + } + w.WriteString(content + "\n") +} + +func (w *OrgWriter) WriteExample(e Example) { + for _, n := range e.Children { + w.WriteString(w.indent + ":") + if content := w.nodesAsString(n); content != "" { + w.WriteString(" " + content) + } + w.WriteString("\n") + } +} + +func (w *OrgWriter) WriteKeyword(k Keyword) { + w.WriteString(w.indent + "#+" + k.Key + ":") + if k.Value != "" { + w.WriteString(" " + k.Value) + } + w.WriteString("\n") +} + +func (w *OrgWriter) WriteInclude(i Include) { + w.WriteKeyword(i.Keyword) +} + +func (w *OrgWriter) WriteNodeWithMeta(n NodeWithMeta) { + for _, ns := range n.Meta.Caption { + w.WriteString("#+CAPTION: ") + WriteNodes(w, ns...) + w.WriteString("\n") + } + for _, attributes := range n.Meta.HTMLAttributes { + w.WriteString("#+ATTR_HTML: ") + w.WriteString(strings.Join(attributes, " ") + "\n") + } + WriteNodes(w, n.Node) +} + +func (w *OrgWriter) WriteNodeWithName(n NodeWithName) { + w.WriteString(fmt.Sprintf("#+NAME: %s\n", n.Name)) + WriteNodes(w, n.Node) +} + +func (w *OrgWriter) WriteComment(c Comment) { + w.WriteString(w.indent + "#" + c.Content + "\n") +} + +func (w *OrgWriter) WriteList(l List) { WriteNodes(w, l.Items...) } + +func (w *OrgWriter) WriteListItem(li ListItem) { + liWriter := w.emptyClone() + liWriter.indent = w.indent + strings.Repeat(" ", len(li.Bullet)+1) + WriteNodes(liWriter, li.Children...) + content := strings.TrimPrefix(liWriter.String(), liWriter.indent) + w.WriteString(w.indent + li.Bullet) + if li.Status != "" { + w.WriteString(fmt.Sprintf(" [%s]", li.Status)) + } + if len(content) > 0 && content[0] == '\n' { + w.WriteString(content) + } else { + w.WriteString(" " + content) + } +} + +func (w *OrgWriter) WriteDescriptiveListItem(di DescriptiveListItem) { + w.WriteString(w.indent + di.Bullet) + if di.Status != "" { + w.WriteString(fmt.Sprintf(" [%s]", di.Status)) + } + indent := w.indent + strings.Repeat(" ", len(di.Bullet)+1) + if len(di.Term) != 0 { + term := w.nodesAsString(di.Term...) + w.WriteString(" " + term + " ::") + indent = indent + strings.Repeat(" ", len(term)+4) + } + diWriter := w.emptyClone() + diWriter.indent = indent + WriteNodes(diWriter, di.Details...) 
+ details := strings.TrimPrefix(diWriter.String(), diWriter.indent) + if len(details) > 0 && details[0] == '\n' { + w.WriteString(details) + } else { + w.WriteString(" " + details) + } +} + +func (w *OrgWriter) WriteTable(t Table) { + for _, row := range t.Rows { + w.WriteString(w.indent) + if len(row.Columns) == 0 { + w.WriteString(`|`) + for i := 0; i < len(t.ColumnInfos); i++ { + w.WriteString(strings.Repeat("-", t.ColumnInfos[i].Len+2)) + if i < len(t.ColumnInfos)-1 { + w.WriteString("+") + } + } + w.WriteString(`|`) + + } else { + w.WriteString(`|`) + for _, column := range row.Columns { + w.WriteString(` `) + content := w.nodesAsString(column.Children...) + if content == "" { + content = " " + } + n := column.Len - utf8.RuneCountInString(content) + if n < 0 { + n = 0 + } + if column.Align == "center" { + if n%2 != 0 { + w.WriteString(" ") + } + w.WriteString(strings.Repeat(" ", n/2) + content + strings.Repeat(" ", n/2)) + } else if column.Align == "right" { + w.WriteString(strings.Repeat(" ", n) + content) + } else { + w.WriteString(content + strings.Repeat(" ", n)) + } + w.WriteString(` |`) + } + } + w.WriteString("\n") + } +} + +func (w *OrgWriter) WriteHorizontalRule(hr HorizontalRule) { + w.WriteString(w.indent + "-----\n") +} + +func (w *OrgWriter) WriteText(t Text) { w.WriteString(t.Content) } + +func (w *OrgWriter) WriteEmphasis(e Emphasis) { + borders, ok := emphasisOrgBorders[e.Kind] + if !ok { + panic(fmt.Sprintf("bad emphasis %#v", e)) + } + w.WriteString(borders[0]) + WriteNodes(w, e.Content...) + w.WriteString(borders[1]) +} + +func (w *OrgWriter) WriteLatexFragment(l LatexFragment) { + w.WriteString(l.OpeningPair) + WriteNodes(w, l.Content...) + w.WriteString(l.ClosingPair) +} + +func (w *OrgWriter) WriteStatisticToken(s StatisticToken) { + w.WriteString(fmt.Sprintf("[%s]", s.Content)) +} + +func (w *OrgWriter) WriteLineBreak(l LineBreak) { + w.WriteString(strings.Repeat("\n"+w.indent, l.Count)) +} + +func (w *OrgWriter) WriteExplicitLineBreak(l ExplicitLineBreak) { + w.WriteString(`\\` + "\n" + w.indent) +} + +func (w *OrgWriter) WriteTimestamp(t Timestamp) { + w.WriteString("<") + if t.IsDate { + w.WriteString(t.Time.Format(datestampFormat)) + } else { + w.WriteString(t.Time.Format(timestampFormat)) + } + if t.Interval != "" { + w.WriteString(" " + t.Interval) + } + w.WriteString(">") +} + +func (w *OrgWriter) WriteFootnoteLink(l FootnoteLink) { + w.WriteString("[fn:" + l.Name) + if l.Definition != nil { + w.WriteString(":") + WriteNodes(w, l.Definition.Children[0].(Paragraph).Children...) + } + w.WriteString("]") +} + +func (w *OrgWriter) WriteRegularLink(l RegularLink) { + if l.AutoLink { + w.WriteString(l.URL) + } else if l.Description == nil { + w.WriteString(fmt.Sprintf("[[%s]]", l.URL)) + } else { + descriptionWriter := w.emptyClone() + WriteNodes(descriptionWriter, l.Description...) 
+ description := descriptionWriter.String() + w.WriteString(fmt.Sprintf("[[%s][%s]]", l.URL, description)) + } +} diff --git a/vendor/github.com/niklasfasching/go-org/org/paragraph.go b/vendor/github.com/niklasfasching/go-org/org/paragraph.go new file mode 100644 index 0000000000..b7d3ea92ce --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/paragraph.go @@ -0,0 +1,46 @@ +package org + +import ( + "regexp" + "strings" +) + +type Paragraph struct{ Children []Node } +type HorizontalRule struct{} + +var horizontalRuleRegexp = regexp.MustCompile(`^(\s*)-{5,}\s*$`) +var plainTextRegexp = regexp.MustCompile(`^(\s*)(.*)`) + +func lexText(line string) (token, bool) { + if m := plainTextRegexp.FindStringSubmatch(line); m != nil { + return token{"text", len(m[1]), m[2], m}, true + } + return nilToken, false +} + +func lexHorizontalRule(line string) (token, bool) { + if m := horizontalRuleRegexp.FindStringSubmatch(line); m != nil { + return token{"horizontalRule", len(m[1]), "", m}, true + } + return nilToken, false +} + +func (d *Document) parseParagraph(i int, parentStop stopFn) (int, Node) { + lines, start := []string{d.tokens[i].content}, i + i++ + stop := func(d *Document, i int) bool { + return parentStop(d, i) || d.tokens[i].kind != "text" || d.tokens[i].content == "" + } + for ; !stop(d, i); i++ { + lines = append(lines, d.tokens[i].content) + } + consumed := i - start + return consumed, Paragraph{d.parseInline(strings.Join(lines, "\n"))} +} + +func (d *Document) parseHorizontalRule(i int, parentStop stopFn) (int, Node) { + return 1, HorizontalRule{} +} + +func (n Paragraph) String() string { return orgWriter.nodesAsString(n) } +func (n HorizontalRule) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/table.go b/vendor/github.com/niklasfasching/go-org/org/table.go new file mode 100644 index 0000000000..a404e1a9f2 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/table.go @@ -0,0 +1,130 @@ +package org + +import ( + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +type Table struct { + Rows []Row + ColumnInfos []ColumnInfo +} + +type Row struct { + Columns []Column + IsSpecial bool +} + +type Column struct { + Children []Node + *ColumnInfo +} + +type ColumnInfo struct { + Align string + Len int +} + +var tableSeparatorRegexp = regexp.MustCompile(`^(\s*)(\|[+-|]*)\s*$`) +var tableRowRegexp = regexp.MustCompile(`^(\s*)(\|.*)`) + +var columnAlignRegexp = regexp.MustCompile(`^<(l|c|r)>$`) + +func lexTable(line string) (token, bool) { + if m := tableSeparatorRegexp.FindStringSubmatch(line); m != nil { + return token{"tableSeparator", len(m[1]), m[2], m}, true + } else if m := tableRowRegexp.FindStringSubmatch(line); m != nil { + return token{"tableRow", len(m[1]), m[2], m}, true + } + return nilToken, false +} + +func (d *Document) parseTable(i int, parentStop stopFn) (int, Node) { + rawRows, start := [][]string{}, i + for ; !parentStop(d, i); i++ { + if t := d.tokens[i]; t.kind == "tableRow" { + rawRow := strings.FieldsFunc(d.tokens[i].content, func(r rune) bool { return r == '|' }) + for i := range rawRow { + rawRow[i] = strings.TrimSpace(rawRow[i]) + } + rawRows = append(rawRows, rawRow) + } else if t.kind == "tableSeparator" { + rawRows = append(rawRows, nil) + } else { + break + } + } + + table := Table{nil, getColumnInfos(rawRows)} + for _, rawColumns := range rawRows { + row := Row{nil, isSpecialRow(rawColumns)} + if len(rawColumns) != 0 { + for i := range table.ColumnInfos { + column := 
Column{nil, &table.ColumnInfos[i]} + if i < len(rawColumns) { + column.Children = d.parseInline(rawColumns[i]) + } + row.Columns = append(row.Columns, column) + } + } + table.Rows = append(table.Rows, row) + } + return i - start, table +} + +func getColumnInfos(rows [][]string) []ColumnInfo { + columnCount := 0 + for _, columns := range rows { + if n := len(columns); n > columnCount { + columnCount = n + } + } + + columnInfos := make([]ColumnInfo, columnCount) + for i := 0; i < columnCount; i++ { + countNumeric, countNonNumeric := 0, 0 + for _, columns := range rows { + if i >= len(columns) { + continue + } + + if n := utf8.RuneCountInString(columns[i]); n > columnInfos[i].Len { + columnInfos[i].Len = n + } + + if m := columnAlignRegexp.FindStringSubmatch(columns[i]); m != nil && isSpecialRow(columns) { + switch m[1] { + case "l": + columnInfos[i].Align = "left" + case "c": + columnInfos[i].Align = "center" + case "r": + columnInfos[i].Align = "right" + } + } else if _, err := strconv.ParseFloat(columns[i], 32); err == nil { + countNumeric++ + } else if strings.TrimSpace(columns[i]) != "" { + countNonNumeric++ + } + } + + if columnInfos[i].Align == "" && countNumeric >= countNonNumeric { + columnInfos[i].Align = "right" + } + } + return columnInfos +} + +func isSpecialRow(rawColumns []string) bool { + isAlignRow := true + for _, rawColumn := range rawColumns { + if !columnAlignRegexp.MatchString(rawColumn) && rawColumn != "" { + isAlignRow = false + } + } + return isAlignRow +} + +func (n Table) String() string { return orgWriter.nodesAsString(n) } diff --git a/vendor/github.com/niklasfasching/go-org/org/util.go b/vendor/github.com/niklasfasching/go-org/org/util.go new file mode 100644 index 0000000000..c25bf27ee2 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/util.go @@ -0,0 +1,19 @@ +package org + +func isSecondBlankLine(d *Document, i int) bool { + if i-1 <= 0 { + return false + } + t1, t2 := d.tokens[i-1], d.tokens[i] + if t1.kind == "text" && t2.kind == "text" && t1.content == "" && t2.content == "" { + return true + } + return false +} + +func isImageOrVideoLink(n Node) bool { + if l, ok := n.(RegularLink); ok && l.Kind() == "video" || l.Kind() == "image" { + return true + } + return false +} diff --git a/vendor/github.com/niklasfasching/go-org/org/writer.go b/vendor/github.com/niklasfasching/go-org/org/writer.go new file mode 100644 index 0000000000..c4aebd69f5 --- /dev/null +++ b/vendor/github.com/niklasfasching/go-org/org/writer.go @@ -0,0 +1,103 @@ +package org + +import "fmt" + +// Writer is the interface that is used to export a parsed document into a new format. See Document.Write(). +type Writer interface { + Before(*Document) // Before is called before any nodes are passed to the writer. + After(*Document) // After is called after all nodes have been passed to the writer. + String() string // String is called at the very end to retrieve the final output. 
+ + WriterWithExtensions() Writer + + WriteKeyword(Keyword) + WriteInclude(Include) + WriteComment(Comment) + WriteNodeWithMeta(NodeWithMeta) + WriteNodeWithName(NodeWithName) + WriteHeadline(Headline) + WriteBlock(Block) + WriteExample(Example) + WriteDrawer(Drawer) + WritePropertyDrawer(PropertyDrawer) + WriteList(List) + WriteListItem(ListItem) + WriteDescriptiveListItem(DescriptiveListItem) + WriteTable(Table) + WriteHorizontalRule(HorizontalRule) + WriteParagraph(Paragraph) + WriteText(Text) + WriteEmphasis(Emphasis) + WriteLatexFragment(LatexFragment) + WriteStatisticToken(StatisticToken) + WriteExplicitLineBreak(ExplicitLineBreak) + WriteLineBreak(LineBreak) + WriteRegularLink(RegularLink) + WriteTimestamp(Timestamp) + WriteFootnoteLink(FootnoteLink) + WriteFootnoteDefinition(FootnoteDefinition) +} + +func WriteNodes(w Writer, nodes ...Node) { + w = w.WriterWithExtensions() + for _, n := range nodes { + switch n := n.(type) { + case Keyword: + w.WriteKeyword(n) + case Include: + w.WriteInclude(n) + case Comment: + w.WriteComment(n) + case NodeWithMeta: + w.WriteNodeWithMeta(n) + case NodeWithName: + w.WriteNodeWithName(n) + case Headline: + w.WriteHeadline(n) + case Block: + w.WriteBlock(n) + case Example: + w.WriteExample(n) + case Drawer: + w.WriteDrawer(n) + case PropertyDrawer: + w.WritePropertyDrawer(n) + case List: + w.WriteList(n) + case ListItem: + w.WriteListItem(n) + case DescriptiveListItem: + w.WriteDescriptiveListItem(n) + case Table: + w.WriteTable(n) + case HorizontalRule: + w.WriteHorizontalRule(n) + case Paragraph: + w.WriteParagraph(n) + case Text: + w.WriteText(n) + case Emphasis: + w.WriteEmphasis(n) + case LatexFragment: + w.WriteLatexFragment(n) + case StatisticToken: + w.WriteStatisticToken(n) + case ExplicitLineBreak: + w.WriteExplicitLineBreak(n) + case LineBreak: + w.WriteLineBreak(n) + case RegularLink: + w.WriteRegularLink(n) + case Timestamp: + w.WriteTimestamp(n) + case FootnoteLink: + w.WriteFootnoteLink(n) + case FootnoteDefinition: + w.WriteFootnoteDefinition(n) + default: + if n != nil { + panic(fmt.Sprintf("bad node %T %#v", n, n)) + } + } + } +} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go deleted file mode 100644 index 9656c42a19..0000000000 --- a/vendor/github.com/russross/blackfriday/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package blackfriday is a Markdown processor. -// -// It translates plain text with simple formatting rules into HTML or LaTeX. -// -// Sanitized Anchor Names -// -// Blackfriday includes an algorithm for creating sanitized anchor names -// corresponding to a given input text. This algorithm is used to create -// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The -// algorithm is specified below, so that other packages can create -// compatible anchor names and links to those anchors. -// -// The algorithm iterates over the input text, interpreted as UTF-8, -// one Unicode code point (rune) at a time. All runes that are letters (category L) -// or numbers (category N) are considered valid characters. They are mapped to -// lower case, and included in the output. All other runes are considered -// invalid characters. Invalid characters that preceed the first valid character, -// as well as invalid character that follow the last valid character -// are dropped completely. All other sequences of invalid characters -// between two valid characters are replaced with a single dash character '-'. 
-// -// SanitizedAnchorName exposes this functionality, and can be used to -// create compatible links to the anchor names generated by blackfriday. -// This algorithm is also implemented in a small standalone package at -// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients -// that want a small package and don't need full functionality of blackfriday. -package blackfriday - -// NOTE: Keep Sanitized Anchor Name algorithm in sync with package -// github.com/shurcooL/sanitized_anchor_name. -// Otherwise, users of sanitized_anchor_name will get anchor names -// that are incompatible with those generated by blackfriday. diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go deleted file mode 100644 index e0a6c69c96..0000000000 --- a/vendor/github.com/russross/blackfriday/html.go +++ /dev/null @@ -1,938 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// HTML rendering backend -// -// - -package blackfriday - -import ( - "bytes" - "fmt" - "regexp" - "strconv" - "strings" -) - -// Html renderer configuration options. -const ( - HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks - HTML_SKIP_STYLE // skip embedded