This commit is contained in:
Mustafa Gezen 2023-02-17 20:00:41 +01:00
parent 69f985f637
commit 2b88f7b5c8
154 changed files with 10 additions and 27675 deletions

go.mod
View File

@ -62,6 +62,7 @@ require (
openapi.peridot.resf.org/peridotopenapi v0.0.0-00010101000000-000000000000
peridot.resf.org/common v0.0.0-00010101000000-000000000000
peridot.resf.org/obsidian/pb v0.0.0-00010101000000-000000000000
peridot.resf.org/peridot/admin/pb v0.0.0-00010101000000-000000000000 // indirect
peridot.resf.org/peridot/keykeeper/pb v0.0.0-00010101000000-000000000000
peridot.resf.org/peridot/pb v0.0.0-00010101000000-000000000000
peridot.resf.org/peridot/yumrepofs/pb v0.0.0-00010101000000-000000000000
@ -80,6 +81,7 @@ replace (
bazel.build/semver => ./bazel-bin/build/bazel/semver/semver_go_proto_/bazel.build/semver
peridot.resf.org/obsidian/pb => ./bazel-bin/obsidian/proto/v1/obsidianpb_go_proto_/peridot.resf.org/obsidian/pb
peridot.resf.org/peridot/pb => ./bazel-bin/peridot/proto/v1/peridotpb_go_proto_/peridot.resf.org/peridot/pb
peridot.resf.org/peridot/admin/pb => ./bazel-bin/peridot/proto/v1/admin/adminpb_go_proto_/peridot.resf.org/peridot/admin/pb
peridot.resf.org/peridot/keykeeper/pb => ./bazel-bin/peridot/proto/v1/keykeeper/keykeeperpb_go_proto_/peridot.resf.org/peridot/keykeeper/pb
peridot.resf.org/peridot/yumrepofs/pb => ./bazel-bin/peridot/proto/v1/yumrepofs/yumrepofspb_go_proto_/peridot.resf.org/peridot/yumrepofs/pb
peridot.resf.org/common => ./bazel-bin/proto/commonpb_go_proto_/peridot.resf.org/common

View File

@ -4,7 +4,6 @@ package resf.peridot.admin.v1;
import "google/api/annotations.proto";
import "validate/validate.proto";
import "apollo/proto/v1/advisory.proto";
import "peridot/proto/v1/task.proto";
option go_package = "peridot.resf.org/peridot/admin/pb;adminpb";

View File

@ -1 +0,0 @@
testdata/* linguist-vendored

View File

@ -1,16 +0,0 @@
# editor temporary files
*.sublime-*
.DS_Store
*.swp
#*.*#
tags
# direnv config
.env*
# test binaries
*.test
# coverage and profile outputs
*.out

View File

@ -1,31 +0,0 @@
arch:
- amd64
- ppc64le
language: go
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
- 1.15.x
- tip
jobs:
exclude:
- arch: ppc64le
go: 1.7.x
- arch: ppc64le
go: 1.8.x
- arch: ppc64le
go: 1.9.x
- arch: ppc64le
go: 1.10.x
- arch: ppc64le
go: 1.11.x
- arch: ppc64le
go: 1.12.x

View File

@ -1,25 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "goquery",
srcs = [
"array.go",
"doc.go",
"expand.go",
"filter.go",
"iteration.go",
"manipulation.go",
"property.go",
"query.go",
"traversal.go",
"type.go",
"utilities.go",
],
importmap = "peridot.resf.org/vendor/github.com/PuerkitoBio/goquery",
importpath = "github.com/PuerkitoBio/goquery",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/andybalholm/cascadia",
"@org_golang_x_net//html",
],
)

View File

@ -1,12 +0,0 @@
Copyright (c) 2012-2021, Martin Angers & Contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,190 +0,0 @@
# goquery - a little like that j-thing, only in Go
[![builds.sr.ht status](https://builds.sr.ht/~mna/goquery/commits/fedora.yml.svg)](https://builds.sr.ht/~mna/goquery/commits/fedora.yml?) [![build status](https://secure.travis-ci.org/PuerkitoBio/goquery.svg?branch=master)](http://travis-ci.org/PuerkitoBio/goquery) [![Go Reference](https://pkg.go.dev/badge/github.com/PuerkitoBio/goquery.svg)](https://pkg.go.dev/github.com/PuerkitoBio/goquery) [![Sourcegraph Badge](https://sourcegraph.com/github.com/PuerkitoBio/goquery/-/badge.svg)](https://sourcegraph.com/github.com/PuerkitoBio/goquery?badge)
goquery brings a syntax and a set of features similar to [jQuery][] to the [Go language][go]. It is based on Go's [net/html package][html] and the CSS Selector library [cascadia][]. Since the net/html parser returns nodes, and not a full-featured DOM tree, jQuery's stateful manipulation functions (like height(), css(), detach()) have been left off.
Also, because the net/html parser requires UTF-8 encoding, goquery does too: it is the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML. See the [wiki][] for various options to do this.
Syntax-wise, it is as close as possible to jQuery, with the same function names when possible, and that warm and fuzzy chainable interface. jQuery being the ultra-popular library that it is, I felt that a similar HTML-manipulating library was better off following its API than starting anew (in the same spirit as Go's `fmt` package), even though some of its methods are less than intuitive (looking at you, [index()][index]...).
## Table of Contents
* [Installation](#installation)
* [Changelog](#changelog)
* [API](#api)
* [Examples](#examples)
* [Related Projects](#related-projects)
* [Support](#support)
* [License](#license)
## Installation
Please note that because of the net/html dependency, goquery requires Go1.1+ and is tested on Go1.7+.
$ go get github.com/PuerkitoBio/goquery
(optional) To run unit tests:
$ cd $GOPATH/src/github.com/PuerkitoBio/goquery
$ go test
(optional) To run benchmarks (warning: it runs for a few minutes):
$ cd $GOPATH/src/github.com/PuerkitoBio/goquery
$ go test -bench=".*"
## Changelog
**Note that goquery's API is now stable, and will not break.**
* **2021-06-14 (v1.7.0)** : Add `Single` and `SingleMatcher` functions to optimize first-match selection (thanks [@gdollardollar](https://github.com/gdollardollar)).
* **2021-01-11 (v1.6.1)** : Fix panic when calling `{Prepend,Append,Set}Html` on a `Selection` that contains non-Element nodes.
* **2020-10-08 (v1.6.0)** : Parse html in context of the container node for all functions that deal with html strings (`AfterHtml`, `AppendHtml`, etc.). Thanks to [@thiemok][thiemok] and [@davidjwilkins][djw] for their work on this.
* **2020-02-04 (v1.5.1)** : Update module dependencies.
* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
* **2018-03-24 (v1.4.0)** : Deprecate `NewDocument(url)` and `NewDocumentFromResponse(response)`.
* **2018-01-28 (v1.3.0)** : Add `ToEnd` constant to `Slice` until the end of the selection (thanks to @davidjwilkins for raising the issue).
* **2018-01-11 (v1.2.0)** : Add `AddBack*` and deprecate `AndSelf` (thanks to @davidjwilkins).
* **2017-02-12 (v1.1.0)** : Add `SetHtml` and `SetText` (thanks to @glebtv).
* **2016-12-29 (v1.0.2)** : Optimize allocations for `Selection.Text` (thanks to @radovskyb).
* **2016-08-28 (v1.0.1)** : Optimize performance for large documents.
* **2016-07-27 (v1.0.0)** : Tag version 1.0.0.
* **2016-06-15** : Invalid selector strings internally compile to a `Matcher` implementation that never matches any node (instead of a panic). So for example, `doc.Find("~")` returns an empty `*Selection` object.
* **2016-02-02** : Add `NodeName` utility function similar to the DOM's `nodeName` property. It returns the tag name of the first element in a selection, and other relevant values of non-element nodes (see [doc][] for details). Add `OuterHtml` utility function similar to the DOM's `outerHTML` property (named `OuterHtml` in small caps for consistency with the existing `Html` method on the `Selection`).
* **2015-04-20** : Add `AttrOr` helper method to return the attribute's value or a default value if absent. Thanks to [piotrkowalczuk][piotr].
* **2015-02-04** : Add more manipulation functions - Prepend* - thanks again to [Andrew Stone][thatguystone].
* **2014-11-28** : Add more manipulation functions - ReplaceWith*, Wrap* and Unwrap - thanks again to [Andrew Stone][thatguystone].
* **2014-11-07** : Add manipulation functions (thanks to [Andrew Stone][thatguystone]) and `*Matcher` functions, that receive compiled cascadia selectors instead of selector strings, thus avoiding potential panics thrown by goquery via `cascadia.MustCompile` calls. This results in better performance (selectors can be compiled once and reused) and more idiomatic error handling (you can handle cascadia's compilation errors, instead of recovering from panics, which had been bugging me for a long time). Note that the actual type expected is a `Matcher` interface, that `cascadia.Selector` implements. Other matcher implementations could be used.
* **2014-11-06** : Change import paths of net/html to golang.org/x/net/html (see https://groups.google.com/forum/#!topic/golang-nuts/eD8dh3T9yyA). Make sure to update your code to use the new import path too when you call goquery with `html.Node`s.
* **v0.3.2** : Add `NewDocumentFromReader()` (thanks jweir) which allows creating a goquery document from an io.Reader.
* **v0.3.1** : Add `NewDocumentFromResponse()` (thanks assassingj) which allows creating a goquery document from an http response.
* **v0.3.0** : Add `EachWithBreak()` which allows to break out of an `Each()` loop by returning false. This function was added instead of changing the existing `Each()` to avoid breaking compatibility.
* **v0.2.1** : Make go-getable, now that [go.net/html is Go1.0-compatible][gonet] (thanks to @matrixik for pointing this out).
* **v0.2.0** : Add support for negative indices in Slice(). **BREAKING CHANGE** `Document.Root` is removed, `Document` is now a `Selection` itself (a selection of one, the root element, just like `Document.Root` was before). Add jQuery's Closest() method.
* **v0.1.1** : Add benchmarks to use as baseline for refactorings, refactor Next...() and Prev...() methods to use the new html package's linked list features (Next/PrevSibling, FirstChild). Good performance boost (40+% in some cases).
* **v0.1.0** : Initial release.
## API
goquery exposes two structs, `Document` and `Selection`, and the `Matcher` interface. Unlike jQuery, which is loaded as part of a DOM document, and thus acts on its containing document, goquery doesn't know which HTML document to act upon. So it needs to be told, and that's what the `Document` type is for. It holds the root document node as the initial Selection value to manipulate.
jQuery often has many variants for the same function (no argument, a selector string argument, a jQuery object argument, a DOM element argument, ...). Instead of exposing the same features in goquery as a single method with variadic empty interface arguments, statically-typed signatures are used following this naming convention:
* When the jQuery equivalent can be called with no argument, it has the same name as jQuery for the no argument signature (e.g.: `Prev()`), and the version with a selector string argument is called `XxxFiltered()` (e.g.: `PrevFiltered()`)
* When the jQuery equivalent **requires** one argument, the same name as jQuery is used for the selector string version (e.g.: `Is()`)
* The signatures accepting a jQuery object as argument are defined in goquery as `XxxSelection()` and take a `*Selection` object as argument (e.g.: `FilterSelection()`)
* The signatures accepting a DOM element as argument in jQuery are defined in goquery as `XxxNodes()` and take a variadic argument of type `*html.Node` (e.g.: `FilterNodes()`)
* The signatures accepting a function as argument in jQuery are defined in goquery as `XxxFunction()` and take a function as argument (e.g.: `FilterFunction()`)
* The goquery methods that can be called with a selector string have a corresponding version that take a `Matcher` interface and are defined as `XxxMatcher()` (e.g.: `IsMatcher()`)
Utility functions that are not in jQuery but are useful in Go are implemented as functions (that take a `*Selection` as parameter), to avoid a potential naming clash on the `*Selection`'s methods (reserved for jQuery-equivalent behaviour).
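To make the naming convention concrete, here is a minimal sketch contrasting the selector-string, `Matcher` and function variants of `Filter`. It is illustrative only: the HTML snippet and the counts in the comments are assumptions for this example, not part of the package's own tests.
```Go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/PuerkitoBio/goquery"
	"github.com/andybalholm/cascadia"
)

func main() {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(
		`<ul><li class="a">one</li><li>two</li><li class="a">three</li></ul>`))
	if err != nil {
		log.Fatal(err)
	}

	items := doc.Find("li")

	// Selector-string variant: the selector is compiled on every call.
	fmt.Println(items.Filter(".a").Length()) // 2

	// Matcher variant: compile once, reuse, and handle compilation errors
	// instead of recovering from panics.
	m, err := cascadia.Compile(".a")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(items.FilterMatcher(m).Length()) // 2

	// Function variant: keep elements for which the callback returns true.
	fmt.Println(items.FilterFunction(func(i int, s *goquery.Selection) bool {
		return strings.Contains(s.Text(), "o")
	}).Length()) // 2 ("one" and "two")
}
```
Compiling a `Matcher` once and reusing it is the main reason to prefer the `XxxMatcher` variants in hot paths; the `XxxSelection` and `XxxNodes` variants follow the same pattern for `*Selection` and `*html.Node` arguments.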
The complete [package reference documentation can be found here][doc].
Please note that Cascadia's selectors do not necessarily match all supported selectors of jQuery (Sizzle). See the [cascadia project][cascadia] for details. Invalid selector strings compile to a `Matcher` that fails to match any node. Behaviour of the various functions that take a selector string as argument follows from that fact, e.g. (where `~` is an invalid selector string):
* `Find("~")` returns an empty selection because the selector string doesn't match anything.
* `Add("~")` returns a new selection that holds the same nodes as the original selection, because it didn't add any node (selector string didn't match anything).
* `ParentsFiltered("~")` returns an empty selection because the selector string doesn't match anything.
* `ParentsUntil("~")` returns all parents of the selection because the selector string didn't match any element to stop before the top element.
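The never-matching behaviour of invalid selectors can be observed directly. The following is a small sketch with an assumed document; the printed counts in the comments are what the rules above predict, not output copied from the project.
```Go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

func main() {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(
		`<ul><li>one</li><li>two</li></ul>`))
	if err != nil {
		log.Fatal(err)
	}

	// "~" is not a valid selector: it compiles to a Matcher that never matches.
	fmt.Println(doc.Find("~").Length())                       // 0, and no panic
	fmt.Println(doc.Find("li").Add("~").Length())             // 2, nothing was added
	fmt.Println(doc.Find("li").ParentsFiltered("~").Length()) // 0
	fmt.Println(doc.Find("li").ParentsUntil("~").Length())    // all ancestors: ul, body, html
}
```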
## Examples
See some tips and tricks in the [wiki][].
Adapted from example_test.go:
```Go
package main
import (
"fmt"
"log"
"net/http"
"github.com/PuerkitoBio/goquery"
)
func ExampleScrape() {
// Request the HTML page.
res, err := http.Get("http://metalsucks.net")
if err != nil {
log.Fatal(err)
}
defer res.Body.Close()
if res.StatusCode != 200 {
log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
}
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(res.Body)
if err != nil {
log.Fatal(err)
}
// Find the review items
doc.Find(".left-content article .post-title").Each(func(i int, s *goquery.Selection) {
// For each item found, get the title
title := s.Find("a").Text()
fmt.Printf("Review %d: %s\n", i, title)
})
}
func main() {
ExampleScrape()
}
```
## Related Projects
- [Goq][goq], an HTML deserialization and scraping library based on goquery and struct tags.
- [andybalholm/cascadia][cascadia], the CSS selector library used by goquery.
- [suntong/cascadia][cascadiacli], a command-line interface to the cascadia CSS selector library, useful to test selectors.
- [gocolly/colly](https://github.com/gocolly/colly), a lightning-fast and elegant scraping framework.
- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
- [MontFerret/ferret](https://github.com/MontFerret/ferret), declarative web scraping.
- [tacusci/berrycms](https://github.com/tacusci/berrycms), a modern, simple-to-use CMS with easy-to-write plugins.
- [Dataflow kit](https://github.com/slotix/dataflowkit), Web Scraping framework for Gophers.
- [Geziyor](https://github.com/geziyor/geziyor), a fast web crawling & scraping framework for Go. Supports JS rendering.
- [Pagser](https://github.com/foolin/pagser), a simple, easy, extensible, configurable HTML parser to struct based on goquery and struct tags.
- [stitcherd](https://github.com/vhodges/stitcherd), a server for doing server-side includes using CSS selectors and DOM updates.
## Support
There are a number of ways you can support the project:
* Use it, star it, build something with it, spread the word!
- If you do build something open-source or otherwise publicly-visible, let me know so I can add it to the [Related Projects](#related-projects) section!
* Raise issues to improve the project (note: doc typos and clarifications are issues too!)
- Please search existing issues before opening a new one - it may have already been addressed.
* Pull requests: please discuss new code in an issue first, unless the fix is really trivial.
- Make sure new code is tested.
- Be mindful of existing code - PRs that break existing code have a high probability of being declined, unless it fixes a serious issue.
* Sponsor the developer
- See the GitHub Sponsor button at the top of the repo on GitHub
- or via BuyMeACoffee.com, below
<a href="https://www.buymeacoffee.com/mna" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
## License
The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia's license is [here][caslic].
[jquery]: http://jquery.com/
[go]: http://golang.org/
[cascadia]: https://github.com/andybalholm/cascadia
[cascadiacli]: https://github.com/suntong/cascadia
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[golic]: http://golang.org/LICENSE
[caslic]: https://github.com/andybalholm/cascadia/blob/master/LICENSE
[doc]: https://pkg.go.dev/github.com/PuerkitoBio/goquery
[index]: http://api.jquery.com/index/
[gonet]: https://github.com/golang/net/
[html]: https://pkg.go.dev/golang.org/x/net/html
[wiki]: https://github.com/PuerkitoBio/goquery/wiki/Tips-and-tricks
[thatguystone]: https://github.com/thatguystone
[piotr]: https://github.com/piotrkowalczuk
[goq]: https://github.com/andrewstuart/goq
[thiemok]: https://github.com/thiemok
[djw]: https://github.com/davidjwilkins

View File

@ -1,124 +0,0 @@
package goquery
import (
"golang.org/x/net/html"
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
// ToEnd is a special index value that can be used as end index in a call
// to Slice so that all elements are selected until the end of the Selection.
// It is equivalent to passing (*Selection).Length().
ToEnd = maxInt
)
// First reduces the set of matched elements to the first in the set.
// It returns a new Selection object, and an empty Selection object if
// the selection is empty.
func (s *Selection) First() *Selection {
return s.Eq(0)
}
// Last reduces the set of matched elements to the last in the set.
// It returns a new Selection object, and an empty Selection object if
// the selection is empty.
func (s *Selection) Last() *Selection {
return s.Eq(-1)
}
// Eq reduces the set of matched elements to the one at the specified index.
// If a negative index is given, it counts backwards starting at the end of the
// set. It returns a new Selection object, and an empty Selection object if the
// index is invalid.
func (s *Selection) Eq(index int) *Selection {
if index < 0 {
index += len(s.Nodes)
}
if index >= len(s.Nodes) || index < 0 {
return newEmptySelection(s.document)
}
return s.Slice(index, index+1)
}
// Slice reduces the set of matched elements to a subset specified by a range
// of indices. The start index is 0-based and indicates the index of the first
// element to select. The end index is 0-based and indicates the index at which
// the elements stop being selected (the end index is not selected).
//
// The indices may be negative, in which case they represent an offset from the
// end of the selection.
//
// The special value ToEnd may be specified as end index, in which case all elements
// until the end are selected. This works both for a positive and negative start
// index.
func (s *Selection) Slice(start, end int) *Selection {
if start < 0 {
start += len(s.Nodes)
}
if end == ToEnd {
end = len(s.Nodes)
} else if end < 0 {
end += len(s.Nodes)
}
return pushStack(s, s.Nodes[start:end])
}
// Get retrieves the underlying node at the specified index.
// Get without parameter is not implemented, since the node array is available
// on the Selection object.
func (s *Selection) Get(index int) *html.Node {
if index < 0 {
index += len(s.Nodes) // Negative index gets from the end
}
return s.Nodes[index]
}
// Index returns the position of the first element within the Selection object
// relative to its sibling elements.
func (s *Selection) Index() int {
if len(s.Nodes) > 0 {
return newSingleSelection(s.Nodes[0], s.document).PrevAll().Length()
}
return -1
}
// IndexSelector returns the position of the first element within the
// Selection object relative to the elements matched by the selector, or -1 if
// not found.
func (s *Selection) IndexSelector(selector string) int {
if len(s.Nodes) > 0 {
sel := s.document.Find(selector)
return indexInSlice(sel.Nodes, s.Nodes[0])
}
return -1
}
// IndexMatcher returns the position of the first element within the
// Selection object relative to the elements matched by the matcher, or -1 if
// not found.
func (s *Selection) IndexMatcher(m Matcher) int {
if len(s.Nodes) > 0 {
sel := s.document.FindMatcher(m)
return indexInSlice(sel.Nodes, s.Nodes[0])
}
return -1
}
// IndexOfNode returns the position of the specified node within the Selection
// object, or -1 if not found.
func (s *Selection) IndexOfNode(node *html.Node) int {
return indexInSlice(s.Nodes, node)
}
// IndexOfSelection returns the position of the first node in the specified
// Selection object within this Selection object, or -1 if not found.
func (s *Selection) IndexOfSelection(sel *Selection) int {
if sel != nil && len(sel.Nodes) > 0 {
return indexInSlice(s.Nodes, sel.Nodes[0])
}
return -1
}

View File

@ -1,123 +0,0 @@
// Copyright (c) 2012-2016, Martin Angers & Contributors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// * Neither the name of the author nor the names of its contributors may be used to
// endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package goquery implements features similar to jQuery, including the chainable
syntax, to manipulate and query an HTML document.
It brings a syntax and a set of features similar to jQuery to the Go language.
It is based on Go's net/html package and the CSS Selector library cascadia.
Since the net/html parser returns nodes, and not a full-featured DOM
tree, jQuery's stateful manipulation functions (like height(), css(), detach())
have been left off.
Also, because the net/html parser requires UTF-8 encoding, goquery does too: it is
the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML.
See the repository's wiki for various options on how to do this.
Syntax-wise, it is as close as possible to jQuery, with the same method names when
possible, and that warm and fuzzy chainable interface. jQuery being the
ultra-popular library that it is, it made sense for a similar HTML-manipulating
library to follow its API rather than start anew (in the same spirit as
Go's fmt package), even though some of its methods are less than intuitive (looking
at you, index()...).
It is hosted on GitHub, along with additional documentation in the README.md
file: https://github.com/puerkitobio/goquery
Please note that because of the net/html dependency, goquery requires Go1.1+.
The various methods are split into files based on the category of behavior.
The three dots (...) indicate that various "overloads" are available.
* array.go : array-like positional manipulation of the selection.
- Eq()
- First()
- Get()
- Index...()
- Last()
- Slice()
* expand.go : methods that expand or augment the selection's set.
- Add...()
- AndSelf()
- Union(), which is an alias for AddSelection()
* filter.go : filtering methods, that reduce the selection's set.
- End()
- Filter...()
- Has...()
- Intersection(), which is an alias of FilterSelection()
- Not...()
* iteration.go : methods to loop over the selection's nodes.
- Each()
- EachWithBreak()
- Map()
* manipulation.go : methods for modifying the document
- After...()
- Append...()
- Before...()
- Clone()
- Empty()
- Prepend...()
- Remove...()
- ReplaceWith...()
- Unwrap()
- Wrap...()
- WrapAll...()
- WrapInner...()
* property.go : methods that inspect and get the node's properties values.
- Attr*(), RemoveAttr(), SetAttr()
- AddClass(), HasClass(), RemoveClass(), ToggleClass()
- Html()
- Length()
- Size(), which is an alias for Length()
- Text()
* query.go : methods that query, or reflect, a node's identity.
- Contains()
- Is...()
* traversal.go : methods to traverse the HTML document tree.
- Children...()
- Contents()
- Find...()
- Next...()
- Parent[s]...()
- Prev...()
- Siblings...()
* type.go : definition of the types exposed by goquery.
- Document
- Selection
- Matcher
* utilities.go : definition of helper functions (and not methods on a *Selection)
that are not part of jQuery, but are useful to goquery.
- NodeName
- OuterHtml
*/
package goquery

View File

@ -1,70 +0,0 @@
package goquery
import "golang.org/x/net/html"
// Add adds the selector string's matching nodes to those in the current
// selection and returns a new Selection object.
// The selector string is run in the context of the document of the current
// Selection object.
func (s *Selection) Add(selector string) *Selection {
return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, compileMatcher(selector))...)
}
// AddMatcher adds the matcher's matching nodes to those in the current
// selection and returns a new Selection object.
// The matcher is run in the context of the document of the current
// Selection object.
func (s *Selection) AddMatcher(m Matcher) *Selection {
return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, m)...)
}
// AddSelection adds the specified Selection object's nodes to those in the
// current selection and returns a new Selection object.
func (s *Selection) AddSelection(sel *Selection) *Selection {
if sel == nil {
return s.AddNodes()
}
return s.AddNodes(sel.Nodes...)
}
// Union is an alias for AddSelection.
func (s *Selection) Union(sel *Selection) *Selection {
return s.AddSelection(sel)
}
// AddNodes adds the specified nodes to those in the
// current selection and returns a new Selection object.
func (s *Selection) AddNodes(nodes ...*html.Node) *Selection {
return pushStack(s, appendWithoutDuplicates(s.Nodes, nodes, nil))
}
// AndSelf adds the previous set of elements on the stack to the current set.
// It returns a new Selection object containing the current Selection combined
// with the previous one.
// Deprecated: This function has been deprecated and is now an alias for AddBack().
func (s *Selection) AndSelf() *Selection {
return s.AddBack()
}
// AddBack adds the previous set of elements on the stack to the current set.
// It returns a new Selection object containing the current Selection combined
// with the previous one.
func (s *Selection) AddBack() *Selection {
return s.AddSelection(s.prevSel)
}
// AddBackFiltered reduces the previous set of elements on the stack to those that
// match the selector string, and adds them to the current set.
// It returns a new Selection object containing the current Selection combined
// with the filtered previous one.
func (s *Selection) AddBackFiltered(selector string) *Selection {
return s.AddSelection(s.prevSel.Filter(selector))
}
// AddBackMatcher reduces the previous set of elements on the stack to those that match
// the matcher, and adds them to the current set.
// It returns a new Selection object containing the current Selection combined
// with the filtered previous one.
func (s *Selection) AddBackMatcher(m Matcher) *Selection {
return s.AddSelection(s.prevSel.FilterMatcher(m))
}

View File

@ -1,163 +0,0 @@
package goquery
import "golang.org/x/net/html"
// Filter reduces the set of matched elements to those that match the selector string.
// It returns a new Selection object for this subset of matching elements.
func (s *Selection) Filter(selector string) *Selection {
return s.FilterMatcher(compileMatcher(selector))
}
// FilterMatcher reduces the set of matched elements to those that match
// the given matcher. It returns a new Selection object for this subset
// of matching elements.
func (s *Selection) FilterMatcher(m Matcher) *Selection {
return pushStack(s, winnow(s, m, true))
}
// Not removes elements from the Selection that match the selector string.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) Not(selector string) *Selection {
return s.NotMatcher(compileMatcher(selector))
}
// NotMatcher removes elements from the Selection that match the given matcher.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) NotMatcher(m Matcher) *Selection {
return pushStack(s, winnow(s, m, false))
}
// FilterFunction reduces the set of matched elements to those that pass the function's test.
// It returns a new Selection object for this subset of elements.
func (s *Selection) FilterFunction(f func(int, *Selection) bool) *Selection {
return pushStack(s, winnowFunction(s, f, true))
}
// NotFunction removes elements from the Selection that pass the function's test.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) NotFunction(f func(int, *Selection) bool) *Selection {
return pushStack(s, winnowFunction(s, f, false))
}
// FilterNodes reduces the set of matched elements to those that match the specified nodes.
// It returns a new Selection object for this subset of elements.
func (s *Selection) FilterNodes(nodes ...*html.Node) *Selection {
return pushStack(s, winnowNodes(s, nodes, true))
}
// NotNodes removes elements from the Selection that match the specified nodes.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) NotNodes(nodes ...*html.Node) *Selection {
return pushStack(s, winnowNodes(s, nodes, false))
}
// FilterSelection reduces the set of matched elements to those that match a
// node in the specified Selection object.
// It returns a new Selection object for this subset of elements.
func (s *Selection) FilterSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, winnowNodes(s, nil, true))
}
return pushStack(s, winnowNodes(s, sel.Nodes, true))
}
// NotSelection removes elements from the Selection that match a node in the specified
// Selection object. It returns a new Selection object with the matching elements removed.
func (s *Selection) NotSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, winnowNodes(s, nil, false))
}
return pushStack(s, winnowNodes(s, sel.Nodes, false))
}
// Intersection is an alias for FilterSelection.
func (s *Selection) Intersection(sel *Selection) *Selection {
return s.FilterSelection(sel)
}
// Has reduces the set of matched elements to those that have a descendant
// that matches the selector.
// It returns a new Selection object with the matching elements.
func (s *Selection) Has(selector string) *Selection {
return s.HasSelection(s.document.Find(selector))
}
// HasMatcher reduces the set of matched elements to those that have a descendant
// that matches the matcher.
// It returns a new Selection object with the matching elements.
func (s *Selection) HasMatcher(m Matcher) *Selection {
return s.HasSelection(s.document.FindMatcher(m))
}
// HasNodes reduces the set of matched elements to those that have a
// descendant that matches one of the nodes.
// It returns a new Selection object with the matching elements.
func (s *Selection) HasNodes(nodes ...*html.Node) *Selection {
return s.FilterFunction(func(_ int, sel *Selection) bool {
// Add all nodes that contain one of the specified nodes
for _, n := range nodes {
if sel.Contains(n) {
return true
}
}
return false
})
}
// HasSelection reduces the set of matched elements to those that have a
// descendant that matches one of the nodes of the specified Selection object.
// It returns a new Selection object with the matching elements.
func (s *Selection) HasSelection(sel *Selection) *Selection {
if sel == nil {
return s.HasNodes()
}
return s.HasNodes(sel.Nodes...)
}
// End ends the most recent filtering operation in the current chain and
// returns the set of matched elements to its previous state.
func (s *Selection) End() *Selection {
if s.prevSel != nil {
return s.prevSel
}
return newEmptySelection(s.document)
}
// Filter based on the matcher, and the indicator to keep (Filter) or
// to get rid of (Not) the matching elements.
func winnow(sel *Selection, m Matcher, keep bool) []*html.Node {
// Optimize if keep is requested
if keep {
return m.Filter(sel.Nodes)
}
// Use grep
return grep(sel, func(i int, s *Selection) bool {
return !m.Match(s.Get(0))
})
}
// Filter based on an array of nodes, and the indicator to keep (Filter) or
// to get rid of (Not) the matching elements.
func winnowNodes(sel *Selection, nodes []*html.Node, keep bool) []*html.Node {
if len(nodes)+len(sel.Nodes) < minNodesForSet {
return grep(sel, func(i int, s *Selection) bool {
return isInSlice(nodes, s.Get(0)) == keep
})
}
set := make(map[*html.Node]bool)
for _, n := range nodes {
set[n] = true
}
return grep(sel, func(i int, s *Selection) bool {
return set[s.Get(0)] == keep
})
}
// Filter based on a function test, and the indicator to keep (Filter) or
// to get rid of (Not) the matching elements.
func winnowFunction(sel *Selection, f func(int, *Selection) bool, keep bool) []*html.Node {
return grep(sel, func(i int, s *Selection) bool {
return f(i, s) == keep
})
}

View File

@ -1,8 +0,0 @@
module github.com/PuerkitoBio/goquery
require (
github.com/andybalholm/cascadia v1.1.0
golang.org/x/net v0.0.0-20200202094626-16171245cfb2
)
go 1.13

View File

@ -1,8 +0,0 @@
github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -1,39 +0,0 @@
package goquery
// Each iterates over a Selection object, executing a function for each
// matched element. It returns the current Selection object. The function
// f is called for each element in the selection with the index of the
// element in that selection starting at 0, and a *Selection that contains
// only that element.
func (s *Selection) Each(f func(int, *Selection)) *Selection {
for i, n := range s.Nodes {
f(i, newSingleSelection(n, s.document))
}
return s
}
// EachWithBreak iterates over a Selection object, executing a function for each
// matched element. It is identical to Each except that it is possible to break
// out of the loop by returning false in the callback function. It returns the
// current Selection object.
func (s *Selection) EachWithBreak(f func(int, *Selection) bool) *Selection {
for i, n := range s.Nodes {
if !f(i, newSingleSelection(n, s.document)) {
return s
}
}
return s
}
// Map passes each element in the current matched set through a function,
// producing a slice of strings holding the returned values. The function
// f is called for each element in the selection with the index of the
// element in that selection starting at 0, and a *Selection that contains
// only that element.
func (s *Selection) Map(f func(int, *Selection) string) (result []string) {
for i, n := range s.Nodes {
result = append(result, f(i, newSingleSelection(n, s.document)))
}
return result
}

View File

@ -1,679 +0,0 @@
package goquery
import (
"strings"
"golang.org/x/net/html"
)
// After applies the selector from the root document and inserts the matched elements
// after the elements in the set of matched elements.
//
// If one of the matched elements in the selection is not currently in the
// document, it's impossible to insert nodes after it, so it will be ignored.
//
// This follows the same rules as Selection.Append.
func (s *Selection) After(selector string) *Selection {
return s.AfterMatcher(compileMatcher(selector))
}
// AfterMatcher applies the matcher from the root document and inserts the matched elements
// after the elements in the set of matched elements.
//
// If one of the matched elements in the selection is not currently in the
// document, it's impossible to insert nodes after it, so it will be ignored.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterMatcher(m Matcher) *Selection {
return s.AfterNodes(m.MatchAll(s.document.rootNode)...)
}
// AfterSelection inserts the elements in the selection after each element in the set of matched
// elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterSelection(sel *Selection) *Selection {
return s.AfterNodes(sel.Nodes...)
}
// AfterHtml parses the html and inserts it after the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
nextSibling := node.NextSibling
for _, n := range nodes {
if node.Parent != nil {
node.Parent.InsertBefore(n, nextSibling)
}
}
})
}
// AfterNodes inserts the nodes after each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
if sn.Parent != nil {
sn.Parent.InsertBefore(n, sn.NextSibling)
}
})
}
// Append appends the elements specified by the selector to the end of each element
// in the set of matched elements, following those rules:
//
// 1) The selector is applied to the root document.
//
// 2) Elements that are part of the document will be moved to the new location.
//
// 3) If there are multiple locations to append to, cloned nodes will be
// appended to all target locations except the last one, which will be moved
// as noted in (2).
func (s *Selection) Append(selector string) *Selection {
return s.AppendMatcher(compileMatcher(selector))
}
// AppendMatcher appends the elements specified by the matcher to the end of each element
// in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AppendMatcher(m Matcher) *Selection {
return s.AppendNodes(m.MatchAll(s.document.rootNode)...)
}
// AppendSelection appends the elements in the selection to the end of each element
// in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AppendSelection(sel *Selection) *Selection {
return s.AppendNodes(sel.Nodes...)
}
// AppendHtml parses the html and appends it to the set of matched elements.
func (s *Selection) AppendHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
for _, n := range nodes {
node.AppendChild(n)
}
})
}
// AppendNodes appends the specified nodes to each node in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AppendNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
sn.AppendChild(n)
})
}
// Before inserts the matched elements before each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) Before(selector string) *Selection {
return s.BeforeMatcher(compileMatcher(selector))
}
// BeforeMatcher inserts the matched elements before each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeMatcher(m Matcher) *Selection {
return s.BeforeNodes(m.MatchAll(s.document.rootNode)...)
}
// BeforeSelection inserts the elements in the selection before each element in the set of matched
// elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeSelection(sel *Selection) *Selection {
return s.BeforeNodes(sel.Nodes...)
}
// BeforeHtml parses the html and inserts it before the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
for _, n := range nodes {
if node.Parent != nil {
node.Parent.InsertBefore(n, node)
}
}
})
}
// BeforeNodes inserts the nodes before each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
if sn.Parent != nil {
sn.Parent.InsertBefore(n, sn)
}
})
}
// Clone creates a deep copy of the set of matched nodes. The new nodes will not be
// attached to the document.
func (s *Selection) Clone() *Selection {
ns := newEmptySelection(s.document)
ns.Nodes = cloneNodes(s.Nodes)
return ns
}
// Empty removes all children nodes from the set of matched elements.
// It returns the children nodes in a new Selection.
func (s *Selection) Empty() *Selection {
var nodes []*html.Node
for _, n := range s.Nodes {
for c := n.FirstChild; c != nil; c = n.FirstChild {
n.RemoveChild(c)
nodes = append(nodes, c)
}
}
return pushStack(s, nodes)
}
// Prepend prepends the elements specified by the selector to each element in
// the set of matched elements, following the same rules as Append.
func (s *Selection) Prepend(selector string) *Selection {
return s.PrependMatcher(compileMatcher(selector))
}
// PrependMatcher prepends the elements specified by the matcher to each
// element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) PrependMatcher(m Matcher) *Selection {
return s.PrependNodes(m.MatchAll(s.document.rootNode)...)
}
// PrependSelection prepends the elements in the selection to each element in
// the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) PrependSelection(sel *Selection) *Selection {
return s.PrependNodes(sel.Nodes...)
}
// PrependHtml parses the html and prepends it to the set of matched elements.
func (s *Selection) PrependHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
firstChild := node.FirstChild
for _, n := range nodes {
node.InsertBefore(n, firstChild)
}
})
}
// PrependNodes prepends the specified nodes to each node in the set of
// matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) PrependNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
// sn.FirstChild may be nil, in which case this functions like
// sn.AppendChild()
sn.InsertBefore(n, sn.FirstChild)
})
}
// Remove removes the set of matched elements from the document.
// It returns the same selection, now consisting of nodes not in the document.
func (s *Selection) Remove() *Selection {
for _, n := range s.Nodes {
if n.Parent != nil {
n.Parent.RemoveChild(n)
}
}
return s
}
// RemoveFiltered removes from the current set of matched elements those that
// match the selector filter. It returns the Selection of removed nodes.
//
// For example if the selection s contains "<h1>", "<h2>" and "<h3>"
// and s.RemoveFiltered("h2") is called, only the "<h2>" node is removed
// (and returned), while "<h1>" and "<h3>" are kept in the document.
func (s *Selection) RemoveFiltered(selector string) *Selection {
return s.RemoveMatcher(compileMatcher(selector))
}
// RemoveMatcher removes from the current set of matched elements those that
// match the Matcher filter. It returns the Selection of removed nodes.
// See RemoveFiltered for additional information.
func (s *Selection) RemoveMatcher(m Matcher) *Selection {
return s.FilterMatcher(m).Remove()
}
// ReplaceWith replaces each element in the set of matched elements with the
// nodes matched by the given selector.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWith(selector string) *Selection {
return s.ReplaceWithMatcher(compileMatcher(selector))
}
// ReplaceWithMatcher replaces each element in the set of matched elements with
// the nodes matched by the given Matcher.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithMatcher(m Matcher) *Selection {
return s.ReplaceWithNodes(m.MatchAll(s.document.rootNode)...)
}
// ReplaceWithSelection replaces each element in the set of matched elements with
// the nodes from the given Selection.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
return s.ReplaceWithNodes(sel.Nodes...)
}
// ReplaceWithHtml replaces each element in the set of matched elements with
// the parsed HTML.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithHtml(htmlStr string) *Selection {
s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
nextSibling := node.NextSibling
for _, n := range nodes {
if node.Parent != nil {
node.Parent.InsertBefore(n, nextSibling)
}
}
})
return s.Remove()
}
// ReplaceWithNodes replaces each element in the set of matched elements with
// the given nodes.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
s.AfterNodes(ns...)
return s.Remove()
}
// SetHtml sets the html content of each element in the selection to the
// specified html string.
func (s *Selection) SetHtml(htmlStr string) *Selection {
for _, context := range s.Nodes {
for c := context.FirstChild; c != nil; c = context.FirstChild {
context.RemoveChild(c)
}
}
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
for _, n := range nodes {
node.AppendChild(n)
}
})
}
// SetText sets the content of each element in the selection to the specified content.
// The provided text string is escaped.
func (s *Selection) SetText(text string) *Selection {
return s.SetHtml(html.EscapeString(text))
}
// Unwrap removes the parents of the set of matched elements, leaving the matched
// elements (and their siblings, if any) in their place.
// It returns the original selection.
func (s *Selection) Unwrap() *Selection {
s.Parent().Each(func(i int, ss *Selection) {
// For some reason, jquery allows unwrap to remove the <head> element, so
// allowing it here too. Same for <html>. Why it allows those elements to
// be unwrapped while not allowing body is a mystery to me.
if ss.Nodes[0].Data != "body" {
ss.ReplaceWithSelection(ss.Contents())
}
})
return s
}
// Wrap wraps each element in the set of matched elements inside the first
// element matched by the given selector. The matched child is cloned before
// being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) Wrap(selector string) *Selection {
return s.WrapMatcher(compileMatcher(selector))
}
// WrapMatcher wraps each element in the set of matched elements inside the
// first element matched by the given matcher. The matched child is cloned
// before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapMatcher(m Matcher) *Selection {
return s.wrapNodes(m.MatchAll(s.document.rootNode)...)
}
// WrapSelection wraps each element in the set of matched elements inside the
// first element in the given Selection. The element is cloned before being
// inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapSelection(sel *Selection) *Selection {
return s.wrapNodes(sel.Nodes...)
}
// WrapHtml wraps each element in the set of matched elements inside the inner-
// most child of the given HTML.
//
// It returns the original set of elements.
func (s *Selection) WrapHtml(htmlStr string) *Selection {
nodesMap := make(map[string][]*html.Node)
for _, context := range s.Nodes {
var parent *html.Node
if context.Parent != nil {
parent = context.Parent
} else {
parent = &html.Node{Type: html.ElementNode}
}
nodes, found := nodesMap[nodeName(parent)]
if !found {
nodes = parseHtmlWithContext(htmlStr, parent)
nodesMap[nodeName(parent)] = nodes
}
newSingleSelection(context, s.document).wrapAllNodes(cloneNodes(nodes)...)
}
return s
}
// WrapNode wraps each element in the set of matched elements inside the inner-
// most child of the given node. The given node is copied before being inserted
// into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapNode(n *html.Node) *Selection {
return s.wrapNodes(n)
}
func (s *Selection) wrapNodes(ns ...*html.Node) *Selection {
s.Each(func(i int, ss *Selection) {
ss.wrapAllNodes(ns...)
})
return s
}
// WrapAll wraps a single HTML structure, matched by the given selector, around
// all elements in the set of matched elements. The matched child is cloned
// before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAll(selector string) *Selection {
return s.WrapAllMatcher(compileMatcher(selector))
}
// WrapAllMatcher wraps a single HTML structure, matched by the given Matcher,
// around all elements in the set of matched elements. The matched child is
// cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllMatcher(m Matcher) *Selection {
return s.wrapAllNodes(m.MatchAll(s.document.rootNode)...)
}
// WrapAllSelection wraps a single HTML structure, the first node of the given
// Selection, around all elements in the set of matched elements. The matched
// child is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
return s.wrapAllNodes(sel.Nodes...)
}
// WrapAllHtml wraps the given HTML structure around all elements in the set of
// matched elements. The matched child is cloned before being inserted into the
// document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllHtml(htmlStr string) *Selection {
var context *html.Node
var nodes []*html.Node
if len(s.Nodes) > 0 {
context = s.Nodes[0]
if context.Parent != nil {
nodes = parseHtmlWithContext(htmlStr, context)
} else {
nodes = parseHtml(htmlStr)
}
}
return s.wrapAllNodes(nodes...)
}
func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
if len(ns) > 0 {
return s.WrapAllNode(ns[0])
}
return s
}
// WrapAllNode wraps the given node around the first element in the Selection,
// making all other nodes in the Selection children of the given node. The node
// is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllNode(n *html.Node) *Selection {
if s.Size() == 0 {
return s
}
wrap := cloneNode(n)
first := s.Nodes[0]
if first.Parent != nil {
first.Parent.InsertBefore(wrap, first)
first.Parent.RemoveChild(first)
}
for c := getFirstChildEl(wrap); c != nil; c = getFirstChildEl(wrap) {
wrap = c
}
newSingleSelection(wrap, s.document).AppendSelection(s)
return s
}
// WrapInner wraps an HTML structure, matched by the given selector, around the
// content of each element in the set of matched elements. The matched child is
// cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInner(selector string) *Selection {
return s.WrapInnerMatcher(compileMatcher(selector))
}
// WrapInnerMatcher wraps an HTML structure, matched by the given matcher,
// around the content of each element in the set of matched elements. The matched
// child is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerMatcher(m Matcher) *Selection {
return s.wrapInnerNodes(m.MatchAll(s.document.rootNode)...)
}
// WrapInnerSelection wraps an HTML structure, the first node of the given
// Selection, around the content of each element in the set of matched elements.
// The matched child is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
return s.wrapInnerNodes(sel.Nodes...)
}
// WrapInnerHtml wraps an HTML structure, parsed from the given HTML string,
// around the content of each element in the set of matched elements. The matched
// child is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerHtml(htmlStr string) *Selection {
nodesMap := make(map[string][]*html.Node)
for _, context := range s.Nodes {
nodes, found := nodesMap[nodeName(context)]
if !found {
nodes = parseHtmlWithContext(htmlStr, context)
nodesMap[nodeName(context)] = nodes
}
newSingleSelection(context, s.document).wrapInnerNodes(cloneNodes(nodes)...)
}
return s
}
// WrapInnerNode wraps the given node around the content of each element in the
// set of matched elements. The node is cloned before being inserted into the
// document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerNode(n *html.Node) *Selection {
return s.wrapInnerNodes(n)
}
func (s *Selection) wrapInnerNodes(ns ...*html.Node) *Selection {
if len(ns) == 0 {
return s
}
s.Each(func(i int, s *Selection) {
contents := s.Contents()
if contents.Size() > 0 {
contents.wrapAllNodes(ns...)
} else {
s.AppendNodes(cloneNode(ns[0]))
}
})
return s
}
func parseHtml(h string) []*html.Node {
// Errors are only returned when the io.Reader returns any error besides
// EOF, but strings.Reader never will
nodes, err := html.ParseFragment(strings.NewReader(h), &html.Node{Type: html.ElementNode})
if err != nil {
panic("goquery: failed to parse HTML: " + err.Error())
}
return nodes
}
func parseHtmlWithContext(h string, context *html.Node) []*html.Node {
// Errors are only returned when the io.Reader returns any error besides
// EOF, but strings.Reader never will
nodes, err := html.ParseFragment(strings.NewReader(h), context)
if err != nil {
panic("goquery: failed to parse HTML: " + err.Error())
}
return nodes
}
// Get the first child that is an ElementNode
func getFirstChildEl(n *html.Node) *html.Node {
c := n.FirstChild
for c != nil && c.Type != html.ElementNode {
c = c.NextSibling
}
return c
}
// Deep copy a slice of nodes.
func cloneNodes(ns []*html.Node) []*html.Node {
cns := make([]*html.Node, 0, len(ns))
for _, n := range ns {
cns = append(cns, cloneNode(n))
}
return cns
}
// Deep copy a node. The new node has clones of all the original node's
// children but none of its parents or siblings.
func cloneNode(n *html.Node) *html.Node {
nn := &html.Node{
Type: n.Type,
DataAtom: n.DataAtom,
Data: n.Data,
Attr: make([]html.Attribute, len(n.Attr)),
}
copy(nn.Attr, n.Attr)
for c := n.FirstChild; c != nil; c = c.NextSibling {
nn.AppendChild(cloneNode(c))
}
return nn
}
func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
f func(sn *html.Node, n *html.Node)) *Selection {
lasti := s.Size() - 1
// net/html doesn't provide document fragments for insertion, so to get
// things in the correct order with After() and Prepend(), the callback
// needs to be called on the reverse of the nodes.
if reverse {
for i, j := 0, len(ns)-1; i < j; i, j = i+1, j-1 {
ns[i], ns[j] = ns[j], ns[i]
}
}
for i, sn := range s.Nodes {
for _, n := range ns {
if i != lasti {
f(sn, cloneNode(n))
} else {
if n.Parent != nil {
n.Parent.RemoveChild(n)
}
f(sn, n)
}
}
}
return s
}
// eachNodeHtml parses the given html string and inserts the resulting nodes into the DOM using mergeFn.
// The parsed nodes are inserted for each element of the selection.
// isParent can be used to indicate that the elements of the selection should be treated as the parent for the parsed html.
// A cache is used to avoid parsing the html multiple times when several elements of the selection share the same context node name.
func (s *Selection) eachNodeHtml(htmlStr string, isParent bool, mergeFn func(n *html.Node, nodes []*html.Node)) *Selection {
// cache to avoid parsing the html for the same context multiple times
nodeCache := make(map[string][]*html.Node)
var context *html.Node
for _, n := range s.Nodes {
if isParent {
context = n.Parent
} else {
if n.Type != html.ElementNode {
continue
}
context = n
}
if context != nil {
nodes, found := nodeCache[nodeName(context)]
if !found {
nodes = parseHtmlWithContext(htmlStr, context)
nodeCache[nodeName(context)] = nodes
}
mergeFn(n, cloneNodes(nodes))
}
}
return s
}

View File

@ -1,275 +0,0 @@
package goquery
import (
"bytes"
"regexp"
"strings"
"golang.org/x/net/html"
)
var rxClassTrim = regexp.MustCompile("[\t\r\n]")
// Attr gets the specified attribute's value for the first element in the
// Selection. To get the value for each element individually, use a looping
// construct such as the Each or Map method.
func (s *Selection) Attr(attrName string) (val string, exists bool) {
if len(s.Nodes) == 0 {
return
}
return getAttributeValue(attrName, s.Nodes[0])
}
// AttrOr works like Attr but returns the default value if the attribute is not present.
func (s *Selection) AttrOr(attrName, defaultValue string) string {
if len(s.Nodes) == 0 {
return defaultValue
}
val, exists := getAttributeValue(attrName, s.Nodes[0])
if !exists {
return defaultValue
}
return val
}
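// exampleAttr is a hypothetical usage sketch: Attr reads from the first
// matched element only, while AttrOr supplies a fallback value. It assumes
// doc was parsed elsewhere in the program.
func exampleAttr(doc *Document) (string, string) {
	links := doc.Find("a")
	href, _ := links.Attr("href")          // value on the first matched <a>, "" if absent
	rel := links.AttrOr("rel", "noopener") // falls back to "noopener" when missing
	return href, rel
}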
// RemoveAttr removes the named attribute from each element in the set of matched elements.
func (s *Selection) RemoveAttr(attrName string) *Selection {
for _, n := range s.Nodes {
removeAttr(n, attrName)
}
return s
}
// SetAttr sets the given attribute on each element in the set of matched elements.
func (s *Selection) SetAttr(attrName, val string) *Selection {
for _, n := range s.Nodes {
attr := getAttributePtr(attrName, n)
if attr == nil {
n.Attr = append(n.Attr, html.Attribute{Key: attrName, Val: val})
} else {
attr.Val = val
}
}
return s
}
// Text gets the combined text contents of each element in the set of matched
// elements, including their descendants.
func (s *Selection) Text() string {
var buf bytes.Buffer
// Slightly optimized vs calling Each: no single selection object created
var f func(*html.Node)
f = func(n *html.Node) {
if n.Type == html.TextNode {
// Keep newlines and spaces, like jQuery
buf.WriteString(n.Data)
}
if n.FirstChild != nil {
for c := n.FirstChild; c != nil; c = c.NextSibling {
f(c)
}
}
}
for _, n := range s.Nodes {
f(n)
}
return buf.String()
}
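// exampleText is a hypothetical usage sketch: Text concatenates the text
// nodes of every matched element and its descendants, keeping whitespace
// as-is (like jQuery). It assumes doc was parsed elsewhere.
func exampleText(doc *Document) string {
	return doc.Find("p").Text()
}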
// Size is an alias for Length.
func (s *Selection) Size() int {
return s.Length()
}
// Length returns the number of elements in the Selection object.
func (s *Selection) Length() int {
return len(s.Nodes)
}
// Html gets the HTML contents of the first element in the set of matched
// elements. It includes text and comment nodes.
func (s *Selection) Html() (ret string, e error) {
// Since there is no .innerHtml, the HTML content must be re-created from
// the nodes using html.Render.
var buf bytes.Buffer
if len(s.Nodes) > 0 {
for c := s.Nodes[0].FirstChild; c != nil; c = c.NextSibling {
e = html.Render(&buf, c)
if e != nil {
return
}
}
ret = buf.String()
}
return
}
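// exampleInnerHtml is a hypothetical usage sketch: Html returns the inner
// HTML of the first matched element only; the package-level OuterHtml
// function also includes the element's own tag. doc is assumed to have been
// parsed elsewhere.
func exampleInnerHtml(doc *Document) (string, error) {
	return doc.Find("div.content").Html()
}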
// AddClass adds the given class(es) to each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
func (s *Selection) AddClass(class ...string) *Selection {
classStr := strings.TrimSpace(strings.Join(class, " "))
if classStr == "" {
return s
}
tcls := getClassesSlice(classStr)
for _, n := range s.Nodes {
curClasses, attr := getClassesAndAttr(n, true)
for _, newClass := range tcls {
if !strings.Contains(curClasses, " "+newClass+" ") {
curClasses += newClass + " "
}
}
setClasses(n, attr, curClasses)
}
return s
}
// HasClass determines whether any of the matched elements are assigned the
// given class.
func (s *Selection) HasClass(class string) bool {
class = " " + class + " "
for _, n := range s.Nodes {
classes, _ := getClassesAndAttr(n, false)
if strings.Contains(classes, class) {
return true
}
}
return false
}
// RemoveClass removes the given class(es) from each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
// If no class name is provided, all classes are removed.
func (s *Selection) RemoveClass(class ...string) *Selection {
var rclasses []string
classStr := strings.TrimSpace(strings.Join(class, " "))
remove := classStr == ""
if !remove {
rclasses = getClassesSlice(classStr)
}
for _, n := range s.Nodes {
if remove {
removeAttr(n, "class")
} else {
classes, attr := getClassesAndAttr(n, true)
for _, rcl := range rclasses {
classes = strings.Replace(classes, " "+rcl+" ", " ", -1)
}
setClasses(n, attr, classes)
}
}
return s
}
// ToggleClass adds or removes the given class(es) for each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
func (s *Selection) ToggleClass(class ...string) *Selection {
classStr := strings.TrimSpace(strings.Join(class, " "))
if classStr == "" {
return s
}
tcls := getClassesSlice(classStr)
for _, n := range s.Nodes {
classes, attr := getClassesAndAttr(n, true)
for _, tcl := range tcls {
if strings.Contains(classes, " "+tcl+" ") {
classes = strings.Replace(classes, " "+tcl+" ", " ", -1)
} else {
classes += tcl + " "
}
}
setClasses(n, attr, classes)
}
return s
}
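// exampleClasses is a hypothetical usage sketch of the class helpers,
// assuming doc was parsed elsewhere in the program.
func exampleClasses(doc *Document) bool {
	links := doc.Find("a")
	links.AddClass("external", "tracked") // add two classes
	links.RemoveClass("tracked")          // drop one of them
	links.ToggleClass("external")         // removes "external" since it is present
	return links.HasClass("external")     // false after the toggle
}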
func getAttributePtr(attrName string, n *html.Node) *html.Attribute {
if n == nil {
return nil
}
for i, a := range n.Attr {
if a.Key == attrName {
return &n.Attr[i]
}
}
return nil
}
// Private function to get the specified attribute's value from a node.
func getAttributeValue(attrName string, n *html.Node) (val string, exists bool) {
if a := getAttributePtr(attrName, n); a != nil {
val = a.Val
exists = true
}
return
}
// Get and normalize the "class" attribute from the node.
func getClassesAndAttr(n *html.Node, create bool) (classes string, attr *html.Attribute) {
// Applies only to element nodes
if n.Type == html.ElementNode {
attr = getAttributePtr("class", n)
if attr == nil && create {
n.Attr = append(n.Attr, html.Attribute{
Key: "class",
Val: "",
})
attr = &n.Attr[len(n.Attr)-1]
}
}
if attr == nil {
classes = " "
} else {
classes = rxClassTrim.ReplaceAllString(" "+attr.Val+" ", " ")
}
return
}
func getClassesSlice(classes string) []string {
return strings.Split(rxClassTrim.ReplaceAllString(" "+classes+" ", " "), " ")
}
func removeAttr(n *html.Node, attrName string) {
for i, a := range n.Attr {
if a.Key == attrName {
n.Attr[i], n.Attr[len(n.Attr)-1], n.Attr =
n.Attr[len(n.Attr)-1], html.Attribute{}, n.Attr[:len(n.Attr)-1]
return
}
}
}
func setClasses(n *html.Node, attr *html.Attribute, classes string) {
classes = strings.TrimSpace(classes)
if classes == "" {
removeAttr(n, "class")
return
}
attr.Val = classes
}

View File

@ -1,49 +0,0 @@
package goquery
import "golang.org/x/net/html"
// Is checks the current matched set of elements against a selector and
// returns true if at least one of these elements matches.
func (s *Selection) Is(selector string) bool {
return s.IsMatcher(compileMatcher(selector))
}
// IsMatcher checks the current matched set of elements against a matcher and
// returns true if at least one of these elements matches.
func (s *Selection) IsMatcher(m Matcher) bool {
if len(s.Nodes) > 0 {
if len(s.Nodes) == 1 {
return m.Match(s.Nodes[0])
}
return len(m.Filter(s.Nodes)) > 0
}
return false
}
// IsFunction checks the current matched set of elements against a predicate and
// returns true if at least one of these elements matches.
func (s *Selection) IsFunction(f func(int, *Selection) bool) bool {
return s.FilterFunction(f).Length() > 0
}
// IsSelection checks the current matched set of elements against a Selection object
// and returns true if at least one of these elements matches.
func (s *Selection) IsSelection(sel *Selection) bool {
return s.FilterSelection(sel).Length() > 0
}
// IsNodes checks the current matched set of elements against the specified nodes
// and returns true if at least one of these elements matches.
func (s *Selection) IsNodes(nodes ...*html.Node) bool {
return s.FilterNodes(nodes...).Length() > 0
}
// Contains returns true if the specified Node is within,
// at any depth, one of the nodes in the Selection object.
// It is NOT inclusive, to behave like jQuery's implementation and
// unlike JavaScript's .contains: if the contained
// node is itself in the selection, it returns false.
func (s *Selection) Contains(n *html.Node) bool {
return sliceContains(s.Nodes, n)
}
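// exampleIsContains is a hypothetical usage sketch, assuming doc was parsed
// elsewhere in the program.
func exampleIsContains(doc *Document) (bool, bool) {
	list := doc.Find("ul")
	isNav := list.Is(".nav") // true if at least one matched <ul> has the class
	items := doc.Find("ul li").Nodes
	if len(items) == 0 {
		return isNav, false
	}
	// true: the <li> is a descendant of a matched <ul>, not the <ul> itself.
	return isNav, list.Contains(items[0])
}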

View File

@ -1,698 +0,0 @@
package goquery
import "golang.org/x/net/html"
type siblingType int
// Sibling type, used internally when iterating over children at the same
// level (siblings) to specify which nodes are requested.
const (
siblingPrevUntil siblingType = iota - 3
siblingPrevAll
siblingPrev
siblingAll
siblingNext
siblingNextAll
siblingNextUntil
siblingAllIncludingNonElements
)
// Find gets the descendants of each element in the current set of matched
// elements, filtered by a selector. It returns a new Selection object
// containing these matched elements.
func (s *Selection) Find(selector string) *Selection {
return pushStack(s, findWithMatcher(s.Nodes, compileMatcher(selector)))
}
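// exampleFind is a hypothetical usage sketch: Find only searches descendants
// of the current matches, so chaining it progressively narrows the selection.
// doc is assumed to have been parsed elsewhere.
func exampleFind(doc *Document) *Selection {
	return doc.Find("table").Find("td.price")
}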
// FindMatcher gets the descendants of each element in the current set of matched
// elements, filtered by the matcher. It returns a new Selection object
// containing these matched elements.
func (s *Selection) FindMatcher(m Matcher) *Selection {
return pushStack(s, findWithMatcher(s.Nodes, m))
}
// FindSelection gets the descendants of each element in the current
// Selection, filtered by a Selection. It returns a new Selection object
// containing these matched elements.
func (s *Selection) FindSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, nil)
}
return s.FindNodes(sel.Nodes...)
}
// FindNodes gets the descendants of each element in the current
// Selection, filtered by some nodes. It returns a new Selection object
// containing these matched elements.
func (s *Selection) FindNodes(nodes ...*html.Node) *Selection {
return pushStack(s, mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
if sliceContains(s.Nodes, n) {
return []*html.Node{n}
}
return nil
}))
}
// Contents gets the children of each element in the Selection,
// including text and comment nodes. It returns a new Selection object
// containing these elements.
func (s *Selection) Contents() *Selection {
return pushStack(s, getChildrenNodes(s.Nodes, siblingAllIncludingNonElements))
}
// ContentsFiltered gets the children of each element in the Selection,
// filtered by the specified selector. It returns a new Selection
// object containing these elements. Since selectors only act on Element nodes,
// this function is an alias to ChildrenFiltered unless the selector is empty,
// in which case it is an alias to Contents.
func (s *Selection) ContentsFiltered(selector string) *Selection {
if selector != "" {
return s.ChildrenFiltered(selector)
}
return s.Contents()
}
// ContentsMatcher gets the children of each element in the Selection,
// filtered by the specified matcher. It returns a new Selection
// object containing these elements. Since matchers only act on Element nodes,
// this function is an alias to ChildrenMatcher.
func (s *Selection) ContentsMatcher(m Matcher) *Selection {
return s.ChildrenMatcher(m)
}
// Children gets the child elements of each element in the Selection.
// It returns a new Selection object containing these elements.
func (s *Selection) Children() *Selection {
return pushStack(s, getChildrenNodes(s.Nodes, siblingAll))
}
// ChildrenFiltered gets the child elements of each element in the Selection,
// filtered by the specified selector. It returns a new
// Selection object containing these elements.
func (s *Selection) ChildrenFiltered(selector string) *Selection {
return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), compileMatcher(selector))
}
// ChildrenMatcher gets the child elements of each element in the Selection,
// filtered by the specified matcher. It returns a new
// Selection object containing these elements.
func (s *Selection) ChildrenMatcher(m Matcher) *Selection {
return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), m)
}
// Parent gets the parent of each element in the Selection. It returns a
// new Selection object containing the matched elements.
func (s *Selection) Parent() *Selection {
return pushStack(s, getParentNodes(s.Nodes))
}
// ParentFiltered gets the parent of each element in the Selection filtered by a
// selector. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentFiltered(selector string) *Selection {
return filterAndPush(s, getParentNodes(s.Nodes), compileMatcher(selector))
}
// ParentMatcher gets the parent of each element in the Selection filtered by a
// matcher. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentMatcher(m Matcher) *Selection {
return filterAndPush(s, getParentNodes(s.Nodes), m)
}
// Closest gets the first element that matches the selector by testing the
// element itself and traversing up through its ancestors in the DOM tree.
func (s *Selection) Closest(selector string) *Selection {
cs := compileMatcher(selector)
return s.ClosestMatcher(cs)
}
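// exampleClosest is a hypothetical usage sketch: Closest tests each matched
// element itself first, then walks up its ancestors until the selector
// matches. doc is assumed to have been parsed elsewhere.
func exampleClosest(doc *Document) *Selection {
	return doc.Find("span.error").Closest("form")
}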
// ClosestMatcher gets the first element that matches the matcher by testing the
// element itself and traversing up through its ancestors in the DOM tree.
func (s *Selection) ClosestMatcher(m Matcher) *Selection {
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
// For each node in the selection, test the node itself, then each parent
// until a match is found.
for ; n != nil; n = n.Parent {
if m.Match(n) {
return []*html.Node{n}
}
}
return nil
}))
}
// ClosestNodes gets the first element that matches one of the nodes by testing the
// element itself and traversing up through its ancestors in the DOM tree.
func (s *Selection) ClosestNodes(nodes ...*html.Node) *Selection {
set := make(map[*html.Node]bool)
for _, n := range nodes {
set[n] = true
}
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
// For each node in the selection, test the node itself, then each parent
// until a match is found.
for ; n != nil; n = n.Parent {
if set[n] {
return []*html.Node{n}
}
}
return nil
}))
}
// ClosestSelection gets the first element that matches one of the nodes in the
// Selection by testing the element itself and traversing up through its ancestors
// in the DOM tree.
func (s *Selection) ClosestSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, nil)
}
return s.ClosestNodes(sel.Nodes...)
}
// Parents gets the ancestors of each element in the current Selection. It
// returns a new Selection object with the matched elements.
func (s *Selection) Parents() *Selection {
return pushStack(s, getParentsNodes(s.Nodes, nil, nil))
}
// ParentsFiltered gets the ancestors of each element in the current
// Selection. It returns a new Selection object with the matched elements.
func (s *Selection) ParentsFiltered(selector string) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), compileMatcher(selector))
}
// ParentsMatcher gets the ancestors of each element in the current
// Selection. It returns a new Selection object with the matched elements.
func (s *Selection) ParentsMatcher(m Matcher) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), m)
}
// ParentsUntil gets the ancestors of each element in the Selection, up to but
// not including the element matched by the selector. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsUntil(selector string) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, compileMatcher(selector), nil))
}
// ParentsUntilMatcher gets the ancestors of each element in the Selection, up to but
// not including the element matched by the matcher. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsUntilMatcher(m Matcher) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, m, nil))
}
// ParentsUntilSelection gets the ancestors of each element in the Selection,
// up to but not including the elements in the specified Selection. It returns a
// new Selection object containing the matched elements.
func (s *Selection) ParentsUntilSelection(sel *Selection) *Selection {
if sel == nil {
return s.Parents()
}
return s.ParentsUntilNodes(sel.Nodes...)
}
// ParentsUntilNodes gets the ancestors of each element in the Selection,
// up to but not including the specified nodes. It returns a
// new Selection object containing the matched elements.
func (s *Selection) ParentsUntilNodes(nodes ...*html.Node) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, nil, nodes))
}
// ParentsFilteredUntil is like ParentsUntil, with the option to filter the
// results based on a selector string. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsFilteredUntil(filterSelector, untilSelector string) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
}
// ParentsFilteredUntilMatcher is like ParentsUntilMatcher, with the option to filter the
// results based on a matcher. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilMatcher(filter, until Matcher) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, until, nil), filter)
}
// ParentsFilteredUntilSelection is like ParentsUntilSelection, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
return s.ParentsMatcherUntilSelection(compileMatcher(filterSelector), sel)
}
// ParentsMatcherUntilSelection is like ParentsUntilSelection, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
if sel == nil {
return s.ParentsMatcher(filter)
}
return s.ParentsMatcherUntilNodes(filter, sel.Nodes...)
}
// ParentsFilteredUntilNodes is like ParentsUntilNodes, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), compileMatcher(filterSelector))
}
// ParentsMatcherUntilNodes is like ParentsUntilNodes, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), filter)
}
// Siblings gets the siblings of each element in the Selection. It returns
// a new Selection object containing the matched elements.
func (s *Selection) Siblings() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil))
}
// SiblingsFiltered gets the siblings of each element in the Selection
// filtered by a selector. It returns a new Selection object containing the
// matched elements.
func (s *Selection) SiblingsFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), compileMatcher(selector))
}
// SiblingsMatcher gets the siblings of each element in the Selection
// filtered by a matcher. It returns a new Selection object containing the
// matched elements.
func (s *Selection) SiblingsMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), m)
}
// Next gets the immediately following sibling of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) Next() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil))
}
// NextFiltered gets the immediately following sibling of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), compileMatcher(selector))
}
// NextMatcher gets the immediately following sibling of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), m)
}
// NextAll gets all the following siblings of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) NextAll() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil))
}
// NextAllFiltered gets all the following siblings of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextAllFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), compileMatcher(selector))
}
// NextAllMatcher gets all the following siblings of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextAllMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), m)
}
// Prev gets the immediately preceding sibling of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) Prev() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil))
}
// PrevFiltered gets the immediately preceding sibling of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), compileMatcher(selector))
}
// PrevMatcher gets the immediately preceding sibling of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), m)
}
// PrevAll gets all the preceding siblings of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) PrevAll() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil))
}
// PrevAllFiltered gets all the preceding siblings of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevAllFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), compileMatcher(selector))
}
// PrevAllMatcher gets all the preceding siblings of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevAllMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), m)
}
// NextUntil gets all following siblings of each element up to but not
// including the element matched by the selector. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntil(selector string) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
compileMatcher(selector), nil))
}
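// exampleNextUntil is a hypothetical usage sketch: it collects the siblings
// between the first <h2> and the next <h2>, excluding both boundaries (the
// "until" element is never included). doc is assumed to have been parsed
// elsewhere.
func exampleNextUntil(doc *Document) *Selection {
	return doc.Find("h2").First().NextUntil("h2")
}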
// NextUntilMatcher gets all following siblings of each element up to but not
// including the element matched by the matcher. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntilMatcher(m Matcher) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
m, nil))
}
// NextUntilSelection gets all following siblings of each element up to but not
// including the element matched by the Selection. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntilSelection(sel *Selection) *Selection {
if sel == nil {
return s.NextAll()
}
return s.NextUntilNodes(sel.Nodes...)
}
// NextUntilNodes gets all following siblings of each element up to but not
// including the element matched by the nodes. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntilNodes(nodes ...*html.Node) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
nil, nodes))
}
// PrevUntil gets all preceding siblings of each element up to but not
// including the element matched by the selector. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntil(selector string) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
compileMatcher(selector), nil))
}
// PrevUntilMatcher gets all preceding siblings of each element up to but not
// including the element matched by the matcher. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntilMatcher(m Matcher) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
m, nil))
}
// PrevUntilSelection gets all preceding siblings of each element up to but not
// including the element matched by the Selection. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntilSelection(sel *Selection) *Selection {
if sel == nil {
return s.PrevAll()
}
return s.PrevUntilNodes(sel.Nodes...)
}
// PrevUntilNodes gets all preceding siblings of each element up to but not
// including the element matched by the nodes. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntilNodes(nodes ...*html.Node) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
nil, nodes))
}
// NextFilteredUntil is like NextUntil, with the option to filter
// the results based on a selector string.
// It returns a new Selection object containing the matched elements.
func (s *Selection) NextFilteredUntil(filterSelector, untilSelector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
}
// NextFilteredUntilMatcher is like NextUntilMatcher, with the option to filter
// the results based on a matcher.
// It returns a new Selection object containing the matched elements.
func (s *Selection) NextFilteredUntilMatcher(filter, until Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
until, nil), filter)
}
// NextFilteredUntilSelection is like NextUntilSelection, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
return s.NextMatcherUntilSelection(compileMatcher(filterSelector), sel)
}
// NextMatcherUntilSelection is like NextUntilSelection, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
if sel == nil {
return s.NextMatcher(filter)
}
return s.NextMatcherUntilNodes(filter, sel.Nodes...)
}
// NextFilteredUntilNodes is like NextUntilNodes, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
nil, nodes), compileMatcher(filterSelector))
}
// NextMatcherUntilNodes is like NextUntilNodes, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
nil, nodes), filter)
}
// PrevFilteredUntil is like PrevUntil, with the option to filter
// the results based on a selector string.
// It returns a new Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntil(filterSelector, untilSelector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
}
// PrevFilteredUntilMatcher is like PrevUntilMatcher, with the option to filter
// the results based on a matcher.
// It returns a new Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntilMatcher(filter, until Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
until, nil), filter)
}
// PrevFilteredUntilSelection is like PrevUntilSelection, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
return s.PrevMatcherUntilSelection(compileMatcher(filterSelector), sel)
}
// PrevMatcherUntilSelection is like PrevUntilSelection, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
if sel == nil {
return s.PrevMatcher(filter)
}
return s.PrevMatcherUntilNodes(filter, sel.Nodes...)
}
// PrevFilteredUntilNodes is like PrevUntilNodes, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
nil, nodes), compileMatcher(filterSelector))
}
// PrevMatcherUntilNodes is like PrevUntilNodes, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
nil, nodes), filter)
}
// filterAndPush filters the nodes based on a matcher and pushes the results
// onto the stack, with srcSel as the previous selection.
func filterAndPush(srcSel *Selection, nodes []*html.Node, m Matcher) *Selection {
// Create a temporary Selection with the specified nodes to filter using winnow
sel := &Selection{nodes, srcSel.document, nil}
// Filter based on matcher and push on stack
return pushStack(srcSel, winnow(sel, m, true))
}
// Internal implementation of Find that returns raw nodes.
func findWithMatcher(nodes []*html.Node, m Matcher) []*html.Node {
// Map nodes to find the matches within the children of each node
return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
// Go down one level, because jQuery's Find selects only within descendants
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == html.ElementNode {
result = append(result, m.MatchAll(c)...)
}
}
return
})
}
// Internal implementation to get all parent nodes, stopping at the specified
// node (or nil if no stop).
func getParentsNodes(nodes []*html.Node, stopm Matcher, stopNodes []*html.Node) []*html.Node {
return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
for p := n.Parent; p != nil; p = p.Parent {
sel := newSingleSelection(p, nil)
if stopm != nil {
if sel.IsMatcher(stopm) {
break
}
} else if len(stopNodes) > 0 {
if sel.IsNodes(stopNodes...) {
break
}
}
if p.Type == html.ElementNode {
result = append(result, p)
}
}
return
})
}
// Internal implementation for sibling nodes that returns a raw slice of matches.
func getSiblingNodes(nodes []*html.Node, st siblingType, untilm Matcher, untilNodes []*html.Node) []*html.Node {
var f func(*html.Node) bool
// If the requested siblings are ...Until, create the test function to
// determine if the until condition is reached (returns true if it is)
if st == siblingNextUntil || st == siblingPrevUntil {
f = func(n *html.Node) bool {
if untilm != nil {
// Matcher-based condition
sel := newSingleSelection(n, nil)
return sel.IsMatcher(untilm)
} else if len(untilNodes) > 0 {
// Nodes-based condition
sel := newSingleSelection(n, nil)
return sel.IsNodes(untilNodes...)
}
return false
}
}
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
return getChildrenWithSiblingType(n.Parent, st, n, f)
})
}
// Gets the children nodes of each node in the specified slice of nodes,
// based on the sibling type request.
func getChildrenNodes(nodes []*html.Node, st siblingType) []*html.Node {
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
return getChildrenWithSiblingType(n, st, nil, nil)
})
}
// Gets the children of the specified parent, based on the requested sibling
// type, skipping a specified node if required.
func getChildrenWithSiblingType(parent *html.Node, st siblingType, skipNode *html.Node,
untilFunc func(*html.Node) bool) (result []*html.Node) {
// Create the iterator function
var iter = func(cur *html.Node) (ret *html.Node) {
// Based on the sibling type requested, iterate the right way
for {
switch st {
case siblingAll, siblingAllIncludingNonElements:
if cur == nil {
// First iteration, start with first child of parent
// Skip node if required
if ret = parent.FirstChild; ret == skipNode && skipNode != nil {
ret = skipNode.NextSibling
}
} else {
// Skip node if required
if ret = cur.NextSibling; ret == skipNode && skipNode != nil {
ret = skipNode.NextSibling
}
}
case siblingPrev, siblingPrevAll, siblingPrevUntil:
if cur == nil {
// Start with previous sibling of the skip node
ret = skipNode.PrevSibling
} else {
ret = cur.PrevSibling
}
case siblingNext, siblingNextAll, siblingNextUntil:
if cur == nil {
// Start with next sibling of the skip node
ret = skipNode.NextSibling
} else {
ret = cur.NextSibling
}
default:
panic("Invalid sibling type.")
}
if ret == nil || ret.Type == html.ElementNode || st == siblingAllIncludingNonElements {
return
}
// Not a valid node, try again from this one
cur = ret
}
}
for c := iter(nil); c != nil; c = iter(c) {
// If this is an ...Until case, test before append (returns true
// if the until condition is reached)
if st == siblingNextUntil || st == siblingPrevUntil {
if untilFunc(c) {
return
}
}
result = append(result, c)
if st == siblingNext || st == siblingPrev {
// Only one node was requested (immediate next or previous), so exit
return
}
}
return
}
// Internal implementation for parent nodes that returns a raw slice of Nodes.
func getParentNodes(nodes []*html.Node) []*html.Node {
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
if n.Parent != nil && n.Parent.Type == html.ElementNode {
return []*html.Node{n.Parent}
}
return nil
})
}
// Internal map function used by many traversing methods. Takes the source nodes
// to iterate on and the mapping function that returns an array of nodes.
// Returns an array of nodes mapped by calling the callback function once for
// each node in the source nodes.
func mapNodes(nodes []*html.Node, f func(int, *html.Node) []*html.Node) (result []*html.Node) {
set := make(map[*html.Node]bool)
for i, n := range nodes {
if vals := f(i, n); len(vals) > 0 {
result = appendWithoutDuplicates(result, vals, set)
}
}
return result
}

View File

@ -1,203 +0,0 @@
package goquery
import (
"errors"
"io"
"net/http"
"net/url"
"github.com/andybalholm/cascadia"
"golang.org/x/net/html"
)
// Document represents an HTML document to be manipulated. Unlike jQuery, which
// is loaded as part of a DOM document, and thus acts upon its containing
// document, GoQuery doesn't know which HTML document to act upon. So it needs
// to be told, and that's what the Document type is for. It holds the root
// document node to manipulate, and can make selections on this document.
type Document struct {
*Selection
Url *url.URL
rootNode *html.Node
}
// NewDocumentFromNode is a Document constructor that takes a root html Node
// as argument.
func NewDocumentFromNode(root *html.Node) *Document {
return newDocument(root, nil)
}
// NewDocument is a Document constructor that takes a string URL as argument.
// It loads the specified document, parses it, and stores the root Document
// node, ready to be manipulated.
//
// Deprecated: Use the net/http standard library package to make the request
// and validate the response before calling goquery.NewDocumentFromReader
// with the response's body.
func NewDocument(url string) (*Document, error) {
// Load the URL
res, e := http.Get(url)
if e != nil {
return nil, e
}
return NewDocumentFromResponse(res)
}
// NewDocumentFromReader returns a Document from an io.Reader.
// It returns an error as second value if the reader's data cannot be parsed
// as html. It does not check if the reader is also an io.Closer, the
// provided reader is never closed by this call. It is the responsibility
// of the caller to close it if required.
func NewDocumentFromReader(r io.Reader) (*Document, error) {
root, e := html.Parse(r)
if e != nil {
return nil, e
}
return newDocument(root, nil), nil
}
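// exampleNewDocument is a hypothetical usage sketch: the caller owns the
// reader (for example a strings.Reader or an HTTP response body) and remains
// responsible for closing it when applicable.
func exampleNewDocument(r io.Reader) (string, error) {
	doc, err := NewDocumentFromReader(r)
	if err != nil {
		return "", err
	}
	return doc.Find("title").Text(), nil
}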
// NewDocumentFromResponse is another Document constructor that takes an http response as argument.
// It loads the specified response's document, parses it, and stores the root Document
// node, ready to be manipulated. The response's body is closed on return.
//
// Deprecated: Use goquery.NewDocumentFromReader with the response's body.
func NewDocumentFromResponse(res *http.Response) (*Document, error) {
if res == nil {
return nil, errors.New("Response is nil")
}
defer res.Body.Close()
if res.Request == nil {
return nil, errors.New("Response.Request is nil")
}
// Parse the HTML into nodes
root, e := html.Parse(res.Body)
if e != nil {
return nil, e
}
// Create and fill the document
return newDocument(root, res.Request.URL), nil
}
// CloneDocument creates a deep-clone of a document.
func CloneDocument(doc *Document) *Document {
return newDocument(cloneNode(doc.rootNode), doc.Url)
}
// Private constructor, make sure all fields are correctly filled.
func newDocument(root *html.Node, url *url.URL) *Document {
// Create and fill the document
d := &Document{nil, url, root}
d.Selection = newSingleSelection(root, d)
return d
}
// Selection represents a collection of nodes matching some criteria. The
// initial Selection can be created by using Document.Find, and then
// manipulated using the jQuery-like chainable syntax and methods.
type Selection struct {
Nodes []*html.Node
document *Document
prevSel *Selection
}
// Helper constructor to create an empty selection
func newEmptySelection(doc *Document) *Selection {
return &Selection{nil, doc, nil}
}
// Helper constructor to create a selection of only one node
func newSingleSelection(node *html.Node, doc *Document) *Selection {
return &Selection{[]*html.Node{node}, doc, nil}
}
// Matcher is an interface that defines the methods to match
// HTML nodes against a compiled selector string. Cascadia's
// Selector implements this interface.
type Matcher interface {
Match(*html.Node) bool
MatchAll(*html.Node) []*html.Node
Filter([]*html.Node) []*html.Node
}
// Single compiles a selector string to a Matcher that stops after the first
// match is found.
//
// By default, Selection.Find and other functions that accept a selector string
// to select nodes will use all matches corresponding to that selector. By
// using the Matcher returned by Single, at most the first match will be
// selected.
//
// For example, those two statements are semantically equivalent:
//
// sel1 := doc.Find("a").First()
// sel2 := doc.FindMatcher(goquery.Single("a"))
//
// The one using Single is optimized to be potentially much faster on large
// documents.
//
// Only the behaviour of the MatchAll method of the Matcher interface is
// altered compared to standard Matchers. This means that the single-selection
// property of the Matcher only applies for Selection methods where the Matcher
// is used to select nodes, not to filter or check if a node matches the
// Matcher - in those cases, the behaviour of the Matcher is unchanged (e.g.
// FilterMatcher(Single("div")) will still result in a Selection with multiple
// "div"s if there were many "div"s in the Selection to begin with).
func Single(selector string) Matcher {
return singleMatcher{compileMatcher(selector)}
}
// SingleMatcher returns a Matcher that matches the same nodes as m, but stops
// after the first match is found.
//
// See the documentation of function Single for more details.
func SingleMatcher(m Matcher) Matcher {
if _, ok := m.(singleMatcher); ok {
// m is already a singleMatcher
return m
}
return singleMatcher{m}
}
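// exampleSingle is a hypothetical usage sketch: with Single, FindMatcher
// stops at the first match, which behaves like Find("a[href]").First() but
// can be much faster on large documents. doc is assumed to have been parsed
// elsewhere.
func exampleSingle(doc *Document) *Selection {
	return doc.FindMatcher(Single("a[href]"))
}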
// compileMatcher compiles the selector string s and returns
// the corresponding Matcher. If s is an invalid selector string,
// it returns a Matcher that fails all matches.
func compileMatcher(s string) Matcher {
cs, err := cascadia.Compile(s)
if err != nil {
return invalidMatcher{}
}
return cs
}
type singleMatcher struct {
Matcher
}
func (m singleMatcher) MatchAll(n *html.Node) []*html.Node {
// Optimized version - stops finding at the first match (cascadia-compiled
// matchers all use this code path).
if mm, ok := m.Matcher.(interface{ MatchFirst(*html.Node) *html.Node }); ok {
node := mm.MatchFirst(n)
if node == nil {
return nil
}
return []*html.Node{node}
}
// Fallback version, for e.g. test mocks that don't provide the MatchFirst
// method.
nodes := m.Matcher.MatchAll(n)
if len(nodes) > 0 {
return nodes[:1:1]
}
return nil
}
// invalidMatcher is a Matcher that always fails to match.
type invalidMatcher struct{}
func (invalidMatcher) Match(n *html.Node) bool { return false }
func (invalidMatcher) MatchAll(n *html.Node) []*html.Node { return nil }
func (invalidMatcher) Filter(ns []*html.Node) []*html.Node { return nil }

View File

@ -1,171 +0,0 @@
package goquery
import (
"bytes"
"golang.org/x/net/html"
)
// used to determine if a set (map[*html.Node]bool) should be used
// instead of iterating over a slice. The set uses more memory and
// is slower than slice iteration for small N.
const minNodesForSet = 1000
var nodeNames = []string{
html.ErrorNode: "#error",
html.TextNode: "#text",
html.DocumentNode: "#document",
html.CommentNode: "#comment",
}
// NodeName returns the node name of the first element in the selection.
// It tries to behave in a similar way as the DOM's nodeName property
// (https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeName).
//
// Go's net/html package defines the following node types, listed with
// the corresponding returned value from this function:
//
// ErrorNode : #error
// TextNode : #text
// DocumentNode : #document
// ElementNode : the element's tag name
// CommentNode : #comment
// DoctypeNode : the name of the document type
//
func NodeName(s *Selection) string {
if s.Length() == 0 {
return ""
}
return nodeName(s.Get(0))
}
// nodeName returns the node name of the given html node.
// See NodeName for additional details on behaviour.
func nodeName(node *html.Node) string {
if node == nil {
return ""
}
switch node.Type {
case html.ElementNode, html.DoctypeNode:
return node.Data
default:
if node.Type >= 0 && int(node.Type) < len(nodeNames) {
return nodeNames[node.Type]
}
return ""
}
}
// OuterHtml returns the outer HTML rendering of the first item in
// the selection - that is, the HTML including the first element's
// tag and attributes.
//
// Unlike InnerHtml, this is a function and not a method on the Selection,
// because this is not a jQuery method (in JavaScript-land, this is
// a property provided by the DOM).
func OuterHtml(s *Selection) (string, error) {
var buf bytes.Buffer
if s.Length() == 0 {
return "", nil
}
n := s.Get(0)
if err := html.Render(&buf, n); err != nil {
return "", err
}
return buf.String(), nil
}
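// exampleOuterHtml is a hypothetical usage sketch contrasting OuterHtml
// (which includes the element's own tag) with the Selection.Html method
// (inner content only). doc is assumed to have been parsed elsewhere.
func exampleOuterHtml(doc *Document) (outer, inner string, err error) {
	sel := doc.Find("div").First()
	if outer, err = OuterHtml(sel); err != nil {
		return "", "", err
	}
	inner, err = sel.Html()
	return outer, inner, err
}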
// Loop through all container nodes to search for the target node.
func sliceContains(container []*html.Node, contained *html.Node) bool {
for _, n := range container {
if nodeContains(n, contained) {
return true
}
}
return false
}
// Checks if the contained node is within the container node.
func nodeContains(container *html.Node, contained *html.Node) bool {
// Check if the parent of the contained node is the container node, traversing
// upward until the top is reached, or the container is found.
for contained = contained.Parent; contained != nil; contained = contained.Parent {
if container == contained {
return true
}
}
return false
}
// Checks if the target node is in the slice of nodes.
func isInSlice(slice []*html.Node, node *html.Node) bool {
return indexInSlice(slice, node) > -1
}
// Returns the index of the target node in the slice, or -1.
func indexInSlice(slice []*html.Node, node *html.Node) int {
if node != nil {
for i, n := range slice {
if n == node {
return i
}
}
}
return -1
}
// Appends the new nodes to the target slice, making sure no duplicate is added.
// There is no check to the original state of the target slice, so it may still
// contain duplicates. The target slice is returned because append() may create
// a new underlying array. If targetSet is nil, a local set is created with the
// target if len(target) + len(nodes) is greater than minNodesForSet.
func appendWithoutDuplicates(target []*html.Node, nodes []*html.Node, targetSet map[*html.Node]bool) []*html.Node {
// If there are not that many nodes, don't use the map: nested loops are faster
// (unless a non-nil targetSet is passed, in which case the caller knows better).
if targetSet == nil && len(target)+len(nodes) < minNodesForSet {
for _, n := range nodes {
if !isInSlice(target, n) {
target = append(target, n)
}
}
return target
}
// if a targetSet is passed, then assume it is reliable, otherwise create one
// and initialize it with the current target contents.
if targetSet == nil {
targetSet = make(map[*html.Node]bool, len(target))
for _, n := range target {
targetSet[n] = true
}
}
for _, n := range nodes {
if !targetSet[n] {
target = append(target, n)
targetSet[n] = true
}
}
return target
}
// Loop through a selection, returning only those nodes that pass the predicate
// function.
func grep(sel *Selection, predicate func(i int, s *Selection) bool) (result []*html.Node) {
for i, n := range sel.Nodes {
if predicate(i, newSingleSelection(n, sel.document)) {
result = append(result, n)
}
}
return result
}
// Creates a new Selection object based on the specified nodes, and keeps the
// source Selection object on the stack (linked list).
func pushStack(fromSel *Selection, nodes []*html.Node) *Selection {
result := &Selection{nodes, fromSel.document, fromSel}
return result
}

View File

@ -1,14 +0,0 @@
language: go
go:
- 1.3
- 1.4
install:
- go get github.com/andybalholm/cascadia
script:
- go test -v
notifications:
email: false

View File

@ -1,15 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "cascadia",
srcs = [
"parser.go",
"selector.go",
"serialize.go",
"specificity.go",
],
importmap = "peridot.resf.org/vendor/github.com/andybalholm/cascadia",
importpath = "github.com/andybalholm/cascadia",
visibility = ["//visibility:public"],
deps = ["@org_golang_x_net//html"],
)

View File

@ -1,24 +0,0 @@
Copyright (c) 2011 Andy Balholm. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,9 +0,0 @@
# cascadia
[![](https://travis-ci.org/andybalholm/cascadia.svg)](https://travis-ci.org/andybalholm/cascadia)
The Cascadia package implements CSS selectors for use with the parse trees produced by the html package.
To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia) the command line tool, a thin wrapper around this package.
[Refer to godoc here](https://godoc.org/github.com/andybalholm/cascadia).

View File

@ -1,5 +0,0 @@
module github.com/andybalholm/cascadia
require golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01
go 1.13

View File

@ -1,838 +0,0 @@
// Package cascadia is an implementation of CSS selectors.
package cascadia
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
// a parser for CSS selectors
type parser struct {
s string // the source text
i int // the current position
// if `false`, parsing a pseudo-element
// returns an error.
acceptPseudoElements bool
}
// parseEscape parses a backslash escape.
func (p *parser) parseEscape() (result string, err error) {
if len(p.s) < p.i+2 || p.s[p.i] != '\\' {
return "", errors.New("invalid escape sequence")
}
start := p.i + 1
c := p.s[start]
switch {
case c == '\r' || c == '\n' || c == '\f':
return "", errors.New("escaped line ending outside string")
case hexDigit(c):
// unicode escape (hex)
var i int
for i = start; i < start+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
// empty
}
v, _ := strconv.ParseUint(p.s[start:i], 16, 21)
if len(p.s) > i {
switch p.s[i] {
case '\r':
i++
if len(p.s) > i && p.s[i] == '\n' {
i++
}
case ' ', '\t', '\n', '\f':
i++
}
}
p.i = i
return string(rune(v)), nil
}
// Return the literal character after the backslash.
result = p.s[start : start+1]
p.i += 2
return result, nil
}
// toLowerASCII returns s with all ASCII capital letters lowercased.
func toLowerASCII(s string) string {
var b []byte
for i := 0; i < len(s); i++ {
if c := s[i]; 'A' <= c && c <= 'Z' {
if b == nil {
b = make([]byte, len(s))
copy(b, s)
}
b[i] = s[i] + ('a' - 'A')
}
}
if b == nil {
return s
}
return string(b)
}
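// exampleLowerASCII is a hypothetical sketch of the helper above: only ASCII
// capital letters are lowercased, multi-byte UTF-8 sequences pass through
// untouched.
func exampleLowerASCII() (string, string) {
	return toLowerASCII("DIV"), toLowerASCII("Éclair") // "div", "Éclair"
}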
func hexDigit(c byte) bool {
return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
}
// nameStart returns whether c can be the first character of an identifier
// (not counting an initial hyphen, or an escape sequence).
func nameStart(c byte) bool {
return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127
}
// nameChar returns whether c can be a character within an identifier
// (not counting an escape sequence).
func nameChar(c byte) bool {
return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127 ||
c == '-' || '0' <= c && c <= '9'
}
// parseIdentifier parses an identifier.
func (p *parser) parseIdentifier() (result string, err error) {
startingDash := false
if len(p.s) > p.i && p.s[p.i] == '-' {
startingDash = true
p.i++
}
if len(p.s) <= p.i {
return "", errors.New("expected identifier, found EOF instead")
}
if c := p.s[p.i]; !(nameStart(c) || c == '\\') {
return "", fmt.Errorf("expected identifier, found %c instead", c)
}
result, err = p.parseName()
if startingDash && err == nil {
result = "-" + result
}
return
}
// parseName parses a name (which is like an identifier, but doesn't have
// extra restrictions on the first character).
func (p *parser) parseName() (result string, err error) {
i := p.i
loop:
for i < len(p.s) {
c := p.s[i]
switch {
case nameChar(c):
start := i
for i < len(p.s) && nameChar(p.s[i]) {
i++
}
result += p.s[start:i]
case c == '\\':
p.i = i
val, err := p.parseEscape()
if err != nil {
return "", err
}
i = p.i
result += val
default:
break loop
}
}
if result == "" {
return "", errors.New("expected name, found EOF instead")
}
p.i = i
return result, nil
}
// parseString parses a single- or double-quoted string.
func (p *parser) parseString() (result string, err error) {
i := p.i
if len(p.s) < i+2 {
return "", errors.New("expected string, found EOF instead")
}
quote := p.s[i]
i++
loop:
for i < len(p.s) {
switch p.s[i] {
case '\\':
if len(p.s) > i+1 {
switch c := p.s[i+1]; c {
case '\r':
if len(p.s) > i+2 && p.s[i+2] == '\n' {
i += 3
continue loop
}
fallthrough
case '\n', '\f':
i += 2
continue loop
}
}
p.i = i
val, err := p.parseEscape()
if err != nil {
return "", err
}
i = p.i
result += val
case quote:
break loop
case '\r', '\n', '\f':
return "", errors.New("unexpected end of line in string")
default:
start := i
for i < len(p.s) {
if c := p.s[i]; c == quote || c == '\\' || c == '\r' || c == '\n' || c == '\f' {
break
}
i++
}
result += p.s[start:i]
}
}
if i >= len(p.s) {
return "", errors.New("EOF in string")
}
// Consume the final quote.
i++
p.i = i
return result, nil
}
// parseRegex parses a regular expression; the end is defined by encountering an
// unmatched closing ')' or ']' which is not consumed
func (p *parser) parseRegex() (rx *regexp.Regexp, err error) {
i := p.i
if len(p.s) < i+2 {
return nil, errors.New("expected regular expression, found EOF instead")
}
// number of open parens or brackets;
// when it becomes negative, finished parsing regex
open := 0
loop:
for i < len(p.s) {
switch p.s[i] {
case '(', '[':
open++
case ')', ']':
open--
if open < 0 {
break loop
}
}
i++
}
if i >= len(p.s) {
return nil, errors.New("EOF in regular expression")
}
rx, err = regexp.Compile(p.s[p.i:i])
p.i = i
return rx, err
}
// skipWhitespace consumes whitespace characters and comments.
// It returns true if there was actually anything to skip.
func (p *parser) skipWhitespace() bool {
i := p.i
for i < len(p.s) {
switch p.s[i] {
case ' ', '\t', '\r', '\n', '\f':
i++
continue
case '/':
if strings.HasPrefix(p.s[i:], "/*") {
end := strings.Index(p.s[i+len("/*"):], "*/")
if end != -1 {
i += end + len("/**/")
continue
}
}
}
break
}
if i > p.i {
p.i = i
return true
}
return false
}
// consumeParenthesis consumes an opening parenthesis and any following
// whitespace. It returns true if there was actually a parenthesis to skip.
func (p *parser) consumeParenthesis() bool {
if p.i < len(p.s) && p.s[p.i] == '(' {
p.i++
p.skipWhitespace()
return true
}
return false
}
// consumeClosingParenthesis consumes a closing parenthesis and any preceding
// whitespace. It returns true if there was actually a parenthesis to skip.
func (p *parser) consumeClosingParenthesis() bool {
i := p.i
p.skipWhitespace()
if p.i < len(p.s) && p.s[p.i] == ')' {
p.i++
return true
}
p.i = i
return false
}
// parseTypeSelector parses a type selector (one that matches by tag name).
func (p *parser) parseTypeSelector() (result tagSelector, err error) {
tag, err := p.parseIdentifier()
if err != nil {
return
}
return tagSelector{tag: toLowerASCII(tag)}, nil
}
// parseIDSelector parses a selector that matches by id attribute.
func (p *parser) parseIDSelector() (idSelector, error) {
if p.i >= len(p.s) {
return idSelector{}, fmt.Errorf("expected id selector (#id), found EOF instead")
}
if p.s[p.i] != '#' {
return idSelector{}, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
}
p.i++
id, err := p.parseName()
if err != nil {
return idSelector{}, err
}
return idSelector{id: id}, nil
}
// parseClassSelector parses a selector that matches by class attribute.
func (p *parser) parseClassSelector() (classSelector, error) {
if p.i >= len(p.s) {
return classSelector{}, fmt.Errorf("expected class selector (.class), found EOF instead")
}
if p.s[p.i] != '.' {
return classSelector{}, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
}
p.i++
class, err := p.parseIdentifier()
if err != nil {
return classSelector{}, err
}
return classSelector{class: class}, nil
}
// parseAttributeSelector parses a selector that matches by attribute value.
func (p *parser) parseAttributeSelector() (attrSelector, error) {
if p.i >= len(p.s) {
return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
}
if p.s[p.i] != '[' {
return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
}
p.i++
p.skipWhitespace()
key, err := p.parseIdentifier()
if err != nil {
return attrSelector{}, err
}
key = toLowerASCII(key)
p.skipWhitespace()
if p.i >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] == ']' {
p.i++
return attrSelector{key: key, operation: ""}, nil
}
if p.i+2 >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
op := p.s[p.i : p.i+2]
if op[0] == '=' {
op = "="
} else if op[1] != '=' {
return attrSelector{}, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
}
p.i += len(op)
p.skipWhitespace()
if p.i >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
var val string
var rx *regexp.Regexp
if op == "#=" {
rx, err = p.parseRegex()
} else {
switch p.s[p.i] {
case '\'', '"':
val, err = p.parseString()
default:
val, err = p.parseIdentifier()
}
}
if err != nil {
return attrSelector{}, err
}
p.skipWhitespace()
if p.i >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] != ']' {
return attrSelector{}, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
}
p.i++
switch op {
case "=", "!=", "~=", "|=", "^=", "$=", "*=", "#=":
return attrSelector{key: key, val: val, operation: op, regexp: rx}, nil
default:
return attrSelector{}, fmt.Errorf("attribute operator %q is not supported", op)
}
}
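As a hedged illustration of the operators this function accepts (including the cascadia-specific `#=` regular-expression operator), the sketch below exercises them through the exported `Parse`/`Query` API shown later in this diff; the sample HTML and selectors are made up for the example:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/andybalholm/cascadia"
	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(
		`<p><a class="btn primary" href="https://example.com/download.zip">get</a></p>`))
	if err != nil {
		panic(err)
	}
	for _, expr := range []string{
		`[href]`,                     // attribute is present
		`[href^="https"]`,            // value prefix
		`[href$=".zip"]`,             // value suffix
		`[class~="btn"]`,             // whitespace-separated list includes "btn"
		`[href#=example\.(com|org)]`, // cascadia-specific regular-expression operator
	} {
		sel, err := cascadia.Parse(expr)
		if err != nil {
			panic(err)
		}
		fmt.Println(expr, cascadia.Query(doc, sel) != nil) // all print true
	}
}
```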
var errExpectedParenthesis = errors.New("expected '(' but didn't find it")
var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
var errUnmatchedParenthesis = errors.New("unmatched '('")
// parsePseudoclassSelector parses a pseudoclass selector like :not(p) or a pseudo-element
// For backwards compatibility, both ':' and '::' prefix are allowed for pseudo-elements.
// https://drafts.csswg.org/selectors-3/#pseudo-elements
// Returning a nil `Sel` (and a nil `error`) means we found a pseudo-element.
func (p *parser) parsePseudoclassSelector() (out Sel, pseudoElement string, err error) {
if p.i >= len(p.s) {
return nil, "", fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
}
if p.s[p.i] != ':' {
return nil, "", fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
}
p.i++
var mustBePseudoElement bool
if p.i >= len(p.s) {
return nil, "", fmt.Errorf("got empty pseudoclass (or pseudoelement)")
}
if p.s[p.i] == ':' { // we found a pseudo-element
mustBePseudoElement = true
p.i++
}
name, err := p.parseIdentifier()
if err != nil {
return
}
name = toLowerASCII(name)
if mustBePseudoElement && (name != "after" && name != "backdrop" && name != "before" &&
name != "cue" && name != "first-letter" && name != "first-line" && name != "grammar-error" &&
name != "marker" && name != "placeholder" && name != "selection" && name != "spelling-error") {
return out, "", fmt.Errorf("unknown pseudoelement :%s", name)
}
switch name {
case "not", "has", "haschild":
if !p.consumeParenthesis() {
return out, "", errExpectedParenthesis
}
sel, parseErr := p.parseSelectorGroup()
if parseErr != nil {
return out, "", parseErr
}
if !p.consumeClosingParenthesis() {
return out, "", errExpectedClosingParenthesis
}
out = relativePseudoClassSelector{name: name, match: sel}
case "contains", "containsown":
if !p.consumeParenthesis() {
return out, "", errExpectedParenthesis
}
if p.i == len(p.s) {
return out, "", errUnmatchedParenthesis
}
var val string
switch p.s[p.i] {
case '\'', '"':
val, err = p.parseString()
default:
val, err = p.parseIdentifier()
}
if err != nil {
return out, "", err
}
val = strings.ToLower(val)
p.skipWhitespace()
if p.i >= len(p.s) {
return out, "", errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
return out, "", errExpectedClosingParenthesis
}
out = containsPseudoClassSelector{own: name == "containsown", value: val}
case "matches", "matchesown":
if !p.consumeParenthesis() {
return out, "", errExpectedParenthesis
}
rx, err := p.parseRegex()
if err != nil {
return out, "", err
}
if p.i >= len(p.s) {
return out, "", errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
return out, "", errExpectedClosingParenthesis
}
out = regexpPseudoClassSelector{own: name == "matchesown", regexp: rx}
case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
if !p.consumeParenthesis() {
return out, "", errExpectedParenthesis
}
a, b, err := p.parseNth()
if err != nil {
return out, "", err
}
if !p.consumeClosingParenthesis() {
return out, "", errExpectedClosingParenthesis
}
last := name == "nth-last-child" || name == "nth-last-of-type"
ofType := name == "nth-of-type" || name == "nth-last-of-type"
out = nthPseudoClassSelector{a: a, b: b, last: last, ofType: ofType}
case "first-child":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: false}
case "last-child":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: true}
case "first-of-type":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: false}
case "last-of-type":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: true}
case "only-child":
out = onlyChildPseudoClassSelector{ofType: false}
case "only-of-type":
out = onlyChildPseudoClassSelector{ofType: true}
case "input":
out = inputPseudoClassSelector{}
case "empty":
out = emptyElementPseudoClassSelector{}
case "root":
out = rootPseudoClassSelector{}
case "after", "backdrop", "before", "cue", "first-letter", "first-line", "grammar-error", "marker", "placeholder", "selection", "spelling-error":
return nil, name, nil
default:
return out, "", fmt.Errorf("unknown pseudoclass or pseudoelement :%s", name)
}
return
}
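For context, a minimal sketch (illustrative HTML and selector only) of how the pseudo-classes parsed above combine through the public API:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/andybalholm/cascadia"
	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(
		`<ul><li>keep</li><li class="skip">drop</li><li>keep too</li></ul>`))
	if err != nil {
		panic(err)
	}
	// :not(...) and :nth-child(...) are both handled by parsePseudoclassSelector.
	sel := cascadia.MustCompile(`li:not(.skip):nth-child(2n+1)`)
	for _, n := range sel.MatchAll(doc) {
		fmt.Println(n.FirstChild.Data) // "keep", "keep too"
	}
}
```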
// parseInteger parses a decimal integer.
func (p *parser) parseInteger() (int, error) {
i := p.i
start := i
for i < len(p.s) && '0' <= p.s[i] && p.s[i] <= '9' {
i++
}
if i == start {
return 0, errors.New("expected integer, but didn't find it")
}
p.i = i
val, err := strconv.Atoi(p.s[start:i])
if err != nil {
return 0, err
}
return val, nil
}
// parseNth parses the argument for :nth-child (normally of the form an+b).
func (p *parser) parseNth() (a, b int, err error) {
// initial state
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '-':
p.i++
goto negativeA
case '+':
p.i++
goto positiveA
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
goto positiveA
case 'n', 'N':
a = 1
p.i++
goto readN
case 'o', 'O', 'e', 'E':
id, nameErr := p.parseName()
if nameErr != nil {
return 0, 0, nameErr
}
id = toLowerASCII(id)
if id == "odd" {
return 2, 1, nil
}
if id == "even" {
return 2, 0, nil
}
return 0, 0, fmt.Errorf("expected 'odd' or 'even', but found '%s' instead", id)
default:
goto invalid
}
positiveA:
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
a, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
goto readA
case 'n', 'N':
a = 1
p.i++
goto readN
default:
goto invalid
}
negativeA:
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
a, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
a = -a
goto readA
case 'n', 'N':
a = -1
p.i++
goto readN
default:
goto invalid
}
readA:
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case 'n', 'N':
p.i++
goto readN
default:
// The number we read as a is actually b.
return 0, a, nil
}
readN:
p.skipWhitespace()
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '+':
p.i++
p.skipWhitespace()
b, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
return a, b, nil
case '-':
p.i++
p.skipWhitespace()
b, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
return a, -b, nil
default:
return a, 0, nil
}
eof:
return 0, 0, errors.New("unexpected EOF while attempting to parse expression of form an+b")
invalid:
return 0, 0, errors.New("unexpected character while attempting to parse expression of form an+b")
}
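parseNth itself is unexported, but as a rough illustration, the an+b forms it accepts all round-trip through the public `Parse` function; `odd` and `even` are shorthands for `2n+1` and `2n`:

```go
package main

import (
	"fmt"

	"github.com/andybalholm/cascadia"
)

func main() {
	// Each expression exercises a different branch of the an+b grammar above.
	for _, expr := range []string{
		"li:nth-child(odd)",  // odd        -> a=2, b=1
		"li:nth-child(even)", // even       -> a=2, b=0
		"li:nth-child(3)",    // bare int   -> a=0, b=3
		"li:nth-child(2n+1)", // full an+b
		"li:nth-child(-n+4)", // negative a -> the first four children
	} {
		if _, err := cascadia.Parse(expr); err != nil {
			fmt.Println(expr, "->", err)
			continue
		}
		fmt.Println(expr, "-> parsed")
	}
}
```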
// parseSimpleSelectorSequence parses a selector sequence that applies to
// a single element.
func (p *parser) parseSimpleSelectorSequence() (Sel, error) {
var selectors []Sel
if p.i >= len(p.s) {
return nil, errors.New("expected selector, found EOF instead")
}
switch p.s[p.i] {
case '*':
// It's the universal selector. Just skip over it, since it doesn't affect the meaning.
p.i++
case '#', '.', '[', ':':
// There's no type selector. Wait to process the other till the main loop.
default:
r, err := p.parseTypeSelector()
if err != nil {
return nil, err
}
selectors = append(selectors, r)
}
var pseudoElement string
loop:
for p.i < len(p.s) {
var (
ns Sel
newPseudoElement string
err error
)
switch p.s[p.i] {
case '#':
ns, err = p.parseIDSelector()
case '.':
ns, err = p.parseClassSelector()
case '[':
ns, err = p.parseAttributeSelector()
case ':':
ns, newPseudoElement, err = p.parsePseudoclassSelector()
default:
break loop
}
if err != nil {
return nil, err
}
// From https://drafts.csswg.org/selectors-3/#pseudo-elements :
// "Only one pseudo-element may appear per selector, and if present
// it must appear after the sequence of simple selectors that
// represents the subjects of the selector."
if ns == nil { // we found a pseudo-element
if pseudoElement != "" {
return nil, fmt.Errorf("only one pseudo-element is accepted per selector, got %s and %s", pseudoElement, newPseudoElement)
}
if !p.acceptPseudoElements {
return nil, fmt.Errorf("pseudo-element %s found, but pseudo-elements support is disabled", newPseudoElement)
}
pseudoElement = newPseudoElement
} else {
if pseudoElement != "" {
return nil, fmt.Errorf("pseudo-element %s must be at the end of selector", pseudoElement)
}
selectors = append(selectors, ns)
}
}
if len(selectors) == 1 && pseudoElement == "" { // no need to wrap the selectors in compoundSelector
return selectors[0], nil
}
return compoundSelector{selectors: selectors, pseudoElement: pseudoElement}, nil
}
// parseSelector parses a selector that may include combinators.
func (p *parser) parseSelector() (Sel, error) {
p.skipWhitespace()
result, err := p.parseSimpleSelectorSequence()
if err != nil {
return nil, err
}
for {
var (
combinator byte
c Sel
)
if p.skipWhitespace() {
combinator = ' '
}
if p.i >= len(p.s) {
return result, nil
}
switch p.s[p.i] {
case '+', '>', '~':
combinator = p.s[p.i]
p.i++
p.skipWhitespace()
case ',', ')':
// These characters can't begin a selector, but they can legally occur after one.
return result, nil
}
if combinator == 0 {
return result, nil
}
c, err = p.parseSimpleSelectorSequence()
if err != nil {
return nil, err
}
result = combinedSelector{first: result, combinator: combinator, second: c}
}
}
// parseSelectorGroup parses a group of selectors, separated by commas.
func (p *parser) parseSelectorGroup() (SelectorGroup, error) {
current, err := p.parseSelector()
if err != nil {
return nil, err
}
result := SelectorGroup{current}
for p.i < len(p.s) {
if p.s[p.i] != ',' {
break
}
p.i++
c, err := p.parseSelector()
if err != nil {
return nil, err
}
result = append(result, c)
}
return result, nil
}
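A short, hedged sketch of the comma-separated grouping this function implements, using the exported `ParseGroup`/`QueryAll` helpers defined in the next file; the HTML is illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/andybalholm/cascadia"
	"golang.org/x/net/html"
)

func main() {
	// A SelectorGroup matches a node if any of its members matches.
	group, err := cascadia.ParseGroup("h1, h2, .title")
	if err != nil {
		panic(err)
	}
	doc, err := html.Parse(strings.NewReader(
		`<h1>one</h1><p class="title">two</p><p>three</p>`))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(cascadia.QueryAll(doc, group))) // 2 (the <h1> and the <p class="title">)
}
```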

View File

@ -1,938 +0,0 @@
package cascadia
import (
"bytes"
"fmt"
"regexp"
"strings"
"golang.org/x/net/html"
)
// Matcher is the interface for basic selector functionality.
// Match returns whether a selector matches n.
type Matcher interface {
Match(n *html.Node) bool
}
// Sel is the interface for all the functionality provided by selectors.
type Sel interface {
Matcher
Specificity() Specificity
// Returns a CSS input compiling to this selector.
String() string
// Returns a pseudo-element, or an empty string.
PseudoElement() string
}
// Parse parses a selector. Use `ParseWithPseudoElement`
// if you need support for pseudo-elements.
func Parse(sel string) (Sel, error) {
p := &parser{s: sel}
compiled, err := p.parseSelector()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// ParseWithPseudoElement parses a single selector,
// with support for pseudo-element.
func ParseWithPseudoElement(sel string) (Sel, error) {
p := &parser{s: sel, acceptPseudoElements: true}
compiled, err := p.parseSelector()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// ParseGroup parses a selector, or a group of selectors separated by commas.
// Use `ParseGroupWithPseudoElements`
// if you need support for pseudo-elements.
func ParseGroup(sel string) (SelectorGroup, error) {
p := &parser{s: sel}
compiled, err := p.parseSelectorGroup()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// ParseGroupWithPseudoElements parses a selector, or a group of selectors separated by commas.
// It supports pseudo-elements.
func ParseGroupWithPseudoElements(sel string) (SelectorGroup, error) {
p := &parser{s: sel, acceptPseudoElements: true}
compiled, err := p.parseSelectorGroup()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// A Selector is a function which tells whether a node matches or not.
//
// This type is maintained for compatibility; I recommend using the newer and
// more idiomatic interfaces Sel and Matcher.
type Selector func(*html.Node) bool
// Compile parses a selector and returns, if successful, a Selector object
// that can be used to match against html.Node objects.
func Compile(sel string) (Selector, error) {
compiled, err := ParseGroup(sel)
if err != nil {
return nil, err
}
return Selector(compiled.Match), nil
}
// MustCompile is like Compile, but panics instead of returning an error.
func MustCompile(sel string) Selector {
compiled, err := Compile(sel)
if err != nil {
panic(err)
}
return compiled
}
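A minimal usage sketch of `Compile`/`MustCompile` together with the `Selector` convenience methods below; the input document is made up:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/andybalholm/cascadia"
	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(`<div><p class="a">x</p><p>y</p></div>`))
	if err != nil {
		panic(err)
	}
	sel, err := cascadia.Compile("div > p.a")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sel.MatchAll(doc)))   // 1
	fmt.Println(sel.MatchFirst(doc).Data) // "p"
}
```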
// MatchAll returns a slice of the nodes that match the selector,
// from n and its children.
func (s Selector) MatchAll(n *html.Node) []*html.Node {
return s.matchAllInto(n, nil)
}
func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node {
if s(n) {
storage = append(storage, n)
}
for child := n.FirstChild; child != nil; child = child.NextSibling {
storage = s.matchAllInto(child, storage)
}
return storage
}
func queryInto(n *html.Node, m Matcher, storage []*html.Node) []*html.Node {
for child := n.FirstChild; child != nil; child = child.NextSibling {
if m.Match(child) {
storage = append(storage, child)
}
storage = queryInto(child, m, storage)
}
return storage
}
// QueryAll returns a slice of all the nodes that match m, from the descendants
// of n.
func QueryAll(n *html.Node, m Matcher) []*html.Node {
return queryInto(n, m, nil)
}
// Match returns true if the node matches the selector.
func (s Selector) Match(n *html.Node) bool {
return s(n)
}
// MatchFirst returns the first node that matches s, from n and its children.
func (s Selector) MatchFirst(n *html.Node) *html.Node {
if s.Match(n) {
return n
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
m := s.MatchFirst(c)
if m != nil {
return m
}
}
return nil
}
// Query returns the first node that matches m, from the descendants of n.
// If none matches, it returns nil.
func Query(n *html.Node, m Matcher) *html.Node {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if m.Match(c) {
return c
}
if matched := Query(c, m); matched != nil {
return matched
}
}
return nil
}
// Filter returns the nodes in nodes that match the selector.
func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
for _, n := range nodes {
if s(n) {
result = append(result, n)
}
}
return result
}
// Filter returns the nodes that match m.
func Filter(nodes []*html.Node, m Matcher) (result []*html.Node) {
for _, n := range nodes {
if m.Match(n) {
result = append(result, n)
}
}
return result
}
type tagSelector struct {
tag string
}
// Matches elements with a given tag name.
func (t tagSelector) Match(n *html.Node) bool {
return n.Type == html.ElementNode && n.Data == t.tag
}
func (c tagSelector) Specificity() Specificity {
return Specificity{0, 0, 1}
}
func (c tagSelector) PseudoElement() string {
return ""
}
type classSelector struct {
class string
}
// Matches elements by class attribute.
func (t classSelector) Match(n *html.Node) bool {
return matchAttribute(n, "class", func(s string) bool {
return matchInclude(t.class, s)
})
}
func (c classSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c classSelector) PseudoElement() string {
return ""
}
type idSelector struct {
id string
}
// Matches elements by id attribute.
func (t idSelector) Match(n *html.Node) bool {
return matchAttribute(n, "id", func(s string) bool {
return s == t.id
})
}
func (c idSelector) Specificity() Specificity {
return Specificity{1, 0, 0}
}
func (c idSelector) PseudoElement() string {
return ""
}
type attrSelector struct {
key, val, operation string
regexp *regexp.Regexp
}
// Matches elements by attribute value.
func (t attrSelector) Match(n *html.Node) bool {
switch t.operation {
case "":
return matchAttribute(n, t.key, func(string) bool { return true })
case "=":
return matchAttribute(n, t.key, func(s string) bool { return s == t.val })
case "!=":
return attributeNotEqualMatch(t.key, t.val, n)
case "~=":
// matches elements where the attribute named key is a whitespace-separated list that includes val.
return matchAttribute(n, t.key, func(s string) bool { return matchInclude(t.val, s) })
case "|=":
return attributeDashMatch(t.key, t.val, n)
case "^=":
return attributePrefixMatch(t.key, t.val, n)
case "$=":
return attributeSuffixMatch(t.key, t.val, n)
case "*=":
return attributeSubstringMatch(t.key, t.val, n)
case "#=":
return attributeRegexMatch(t.key, t.regexp, n)
default:
panic(fmt.Sprintf("unsuported operation : %s", t.operation))
}
}
// matches elements where the attribute named key satisfies the function f.
func matchAttribute(n *html.Node, key string, f func(string) bool) bool {
if n.Type != html.ElementNode {
return false
}
for _, a := range n.Attr {
if a.Key == key && f(a.Val) {
return true
}
}
return false
}
// attributeNotEqualMatch matches elements where
// the attribute named key does not have the value val.
func attributeNotEqualMatch(key, val string, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
for _, a := range n.Attr {
if a.Key == key && a.Val == val {
return false
}
}
return true
}
// returns true if s is a whitespace-separated list that includes val.
func matchInclude(val, s string) bool {
for s != "" {
i := strings.IndexAny(s, " \t\r\n\f")
if i == -1 {
return s == val
}
if s[:i] == val {
return true
}
s = s[i+1:]
}
return false
}
// matches elements where the attribute named key equals val or starts with val plus a hyphen.
func attributeDashMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if s == val {
return true
}
if len(s) <= len(val) {
return false
}
if s[:len(val)] == val && s[len(val)] == '-' {
return true
}
return false
})
}
// attributePrefixMatch returns a Selector that matches elements where
// the attribute named key starts with val.
func attributePrefixMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
}
return strings.HasPrefix(s, val)
})
}
// attributeSuffixMatch matches elements where
// the attribute named key ends with val.
func attributeSuffixMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
}
return strings.HasSuffix(s, val)
})
}
// attributeSubstringMatch matches nodes where
// the attribute named key contains val.
func attributeSubstringMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
}
return strings.Contains(s, val)
})
}
// attributeRegexMatch matches nodes where
// the attribute named key matches the regular expression rx
func attributeRegexMatch(key string, rx *regexp.Regexp, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
return rx.MatchString(s)
})
}
func (c attrSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c attrSelector) PseudoElement() string {
return ""
}
// ---------------- Pseudo class selectors ----------------
// we use several concrete types of pseudo-class selectors
type relativePseudoClassSelector struct {
name string // one of "not", "has", "haschild"
match SelectorGroup
}
func (s relativePseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
switch s.name {
case "not":
// matches elements that do not match a.
return !s.match.Match(n)
case "has":
// matches elements with any descendant that matches a.
return hasDescendantMatch(n, s.match)
case "haschild":
// matches elements with a child that matches a.
return hasChildMatch(n, s.match)
default:
panic(fmt.Sprintf("unsupported relative pseudo class selector : %s", s.name))
}
}
// hasChildMatch returns whether n has any child that matches a.
func hasChildMatch(n *html.Node, a Matcher) bool {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if a.Match(c) {
return true
}
}
return false
}
// hasDescendantMatch performs a depth-first search of n's descendants,
// testing whether any of them match a. It returns true as soon as a match is
// found, or false if no match is found.
func hasDescendantMatch(n *html.Node, a Matcher) bool {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if a.Match(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
return true
}
}
return false
}
// Specificity returns the specificity of the most specific selectors
// in the pseudo-class arguments.
// See https://www.w3.org/TR/selectors/#specificity-rules
func (s relativePseudoClassSelector) Specificity() Specificity {
var max Specificity
for _, sel := range s.match {
newSpe := sel.Specificity()
if max.Less(newSpe) {
max = newSpe
}
}
return max
}
func (c relativePseudoClassSelector) PseudoElement() string {
return ""
}
type containsPseudoClassSelector struct {
own bool
value string
}
func (s containsPseudoClassSelector) Match(n *html.Node) bool {
var text string
if s.own {
// matches nodes that directly contain the given text
text = strings.ToLower(nodeOwnText(n))
} else {
// matches nodes that contain the given text.
text = strings.ToLower(nodeText(n))
}
return strings.Contains(text, s.value)
}
func (s containsPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c containsPseudoClassSelector) PseudoElement() string {
return ""
}
type regexpPseudoClassSelector struct {
own bool
regexp *regexp.Regexp
}
func (s regexpPseudoClassSelector) Match(n *html.Node) bool {
var text string
if s.own {
// matches nodes whose text directly matches the specified regular expression
text = nodeOwnText(n)
} else {
// matches nodes whose text matches the specified regular expression
text = nodeText(n)
}
return s.regexp.MatchString(text)
}
// writeNodeText writes the text contained in n and its descendants to b.
func writeNodeText(n *html.Node, b *bytes.Buffer) {
switch n.Type {
case html.TextNode:
b.WriteString(n.Data)
case html.ElementNode:
for c := n.FirstChild; c != nil; c = c.NextSibling {
writeNodeText(c, b)
}
}
}
// nodeText returns the text contained in n and its descendants.
func nodeText(n *html.Node) string {
var b bytes.Buffer
writeNodeText(n, &b)
return b.String()
}
// nodeOwnText returns the contents of the text nodes that are direct
// children of n.
func nodeOwnText(n *html.Node) string {
var b bytes.Buffer
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == html.TextNode {
b.WriteString(c.Data)
}
}
return b.String()
}
func (s regexpPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c regexpPseudoClassSelector) PseudoElement() string {
return ""
}
type nthPseudoClassSelector struct {
a, b int
last, ofType bool
}
func (s nthPseudoClassSelector) Match(n *html.Node) bool {
if s.a == 0 {
if s.last {
return simpleNthLastChildMatch(s.b, s.ofType, n)
} else {
return simpleNthChildMatch(s.b, s.ofType, n)
}
}
return nthChildMatch(s.a, s.b, s.last, s.ofType, n)
}
// nthChildMatch implements :nth-child(an+b).
// If last is true, implements :nth-last-child instead.
// If ofType is true, implements :nth-of-type instead.
func nthChildMatch(a, b int, last, ofType bool, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
i := -1
count := 0
for c := parent.FirstChild; c != nil; c = c.NextSibling {
if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
continue
}
count++
if c == n {
i = count
if !last {
break
}
}
}
if i == -1 {
// This shouldn't happen, since n should always be one of its parent's children.
return false
}
if last {
i = count - i + 1
}
i -= b
if a == 0 {
return i == 0
}
return i%a == 0 && i/a >= 0
}
// simpleNthChildMatch implements :nth-child(b).
// If ofType is true, implements :nth-of-type instead.
func simpleNthChildMatch(b int, ofType bool, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
count := 0
for c := parent.FirstChild; c != nil; c = c.NextSibling {
if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
continue
}
count++
if c == n {
return count == b
}
if count >= b {
return false
}
}
return false
}
// simpleNthLastChildMatch implements :nth-last-child(b).
// If ofType is true, implements :nth-last-of-type instead.
func simpleNthLastChildMatch(b int, ofType bool, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
count := 0
for c := parent.LastChild; c != nil; c = c.PrevSibling {
if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
continue
}
count++
if c == n {
return count == b
}
if count >= b {
return false
}
}
return false
}
// Specificity for nth-child pseudo-class.
// Does not support a list of selectors
func (s nthPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c nthPseudoClassSelector) PseudoElement() string {
return ""
}
type onlyChildPseudoClassSelector struct {
ofType bool
}
// Match implements :only-child.
// If `ofType` is true, it implements :only-of-type instead.
func (s onlyChildPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
count := 0
for c := parent.FirstChild; c != nil; c = c.NextSibling {
if (c.Type != html.ElementNode) || (s.ofType && c.Data != n.Data) {
continue
}
count++
if count > 1 {
return false
}
}
return count == 1
}
func (s onlyChildPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c onlyChildPseudoClassSelector) PseudoElement() string {
return ""
}
type inputPseudoClassSelector struct{}
// Matches input, select, textarea and button elements.
func (s inputPseudoClassSelector) Match(n *html.Node) bool {
return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button")
}
func (s inputPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c inputPseudoClassSelector) PseudoElement() string {
return ""
}
type emptyElementPseudoClassSelector struct{}
// Matches empty elements.
func (s emptyElementPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
switch c.Type {
case html.ElementNode, html.TextNode:
return false
}
}
return true
}
func (s emptyElementPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c emptyElementPseudoClassSelector) PseudoElement() string {
return ""
}
type rootPseudoClassSelector struct{}
// Match implements :root
func (s rootPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
if n.Parent == nil {
return false
}
return n.Parent.Type == html.DocumentNode
}
func (s rootPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
func (c rootPseudoClassSelector) PseudoElement() string {
return ""
}
type compoundSelector struct {
selectors []Sel
pseudoElement string
}
// Matches elements if each sub-selectors matches.
func (t compoundSelector) Match(n *html.Node) bool {
if len(t.selectors) == 0 {
return n.Type == html.ElementNode
}
for _, sel := range t.selectors {
if !sel.Match(n) {
return false
}
}
return true
}
func (s compoundSelector) Specificity() Specificity {
var out Specificity
for _, sel := range s.selectors {
out = out.Add(sel.Specificity())
}
if s.pseudoElement != "" {
// https://drafts.csswg.org/selectors-3/#specificity
out = out.Add(Specificity{0, 0, 1})
}
return out
}
func (c compoundSelector) PseudoElement() string {
return c.pseudoElement
}
type combinedSelector struct {
first Sel
combinator byte
second Sel
}
func (t combinedSelector) Match(n *html.Node) bool {
if t.first == nil {
return false // maybe we should panic
}
switch t.combinator {
case 0:
return t.first.Match(n)
case ' ':
return descendantMatch(t.first, t.second, n)
case '>':
return childMatch(t.first, t.second, n)
case '+':
return siblingMatch(t.first, t.second, true, n)
case '~':
return siblingMatch(t.first, t.second, false, n)
default:
panic("unknown combinator")
}
}
// matches an element if it matches d and has an ancestor that matches a.
func descendantMatch(a, d Matcher, n *html.Node) bool {
if !d.Match(n) {
return false
}
for p := n.Parent; p != nil; p = p.Parent {
if a.Match(p) {
return true
}
}
return false
}
// matches an element if it matches d and its parent matches a.
func childMatch(a, d Matcher, n *html.Node) bool {
return d.Match(n) && n.Parent != nil && a.Match(n.Parent)
}
// matches an element if it matches s2 and is preceded by an element that matches s1.
// If adjacent is true, the sibling must be immediately before the element.
func siblingMatch(s1, s2 Matcher, adjacent bool, n *html.Node) bool {
if !s2.Match(n) {
return false
}
if adjacent {
for n = n.PrevSibling; n != nil; n = n.PrevSibling {
if n.Type == html.TextNode || n.Type == html.CommentNode {
continue
}
return s1.Match(n)
}
return false
}
// Walk backwards looking for element that matches s1
for c := n.PrevSibling; c != nil; c = c.PrevSibling {
if s1.Match(c) {
return true
}
}
return false
}
func (s combinedSelector) Specificity() Specificity {
spec := s.first.Specificity()
if s.second != nil {
spec = spec.Add(s.second.Specificity())
}
return spec
}
// on combinedSelector, a pseudo-element only makes sense on the last
// selector, although others increase specificity.
func (c combinedSelector) PseudoElement() string {
if c.second == nil {
return ""
}
return c.second.PseudoElement()
}
// A SelectorGroup is a list of selectors, which matches if any of the
// individual selectors matches.
type SelectorGroup []Sel
// Match returns true if the node matches one of the single selectors.
func (s SelectorGroup) Match(n *html.Node) bool {
for _, sel := range s {
if sel.Match(n) {
return true
}
}
return false
}

View File

@ -1,120 +0,0 @@
package cascadia
import (
"fmt"
"strings"
)
// implements the reverse operation Sel -> string
func (c tagSelector) String() string {
return c.tag
}
func (c idSelector) String() string {
return "#" + c.id
}
func (c classSelector) String() string {
return "." + c.class
}
func (c attrSelector) String() string {
val := c.val
if c.operation == "#=" {
val = c.regexp.String()
} else if c.operation != "" {
val = fmt.Sprintf(`"%s"`, val)
}
return fmt.Sprintf(`[%s%s%s]`, c.key, c.operation, val)
}
func (c relativePseudoClassSelector) String() string {
return fmt.Sprintf(":%s(%s)", c.name, c.match.String())
}
func (c containsPseudoClassSelector) String() string {
s := "contains"
if c.own {
s += "Own"
}
return fmt.Sprintf(`:%s("%s")`, s, c.value)
}
func (c regexpPseudoClassSelector) String() string {
s := "matches"
if c.own {
s += "Own"
}
return fmt.Sprintf(":%s(%s)", s, c.regexp.String())
}
func (c nthPseudoClassSelector) String() string {
if c.a == 0 && c.b == 1 { // special cases
s := ":first-"
if c.last {
s = ":last-"
}
if c.ofType {
s += "of-type"
} else {
s += "child"
}
return s
}
var name string
switch [2]bool{c.last, c.ofType} {
case [2]bool{true, true}:
name = "nth-last-of-type"
case [2]bool{true, false}:
name = "nth-last-child"
case [2]bool{false, true}:
name = "nth-of-type"
case [2]bool{false, false}:
name = "nth-child"
}
return fmt.Sprintf(":%s(%dn+%d)", name, c.a, c.b)
}
func (c onlyChildPseudoClassSelector) String() string {
if c.ofType {
return ":only-of-type"
}
return ":only-child"
}
func (c inputPseudoClassSelector) String() string {
return ":input"
}
func (c emptyElementPseudoClassSelector) String() string {
return ":empty"
}
func (c rootPseudoClassSelector) String() string {
return ":root"
}
func (c compoundSelector) String() string {
if len(c.selectors) == 0 && c.pseudoElement == "" {
return "*"
}
chunks := make([]string, len(c.selectors))
for i, sel := range c.selectors {
chunks[i] = sel.String()
}
s := strings.Join(chunks, "")
if c.pseudoElement != "" {
s += "::" + c.pseudoElement
}
return s
}
func (c combinedSelector) String() string {
start := c.first.String()
if c.second != nil {
start += fmt.Sprintf(" %s %s", string(c.combinator), c.second.String())
}
return start
}
func (c SelectorGroup) String() string {
ck := make([]string, len(c))
for i, s := range c {
ck[i] = s.String()
}
return strings.Join(ck, ", ")
}
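As a rough illustration of this reverse operation, a compiled group can be re-serialized with `String()`; the output is equivalent CSS, though not necessarily byte-identical to the input:

```go
package main

import (
	"fmt"

	"github.com/andybalholm/cascadia"
)

func main() {
	group, err := cascadia.ParseGroup(`a[href^="https"]:first-child, .note`)
	if err != nil {
		panic(err)
	}
	fmt.Println(group.String()) // a[href^="https"]:first-child, .note
}
```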

View File

@ -1,26 +0,0 @@
package cascadia
// Specificity is the CSS specificity as defined in
// https://www.w3.org/TR/selectors/#specificity-rules
// with the convention Specificity = [A,B,C].
type Specificity [3]int
// returns `true` if s < other (strictly), false otherwise
func (s Specificity) Less(other Specificity) bool {
for i := range s {
if s[i] < other[i] {
return true
}
if s[i] > other[i] {
return false
}
}
return false
}
func (s Specificity) Add(other Specificity) Specificity {
for i, sp := range other {
s[i] += sp
}
return s
}
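A small sketch of how these [A,B,C] values add up for a compiled selector (A counts ids, B counts classes, attributes, and pseudo-classes, C counts type selectors); the selector is illustrative:

```go
package main

import (
	"fmt"

	"github.com/andybalholm/cascadia"
)

func main() {
	sel, err := cascadia.Parse("#nav li.active a")
	if err != nil {
		panic(err)
	}
	// One id, one class, two type selectors.
	fmt.Println(sel.Specificity()) // [1 1 2]
}
```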

View File

@ -1,32 +0,0 @@
# vscode
.vscode
debug
*.test
./build
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,16 +0,0 @@
language: go
go:
- 1.9.x
- 1.12.x
- 1.13.x
install:
- go get golang.org/x/net/html/charset
- go get golang.org/x/net/html
- go get github.com/antchfx/xpath
- go get github.com/mattn/goveralls
- go get github.com/golang/groupcache
script:
- $HOME/gopath/bin/goveralls -service=travis-ci

View File

@ -1,18 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "htmlquery",
srcs = [
"cache.go",
"query.go",
],
importmap = "peridot.resf.org/vendor/github.com/antchfx/htmlquery",
importpath = "github.com/antchfx/htmlquery",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/antchfx/xpath",
"//vendor/github.com/golang/groupcache/lru",
"@org_golang_x_net//html",
"@org_golang_x_net//html/charset",
],
)

View File

@ -1,17 +0,0 @@
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,168 +0,0 @@
htmlquery
====
[![Build Status](https://travis-ci.org/antchfx/htmlquery.svg?branch=master)](https://travis-ci.org/antchfx/htmlquery)
[![Coverage Status](https://coveralls.io/repos/github/antchfx/htmlquery/badge.svg?branch=master)](https://coveralls.io/github/antchfx/htmlquery?branch=master)
[![GoDoc](https://godoc.org/github.com/antchfx/htmlquery?status.svg)](https://godoc.org/github.com/antchfx/htmlquery)
[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/htmlquery)](https://goreportcard.com/report/github.com/antchfx/htmlquery)
Overview
====
`htmlquery` is an XPath query package for HTML; it lets you extract data from, or evaluate expressions against, HTML documents using XPath.
`htmlquery` has a built-in query object cache based on [LRU](https://godoc.org/github.com/golang/groupcache/lru) that keeps recently used XPath query strings. Enabling the cache avoids recompiling the same XPath expression on every query.
Installation
====
```
go get github.com/antchfx/htmlquery
```
Getting Started
====
#### Query; returns matched elements or an error.
```go
nodes, err := htmlquery.QueryAll(doc, "//a")
if err != nil {
panic(`not a valid XPath expression.`)
}
```
#### Load HTML document from URL.
```go
doc, err := htmlquery.LoadURL("http://example.com/")
```
#### Load HTML document from a file.
```go
filePath := "/home/user/sample.html"
doc, err := htmlquery.LoadDoc(filePath)
```
#### Load HTML document from string.
```go
s := `<html>....</html>`
doc, err := htmlquery.Parse(strings.NewReader(s))
```
#### Find all A elements.
```go
list := htmlquery.Find(doc, "//a")
```
#### Find all A elements that have `href` attribute.
```go
list := range htmlquery.Find(doc, "//a[@href]")
```
#### Find all A elements with `href` attribute and only return `href` value.
```go
list := range htmlquery.Find(doc, "//a/@href")
for n := range list{
fmt.Println(htmlquery.InnerText(n)) // output @href value without A element.
}
```
#### Find the third A element.
```go
a := htmlquery.FindOne(doc, "//a[3]")
```
#### Evaluate the number of all IMG elements.
```go
expr, _ := xpath.Compile("count(//img)")
v := expr.Evaluate(htmlquery.CreateXPathNavigator(doc)).(float64)
fmt.Printf("total count is %f", v)
```
FAQ
====
#### `Find()` vs `QueryAll()`, which is better?
`Find` and `QueryAll` do the same thing: they search for all matching HTML nodes.
`Find` panics if you pass an invalid XPath query, while `QueryAll` returns an error.
#### Can I save my query expression object for the next query?
Yes, you can. The `QuerySelector` and `QuerySelectorAll` methods accept a pre-compiled query expression object.
Caching (and reusing) a query expression object avoids recompiling the XPath expression and improves query performance.
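For example, a minimal sketch of that reuse pattern (assuming `docs` is a slice of parsed `*html.Node` documents and the usual imports):

```go
expr, err := xpath.Compile("//a[@href]")
if err != nil {
	panic(err)
}
// Compile once, reuse across documents; no per-query recompilation.
for _, doc := range docs {
	for _, n := range htmlquery.QuerySelectorAll(doc, expr) {
		fmt.Println(htmlquery.SelectAttr(n, "href"))
	}
}
```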
#### XPath query object cache performance
```
goos: windows
goarch: amd64
pkg: github.com/antchfx/htmlquery
BenchmarkSelectorCache-4 20000000 55.2 ns/op
BenchmarkDisableSelectorCache-4 500000 3162 ns/op
```
#### How to disable caching?
```
htmlquery.DisableSelectorCache = true
```
Changelogs
===
2019-11-19
- Add built-in query object cache feature, avoid re-compilation for the same query string. [#16](https://github.com/antchfx/htmlquery/issues/16)
- Added `LoadDoc`. [#18](https://github.com/antchfx/htmlquery/pull/18)
2019-10-05
- Add new methods that compatible with invalid XPath expression error: `QueryAll` and `Query`.
- Add `QuerySelector` and `QuerySelectorAll` methods, supported reused your query object.
2019-02-04
- [#7](https://github.com/antchfx/htmlquery/issues/7) Removed deprecated `FindEach()` and `FindEachWithBreak()` methods.
2018-12-28
- Avoid adding duplicate elements to list for `Find()` method. [#6](https://github.com/antchfx/htmlquery/issues/6)
Tutorial
===
```go
func main() {
doc, err := htmlquery.LoadURL("https://www.bing.com/search?q=golang")
if err != nil {
panic(err)
}
// Find all news items.
list, err := htmlquery.QueryAll(doc, "//ol/li")
if err != nil {
panic(err)
}
for i, n := range list {
a := htmlquery.FindOne(n, "//a")
fmt.Printf("%d %s(%s)\n", i, htmlquery.InnerText(a), htmlquery.SelectAttr(a, "href"))
}
}
```
List of supported XPath query packages
===
| Name | Description |
| ------------------------------------------------- | ----------------------------------------- |
| [htmlquery](https://github.com/antchfx/htmlquery) | XPath query package for the HTML document |
| [xmlquery](https://github.com/antchfx/xmlquery) | XPath query package for the XML document |
| [jsonquery](https://github.com/antchfx/jsonquery) | XPath query package for the JSON document |
Questions
===
Please let me know if you have any questions.

View File

@ -1,42 +0,0 @@
package htmlquery
import (
"sync"
"github.com/antchfx/xpath"
"github.com/golang/groupcache/lru"
)
// DisableSelectorCache will disable caching for the query selector if value is true.
var DisableSelectorCache = false
// SelectorCacheMaxEntries sets how many selector objects can be cached. Default is 50.
// Will disable caching if SelectorCacheMaxEntries <= 0.
var SelectorCacheMaxEntries = 50
var (
cacheOnce sync.Once
cache *lru.Cache
cacheMutex sync.Mutex
)
func getQuery(expr string) (*xpath.Expr, error) {
if DisableSelectorCache || SelectorCacheMaxEntries <= 0 {
return xpath.Compile(expr)
}
cacheOnce.Do(func() {
cache = lru.New(SelectorCacheMaxEntries)
})
cacheMutex.Lock()
defer cacheMutex.Unlock()
if v, ok := cache.Get(expr); ok {
return v.(*xpath.Expr), nil
}
v, err := xpath.Compile(expr)
if err != nil {
return nil, err
}
cache.Add(expr, v)
return v, nil
}
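A brief, hedged usage sketch: because the LRU cache is created lazily via `sync.Once`, the knobs above should be set before the first query. The sample document is made up:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/htmlquery"
)

func main() {
	htmlquery.SelectorCacheMaxEntries = 200 // default is 50; <= 0 disables caching
	doc, err := htmlquery.Parse(strings.NewReader(`<p><a href="/x">x</a></p>`))
	if err != nil {
		panic(err)
	}
	// The compiled "//a" expression is cached on the first call and reused on the second.
	for i := 0; i < 2; i++ {
		fmt.Println(len(htmlquery.Find(doc, "//a"))) // 1
	}
}
```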

View File

@ -1,9 +0,0 @@
module github.com/antchfx/htmlquery
go 1.14
require (
github.com/antchfx/xpath v1.1.6
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd
)

View File

@ -1,11 +0,0 @@
github.com/antchfx/xpath v1.1.6 h1:6sVh6hB5T6phw1pFpHRQ+C4bd8sNI+O58flqtg7h0R0=
github.com/antchfx/xpath v1.1.6/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd h1:QPwSajcTUrFriMF1nJ3XzgoqakqQEsnZf9LdXdi2nkI=
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -1,338 +0,0 @@
/*
Package htmlquery provides functions to extract data from HTML documents using XPath expressions.
*/
package htmlquery
import (
"bufio"
"bytes"
"fmt"
"io"
"net/http"
"os"
"github.com/antchfx/xpath"
"golang.org/x/net/html"
"golang.org/x/net/html/charset"
)
var _ xpath.NodeNavigator = &NodeNavigator{}
// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified html.Node.
func CreateXPathNavigator(top *html.Node) *NodeNavigator {
return &NodeNavigator{curr: top, root: top, attr: -1}
}
// Find is like QueryAll but will panic if the expression `expr` cannot be parsed.
//
// See `QueryAll()` function.
func Find(top *html.Node, expr string) []*html.Node {
nodes, err := QueryAll(top, expr)
if err != nil {
panic(err)
}
return nodes
}
// FindOne is like Query but will panic if the expression `expr` cannot be parsed.
// See `Query()` function.
func FindOne(top *html.Node, expr string) *html.Node {
node, err := Query(top, expr)
if err != nil {
panic(err)
}
return node
}
// QueryAll searches for all html.Node that match the specified XPath expr.
// It returns an error if the expression `expr` cannot be parsed.
func QueryAll(top *html.Node, expr string) ([]*html.Node, error) {
exp, err := getQuery(expr)
if err != nil {
return nil, err
}
nodes := QuerySelectorAll(top, exp)
return nodes, nil
}
// Query searches for html.Node that match the specified XPath expr,
// and returns the first matched html.Node.
//
// It returns an error if the expression `expr` cannot be parsed.
func Query(top *html.Node, expr string) (*html.Node, error) {
exp, err := getQuery(expr)
if err != nil {
return nil, err
}
return QuerySelector(top, exp), nil
}
// QuerySelector returns the first matched html.Node by the specified XPath selector.
func QuerySelector(top *html.Node, selector *xpath.Expr) *html.Node {
t := selector.Select(CreateXPathNavigator(top))
if t.MoveNext() {
return getCurrentNode(t.Current().(*NodeNavigator))
}
return nil
}
// QuerySelectorAll searches for all html.Node that match the specified XPath selector.
func QuerySelectorAll(top *html.Node, selector *xpath.Expr) []*html.Node {
var elems []*html.Node
t := selector.Select(CreateXPathNavigator(top))
for t.MoveNext() {
nav := t.Current().(*NodeNavigator)
n := getCurrentNode(nav)
// avoid adding duplicate nodes.
if len(elems) > 0 && (elems[0] == n || (nav.NodeType() == xpath.AttributeNode &&
nav.LocalName() == elems[0].Data && nav.Value() == InnerText(elems[0]))) {
continue
}
elems = append(elems, n)
}
return elems
}
// LoadURL loads the HTML document from the specified URL.
func LoadURL(url string) (*html.Node, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
if err != nil {
return nil, err
}
return html.Parse(r)
}
// LoadDoc loads the HTML document from the specified file path.
func LoadDoc(path string) (*html.Node, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return html.Parse(bufio.NewReader(f))
}
func getCurrentNode(n *NodeNavigator) *html.Node {
if n.NodeType() == xpath.AttributeNode {
childNode := &html.Node{
Type: html.TextNode,
Data: n.Value(),
}
return &html.Node{
Type: html.ElementNode,
Data: n.LocalName(),
FirstChild: childNode,
LastChild: childNode,
}
}
return n.curr
}
// Parse returns the parse tree for the HTML from the given Reader.
func Parse(r io.Reader) (*html.Node, error) {
return html.Parse(r)
}
// InnerText returns the text between the start and end tags of the object.
func InnerText(n *html.Node) string {
var output func(*bytes.Buffer, *html.Node)
output = func(buf *bytes.Buffer, n *html.Node) {
switch n.Type {
case html.TextNode:
buf.WriteString(n.Data)
return
case html.CommentNode:
return
}
for child := n.FirstChild; child != nil; child = child.NextSibling {
output(buf, child)
}
}
var buf bytes.Buffer
output(&buf, n)
return buf.String()
}
// SelectAttr returns the attribute value with the specified name.
func SelectAttr(n *html.Node, name string) (val string) {
if n == nil {
return
}
if n.Type == html.ElementNode && n.Parent == nil && name == n.Data {
return InnerText(n)
}
for _, attr := range n.Attr {
if attr.Key == name {
val = attr.Val
break
}
}
return
}
// OutputHTML returns the text including tags name.
func OutputHTML(n *html.Node, self bool) string {
var buf bytes.Buffer
if self {
html.Render(&buf, n)
} else {
for n := n.FirstChild; n != nil; n = n.NextSibling {
html.Render(&buf, n)
}
}
return buf.String()
}
type NodeNavigator struct {
root, curr *html.Node
attr int
}
func (h *NodeNavigator) Current() *html.Node {
return h.curr
}
func (h *NodeNavigator) NodeType() xpath.NodeType {
switch h.curr.Type {
case html.CommentNode:
return xpath.CommentNode
case html.TextNode:
return xpath.TextNode
case html.DocumentNode:
return xpath.RootNode
case html.ElementNode:
if h.attr != -1 {
return xpath.AttributeNode
}
return xpath.ElementNode
case html.DoctypeNode:
// the <!DOCTYPE html> declaration is ignored and treated as a root node type.
return xpath.RootNode
}
panic(fmt.Sprintf("unknown HTML node type: %v", h.curr.Type))
}
func (h *NodeNavigator) LocalName() string {
if h.attr != -1 {
return h.curr.Attr[h.attr].Key
}
return h.curr.Data
}
func (*NodeNavigator) Prefix() string {
return ""
}
func (h *NodeNavigator) Value() string {
switch h.curr.Type {
case html.CommentNode:
return h.curr.Data
case html.ElementNode:
if h.attr != -1 {
return h.curr.Attr[h.attr].Val
}
return InnerText(h.curr)
case html.TextNode:
return h.curr.Data
}
return ""
}
func (h *NodeNavigator) Copy() xpath.NodeNavigator {
n := *h
return &n
}
func (h *NodeNavigator) MoveToRoot() {
h.curr = h.root
}
func (h *NodeNavigator) MoveToParent() bool {
if h.attr != -1 {
h.attr = -1
return true
} else if node := h.curr.Parent; node != nil {
h.curr = node
return true
}
return false
}
func (h *NodeNavigator) MoveToNextAttribute() bool {
if h.attr >= len(h.curr.Attr)-1 {
return false
}
h.attr++
return true
}
func (h *NodeNavigator) MoveToChild() bool {
if h.attr != -1 {
return false
}
if node := h.curr.FirstChild; node != nil {
h.curr = node
return true
}
return false
}
func (h *NodeNavigator) MoveToFirst() bool {
if h.attr != -1 || h.curr.PrevSibling == nil {
return false
}
for {
node := h.curr.PrevSibling
if node == nil {
break
}
h.curr = node
}
return true
}
func (h *NodeNavigator) String() string {
return h.Value()
}
func (h *NodeNavigator) MoveToNext() bool {
if h.attr != -1 {
return false
}
if node := h.curr.NextSibling; node != nil {
h.curr = node
return true
}
return false
}
func (h *NodeNavigator) MoveToPrevious() bool {
if h.attr != -1 {
return false
}
if node := h.curr.PrevSibling; node != nil {
h.curr = node
return true
}
return false
}
func (h *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
node, ok := other.(*NodeNavigator)
if !ok || node.root != h.root {
return false
}
h.curr = node.curr
h.attr = node.attr
return true
}

View File

@ -1,32 +0,0 @@
# vscode
.vscode
debug
*.test
./build
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,17 +0,0 @@
language: go
go:
- 1.9.x
- 1.12.x
- 1.13.x
- 1.14.x
- 1.15.x
install:
- go get golang.org/x/net/html/charset
- go get github.com/antchfx/xpath
- go get github.com/mattn/goveralls
- go get github.com/golang/groupcache
script:
- $HOME/gopath/bin/goveralls -service=travis-ci

View File

@ -1,21 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "xmlquery",
srcs = [
"cache.go",
"cached_reader.go",
"node.go",
"options.go",
"parse.go",
"query.go",
],
importmap = "peridot.resf.org/vendor/github.com/antchfx/xmlquery",
importpath = "github.com/antchfx/xmlquery",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/antchfx/xpath",
"//vendor/github.com/golang/groupcache/lru",
"@org_golang_x_net//html/charset",
],
)

View File

@ -1,17 +0,0 @@
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,262 +0,0 @@
xmlquery
====
[![Build Status](https://travis-ci.org/antchfx/xmlquery.svg?branch=master)](https://travis-ci.org/antchfx/xmlquery)
[![Coverage Status](https://coveralls.io/repos/github/antchfx/xmlquery/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xmlquery?branch=master)
[![GoDoc](https://godoc.org/github.com/antchfx/xmlquery?status.svg)](https://godoc.org/github.com/antchfx/xmlquery)
[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xmlquery)](https://goreportcard.com/report/github.com/antchfx/xmlquery)
Overview
===
`xmlquery` is an XPath query package for XML documents, allowing you to extract
data from, or evaluate expressions against, XML documents using XPath.
`xmlquery` has a built-in query object caching feature that caches recently used
XPath query strings. Enabling caching avoids recompiling the XPath expression for
each query.
Change Logs
===
2020-08-??
- Add XML stream loading and parsing support.
2019-11-11
- Add XPath query caching.
2019-10-05
- Add new methods compatible with invalid XPath expression error: `QueryAll` and `Query`.
- Add `QuerySelector` and `QuerySelectorAll` methods, support for reused query objects.
- PR [#12](https://github.com/antchfx/xmlquery/pull/12) (Thanks @FrancescoIlario)
- PR [#11](https://github.com/antchfx/xmlquery/pull/11) (Thanks @gjvnq)
2018-12-23
- Added XML output including comment nodes. [#9](https://github.com/antchfx/xmlquery/issues/9)
2018-12-03
- Added support to attribute name with namespace prefix and XML output. [#6](https://github.com/antchfx/xmlquery/issues/6)
Installation
====
```
$ go get github.com/antchfx/xmlquery
```
Getting Started
===
#### Find nodes matching a specified XPath query.
```go
list, err := xmlquery.QueryAll(doc, "a")
if err != nil {
panic(err)
}
```
#### Parse an XML from URL.
```go
doc, err := xmlquery.LoadURL("http://www.example.com/sitemap.xml")
```
#### Parse an XML from string.
```go
s := `<?xml version="1.0" encoding="utf-8"?><rss version="2.0"></rss>`
doc, err := xmlquery.Parse(strings.NewReader(s))
```
#### Parse an XML from io.Reader.
```go
f, err := os.Open("../books.xml")
doc, err := xmlquery.Parse(f)
```
#### Parse an XML in a stream fashion (simple case, without element filtering).
```go
f, err := os.Open("../books.xml")
p, err := xmlquery.CreateStreamParser(f, "/bookstore/book")
for {
n, err := p.Read()
if err == io.EOF {
break
}
if err != nil {
...
}
}
```
#### Parse an XML in a stream fashion (simple case with advanced element filtering).
```go
f, err := os.Open("../books.xml")
p, err := xmlquery.CreateStreamParser(f, "/bookstore/book", "/bookstore/book[price>=10]")
for {
n, err := p.Read()
if err == io.EOF {
break
}
if err != nil {
...
}
}
```
#### Find authors of all books in the bookstore.
```go
list := xmlquery.Find(doc, "//book//author")
// or
list := xmlquery.Find(doc, "//author")
```
#### Find the second book.
```go
book := xmlquery.FindOne(doc, "//book[2]")
```
#### Find all book elements and get only their `id` attributes.
```go
list := xmlquery.Find(doc, "//book/@id")
```
#### Find all books with id `bk104`.
```go
list := xmlquery.Find(doc, "//book[@id='bk104']")
```
#### Find all books with price less than 5.
```go
list := xmlquery.Find(doc, "//book[price<5]")
```
#### Evaluate the total price of all books.
```go
expr, err := xpath.Compile("sum(//book/price)")
price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
fmt.Printf("total price: %f\n", price)
```
#### Count all book elements.
```go
expr, err := xpath.Compile("count(//book)")
count := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
```
FAQ
====
#### `Find()` vs `QueryAll()`, which is better?
`Find` and `QueryAll` do the same thing: they search for all matching XML nodes.
`Find` panics if given an invalid XPath query, while `QueryAll` returns an
error.
#### Can I save my query expression object for the next query?
Yes, you can. The `QuerySelector` and `QuerySelectorAll` methods accept a
compiled query expression object.
Caching a query expression object avoids recompiling the XPath query
expression and improves query performance.
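For example (a minimal sketch, assuming `doc` was parsed as in the examples above):
```go
// doc is assumed to be a document parsed with xmlquery.Parse.
// Compile the expression once, then reuse it across queries.
expr, err := xpath.Compile("//book[@id='bk104']")
if err != nil {
	panic(err)
}
nodes := xmlquery.QuerySelectorAll(doc, expr) // all matching nodes
first := xmlquery.QuerySelector(doc, expr)    // first match, or nil
```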
#### Create an XML document.
```go
doc := &xmlquery.Node{
Type: xmlquery.DeclarationNode,
Data: "xml",
Attr: []xml.Attr{
xml.Attr{Name: xml.Name{Local: "version"}, Value: "1.0"},
},
}
root := &xmlquery.Node{
Data: "rss",
Type: xmlquery.ElementNode,
}
doc.FirstChild = root
channel := &xmlquery.Node{
Data: "channel",
Type: xmlquery.ElementNode,
}
root.FirstChild = channel
title := &xmlquery.Node{
Data: "title",
Type: xmlquery.ElementNode,
}
title_text := &xmlquery.Node{
Data: "W3Schools Home Page",
Type: xmlquery.TextNode,
}
title.FirstChild = title_text
channel.FirstChild = title
fmt.Println(doc.OutputXML(true))
// <?xml version="1.0"?><rss><channel><title>W3Schools Home Page</title></channel></rss>
```
Quick Tutorial
===
```go
import (
"fmt"
"strings"
"github.com/antchfx/xmlquery"
)
func main() {
s := `<?xml version="1.0" encoding="UTF-8" ?>
<rss version="2.0">
<channel>
<title>W3Schools Home Page</title>
<link>https://www.w3schools.com</link>
<description>Free web building tutorials</description>
<item>
<title>RSS Tutorial</title>
<link>https://www.w3schools.com/xml/xml_rss.asp</link>
<description>New RSS tutorial on W3Schools</description>
</item>
<item>
<title>XML Tutorial</title>
<link>https://www.w3schools.com/xml</link>
<description>New XML tutorial on W3Schools</description>
</item>
</channel>
</rss>`
doc, err := xmlquery.Parse(strings.NewReader(s))
if err != nil {
panic(err)
}
channel := xmlquery.FindOne(doc, "//channel")
if n := channel.SelectElement("title"); n != nil {
fmt.Printf("title: %s\n", n.InnerText())
}
if n := channel.SelectElement("link"); n != nil {
fmt.Printf("link: %s\n", n.InnerText())
}
for i, n := range xmlquery.Find(doc, "//item/title") {
fmt.Printf("#%d %s\n", i, n.InnerText())
}
}
```
List of supported XPath query packages
===
| Name | Description |
| ------------------------------------------------- | ----------------------------------------- |
| [htmlquery](https://github.com/antchfx/htmlquery) | XPath query package for HTML documents |
| [xmlquery](https://github.com/antchfx/xmlquery) | XPath query package for XML documents |
| [jsonquery](https://github.com/antchfx/jsonquery) | XPath query package for JSON documents |
Questions
===
Please let me know if you have any questions.

View File

@ -1,121 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" ?>
<bookstore specialty="novel">
<book id="bk101">
<author>Gambardella, Matthew</author>
<title>XML Developer's Guide</title>
<genre>Computer</genre>
<price>44.95</price>
<publish_date>2000-10-01</publish_date>
<description>An in-depth look at creating applications
with XML.</description>
</book>
<book id="bk102">
<author>Ralls, Kim</author>
<title>Midnight Rain</title>
<genre>Fantasy</genre>
<price>5.95</price>
<publish_date>2000-12-16</publish_date>
<description>A former architect battles corporate zombies,
an evil sorceress, and her own childhood to become queen
of the world.</description>
</book>
<book id="bk103">
<author>Corets, Eva</author>
<title>Maeve Ascendant</title>
<genre>Fantasy</genre>
<price>5.95</price>
<publish_date>2000-11-17</publish_date>
<description>After the collapse of a nanotechnology
society in England, the young survivors lay the
foundation for a new society.</description>
</book>
<book id="bk104">
<author>Corets, Eva</author>
<title>Oberon's Legacy</title>
<genre>Fantasy</genre>
<price>5.95</price>
<publish_date>2001-03-10</publish_date>
<description>In post-apocalypse England, the mysterious
agent known only as Oberon helps to create a new life
for the inhabitants of London. Sequel to Maeve
Ascendant.</description>
</book>
<book id="bk105">
<author>Corets, Eva</author>
<title>The Sundered Grail</title>
<genre>Fantasy</genre>
<price>5.95</price>
<publish_date>2001-09-10</publish_date>
<description>The two daughters of Maeve, half-sisters,
battle one another for control of England. Sequel to
Oberon's Legacy.</description>
</book>
<book id="bk106">
<author>Randall, Cynthia</author>
<title>Lover Birds</title>
<genre>Romance</genre>
<price>4.95</price>
<publish_date>2000-09-02</publish_date>
<description>When Carla meets Paul at an ornithology
conference, tempers fly as feathers get ruffled.</description>
</book>
<book id="bk107">
<author>Thurman, Paula</author>
<title>Splish Splash</title>
<genre>Romance</genre>
<price>4.95</price>
<publish_date>2000-11-02</publish_date>
<description>A deep sea diver finds true love twenty
thousand leagues beneath the sea.</description>
</book>
<book id="bk108">
<author>Knorr, Stefan</author>
<title>Creepy Crawlies</title>
<genre>Horror</genre>
<price>4.95</price>
<publish_date>2000-12-06</publish_date>
<description>An anthology of horror stories about roaches,
centipedes, scorpions and other insects.</description>
</book>
<book id="bk109">
<author>Kress, Peter</author>
<title>Paradox Lost</title>
<genre>Science Fiction</genre>
<price>6.95</price>
<publish_date>2000-11-02</publish_date>
<description>After an inadvertant trip through a Heisenberg
Uncertainty Device, James Salway discovers the problems
of being quantum.</description>
</book>
<book id="bk110">
<author>O'Brien, Tim</author>
<title>Microsoft .NET: The Programming Bible</title>
<genre>Computer</genre>
<price>36.95</price>
<publish_date>2000-12-09</publish_date>
<description>Microsoft's .NET initiative is explored in
detail in this deep programmer's reference.</description>
</book>
<book id="bk111">
<author>O'Brien, Tim</author>
<title>MSXML3: A Comprehensive Guide</title>
<genre>Computer</genre>
<price>36.95</price>
<publish_date>2000-12-01</publish_date>
<description>The Microsoft MSXML3 parser is covered in
detail, with attention to XML DOM interfaces, XSLT processing,
SAX and more.</description>
</book>
<book id="bk112">
<author>Galos, Mike</author>
<title>Visual Studio 7: A Comprehensive Guide</title>
<genre>Computer</genre>
<price>49.95</price>
<publish_date>2001-04-16</publish_date>
<description>Microsoft Visual Studio 7 is explored in depth,
looking at how Visual Basic, Visual C++, C#, and ASP+ are
integrated into a comprehensive development
environment.</description>
</book>
</bookstore>

View File

@ -1,43 +0,0 @@
package xmlquery
import (
"sync"
"github.com/golang/groupcache/lru"
"github.com/antchfx/xpath"
)
// DisableSelectorCache disables caching for query selectors when set to true.
var DisableSelectorCache = false
// SelectorCacheMaxEntries sets how many selector objects can be cached. The default is 50.
// Caching is disabled if SelectorCacheMaxEntries <= 0.
var SelectorCacheMaxEntries = 50
var (
cacheOnce sync.Once
cache *lru.Cache
cacheMutex sync.Mutex
)
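// getQuery compiles expr into an *xpath.Expr. When caching is enabled, a
// previously compiled expression is returned from the LRU cache if available.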
func getQuery(expr string) (*xpath.Expr, error) {
if DisableSelectorCache || SelectorCacheMaxEntries <= 0 {
return xpath.Compile(expr)
}
cacheOnce.Do(func() {
cache = lru.New(SelectorCacheMaxEntries)
})
cacheMutex.Lock()
defer cacheMutex.Unlock()
if v, ok := cache.Get(expr); ok {
return v.(*xpath.Expr), nil
}
v, err := xpath.Compile(expr)
if err != nil {
return nil, err
}
cache.Add(expr, v)
return v, nil
}

View File

@ -1,69 +0,0 @@
package xmlquery
import (
"bufio"
)
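// cachedReader wraps a bufio.Reader and, while caching is enabled, copies the
// bytes it reads into an internal buffer so the parser can later inspect the
// raw input (for example, to detect CDATA sections).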
type cachedReader struct {
buffer *bufio.Reader
cache []byte
cacheCap int
cacheLen int
caching bool
}
func newCachedReader(r *bufio.Reader) *cachedReader {
return &cachedReader{
buffer: r,
cache: make([]byte, 4096),
cacheCap: 4096,
cacheLen: 0,
caching: false,
}
}
func (c *cachedReader) StartCaching() {
c.cacheLen = 0
c.caching = true
}
func (c *cachedReader) ReadByte() (byte, error) {
if !c.caching {
return c.buffer.ReadByte()
}
b, err := c.buffer.ReadByte()
if err != nil {
return b, err
}
if c.cacheLen < c.cacheCap {
c.cache[c.cacheLen] = b
c.cacheLen++
}
return b, err
}
func (c *cachedReader) Cache() []byte {
return c.cache[:c.cacheLen]
}
func (c *cachedReader) StopCaching() {
c.caching = false
}
func (c *cachedReader) Read(p []byte) (int, error) {
n, err := c.buffer.Read(p)
if err != nil {
return n, err
}
if c.caching && c.cacheLen < c.cacheCap {
for i := 0; i < n; i++ {
c.cache[c.cacheLen] = p[i]
c.cacheLen++
if c.cacheLen >= c.cacheCap {
break
}
}
}
return n, err
}

View File

@ -1,9 +0,0 @@
module github.com/antchfx/xmlquery
go 1.14
require (
github.com/antchfx/xpath v1.1.10
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc
)

View File

@ -1,14 +0,0 @@
github.com/antchfx/xpath v1.1.10 h1:cJ0pOvEdN/WvYXxvRrzQH9x5QWKpzHacYO8qzCcDYAg=
github.com/antchfx/xpath v1.1.10/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -1,232 +0,0 @@
package xmlquery
import (
"bytes"
"encoding/xml"
"fmt"
"strings"
)
// A NodeType is the type of a Node.
type NodeType uint
const (
// DocumentNode is a document object that, as the root of the document tree,
// provides access to the entire XML document.
DocumentNode NodeType = iota
// DeclarationNode is the document type declaration, indicated by the
// following tag (for example, <!DOCTYPE...> ).
DeclarationNode
// ElementNode is an element (for example, <item> ).
ElementNode
// TextNode is the text content of a node.
TextNode
// CharDataNode is a CDATA section node (for example, <![CDATA[content]]> ).
CharDataNode
// CommentNode a comment (for example, <!-- my comment --> ).
CommentNode
// AttributeNode is an attribute of an element.
AttributeNode
)
type Attr struct {
Name xml.Name
Value string
NamespaceURI string
}
// A Node consists of a NodeType and some Data (tag name for
// element nodes, content for text) and is part of a tree of Nodes.
type Node struct {
Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
Type NodeType
Data string
Prefix string
NamespaceURI string
Attr []Attr
level int // node level in the tree
}
// InnerText returns the text between the start and end tags of the object.
func (n *Node) InnerText() string {
var output func(*bytes.Buffer, *Node)
output = func(buf *bytes.Buffer, n *Node) {
switch n.Type {
case TextNode, CharDataNode:
buf.WriteString(n.Data)
case CommentNode:
default:
for child := n.FirstChild; child != nil; child = child.NextSibling {
output(buf, child)
}
}
}
var buf bytes.Buffer
output(&buf, n)
return buf.String()
}
func (n *Node) sanitizedData(preserveSpaces bool) string {
if preserveSpaces {
return strings.Trim(n.Data, "\n\t")
}
return strings.TrimSpace(n.Data)
}
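// calculatePreserveSpaces reports whether whitespace should be preserved for
// node n based on its xml:space attribute; if the attribute is absent, the
// value inherited from the ancestors (pastValue) is kept.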
func calculatePreserveSpaces(n *Node, pastValue bool) bool {
if attr := n.SelectAttr("xml:space"); attr == "preserve" {
return true
} else if attr == "default" {
return false
}
return pastValue
}
func outputXML(buf *bytes.Buffer, n *Node, preserveSpaces bool) {
preserveSpaces = calculatePreserveSpaces(n, preserveSpaces)
switch n.Type {
case TextNode:
xml.EscapeText(buf, []byte(n.sanitizedData(preserveSpaces)))
return
case CharDataNode:
buf.WriteString("<![CDATA[")
buf.WriteString(n.Data)
buf.WriteString("]]>")
return
case CommentNode:
buf.WriteString("<!--")
buf.WriteString(n.Data)
buf.WriteString("-->")
return
case DeclarationNode:
buf.WriteString("<?" + n.Data)
default:
if n.Prefix == "" {
buf.WriteString("<" + n.Data)
} else {
buf.WriteString("<" + n.Prefix + ":" + n.Data)
}
}
for _, attr := range n.Attr {
if attr.Name.Space != "" {
buf.WriteString(fmt.Sprintf(` %s:%s=`, attr.Name.Space, attr.Name.Local))
} else {
buf.WriteString(fmt.Sprintf(` %s=`, attr.Name.Local))
}
buf.WriteByte('"')
xml.EscapeText(buf, []byte(attr.Value))
buf.WriteByte('"')
}
if n.Type == DeclarationNode {
buf.WriteString("?>")
} else {
buf.WriteString(">")
}
for child := n.FirstChild; child != nil; child = child.NextSibling {
outputXML(buf, child, preserveSpaces)
}
if n.Type != DeclarationNode {
if n.Prefix == "" {
buf.WriteString(fmt.Sprintf("</%s>", n.Data))
} else {
buf.WriteString(fmt.Sprintf("</%s:%s>", n.Prefix, n.Data))
}
}
}
// OutputXML returns the XML text including tag names. If self is true, the
// node itself is included in the output; otherwise only its children are serialized.
func (n *Node) OutputXML(self bool) string {
var buf bytes.Buffer
if self {
outputXML(&buf, n, false)
} else {
for n := n.FirstChild; n != nil; n = n.NextSibling {
outputXML(&buf, n, false)
}
}
return buf.String()
}
// AddAttr adds a new attribute specified by 'key' and 'val' to a node 'n'.
func AddAttr(n *Node, key, val string) {
var attr Attr
if i := strings.Index(key, ":"); i > 0 {
attr = Attr{
Name: xml.Name{Space: key[:i], Local: key[i+1:]},
Value: val,
}
} else {
attr = Attr{
Name: xml.Name{Local: key},
Value: val,
}
}
n.Attr = append(n.Attr, attr)
}
// AddChild adds a new node 'n' to a node 'parent' as its last child.
func AddChild(parent, n *Node) {
n.Parent = parent
n.NextSibling = nil
if parent.FirstChild == nil {
parent.FirstChild = n
n.PrevSibling = nil
} else {
parent.LastChild.NextSibling = n
n.PrevSibling = parent.LastChild
}
parent.LastChild = n
}
// AddSibling adds a new node 'n' as a sibling of a given node 'sibling'.
// Note it is not necessarily true that the new node 'n' would be added
// immediately after 'sibling'. If 'sibling' isn't the last child of its
// parent, then the new node 'n' will be added at the end of the sibling
// chain of their parent.
func AddSibling(sibling, n *Node) {
for t := sibling.NextSibling; t != nil; t = t.NextSibling {
sibling = t
}
n.Parent = sibling.Parent
sibling.NextSibling = n
n.PrevSibling = sibling
n.NextSibling = nil
if sibling.Parent != nil {
sibling.Parent.LastChild = n
}
}
// RemoveFromTree removes a node and its subtree from the document
// tree it is in. If the node is the root of the tree, this is a no-op.
func RemoveFromTree(n *Node) {
if n.Parent == nil {
return
}
if n.Parent.FirstChild == n {
if n.Parent.LastChild == n {
n.Parent.FirstChild = nil
n.Parent.LastChild = nil
} else {
n.Parent.FirstChild = n.NextSibling
n.NextSibling.PrevSibling = nil
}
} else {
if n.Parent.LastChild == n {
n.Parent.LastChild = n.PrevSibling
n.PrevSibling.NextSibling = nil
} else {
n.PrevSibling.NextSibling = n.NextSibling
n.NextSibling.PrevSibling = n.PrevSibling
}
}
n.Parent = nil
n.PrevSibling = nil
n.NextSibling = nil
}

View File

@ -1,30 +0,0 @@
package xmlquery
import (
"encoding/xml"
)
type ParserOptions struct{
Decoder *DecoderOptions
}
func (options ParserOptions) apply(parser *parser) {
if options.Decoder != nil {
(*options.Decoder).apply(parser.decoder)
}
}
// DecoderOptions implements the same options as the standard
// encoding/xml package's Decoder. Please refer to its documentation:
// https://golang.org/pkg/encoding/xml/#Decoder
type DecoderOptions struct{
Strict bool
AutoClose []string
Entity map[string]string
}
func (options DecoderOptions) apply(decoder *xml.Decoder) {
decoder.Strict = options.Strict
decoder.AutoClose = options.AutoClose
decoder.Entity = options.Entity
}

View File

@ -1,365 +0,0 @@
package xmlquery
import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"regexp"
"strings"
"github.com/antchfx/xpath"
"golang.org/x/net/html/charset"
)
var xmlMIMERegex = regexp.MustCompile(`(?i)((application|image|message|model)/((\w|\.|-)+\+?)?|text/)(wb)?xml`)
// LoadURL loads the XML document from the specified URL.
func LoadURL(url string) (*Node, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// Make sure the Content-Type has a valid XML MIME type
if xmlMIMERegex.MatchString(resp.Header.Get("Content-Type")) {
return Parse(resp.Body)
}
return nil, fmt.Errorf("invalid XML document(%s)", resp.Header.Get("Content-Type"))
}
// Parse returns the parse tree for the XML from the given Reader.
func Parse(r io.Reader) (*Node, error) {
return ParseWithOptions(r, ParserOptions{})
}
// ParseWithOptions is like Parse, but with custom options.
func ParseWithOptions(r io.Reader, options ParserOptions) (*Node, error) {
p := createParser(r)
options.apply(p)
for {
_, err := p.parse()
if err == io.EOF {
return p.doc, nil
}
if err != nil {
return nil, err
}
}
}
type parser struct {
decoder *xml.Decoder
doc *Node
space2prefix map[string]string
level int
prev *Node
streamElementXPath *xpath.Expr // Under streaming mode, this specifies the xpath to the target element node(s).
streamElementFilter *xpath.Expr // If specified, it provides further filtering on the target element.
streamNode *Node // Need to remember the last target node so we can clean it up upon the next Read() call.
streamNodePrev *Node // Need to remember target node's prev so upon target node removal, we can restore correct prev.
reader *cachedReader // Need to maintain a reference to the reader, so we can determine whether a node contains CDATA.
}
func createParser(r io.Reader) *parser {
reader := newCachedReader(bufio.NewReader(r))
p := &parser{
decoder: xml.NewDecoder(reader),
doc: &Node{Type: DocumentNode},
space2prefix: make(map[string]string),
level: 0,
reader: reader,
}
// http://www.w3.org/XML/1998/namespace is bound by definition to the prefix xml.
p.space2prefix["http://www.w3.org/XML/1998/namespace"] = "xml"
p.decoder.CharsetReader = charset.NewReaderLabel
p.prev = p.doc
return p
}
func (p *parser) parse() (*Node, error) {
var streamElementNodeCounter int
for {
tok, err := p.decoder.Token()
if err != nil {
return nil, err
}
switch tok := tok.(type) {
case xml.StartElement:
if p.level == 0 {
// missing XML declaration; add a default one
node := &Node{Type: DeclarationNode, Data: "xml", level: 1}
AddChild(p.prev, node)
p.level = 1
p.prev = node
}
// https://www.w3.org/TR/xml-names/#scoping-defaulting
for _, att := range tok.Attr {
if att.Name.Local == "xmlns" {
p.space2prefix[att.Value] = ""
} else if att.Name.Space == "xmlns" {
p.space2prefix[att.Value] = att.Name.Local
}
}
if tok.Name.Space != "" {
if _, found := p.space2prefix[tok.Name.Space]; !found {
return nil, errors.New("xmlquery: invalid XML document, namespace is missing")
}
}
attributes := make([]Attr, len(tok.Attr))
for i, att := range tok.Attr {
name := att.Name
if prefix, ok := p.space2prefix[name.Space]; ok {
name.Space = prefix
}
attributes[i] = Attr{
Name: name,
Value: att.Value,
NamespaceURI: att.Name.Space,
}
}
node := &Node{
Type: ElementNode,
Data: tok.Name.Local,
Prefix: p.space2prefix[tok.Name.Space],
NamespaceURI: tok.Name.Space,
Attr: attributes,
level: p.level,
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
// If we're in the streaming mode, we need to remember the node if it is the target node
// so that when we finish processing the node's EndElement, we know how/what to return to
// caller. Also we need to remove the target node from the tree upon next Read() call so
// memory doesn't grow unbounded.
if p.streamElementXPath != nil {
if p.streamNode == nil {
if QuerySelector(p.doc, p.streamElementXPath) != nil {
p.streamNode = node
p.streamNodePrev = p.prev
streamElementNodeCounter = 1
}
} else {
streamElementNodeCounter++
}
}
p.prev = node
p.level++
p.reader.StartCaching()
case xml.EndElement:
p.level--
// If we're in streaming mode, and we already have a potential streaming
// target node identified (p.streamNode != nil) then we need to check if
// this is the real one we want to return to caller.
if p.streamNode != nil {
streamElementNodeCounter--
if streamElementNodeCounter == 0 {
// Now we know this element node at least passes the initial
// p.streamElementXPath check and is a potential target node candidate.
// We need one more check with p.streamElementFilter (if given) to
// ensure it is really the element node we want.
// The reason we need a two-step check process is the following
// situation:
// <AAA><BBB>b1</BBB></AAA>
// Say p.streamElementXPath = "/AAA/BBB[. != 'b1']". During
// xml.StartElement time, the <BBB> node is still empty, so it will pass
// the p.streamElementXPath check. However, eventually we know this <BBB>
// shouldn't be returned to the caller. Having a second, more fine-grained
// filter check ensures that. So in this case, the caller should really
// set up the stream parser with:
// streamElementXPath = "/AAA/BBB"
// streamElementFilter = "/AAA/BBB[. != 'b1']"
if p.streamElementFilter == nil || QuerySelector(p.doc, p.streamElementFilter) != nil {
return p.streamNode, nil
}
// otherwise, this isn't our target node, clean things up.
// note we also remove the underlying *Node from the node tree, to prevent
// future stream node candidate selection error.
RemoveFromTree(p.streamNode)
p.prev = p.streamNodePrev
p.streamNode = nil
p.streamNodePrev = nil
}
}
case xml.CharData:
p.reader.StopCaching()
// First, normalize the cache...
cached := strings.ToUpper(string(p.reader.Cache()))
nodeType := TextNode
if strings.HasPrefix(cached, "<![CDATA[") {
nodeType = CharDataNode
}
node := &Node{Type: nodeType, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
p.reader.StartCaching()
case xml.Comment:
node := &Node{Type: CommentNode, Data: string(tok), level: p.level}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
} else if p.level < p.prev.level {
for i := p.prev.level - p.level; i > 1; i-- {
p.prev = p.prev.Parent
}
AddSibling(p.prev.Parent, node)
}
case xml.ProcInst: // Processing Instruction
if p.prev.Type != DeclarationNode {
p.level++
}
node := &Node{Type: DeclarationNode, Data: tok.Target, level: p.level}
pairs := strings.Split(string(tok.Inst), " ")
for _, pair := range pairs {
pair = strings.TrimSpace(pair)
if i := strings.Index(pair, "="); i > 0 {
AddAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
}
}
if p.level == p.prev.level {
AddSibling(p.prev, node)
} else if p.level > p.prev.level {
AddChild(p.prev, node)
}
p.prev = node
case xml.Directive:
}
}
}
// StreamParser enables loading and parsing an XML document in a streaming
// fashion.
type StreamParser struct {
p *parser
}
// CreateStreamParser creates a StreamParser. Argument streamElementXPath is
// required.
// Argument streamElementFilter is optional and should only be used in advanced
// scenarios.
//
// Scenario 1: simple case:
// xml := `<AAA><BBB>b1</BBB><BBB>b2</BBB></AAA>`
// sp, err := CreateStreamParser(strings.NewReader(xml), "/AAA/BBB")
// if err != nil {
// panic(err)
// }
// for {
// n, err := sp.Read()
// if err != nil {
// break
// }
// fmt.Println(n.OutputXML(true))
// }
// Output will be:
// <BBB>b1</BBB>
// <BBB>b2</BBB>
//
// Scenario 2: advanced case:
// xml := `<AAA><BBB>b1</BBB><BBB>b2</BBB></AAA>`
// sp, err := CreateStreamParser(strings.NewReader(xml), "/AAA/BBB", "/AAA/BBB[. != 'b1']")
// if err != nil {
// panic(err)
// }
// for {
// n, err := sp.Read()
// if err != nil {
// break
// }
// fmt.Println(n.OutputXML(true))
// }
// Output will be:
// <BBB>b2</BBB>
//
// As the argument names indicate, streamElementXPath should be used for
// providing an xpath query pointing to the target element node only, with no extra
// filtering on the element itself or its children; while streamElementFilter,
// if needed, can provide additional filtering on the target element and its
// children.
//
// CreateStreamParser returns an error if either streamElementXPath or
// streamElementFilter, if provided, cannot be successfully parsed and compiled
// into a valid xpath query.
func CreateStreamParser(r io.Reader, streamElementXPath string, streamElementFilter ...string) (*StreamParser, error) {
return CreateStreamParserWithOptions(r, ParserOptions{}, streamElementXPath, streamElementFilter...)
}
// CreateStreamParserWithOptions is like CreateStreamParser, but with custom options
func CreateStreamParserWithOptions(
r io.Reader,
options ParserOptions,
streamElementXPath string,
streamElementFilter ...string,
) (*StreamParser, error) {
elemXPath, err := getQuery(streamElementXPath)
if err != nil {
return nil, fmt.Errorf("invalid streamElementXPath '%s', err: %s", streamElementXPath, err.Error())
}
elemFilter := (*xpath.Expr)(nil)
if len(streamElementFilter) > 0 {
elemFilter, err = getQuery(streamElementFilter[0])
if err != nil {
return nil, fmt.Errorf("invalid streamElementFilter '%s', err: %s", streamElementFilter[0], err.Error())
}
}
parser := createParser(r)
options.apply(parser)
sp := &StreamParser{
p: parser,
}
sp.p.streamElementXPath = elemXPath
sp.p.streamElementFilter = elemFilter
return sp, nil
}
// Read returns a target node that satisfies the XPath specified by caller at
// StreamParser creation time. If there are no more satisfying target nodes after
// reading the rest of the XML document, io.EOF will be returned. At any time,
// any XML parsing error encountered will be returned, and the stream parsing
// stopped. Calling Read() after an error is returned (including io.EOF) results
// in undefined behavior. Also note, due to the streaming nature, calling Read()
// will automatically remove any previous target node(s) from the document tree.
func (sp *StreamParser) Read() (*Node, error) {
// Because this is a streaming read, we need to release/remove last
// target node from the node tree to free up memory.
if sp.p.streamNode != nil {
// We need to remove all siblings before the current stream node,
// because the document may contain unwanted nodes between the target
// ones (for example newline text nodes), which would otherwise
// accumulate as first children and slow down the stream over time.
for sp.p.streamNode.PrevSibling != nil {
RemoveFromTree(sp.p.streamNode.PrevSibling)
}
sp.p.prev = sp.p.streamNode.Parent
RemoveFromTree(sp.p.streamNode)
sp.p.streamNode = nil
sp.p.streamNodePrev = nil
}
return sp.p.parse()
}

View File

@ -1,309 +0,0 @@
/*
Package xmlquery provides functions to extract data from XML documents using XPath expressions.
*/
package xmlquery
import (
"fmt"
"strings"
"github.com/antchfx/xpath"
)
// SelectElements finds child elements with the specified name.
func (n *Node) SelectElements(name string) []*Node {
return Find(n, name)
}
// SelectElement finds the first child element with the specified name.
func (n *Node) SelectElement(name string) *Node {
return FindOne(n, name)
}
// SelectAttr returns the attribute value with the specified name.
func (n *Node) SelectAttr(name string) string {
if n.Type == AttributeNode {
if n.Data == name {
return n.InnerText()
}
return ""
}
var local, space string
local = name
if i := strings.Index(name, ":"); i > 0 {
space = name[:i]
local = name[i+1:]
}
for _, attr := range n.Attr {
if attr.Name.Local == local && attr.Name.Space == space {
return attr.Value
}
}
return ""
}
var _ xpath.NodeNavigator = &NodeNavigator{}
// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified
// XML Node.
func CreateXPathNavigator(top *Node) *NodeNavigator {
return &NodeNavigator{curr: top, root: top, attr: -1}
}
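// getCurrentNode returns the *Node behind the iterator's current navigator.
// For attribute matches it synthesizes an AttributeNode whose child text node
// carries the attribute value.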
func getCurrentNode(it *xpath.NodeIterator) *Node {
n := it.Current().(*NodeNavigator)
if n.NodeType() == xpath.AttributeNode {
childNode := &Node{
Type: TextNode,
Data: n.Value(),
}
return &Node{
Parent: n.curr,
Type: AttributeNode,
Data: n.LocalName(),
FirstChild: childNode,
LastChild: childNode,
}
}
return n.curr
}
// Find is like QueryAll but panics if `expr` is not a valid XPath expression.
// See `QueryAll()` function.
func Find(top *Node, expr string) []*Node {
nodes, err := QueryAll(top, expr)
if err != nil {
panic(err)
}
return nodes
}
// FindOne is like Query but panics if `expr` is not a valid XPath expression.
// See `Query()` function.
func FindOne(top *Node, expr string) *Node {
node, err := Query(top, expr)
if err != nil {
panic(err)
}
return node
}
// QueryAll searches for all XML nodes that match the specified XPath expr.
// Returns an error if the expression `expr` cannot be parsed.
func QueryAll(top *Node, expr string) ([]*Node, error) {
exp, err := getQuery(expr)
if err != nil {
return nil, err
}
return QuerySelectorAll(top, exp), nil
}
// Query searches for the XML node that matches the specified XPath expr,
// and returns the first matched element.
func Query(top *Node, expr string) (*Node, error) {
exp, err := getQuery(expr)
if err != nil {
return nil, err
}
return QuerySelector(top, exp), nil
}
// QuerySelectorAll searches for all XML nodes that match the specified
// XPath selector.
func QuerySelectorAll(top *Node, selector *xpath.Expr) []*Node {
t := selector.Select(CreateXPathNavigator(top))
var elems []*Node
for t.MoveNext() {
elems = append(elems, getCurrentNode(t))
}
return elems
}
// QuerySelector returns the first matched XML Node by the specified XPath
// selector.
func QuerySelector(top *Node, selector *xpath.Expr) *Node {
t := selector.Select(CreateXPathNavigator(top))
if t.MoveNext() {
return getCurrentNode(t)
}
return nil
}
// FindEach searches the Node tree and calls the callback function cb for each matched node.
// Deprecated: use `for ... := range Find(...) {}` instead.
func FindEach(top *Node, expr string, cb func(int, *Node)) {
for i, n := range Find(top, expr) {
cb(i, n)
}
}
// FindEachWithBreak functions the same as FindEach but allows breaking the loop
// by returning false from the callback function `cb`.
// Deprecated: use `for ... := range Find(...) {}` instead.
func FindEachWithBreak(top *Node, expr string, cb func(int, *Node) bool) {
for i, n := range Find(top, expr) {
if !cb(i, n) {
break
}
}
}
type NodeNavigator struct {
root, curr *Node
attr int
}
func (x *NodeNavigator) Current() *Node {
return x.curr
}
func (x *NodeNavigator) NodeType() xpath.NodeType {
switch x.curr.Type {
case CommentNode:
return xpath.CommentNode
case TextNode, CharDataNode:
return xpath.TextNode
case DeclarationNode, DocumentNode:
return xpath.RootNode
case ElementNode:
if x.attr != -1 {
return xpath.AttributeNode
}
return xpath.ElementNode
}
panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type))
}
func (x *NodeNavigator) LocalName() string {
if x.attr != -1 {
return x.curr.Attr[x.attr].Name.Local
}
return x.curr.Data
}
func (x *NodeNavigator) Prefix() string {
if x.NodeType() == xpath.AttributeNode {
if x.attr != -1 {
return x.curr.Attr[x.attr].Name.Space
}
return ""
}
return x.curr.Prefix
}
func (x *NodeNavigator) NamespaceURL() string {
if x.attr != -1 {
return x.curr.Attr[x.attr].NamespaceURI
}
return x.curr.NamespaceURI
}
func (x *NodeNavigator) Value() string {
switch x.curr.Type {
case CommentNode:
return x.curr.Data
case ElementNode:
if x.attr != -1 {
return x.curr.Attr[x.attr].Value
}
return x.curr.InnerText()
case TextNode:
return x.curr.Data
}
return ""
}
func (x *NodeNavigator) Copy() xpath.NodeNavigator {
n := *x
return &n
}
func (x *NodeNavigator) MoveToRoot() {
x.curr = x.root
}
func (x *NodeNavigator) MoveToParent() bool {
if x.attr != -1 {
x.attr = -1
return true
} else if node := x.curr.Parent; node != nil {
x.curr = node
return true
}
return false
}
func (x *NodeNavigator) MoveToNextAttribute() bool {
if x.attr >= len(x.curr.Attr)-1 {
return false
}
x.attr++
return true
}
func (x *NodeNavigator) MoveToChild() bool {
if x.attr != -1 {
return false
}
if node := x.curr.FirstChild; node != nil {
x.curr = node
return true
}
return false
}
func (x *NodeNavigator) MoveToFirst() bool {
if x.attr != -1 || x.curr.PrevSibling == nil {
return false
}
for {
node := x.curr.PrevSibling
if node == nil {
break
}
x.curr = node
}
return true
}
func (x *NodeNavigator) String() string {
return x.Value()
}
func (x *NodeNavigator) MoveToNext() bool {
if x.attr != -1 {
return false
}
for node := x.curr.NextSibling; node != nil; node = x.curr.NextSibling {
x.curr = node
if x.curr.Type != TextNode {
return true
}
}
return false
}
func (x *NodeNavigator) MoveToPrevious() bool {
if x.attr != -1 {
return false
}
for node := x.curr.PrevSibling; node != nil; node = x.curr.PrevSibling {
x.curr = node
if x.curr.Type != TextNode {
return true
}
}
return false
}
func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
node, ok := other.(*NodeNavigator)
if !ok || node.root != x.root {
return false
}
x.curr = node.curr
x.attr = node.attr
return true
}

View File

@ -1,32 +0,0 @@
# vscode
.vscode
debug
*.test
./build
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,12 +0,0 @@
language: go
go:
- 1.6
- 1.9
- '1.10'
install:
- go get github.com/mattn/goveralls
script:
- $HOME/gopath/bin/goveralls -service=travis-ci

View File

@ -1,18 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "xpath",
srcs = [
"build.go",
"func.go",
"func_go110.go",
"func_pre_go110.go",
"operator.go",
"parse.go",
"query.go",
"xpath.go",
],
importmap = "peridot.resf.org/vendor/github.com/antchfx/xpath",
importpath = "github.com/antchfx/xpath",
visibility = ["//visibility:public"],
)

View File

@ -1,17 +0,0 @@
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,172 +0,0 @@
XPath
====
[![GoDoc](https://godoc.org/github.com/antchfx/xpath?status.svg)](https://godoc.org/github.com/antchfx/xpath)
[![Coverage Status](https://coveralls.io/repos/github/antchfx/xpath/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xpath?branch=master)
[![Build Status](https://travis-ci.org/antchfx/xpath.svg?branch=master)](https://travis-ci.org/antchfx/xpath)
[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xpath)](https://goreportcard.com/report/github.com/antchfx/xpath)
XPath is a Go package that provides selecting nodes from XML, HTML, or other documents using XPath expressions.
Implementation
===
- [htmlquery](https://github.com/antchfx/htmlquery) - an XPath query package for HTML documents.
- [xmlquery](https://github.com/antchfx/xmlquery) - an XPath query package for XML documents.
- [jsonquery](https://github.com/antchfx/jsonquery) - an XPath query package for JSON documents.
Supported Features
===
#### The basic XPath patterns.
> The basic XPath patterns cover 90% of the cases that most stylesheets will need.
- `node` : Selects all child elements with nodeName of node.
- `*` : Selects all child elements.
- `@attr` : Selects the attribute attr.
- `@*` : Selects all attributes.
- `node()` : Matches a node of any type.
- `text()` : Matches a text node.
- `comment()` : Matches a comment.
- `.` : Selects the current node.
- `..` : Selects the parent of current node.
- `/` : Selects the document node.
- `a[expr]` : Select only those nodes matching a which also satisfy the expression expr.
- `a[n]` : Selects the nth node matching a. When a filter's expression is a number, XPath selects based on position.
- `a/b` : For each node matching a, add the nodes matching b to the result.
- `a//b` : For each node matching a, add the descendant nodes matching b to the result.
- `//b` : Returns elements in the entire document matching b.
- `a|b` : All nodes matching a or b, union operation (not boolean or).
- `(a, b, c)` : Evaluates each of its operands and concatenates the resulting sequences, in order, into a single result sequence.
#### Node Axes
- `child::*` : The child axis selects children of the current node.
- `descendant::*` : The descendant axis selects descendants of the current node. It is equivalent to '//'.
- `descendant-or-self::*` : Selects descendants including the current node.
- `attribute::*` : Selects attributes of the current element. It is equivalent to @*
- `following-sibling::*` : Selects nodes after the current node.
- `preceding-sibling::*` : Selects nodes before the current node.
- `following::*` : Selects the first matching node following in document order, excluding descendants.
- `preceding::*` : Selects the first matching node preceding in document order, excluding ancestors.
- `parent::*` : Selects the parent if it matches. The '..' pattern from the core is equivalent to 'parent::node()'.
- `ancestor::*` : Selects matching ancestors.
- `ancestor-or-self::*` : Selects ancestors including the current node.
- `self::*` : Selects the current node. '.' is equivalent to 'self::node()'.
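For instance (a minimal sketch using the `xmlquery` implementation listed above; `doc` is assumed to be an already-parsed document), an axis can be used directly inside a query:
```go
// doc is assumed to be a document parsed with xmlquery.Parse.
// Select the <book> parents of every <title> element via the parent axis.
books := xmlquery.Find(doc, "//title/parent::book")
// For documents where every <title> sits directly under a <book>,
// the shorthand "//title/.." selects the same nodes.
```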
#### Expressions
The package supports three expression types: number, boolean, and string (see the usage sketch after the function table below).
- `path` : Selects nodes based on the path.
- `a = b` : Standard comparisons.
* a = b True if a equals b.
* a != b True if a is not equal to b.
* a < b True if a is less than b.
* a <= b True if a is less than or equal to b.
* a > b True if a is greater than b.
* a >= b True if a is greater than or equal to b.
- `a + b` : Arithmetic expressions.
* `- a` Unary minus
* a + b Add
* a - b Subtract
* a * b Multiply
* a div b Divide
* a mod b Floating point mod, like Java.
- `a or b` : Boolean `or` operation.
- `a and b` : Boolean `and` operation.
- `(expr)` : Parenthesized expressions.
- `fun(arg1, ..., argn)` : Function calls:
| Function | Supported |
| --- | --- |
`boolean()`| ✓ |
`ceiling()`| ✓ |
`choose()`| ✗ |
`concat()`| ✓ |
`contains()`| ✓ |
`count()`| ✓ |
`current()`| ✗ |
`document()`| ✗ |
`element-available()`| ✗ |
`ends-with()`| ✓ |
`false()`| ✓ |
`floor()`| ✓ |
`format-number()`| ✗ |
`function-available()`| ✗ |
`generate-id()`| ✗ |
`id()`| ✗ |
`key()`| ✗ |
`lang()`| ✗ |
`last()`| ✓ |
`local-name()`| ✓ |
`name()`| ✓ |
`namespace-uri()`| ✓ |
`normalize-space()`| ✓ |
`not()`| ✓ |
`number()`| ✓ |
`position()`| ✓ |
`replace()`| ✓ |
`reverse()`| ✓ |
`round()`| ✓ |
`starts-with()`| ✓ |
`string()`| ✓ |
`string-length()`| ✓ |
`substring()`| ✓ |
`substring-after()`| ✓ |
`substring-before()`| ✓ |
`sum()`| ✓ |
`system-property()`| ✗ |
`translate()`| ✓ |
`true()`| ✓ |
`unparsed-entity-url()` | ✗ |
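As an illustration (a minimal sketch, not taken from this package's own documentation, pairing it with the `xmlquery` implementation above; `doc` is assumed to be an already-parsed document), an expression can be compiled once and then evaluated against a navigator:
```go
// doc is assumed to be a document parsed with xmlquery.Parse.
// Compile returns an error for an invalid XPath expression.
expr, err := xpath.Compile("count(//book[price>=10])")
if err != nil {
	panic(err)
}
// count() yields a number, so the result can be asserted to float64.
total := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
fmt.Printf("books priced at 10 or more: %.0f\n", total)
```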
Changelogs
===
2019-03-19
- Optimized XPath `|` operation performance. [#33](https://github.com/antchfx/xpath/issues/33). Tip: split a query with many `|` operations into multiple subqueries.
2019-01-29
- Improved the `normalize-space` function. [#32](https://github.com/antchfx/xpath/issues/32)
2018-12-07
- Added support for XPath 2.0 sequence expressions. [#30](https://github.com/antchfx/xpath/pull/30) by [@minherz](https://github.com/minherz).

View File

@ -1,522 +0,0 @@
package xpath
import (
"errors"
"fmt"
)
type flag int
const (
noneFlag flag = iota
filterFlag
)
// builder builds a query from an XPath expression.
type builder struct {
depth int
flag flag
firstInput query
}
// axisPredicate creates a predicate function for this axis node.
func axisPredicate(root *axisNode) func(NodeNavigator) bool {
// get the current axis node type.
typ := ElementNode
switch root.AxeType {
case "attribute":
typ = AttributeNode
case "self", "parent":
typ = allNode
default:
switch root.Prop {
case "comment":
typ = CommentNode
case "text":
typ = TextNode
// case "processing-instruction":
// typ = ProcessingInstructionNode
case "node":
typ = allNode
}
}
nametest := root.LocalName != "" || root.Prefix != ""
predicate := func(n NodeNavigator) bool {
if typ == n.NodeType() || typ == allNode || typ == TextNode {
if nametest {
if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() {
return true
}
} else {
return true
}
}
return false
}
return predicate
}
// processAxisNode processes a query for the XPath axis node.
func (b *builder) processAxisNode(root *axisNode) (query, error) {
var (
err error
qyInput query
qyOutput query
predicate = axisPredicate(root)
)
if root.Input == nil {
qyInput = &contextQuery{}
} else {
if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
var qyGrandInput query
if input.Input != nil {
qyGrandInput, _ = b.processNode(input.Input)
} else {
qyGrandInput = &contextQuery{}
}
// fix #20: https://github.com/antchfx/htmlquery/issues/20
filter := func(n NodeNavigator) bool {
v := predicate(n)
switch root.Prop {
case "text":
v = v && n.NodeType() == TextNode
case "comment":
v = v && n.NodeType() == CommentNode
}
return v
}
qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: filter, Self: true}
return qyOutput, nil
}
}
qyInput, err = b.processNode(root.Input)
if err != nil {
return nil, err
}
}
switch root.AxeType {
case "ancestor":
qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate}
case "ancestor-or-self":
qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate, Self: true}
case "attribute":
qyOutput = &attributeQuery{Input: qyInput, Predicate: predicate}
case "child":
filter := func(n NodeNavigator) bool {
v := predicate(n)
switch root.Prop {
case "text":
v = v && n.NodeType() == TextNode
case "node":
v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode)
case "comment":
v = v && n.NodeType() == CommentNode
}
return v
}
qyOutput = &childQuery{Input: qyInput, Predicate: filter}
case "descendant":
qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate}
case "descendant-or-self":
qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate, Self: true}
case "following":
qyOutput = &followingQuery{Input: qyInput, Predicate: predicate}
case "following-sibling":
qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
case "parent":
qyOutput = &parentQuery{Input: qyInput, Predicate: predicate}
case "preceding":
qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate}
case "preceding-sibling":
qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
case "self":
qyOutput = &selfQuery{Input: qyInput, Predicate: predicate}
case "namespace":
// the namespace axis is not implemented; no query is produced here.
default:
err = fmt.Errorf("unknown axe type: %s", root.AxeType)
return nil, err
}
return qyOutput, nil
}
// processFilterNode builds query for the XPath filter predicate.
func (b *builder) processFilterNode(root *filterNode) (query, error) {
b.flag |= filterFlag
qyInput, err := b.processNode(root.Input)
if err != nil {
return nil, err
}
qyCond, err := b.processNode(root.Condition)
if err != nil {
return nil, err
}
qyOutput := &filterQuery{Input: qyInput, Predicate: qyCond}
return qyOutput, nil
}
// processFunctionNode processes query for the XPath function node.
func (b *builder) processFunctionNode(root *functionNode) (query, error) {
var qyOutput query
switch root.FuncName {
case "starts-with":
arg1, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
arg2, err := b.processNode(root.Args[1])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)}
case "ends-with":
arg1, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
arg2, err := b.processNode(root.Args[1])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: endwithFunc(arg1, arg2)}
case "contains":
arg1, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
arg2, err := b.processNode(root.Args[1])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: containsFunc(arg1, arg2)}
case "substring":
//substring( string , start [, length] )
if len(root.Args) < 2 {
return nil, errors.New("xpath: substring function must have at least two parameter")
}
var (
arg1, arg2, arg3 query
err error
)
if arg1, err = b.processNode(root.Args[0]); err != nil {
return nil, err
}
if arg2, err = b.processNode(root.Args[1]); err != nil {
return nil, err
}
if len(root.Args) == 3 {
if arg3, err = b.processNode(root.Args[2]); err != nil {
return nil, err
}
}
qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)}
case "substring-before", "substring-after":
//substring-xxxx( haystack, needle )
if len(root.Args) != 2 {
return nil, errors.New("xpath: substring-before function must have two parameters")
}
var (
arg1, arg2 query
err error
)
if arg1, err = b.processNode(root.Args[0]); err != nil {
return nil, err
}
if arg2, err = b.processNode(root.Args[1]); err != nil {
return nil, err
}
qyOutput = &functionQuery{
Input: b.firstInput,
Func: substringIndFunc(arg1, arg2, root.FuncName == "substring-after"),
}
case "string-length":
// string-length( [string] )
if len(root.Args) < 1 {
return nil, errors.New("xpath: string-length function must have at least one parameter")
}
arg1, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: stringLengthFunc(arg1)}
case "normalize-space":
if len(root.Args) == 0 {
return nil, errors.New("xpath: normalize-space function must have at least one parameter")
}
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
case "replace":
//replace( string , string, string )
if len(root.Args) != 3 {
return nil, errors.New("xpath: replace function must have three parameters")
}
var (
arg1, arg2, arg3 query
err error
)
if arg1, err = b.processNode(root.Args[0]); err != nil {
return nil, err
}
if arg2, err = b.processNode(root.Args[1]); err != nil {
return nil, err
}
if arg3, err = b.processNode(root.Args[2]); err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: replaceFunc(arg1, arg2, arg3)}
case "translate":
//translate( string , string, string )
if len(root.Args) != 3 {
return nil, errors.New("xpath: translate function must have three parameters")
}
var (
arg1, arg2, arg3 query
err error
)
if arg1, err = b.processNode(root.Args[0]); err != nil {
return nil, err
}
if arg2, err = b.processNode(root.Args[1]); err != nil {
return nil, err
}
if arg3, err = b.processNode(root.Args[2]); err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: b.firstInput, Func: translateFunc(arg1, arg2, arg3)}
case "not":
if len(root.Args) == 0 {
return nil, errors.New("xpath: not function must have at least one parameter")
}
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
case "name", "local-name", "namespace-uri":
if len(root.Args) > 1 {
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
}
var (
arg query
err error
)
if len(root.Args) == 1 {
arg, err = b.processNode(root.Args[0])
if err != nil {
return nil, err
}
}
switch root.FuncName {
case "name":
qyOutput = &functionQuery{Input: b.firstInput, Func: nameFunc(arg)}
case "local-name":
qyOutput = &functionQuery{Input: b.firstInput, Func: localNameFunc(arg)}
case "namespace-uri":
qyOutput = &functionQuery{Input: b.firstInput, Func: namespaceFunc(arg)}
}
case "true", "false":
val := root.FuncName == "true"
qyOutput = &functionQuery{
Input: b.firstInput,
Func: func(_ query, _ iterator) interface{} {
return val
},
}
case "last":
qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc}
case "position":
qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc}
case "boolean", "number", "string":
inp := b.firstInput
if len(root.Args) > 1 {
return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName)
}
if len(root.Args) == 1 {
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
inp = argQuery
}
f := &functionQuery{Input: inp}
switch root.FuncName {
case "boolean":
f.Func = booleanFunc
case "string":
f.Func = stringFunc
case "number":
f.Func = numberFunc
}
qyOutput = f
case "count":
//if b.firstInput == nil {
// return nil, errors.New("xpath: expression must evaluate to node-set")
//}
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets")
}
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: countFunc}
case "sum":
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets")
}
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
case "ceiling", "floor", "round":
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets")
}
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
f := &functionQuery{Input: argQuery}
switch root.FuncName {
case "ceiling":
f.Func = ceilingFunc
case "floor":
f.Func = floorFunc
case "round":
f.Func = roundFunc
}
qyOutput = f
case "concat":
if len(root.Args) < 2 {
return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
}
var args []query
for _, v := range root.Args {
q, err := b.processNode(v)
if err != nil {
return nil, err
}
args = append(args, q)
}
qyOutput = &functionQuery{Input: b.firstInput, Func: concatFunc(args...)}
case "reverse":
if len(root.Args) == 0 {
return nil, fmt.Errorf("xpath: reverse(node-sets) function must with have parameters node-sets")
}
argQuery, err := b.processNode(root.Args[0])
if err != nil {
return nil, err
}
qyOutput = &transformFunctionQuery{Input: argQuery, Func: reverseFunc}
default:
return nil, fmt.Errorf("not yet support this function %s()", root.FuncName)
}
return qyOutput, nil
}
func (b *builder) processOperatorNode(root *operatorNode) (query, error) {
left, err := b.processNode(root.Left)
if err != nil {
return nil, err
}
right, err := b.processNode(root.Right)
if err != nil {
return nil, err
}
var qyOutput query
switch root.Op {
case "+", "-", "div", "mod": // Numeric operator
var exprFunc func(interface{}, interface{}) interface{}
switch root.Op {
case "+":
exprFunc = plusFunc
case "-":
exprFunc = minusFunc
case "div":
exprFunc = divFunc
case "mod":
exprFunc = modFunc
}
qyOutput = &numericQuery{Left: left, Right: right, Do: exprFunc}
case "=", ">", ">=", "<", "<=", "!=":
var exprFunc func(iterator, interface{}, interface{}) interface{}
switch root.Op {
case "=":
exprFunc = eqFunc
case ">":
exprFunc = gtFunc
case ">=":
exprFunc = geFunc
case "<":
exprFunc = ltFunc
case "<=":
exprFunc = leFunc
case "!=":
exprFunc = neFunc
}
qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc}
case "or", "and":
isOr := false
if root.Op == "or" {
isOr = true
}
qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr}
case "|":
qyOutput = &unionQuery{Left: left, Right: right}
}
return qyOutput, nil
}
func (b *builder) processNode(root node) (q query, err error) {
if b.depth = b.depth + 1; b.depth > 1024 {
err = errors.New("the xpath expressions is too complex")
return
}
switch root.Type() {
case nodeConstantOperand:
n := root.(*operandNode)
q = &constantQuery{Val: n.Val}
case nodeRoot:
q = &contextQuery{Root: true}
case nodeAxis:
q, err = b.processAxisNode(root.(*axisNode))
b.firstInput = q
case nodeFilter:
q, err = b.processFilterNode(root.(*filterNode))
case nodeFunction:
q, err = b.processFunctionNode(root.(*functionNode))
case nodeOperator:
q, err = b.processOperatorNode(root.(*operatorNode))
}
return
}
// build builds a query from the specified XPath expression expr.
func build(expr string) (q query, err error) {
defer func() {
if e := recover(); e != nil {
switch x := e.(type) {
case string:
err = errors.New(x)
case error:
err = x
default:
err = errors.New("unknown panic")
}
}
}()
root := parse(expr)
b := &builder{}
return b.processNode(root)
}

View File

@ -1,585 +0,0 @@
package xpath
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
"sync"
"unicode"
)
// stringBuilder is an interface compatible with both
// strings.Builder (Go >= 1.10) and bytes.Buffer (Go < 1.10).
type stringBuilder interface {
WriteRune(r rune) (n int, err error)
WriteString(s string) (int, error)
Reset()
Grow(n int)
String() string
}
var builderPool = sync.Pool{New: func() interface{} {
return newStringBuilder()
}}
// The XPath function list.
func predicate(q query) func(NodeNavigator) bool {
type Predicater interface {
Test(NodeNavigator) bool
}
if p, ok := q.(Predicater); ok {
return p.Test
}
return func(NodeNavigator) bool { return true }
}
// positionFunc is the XPath node-set function position().
func positionFunc(q query, t iterator) interface{} {
var (
count = 1
node = t.Current().Copy()
)
test := predicate(q)
for node.MoveToPrevious() {
if test(node) {
count++
}
}
return float64(count)
}
// lastFunc is the XPath node-set function last().
func lastFunc(q query, t iterator) interface{} {
var (
count = 0
node = t.Current().Copy()
)
node.MoveToFirst()
test := predicate(q)
for {
if test(node) {
count++
}
if !node.MoveToNext() {
break
}
}
return float64(count)
}
// countFunc is the XPath node-set function count(node-set).
func countFunc(q query, t iterator) interface{} {
var count = 0
q = functionArgs(q)
test := predicate(q)
switch typ := q.Evaluate(t).(type) {
case query:
for node := typ.Select(t); node != nil; node = typ.Select(t) {
if test(node) {
count++
}
}
}
return float64(count)
}
// sumFunc is the XPath node-set function sum(node-set).
func sumFunc(q query, t iterator) interface{} {
var sum float64
switch typ := functionArgs(q).Evaluate(t).(type) {
case query:
for node := typ.Select(t); node != nil; node = typ.Select(t) {
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
sum += v
}
}
case float64:
sum = typ
case string:
v, err := strconv.ParseFloat(typ, 64)
if err != nil {
panic(errors.New("sum() function argument type must be a node-set or number"))
}
sum = v
}
return sum
}
func asNumber(t iterator, o interface{}) float64 {
switch typ := o.(type) {
case query:
node := typ.Select(t)
if node == nil {
return float64(0)
}
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
return v
}
case float64:
return typ
case string:
v, err := strconv.ParseFloat(typ, 64)
if err != nil {
panic(errors.New("ceiling() function argument type must be a node-set or number"))
}
return v
}
return 0
}
// ceilingFunc is the XPath node-set function ceiling(node-set).
func ceilingFunc(q query, t iterator) interface{} {
val := asNumber(t, functionArgs(q).Evaluate(t))
return math.Ceil(val)
}
// floorFunc is the XPath node-set function floor(node-set).
func floorFunc(q query, t iterator) interface{} {
val := asNumber(t, functionArgs(q).Evaluate(t))
return math.Floor(val)
}
// roundFunc is the XPath node-set function round(node-set).
func roundFunc(q query, t iterator) interface{} {
val := asNumber(t, functionArgs(q).Evaluate(t))
//return math.Round(val)
return round(val)
}
// nameFunc is the XPath function name([node-set]).
func nameFunc(arg query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var v NodeNavigator
if arg == nil {
v = t.Current()
} else {
v = arg.Select(t)
if v == nil {
return ""
}
}
ns := v.Prefix()
if ns == "" {
return v.LocalName()
}
return ns + ":" + v.LocalName()
}
}
// localNameFunc is the XPath function local-name([node-set]).
func localNameFunc(arg query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var v NodeNavigator
if arg == nil {
v = t.Current()
} else {
v = arg.Select(t)
if v == nil {
return ""
}
}
return v.LocalName()
}
}
// namespaceFunc is the XPath function namespace-uri([node-set]).
func namespaceFunc(arg query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var v NodeNavigator
if arg == nil {
v = t.Current()
} else {
// Get the first node in the node-set if specified.
v = arg.Select(t)
if v == nil {
return ""
}
}
// fix about namespace-uri() bug: https://github.com/antchfx/xmlquery/issues/22
// TODO: In the next version, add NamespaceURL() to the NodeNavigator interface.
type namespaceURL interface {
NamespaceURL() string
}
if f, ok := v.(namespaceURL); ok {
return f.NamespaceURL()
}
return v.Prefix()
}
}
func asBool(t iterator, v interface{}) bool {
switch v := v.(type) {
case nil:
return false
case *NodeIterator:
return v.MoveNext()
case bool:
return v
case float64:
return v != 0
case string:
return v != ""
case query:
return v.Select(t) != nil
default:
panic(fmt.Errorf("unexpected type: %T", v))
}
}
func asString(t iterator, v interface{}) string {
switch v := v.(type) {
case nil:
return ""
case bool:
if v {
return "true"
}
return "false"
case float64:
return strconv.FormatFloat(v, 'g', -1, 64)
case string:
return v
case query:
node := v.Select(t)
if node == nil {
return ""
}
return node.Value()
default:
panic(fmt.Errorf("unexpected type: %T", v))
}
}
// booleanFunc is the XPath function boolean([node-set]).
func booleanFunc(q query, t iterator) interface{} {
v := functionArgs(q).Evaluate(t)
return asBool(t, v)
}
// numberFunc is the XPath function number([node-set]).
func numberFunc(q query, t iterator) interface{} {
v := functionArgs(q).Evaluate(t)
return asNumber(t, v)
}
// stringFunc is the XPath function string([node-set]).
func stringFunc(q query, t iterator) interface{} {
v := functionArgs(q).Evaluate(t)
return asString(t, v)
}
// startwithFunc is the XPath function starts-with(string, string).
func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var (
m, n string
ok bool
)
switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string:
m = typ
case query:
node := typ.Select(t)
if node == nil {
return false
}
m = node.Value()
default:
panic(errors.New("starts-with() function argument type must be string"))
}
n, ok = functionArgs(arg2).Evaluate(t).(string)
if !ok {
panic(errors.New("starts-with() function argument type must be string"))
}
return strings.HasPrefix(m, n)
}
}
// endwithFunc is the XPath function ends-with(string, string).
func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var (
m, n string
ok bool
)
switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string:
m = typ
case query:
node := typ.Select(t)
if node == nil {
return false
}
m = node.Value()
default:
panic(errors.New("ends-with() function argument type must be string"))
}
n, ok = functionArgs(arg2).Evaluate(t).(string)
if !ok {
panic(errors.New("ends-with() function argument type must be string"))
}
return strings.HasSuffix(m, n)
}
}
// containsFunc is the XPath function contains(string or @attr, string).
func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var (
m, n string
ok bool
)
switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string:
m = typ
case query:
node := typ.Select(t)
if node == nil {
return false
}
m = node.Value()
default:
panic(errors.New("contains() function argument type must be string"))
}
n, ok = functionArgs(arg2).Evaluate(t).(string)
if !ok {
panic(errors.New("contains() function argument type must be string"))
}
return strings.Contains(m, n)
}
}
// normalizespaceFunc is the XPath function normalize-space(string?).
func normalizespaceFunc(q query, t iterator) interface{} {
var m string
switch typ := functionArgs(q).Evaluate(t).(type) {
case string:
m = typ
case query:
node := typ.Select(t)
if node == nil {
return ""
}
m = node.Value()
}
var b = builderPool.Get().(stringBuilder)
b.Grow(len(m))
runeStr := []rune(strings.TrimSpace(m))
l := len(runeStr)
for i := range runeStr {
r := runeStr[i]
isSpace := unicode.IsSpace(r)
if !(isSpace && (i+1 < l && unicode.IsSpace(runeStr[i+1]))) {
if isSpace {
r = ' '
}
b.WriteRune(r)
}
}
result := b.String()
b.Reset()
builderPool.Put(b)
return result
}
// substringFunc is the XPath substring() function; it returns a part of a given string.
func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var m string
switch typ := functionArgs(arg1).Evaluate(t).(type) {
case string:
m = typ
case query:
node := typ.Select(t)
if node == nil {
return ""
}
m = node.Value()
}
var start, length float64
var ok bool
if start, ok = functionArgs(arg2).Evaluate(t).(float64); !ok {
panic(errors.New("substring() function first argument type must be int"))
} else if start < 1 {
panic(errors.New("substring() function first argument type must be >= 1"))
}
start--
if arg3 != nil {
if length, ok = functionArgs(arg3).Evaluate(t).(float64); !ok {
panic(errors.New("substring() function second argument type must be int"))
}
}
if (len(m) - int(start)) < int(length) {
panic(errors.New("substring() function start and length argument out of range"))
}
if length > 0 {
return m[int(start):int(length+start)]
}
return m[int(start):]
}
}
// substringIndFunc is the XPath substring-before()/substring-after() function; it returns a part of a given string.
func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
var str string
switch v := functionArgs(arg1).Evaluate(t).(type) {
case string:
str = v
case query:
node := v.Select(t)
if node == nil {
return ""
}
str = node.Value()
}
var word string
switch v := functionArgs(arg2).Evaluate(t).(type) {
case string:
word = v
case query:
node := v.Select(t)
if node == nil {
return ""
}
word = node.Value()
}
if word == "" {
return ""
}
i := strings.Index(str, word)
if i < 0 {
return ""
}
if after {
return str[i+len(word):]
}
return str[:i]
}
}
// stringLengthFunc is the XPath string-length([string]) function; it returns the
// number of characters in the given string.
func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
switch v := functionArgs(arg1).Evaluate(t).(type) {
case string:
return float64(len(v))
case query:
node := v.Select(t)
if node == nil {
break
}
return float64(len(node.Value()))
}
return float64(0)
}
}
// translateFunc is the XPath translate() function; it returns the string with characters replaced.
func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
str := asString(t, functionArgs(arg1).Evaluate(t))
src := asString(t, functionArgs(arg2).Evaluate(t))
dst := asString(t, functionArgs(arg3).Evaluate(t))
replace := make([]string, 0, len(src))
for i, s := range src {
d := ""
if i < len(dst) {
d = string(dst[i])
}
replace = append(replace, string(s), d)
}
return strings.NewReplacer(replace...).Replace(str)
}
}
// replaceFunc is the XPath replace() function; it returns the string with all occurrences of a substring replaced.
func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
str := asString(t, functionArgs(arg1).Evaluate(t))
src := asString(t, functionArgs(arg2).Evaluate(t))
dst := asString(t, functionArgs(arg3).Evaluate(t))
return strings.Replace(str, src, dst, -1)
}
}
// notFunc is the XPath function not(expression).
func notFunc(q query, t iterator) interface{} {
switch v := functionArgs(q).Evaluate(t).(type) {
case bool:
return !v
case query:
node := v.Select(t)
return node == nil
default:
return false
}
}
// concatFunc is the XPath concat() function; it concatenates two or more
// strings and returns the resulting string.
// concat(string1, string2 [, stringn]*)
func concatFunc(args ...query) func(query, iterator) interface{} {
return func(q query, t iterator) interface{} {
b := builderPool.Get().(stringBuilder)
for _, v := range args {
v = functionArgs(v)
switch v := v.Evaluate(t).(type) {
case string:
b.WriteString(v)
case query:
node := v.Select(t)
if node != nil {
b.WriteString(node.Value())
}
}
}
result := b.String()
b.Reset()
builderPool.Put(b)
return result
}
}
// https://github.com/antchfx/xpath/issues/43
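// functionArgs returns a clone of the argument query so that evaluating it does not
// disturb the caller's iteration state; functionQuery arguments are returned as-is.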
func functionArgs(q query) query {
if _, ok := q.(*functionQuery); ok {
return q
}
return q.Clone()
}
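// reverseFunc backs the reverse() function: it drains the node set into a slice
// and returns an iterator that yields the nodes in reverse order.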
func reverseFunc(q query, t iterator) func() NodeNavigator {
var list []NodeNavigator
for {
node := q.Select(t)
if node == nil {
break
}
list = append(list, node.Copy())
}
i := len(list)
return func() NodeNavigator {
if i <= 0 {
return nil
}
i--
node := list[i]
return node
}
}

View File

@ -1,16 +0,0 @@
// +build go1.10
package xpath
import (
"math"
"strings"
)
func round(f float64) int {
return int(math.Round(f))
}
func newStringBuilder() stringBuilder {
return &strings.Builder{}
}

View File

@ -1,22 +0,0 @@
// +build !go1.10
package xpath
import (
"bytes"
"math"
)
// math.Round() is only available in Go 1.10+;
// this function provides the same behaviour for earlier versions.
// https://github.com/golang/go/issues/20100
func round(f float64) int {
if math.Abs(f) < 0.5 {
return 0
}
return int(f + math.Copysign(0.5, f))
}
func newStringBuilder() stringBuilder {
return &bytes.Buffer{}
}

View File

@ -1,305 +0,0 @@
package xpath
import (
"fmt"
"reflect"
"strconv"
)
// The XPath number operator function list.
// valueType is a return value type.
type valueType int
const (
booleanType valueType = iota
numberType
stringType
nodeSetType
)
func getValueType(i interface{}) valueType {
v := reflect.ValueOf(i)
switch v.Kind() {
case reflect.Float64:
return numberType
case reflect.String:
return stringType
case reflect.Bool:
return booleanType
default:
if _, ok := i.(query); ok {
return nodeSetType
}
}
panic(fmt.Errorf("xpath unknown value type: %v", v.Kind()))
}
type logical func(iterator, string, interface{}, interface{}) bool
var logicalFuncs = [][]logical{
{cmpBooleanBoolean, nil, nil, nil},
{nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet},
{nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet},
{nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet},
}
// number vs number
func cmpNumberNumberF(op string, a, b float64) bool {
switch op {
case "=":
return a == b
case ">":
return a > b
case "<":
return a < b
case ">=":
return a >= b
case "<=":
return a <= b
case "!=":
return a != b
}
return false
}
// string vs string
func cmpStringStringF(op string, a, b string) bool {
switch op {
case "=":
return a == b
case ">":
return a > b
case "<":
return a < b
case ">=":
return a >= b
case "<=":
return a <= b
case "!=":
return a != b
}
return false
}
func cmpBooleanBooleanF(op string, a, b bool) bool {
switch op {
case "or":
return a || b
case "and":
return a && b
}
return false
}
func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool {
a := m.(float64)
b := n.(float64)
return cmpNumberNumberF(op, a, b)
}
func cmpNumericString(t iterator, op string, m, n interface{}) bool {
a := m.(float64)
b := n.(string)
num, err := strconv.ParseFloat(b, 64)
if err != nil {
panic(err)
}
return cmpNumberNumberF(op, a, num)
}
func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool {
a := m.(float64)
b := n.(query)
for {
node := b.Select(t)
if node == nil {
break
}
num, err := strconv.ParseFloat(node.Value(), 64)
if err != nil {
panic(err)
}
if cmpNumberNumberF(op, a, num) {
return true
}
}
return false
}
func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool {
a := m.(query)
b := n.(float64)
for {
node := a.Select(t)
if node == nil {
break
}
num, err := strconv.ParseFloat(node.Value(), 64)
if err != nil {
panic(err)
}
if cmpNumberNumberF(op, num, b) {
return true
}
}
return false
}
func cmpNodeSetString(t iterator, op string, m, n interface{}) bool {
a := m.(query)
b := n.(string)
for {
node := a.Select(t)
if node == nil {
break
}
if cmpStringStringF(op, b, node.Value()) {
return true
}
}
return false
}
func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool {
a := m.(query)
b := n.(query)
x := a.Select(t)
if x == nil {
return false
}
y := b.Select(t)
if y == nil {
return false
}
return cmpStringStringF(op, x.Value(), y.Value())
}
func cmpStringNumeric(t iterator, op string, m, n interface{}) bool {
a := m.(string)
b := n.(float64)
num, err := strconv.ParseFloat(a, 64)
if err != nil {
panic(err)
}
return cmpNumberNumberF(op, b, num)
}
func cmpStringString(t iterator, op string, m, n interface{}) bool {
a := m.(string)
b := n.(string)
return cmpStringStringF(op, a, b)
}
func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool {
a := m.(string)
b := n.(query)
for {
node := b.Select(t)
if node == nil {
break
}
if cmpStringStringF(op, a, node.Value()) {
return true
}
}
return false
}
func cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool {
a := m.(bool)
b := n.(bool)
return cmpBooleanBooleanF(op, a, b)
}
// eqFunc is an `=` operator.
func eqFunc(t iterator, m, n interface{}) interface{} {
t1 := getValueType(m)
t2 := getValueType(n)
return logicalFuncs[t1][t2](t, "=", m, n)
}
// gtFunc is an `>` operator.
func gtFunc(t iterator, m, n interface{}) interface{} {
t1 := getValueType(m)
t2 := getValueType(n)
return logicalFuncs[t1][t2](t, ">", m, n)
}
// geFunc is an `>=` operator.
func geFunc(t iterator, m, n interface{}) interface{} {
t1 := getValueType(m)
t2 := getValueType(n)
return logicalFuncs[t1][t2](t, ">=", m, n)
}
// ltFunc is an `<` operator.
func ltFunc(t iterator, m, n interface{}) interface{} {
t1 := getValueType(m)
t2 := getValueType(n)
return logicalFuncs[t1][t2](t, "<", m, n)
}
// leFunc is an `<=` operator.
func leFunc(t iterator, m, n interface{}) interface{} {
t1 := getValueType(m)
t2 := getValueType(n)
return logicalFuncs[t1][t2](t, "<=", m, n)
}
// neFunc is an `!=` operator.
func neFunc(t iterator, m, n interface{}) interface{} {
t1 := getValueType(m)
t2 := getValueType(n)
return logicalFuncs[t1][t2](t, "!=", m, n)
}
// orFunc is an `or` operator.
var orFunc = func(t iterator, m, n interface{}) interface{} {
t1 := getValueType(m)
t2 := getValueType(n)
return logicalFuncs[t1][t2](t, "or", m, n)
}
func numericExpr(m, n interface{}, cb func(float64, float64) float64) float64 {
typ := reflect.TypeOf(float64(0))
a := reflect.ValueOf(m).Convert(typ)
b := reflect.ValueOf(n).Convert(typ)
return cb(a.Float(), b.Float())
}
// plusFunc is an `+` operator.
var plusFunc = func(m, n interface{}) interface{} {
return numericExpr(m, n, func(a, b float64) float64 {
return a + b
})
}
// minusFunc is an `-` operator.
var minusFunc = func(m, n interface{}) interface{} {
return numericExpr(m, n, func(a, b float64) float64 {
return a - b
})
}
// mulFunc is an `*` operator.
var mulFunc = func(m, n interface{}) interface{} {
return numericExpr(m, n, func(a, b float64) float64 {
return a * b
})
}
// divFunc is the `div` operator.
var divFunc = func(m, n interface{}) interface{} {
return numericExpr(m, n, func(a, b float64) float64 {
return a / b
})
}
// modFunc is the `mod` operator.
var modFunc = func(m, n interface{}) interface{} {
return numericExpr(m, n, func(a, b float64) float64 {
return float64(int(a) % int(b))
})
}

File diff suppressed because it is too large Load Diff

View File

@ -1,923 +0,0 @@
package xpath
import (
"bytes"
"fmt"
"hash/fnv"
"reflect"
)
type iterator interface {
Current() NodeNavigator
}
// An XPath query interface.
type query interface {
// Select traversing iterator returns a query matched node NodeNavigator.
Select(iterator) NodeNavigator
// Evaluate evaluates query and returns values of the current query.
Evaluate(iterator) interface{}
Clone() query
}
// nopQuery is an empty query that always returns nil for any query.
type nopQuery struct {
query
}
func (nopQuery) Select(iterator) NodeNavigator { return nil }
func (nopQuery) Evaluate(iterator) interface{} { return nil }
func (nopQuery) Clone() query { return nopQuery{} }
// contextQuery returns the current node of the iterator object.
type contextQuery struct {
count int
Root bool // Moving to root-level node in the current context iterator.
}
func (c *contextQuery) Select(t iterator) (n NodeNavigator) {
if c.count == 0 {
c.count++
n = t.Current().Copy()
if c.Root {
n.MoveToRoot()
}
}
return n
}
func (c *contextQuery) Evaluate(iterator) interface{} {
c.count = 0
return c
}
func (c *contextQuery) Clone() query {
return &contextQuery{count: 0, Root: c.Root}
}
// ancestorQuery is an XPath ancestor node query. (ancestor::*|ancestor-or-self::*)
type ancestorQuery struct {
iterator func() NodeNavigator
Self bool
Input query
Predicate func(NodeNavigator) bool
}
func (a *ancestorQuery) Select(t iterator) NodeNavigator {
for {
if a.iterator == nil {
node := a.Input.Select(t)
if node == nil {
return nil
}
first := true
node = node.Copy()
a.iterator = func() NodeNavigator {
if first && a.Self {
first = false
if a.Predicate(node) {
return node
}
}
for node.MoveToParent() {
if !a.Predicate(node) {
continue
}
return node
}
return nil
}
}
if node := a.iterator(); node != nil {
return node
}
a.iterator = nil
}
}
func (a *ancestorQuery) Evaluate(t iterator) interface{} {
a.Input.Evaluate(t)
a.iterator = nil
return a
}
func (a *ancestorQuery) Test(n NodeNavigator) bool {
return a.Predicate(n)
}
func (a *ancestorQuery) Clone() query {
return &ancestorQuery{Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate}
}
// attributeQuery is an XPath attribute node query.(@*)
type attributeQuery struct {
iterator func() NodeNavigator
Input query
Predicate func(NodeNavigator) bool
}
func (a *attributeQuery) Select(t iterator) NodeNavigator {
for {
if a.iterator == nil {
node := a.Input.Select(t)
if node == nil {
return nil
}
node = node.Copy()
a.iterator = func() NodeNavigator {
for {
onAttr := node.MoveToNextAttribute()
if !onAttr {
return nil
}
if a.Predicate(node) {
return node
}
}
}
}
if node := a.iterator(); node != nil {
return node
}
a.iterator = nil
}
}
func (a *attributeQuery) Evaluate(t iterator) interface{} {
a.Input.Evaluate(t)
a.iterator = nil
return a
}
func (a *attributeQuery) Test(n NodeNavigator) bool {
return a.Predicate(n)
}
func (a *attributeQuery) Clone() query {
return &attributeQuery{Input: a.Input.Clone(), Predicate: a.Predicate}
}
// childQuery is an XPath child node query.(child::*)
type childQuery struct {
posit int
iterator func() NodeNavigator
Input query
Predicate func(NodeNavigator) bool
}
func (c *childQuery) Select(t iterator) NodeNavigator {
for {
if c.iterator == nil {
c.posit = 0
node := c.Input.Select(t)
if node == nil {
return nil
}
node = node.Copy()
first := true
c.iterator = func() NodeNavigator {
for {
if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) {
return nil
}
first = false
if c.Predicate(node) {
return node
}
}
}
}
if node := c.iterator(); node != nil {
c.posit++
return node
}
c.iterator = nil
}
}
func (c *childQuery) Evaluate(t iterator) interface{} {
c.Input.Evaluate(t)
c.iterator = nil
return c
}
func (c *childQuery) Test(n NodeNavigator) bool {
return c.Predicate(n)
}
func (c *childQuery) Clone() query {
return &childQuery{Input: c.Input.Clone(), Predicate: c.Predicate}
}
// position returns a position of current NodeNavigator.
func (c *childQuery) position() int {
return c.posit
}
// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*)
type descendantQuery struct {
iterator func() NodeNavigator
posit int
level int
Self bool
Input query
Predicate func(NodeNavigator) bool
}
func (d *descendantQuery) Select(t iterator) NodeNavigator {
for {
if d.iterator == nil {
d.posit = 0
node := d.Input.Select(t)
if node == nil {
return nil
}
node = node.Copy()
d.level = 0
positmap := make(map[int]int)
first := true
d.iterator = func() NodeNavigator {
if first && d.Self {
first = false
if d.Predicate(node) {
d.posit = 1
positmap[d.level] = 1
return node
}
}
for {
if node.MoveToChild() {
d.level = d.level + 1
positmap[d.level] = 0
} else {
for {
if d.level == 0 {
return nil
}
if node.MoveToNext() {
break
}
node.MoveToParent()
d.level = d.level - 1
}
}
if d.Predicate(node) {
positmap[d.level]++
d.posit = positmap[d.level]
return node
}
}
}
}
if node := d.iterator(); node != nil {
return node
}
d.iterator = nil
}
}
func (d *descendantQuery) Evaluate(t iterator) interface{} {
d.Input.Evaluate(t)
d.iterator = nil
return d
}
func (d *descendantQuery) Test(n NodeNavigator) bool {
return d.Predicate(n)
}
// position returns a position of current NodeNavigator.
func (d *descendantQuery) position() int {
return d.posit
}
func (d *descendantQuery) depth() int {
return d.level
}
func (d *descendantQuery) Clone() query {
return &descendantQuery{Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate}
}
// followingQuery is an XPath following node query.(following::*|following-sibling::*)
type followingQuery struct {
posit int
iterator func() NodeNavigator
Input query
Sibling bool // The matching sibling node of current node.
Predicate func(NodeNavigator) bool
}
func (f *followingQuery) Select(t iterator) NodeNavigator {
for {
if f.iterator == nil {
f.posit = 0
node := f.Input.Select(t)
if node == nil {
return nil
}
node = node.Copy()
if f.Sibling {
f.iterator = func() NodeNavigator {
for {
if !node.MoveToNext() {
return nil
}
if f.Predicate(node) {
f.posit++
return node
}
}
}
} else {
var q *descendantQuery // descendant query
f.iterator = func() NodeNavigator {
for {
if q == nil {
for !node.MoveToNext() {
if !node.MoveToParent() {
return nil
}
}
q = &descendantQuery{
Self: true,
Input: &contextQuery{},
Predicate: f.Predicate,
}
t.Current().MoveTo(node)
}
if node := q.Select(t); node != nil {
f.posit = q.posit
return node
}
q = nil
}
}
}
}
if node := f.iterator(); node != nil {
return node
}
f.iterator = nil
}
}
func (f *followingQuery) Evaluate(t iterator) interface{} {
f.Input.Evaluate(t)
return f
}
func (f *followingQuery) Test(n NodeNavigator) bool {
return f.Predicate(n)
}
func (f *followingQuery) Clone() query {
return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate}
}
func (f *followingQuery) position() int {
return f.posit
}
// precedingQuery is an XPath preceding node query.(preceding::*)
type precedingQuery struct {
iterator func() NodeNavigator
posit int
Input query
Sibling bool // The matching sibling node of current node.
Predicate func(NodeNavigator) bool
}
func (p *precedingQuery) Select(t iterator) NodeNavigator {
for {
if p.iterator == nil {
p.posit = 0
node := p.Input.Select(t)
if node == nil {
return nil
}
node = node.Copy()
if p.Sibling {
p.iterator = func() NodeNavigator {
for {
for !node.MoveToPrevious() {
return nil
}
if p.Predicate(node) {
p.posit++
return node
}
}
}
} else {
var q query
p.iterator = func() NodeNavigator {
for {
if q == nil {
for !node.MoveToPrevious() {
if !node.MoveToParent() {
return nil
}
p.posit = 0
}
q = &descendantQuery{
Self: true,
Input: &contextQuery{},
Predicate: p.Predicate,
}
t.Current().MoveTo(node)
}
if node := q.Select(t); node != nil {
p.posit++
return node
}
q = nil
}
}
}
}
if node := p.iterator(); node != nil {
return node
}
p.iterator = nil
}
}
func (p *precedingQuery) Evaluate(t iterator) interface{} {
p.Input.Evaluate(t)
return p
}
func (p *precedingQuery) Test(n NodeNavigator) bool {
return p.Predicate(n)
}
func (p *precedingQuery) Clone() query {
return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate}
}
func (p *precedingQuery) position() int {
return p.posit
}
// parentQuery is an XPath parent node query.(parent::*)
type parentQuery struct {
Input query
Predicate func(NodeNavigator) bool
}
func (p *parentQuery) Select(t iterator) NodeNavigator {
for {
node := p.Input.Select(t)
if node == nil {
return nil
}
node = node.Copy()
if node.MoveToParent() && p.Predicate(node) {
return node
}
}
}
func (p *parentQuery) Evaluate(t iterator) interface{} {
p.Input.Evaluate(t)
return p
}
func (p *parentQuery) Clone() query {
return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate}
}
func (p *parentQuery) Test(n NodeNavigator) bool {
return p.Predicate(n)
}
// selfQuery is an XPath self node query. (self::*)
type selfQuery struct {
Input query
Predicate func(NodeNavigator) bool
}
func (s *selfQuery) Select(t iterator) NodeNavigator {
for {
node := s.Input.Select(t)
if node == nil {
return nil
}
if s.Predicate(node) {
return node
}
}
}
func (s *selfQuery) Evaluate(t iterator) interface{} {
s.Input.Evaluate(t)
return s
}
func (s *selfQuery) Test(n NodeNavigator) bool {
return s.Predicate(n)
}
func (s *selfQuery) Clone() query {
return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate}
}
// filterQuery is an XPath query for predicate filter.
type filterQuery struct {
Input query
Predicate query
posit int
positmap map[int]int
}
func (f *filterQuery) do(t iterator) bool {
val := reflect.ValueOf(f.Predicate.Evaluate(t))
switch val.Kind() {
case reflect.Bool:
return val.Bool()
case reflect.String:
return len(val.String()) > 0
case reflect.Float64:
pt := getNodePosition(f.Input)
return int(val.Float()) == pt
default:
if q, ok := f.Predicate.(query); ok {
return q.Select(t) != nil
}
}
return false
}
func (f *filterQuery) position() int {
return f.posit
}
func (f *filterQuery) Select(t iterator) NodeNavigator {
if f.positmap == nil {
f.positmap = make(map[int]int)
}
for {
node := f.Input.Select(t)
if node == nil {
return node
}
node = node.Copy()
t.Current().MoveTo(node)
if f.do(t) {
// fix https://github.com/antchfx/htmlquery/issues/26
// Calculate and keep each matching node's position at the same depth.
level := getNodeDepth(f.Input)
f.positmap[level]++
f.posit = f.positmap[level]
return node
}
}
}
func (f *filterQuery) Evaluate(t iterator) interface{} {
f.Input.Evaluate(t)
return f
}
func (f *filterQuery) Clone() query {
return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()}
}
// functionQuery is an XPath function that returns a computed value for
// the Evaluate call of the current NodeNavigator node. Select call isn't
// applicable for functionQuery.
type functionQuery struct {
Input query // Node Set
Func func(query, iterator) interface{} // The xpath function.
}
func (f *functionQuery) Select(t iterator) NodeNavigator {
return nil
}
// Evaluate calls the specified function and returns one of the
// following value types: number, string, boolean.
func (f *functionQuery) Evaluate(t iterator) interface{} {
return f.Func(f.Input, t)
}
func (f *functionQuery) Clone() query {
return &functionQuery{Input: f.Input.Clone(), Func: f.Func}
}
// transformFunctionQuery differs from functionQuery in that the latter computes a scalar
// value (number,string,boolean) for the current NodeNavigator node while the former
// (transformFunctionQuery) performs a mapping or transform of the current NodeNavigator
// and returns a new NodeNavigator. It is used for non-scalar XPath functions such as
// reverse(), remove(), subsequence(), unordered(), etc.
type transformFunctionQuery struct {
Input query
Func func(query, iterator) func() NodeNavigator
iterator func() NodeNavigator
}
func (f *transformFunctionQuery) Select(t iterator) NodeNavigator {
if f.iterator == nil {
f.iterator = f.Func(f.Input, t)
}
return f.iterator()
}
func (f *transformFunctionQuery) Evaluate(t iterator) interface{} {
f.Input.Evaluate(t)
f.iterator = nil
return f
}
func (f *transformFunctionQuery) Clone() query {
return &transformFunctionQuery{Input: f.Input.Clone(), Func: f.Func}
}
// constantQuery is an XPath constant operand.
type constantQuery struct {
Val interface{}
}
func (c *constantQuery) Select(t iterator) NodeNavigator {
return nil
}
func (c *constantQuery) Evaluate(t iterator) interface{} {
return c.Val
}
func (c *constantQuery) Clone() query {
return c
}
// logicalQuery is an XPath logical expression.
type logicalQuery struct {
Left, Right query
Do func(iterator, interface{}, interface{}) interface{}
}
func (l *logicalQuery) Select(t iterator) NodeNavigator {
// When the XPath expression is a logical expression, return the current node only if it evaluates to true.
node := t.Current().Copy()
val := l.Evaluate(t)
switch val.(type) {
case bool:
if val.(bool) {
return node
}
}
return nil
}
func (l *logicalQuery) Evaluate(t iterator) interface{} {
m := l.Left.Evaluate(t)
n := l.Right.Evaluate(t)
return l.Do(t, m, n)
}
func (l *logicalQuery) Clone() query {
return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do}
}
// numericQuery is an XPath numeric operator expression.
type numericQuery struct {
Left, Right query
Do func(interface{}, interface{}) interface{}
}
func (n *numericQuery) Select(t iterator) NodeNavigator {
return nil
}
func (n *numericQuery) Evaluate(t iterator) interface{} {
m := n.Left.Evaluate(t)
k := n.Right.Evaluate(t)
return n.Do(m, k)
}
func (n *numericQuery) Clone() query {
return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do}
}
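// booleanQuery implements the XPath 'and'/'or' operators: Evaluate short-circuits on the
// left operand, while Select merges (for 'or') or intersects (for 'and') the operands' node sets.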
type booleanQuery struct {
IsOr bool
Left, Right query
iterator func() NodeNavigator
}
func (b *booleanQuery) Select(t iterator) NodeNavigator {
if b.iterator == nil {
var list []NodeNavigator
i := 0
root := t.Current().Copy()
if b.IsOr {
for {
node := b.Left.Select(t)
if node == nil {
break
}
node = node.Copy()
list = append(list, node)
}
t.Current().MoveTo(root)
for {
node := b.Right.Select(t)
if node == nil {
break
}
node = node.Copy()
list = append(list, node)
}
} else {
var m []NodeNavigator
var n []NodeNavigator
for {
node := b.Left.Select(t)
if node == nil {
break
}
node = node.Copy()
m = append(m, node)
}
t.Current().MoveTo(root)
for {
node := b.Right.Select(t)
if node == nil {
break
}
node = node.Copy()
n = append(n, node)
}
for _, k := range m {
for _, j := range n {
if k == j {
list = append(list, k)
}
}
}
}
b.iterator = func() NodeNavigator {
if i >= len(list) {
return nil
}
node := list[i]
i++
return node
}
}
return b.iterator()
}
func (b *booleanQuery) Evaluate(t iterator) interface{} {
m := b.Left.Evaluate(t)
left := asBool(t, m)
if b.IsOr && left {
return true
} else if !b.IsOr && !left {
return false
}
m = b.Right.Evaluate(t)
return asBool(t, m)
}
func (b *booleanQuery) Clone() query {
return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
}
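// unionQuery implements the XPath union operator '|', yielding the nodes of both
// operands with duplicates removed.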
type unionQuery struct {
Left, Right query
iterator func() NodeNavigator
}
func (u *unionQuery) Select(t iterator) NodeNavigator {
if u.iterator == nil {
var list []NodeNavigator
var m = make(map[uint64]bool)
root := t.Current().Copy()
for {
node := u.Left.Select(t)
if node == nil {
break
}
code := getHashCode(node.Copy())
if _, ok := m[code]; !ok {
m[code] = true
list = append(list, node.Copy())
}
}
t.Current().MoveTo(root)
for {
node := u.Right.Select(t)
if node == nil {
break
}
code := getHashCode(node.Copy())
if _, ok := m[code]; !ok {
m[code] = true
list = append(list, node.Copy())
}
}
var i int
u.iterator = func() NodeNavigator {
if i >= len(list) {
return nil
}
node := list[i]
i++
return node
}
}
return u.iterator()
}
func (u *unionQuery) Evaluate(t iterator) interface{} {
u.iterator = nil
u.Left.Evaluate(t)
u.Right.Evaluate(t)
return u
}
func (u *unionQuery) Clone() query {
return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()}
}
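// getHashCode hashes a node's name/value together with its sibling-position path to the
// root (FNV-1a); it is used to de-duplicate nodes in union results.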
func getHashCode(n NodeNavigator) uint64 {
var sb bytes.Buffer
switch n.NodeType() {
case AttributeNode, TextNode, CommentNode:
sb.WriteString(fmt.Sprintf("%s=%s", n.LocalName(), n.Value()))
// https://github.com/antchfx/htmlquery/issues/25
d := 1
for n.MoveToPrevious() {
d++
}
sb.WriteString(fmt.Sprintf("-%d", d))
for n.MoveToParent() {
d = 1
for n.MoveToPrevious() {
d++
}
sb.WriteString(fmt.Sprintf("-%d", d))
}
case ElementNode:
sb.WriteString(n.Prefix() + n.LocalName())
d := 1
for n.MoveToPrevious() {
d++
}
sb.WriteString(fmt.Sprintf("-%d", d))
for n.MoveToParent() {
d = 1
for n.MoveToPrevious() {
d++
}
sb.WriteString(fmt.Sprintf("-%d", d))
}
}
h := fnv.New64a()
h.Write([]byte(sb.String()))
return h.Sum64()
}
func getNodePosition(q query) int {
type Position interface {
position() int
}
if count, ok := q.(Position); ok {
return count.position()
}
return 1
}
func getNodeDepth(q query) int {
type Depth interface {
depth() int
}
if count, ok := q.(Depth); ok {
return count.depth()
}
return 0
}

View File

@ -1,161 +0,0 @@
package xpath
import (
"errors"
"fmt"
)
// NodeType represents a type of XPath node.
type NodeType int
const (
// RootNode is a root node of the XML document or node tree.
RootNode NodeType = iota
// ElementNode is an element, such as <element>.
ElementNode
// AttributeNode is an attribute, such as id='123'.
AttributeNode
// TextNode is the text content of a node.
TextNode
// CommentNode is a comment node, such as <!-- my comment -->
CommentNode
// allNode matches any type of node; used by the xpath package only for predicate matching.
allNode
)
// NodeNavigator provides cursor model for navigating XML data.
type NodeNavigator interface {
// NodeType returns the XPathNodeType of the current node.
NodeType() NodeType
// LocalName gets the Name of the current node.
LocalName() string
// Prefix returns namespace prefix associated with the current node.
Prefix() string
// Value gets the value of current node.
Value() string
// Copy does a deep copy of the NodeNavigator and all its components.
Copy() NodeNavigator
// MoveToRoot moves the NodeNavigator to the root node of the current node.
MoveToRoot()
// MoveToParent moves the NodeNavigator to the parent node of the current node.
MoveToParent() bool
// MoveToNextAttribute moves the NodeNavigator to the next attribute on current node.
MoveToNextAttribute() bool
// MoveToChild moves the NodeNavigator to the first child node of the current node.
MoveToChild() bool
// MoveToFirst moves the NodeNavigator to the first sibling node of the current node.
MoveToFirst() bool
// MoveToNext moves the NodeNavigator to the next sibling node of the current node.
MoveToNext() bool
// MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node.
MoveToPrevious() bool
// MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator.
MoveTo(NodeNavigator) bool
}
// NodeIterator holds all matched Node objects.
type NodeIterator struct {
node NodeNavigator
query query
}
// Current returns the current matched node.
func (t *NodeIterator) Current() NodeNavigator {
return t.node
}
// MoveNext moves the Navigator to the next matched node.
func (t *NodeIterator) MoveNext() bool {
n := t.query.Select(t)
if n != nil {
if !t.node.MoveTo(n) {
t.node = n.Copy()
}
return true
}
return false
}
// Select selects a node set using the specified XPath expression.
// Deprecated: use the Expr.Select() method instead.
func Select(root NodeNavigator, expr string) *NodeIterator {
exp, err := Compile(expr)
if err != nil {
panic(err)
}
return exp.Select(root)
}
// Expr is an XPath expression for query.
type Expr struct {
s string
q query
}
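// iteratorFunc adapts an ordinary function to the iterator interface.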
type iteratorFunc func() NodeNavigator
func (f iteratorFunc) Current() NodeNavigator {
return f()
}
// Evaluate returns the result of the expression.
// The result type of the expression is one of the following: bool, float64, string, *NodeIterator.
func (expr *Expr) Evaluate(root NodeNavigator) interface{} {
val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root }))
switch val.(type) {
case query:
return &NodeIterator{query: expr.q.Clone(), node: root}
}
return val
}
// Select selects a node set using the specified XPath expression.
func (expr *Expr) Select(root NodeNavigator) *NodeIterator {
return &NodeIterator{query: expr.q.Clone(), node: root}
}
// String returns XPath expression string.
func (expr *Expr) String() string {
return expr.s
}
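// A minimal usage sketch from a caller's perspective (nav is assumed to be any
// NodeNavigator implementation, e.g. one provided by the htmlquery or xmlquery packages):
//
//	expr := xpath.MustCompile("//book/title")
//	iter := expr.Select(nav)
//	for iter.MoveNext() {
//		fmt.Println(iter.Current().Value())
//	}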
// Compile compiles an XPath expression string.
func Compile(expr string) (*Expr, error) {
if expr == "" {
return nil, errors.New("expr expression is nil")
}
qy, err := build(expr)
if err != nil {
return nil, err
}
if qy == nil {
return nil, fmt.Errorf("undeclared variable in XPath expression: %s", expr)
}
return &Expr{s: expr, q: qy}, nil
}
// MustCompile compiles an XPath expression string and ignores compilation errors, returning a no-op expression on failure.
func MustCompile(expr string) *Expr {
exp, err := Compile(expr)
if err != nil {
return &Expr{s: expr, q: nopQuery{}}
}
return exp
}

View File

@ -1 +0,0 @@
comment: false

View File

@ -1,17 +0,0 @@
language: go
sudo: false
go:
- 1.11.x
- 1.12.x
- 1.13.x
- tip
script:
- go get -u golang.org/x/lint/golint
- OUT="$(go get -a)"; test -z "$OUT" || (echo "$OUT" && return 1)
- OUT="$(gofmt -l -d ./)"; test -z "$OUT" || (echo "$OUT" && return 1)
- OUT="$(golint ./...)"; test -z "$OUT" || (echo "$OUT" && return 1)
- go vet -v ./...
- go test -race -v -coverprofile=coverage.txt -covermode=atomic ./
- go build
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,33 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "colly",
srcs = [
"colly.go",
"context.go",
"htmlelement.go",
"http_backend.go",
"http_trace.go",
"request.go",
"response.go",
"unmarshal.go",
"xmlelement.go",
],
importmap = "peridot.resf.org/vendor/github.com/gocolly/colly/v2",
importpath = "github.com/gocolly/colly/v2",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/PuerkitoBio/goquery",
"//vendor/github.com/antchfx/htmlquery",
"//vendor/github.com/antchfx/xmlquery",
"//vendor/github.com/gobwas/glob",
"//vendor/github.com/gocolly/colly/v2/debug",
"//vendor/github.com/gocolly/colly/v2/storage",
"//vendor/github.com/kennygrant/sanitize",
"//vendor/github.com/saintfish/chardet",
"//vendor/github.com/temoto/robotstxt",
"@org_golang_google_appengine//urlfetch:go_default_library",
"@org_golang_x_net//html",
"@org_golang_x_net//html/charset",
],
)

View File

@ -1,33 +0,0 @@
# 2.0.0 - 2019.11.28
- Breaking change: Change Collector.RedirectHandler member to Collector.SetRedirectHandler function
- Go module support
- Collector.HasVisited method added to check whether a URL has already been visited
- Collector.SetClient method introduced
- HTMLElement.ChildTexts method added
- New user agents
- Multiple bugfixes
# 1.2.0 - 2019.02.13
- Compatibility with the latest htmlquery package
- New request shortcut for HEAD requests
- Check URL availability before visiting
- Fix proxy URL value
- Request counter fix
- Minor fixes in examples
# 1.1.0 - 2018.08.13
- Appengine integration takes context.Context instead of http.Request (API change)
- Added "Accept" http header by default to every request
- Support slices of pointers in unmarshal
- Fixed a race condition in queues
- ForEachWithBreak method added to HTMLElement
- Added a local file example
- Support gzip decompression of response bodies
- Don't share waitgroup when cloning a collector
- Fixed instagram example
# 1.0.0 - 2018.05.13

View File

@ -1,67 +0,0 @@
# Contribute
## Introduction
First, thank you for considering contributing to colly! It's people like you that make the open source community such a great community! 😊
We welcome any type of contribution, not only code. You can help with
- **QA**: file bug reports, the more details you can give the better (e.g. screenshots with the console open)
- **Marketing**: writing blog posts, howto's, printing stickers, ...
- **Community**: presenting the project at meetups, organizing a dedicated meetup for the local community, ...
- **Code**: take a look at the [open issues](https://github.com/gocolly/colly/issues). Even if you can't write code, commenting on them, showing that you care about a given issue matters. It helps us triage them.
- **Money**: we welcome financial contributions in full transparency on our [open collective](https://opencollective.com/colly).
## Your First Contribution
Working on your first Pull Request? You can learn how from this *free* series, [How to Contribute to an Open Source Project on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github).
## Submitting code
Any code change should be submitted as a pull request. The description should explain what the code does and give steps to execute it. The pull request should also contain tests.
## Code review process
The bigger the pull request, the longer it will take to review and merge. Try to break down large pull requests in smaller chunks that are easier to review and merge.
It is also always helpful to have some context for your pull request. What was the purpose? Why does it matter to you?
## Financial contributions
We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/colly).
Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
## Questions
If you have any questions, create an [issue](https://github.com/gocolly/colly/issues/new) (protip: do a quick search first to see if someone else has already asked the same question!).
You can also reach us at hello@colly.opencollective.com.
## Credits
### Contributors
Thank you to all the people who have already contributed to colly!
<a href="graphs/contributors"><img src="https://opencollective.com/colly/contributors.svg?width=890" /></a>
### Backers
Thank you to all our backers! [[Become a backer](https://opencollective.com/colly#backer)]
<a href="https://opencollective.com/colly#backers" target="_blank"><img src="https://opencollective.com/colly/backers.svg?width=890"></a>
### Sponsors
Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/colly#sponsor))
<a href="https://opencollective.com/colly/sponsor/0/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/1/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/2/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/3/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/4/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/5/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/6/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/7/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/8/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/9/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/9/avatar.svg"></a>
<!-- This `CONTRIBUTING.md` is based on @nayafia's template https://github.com/nayafia/contributing-template -->

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,117 +0,0 @@
# Colly
Lightning Fast and Elegant Scraping Framework for Gophers
Colly provides a clean interface to write any kind of crawler/scraper/spider.
With Colly you can easily extract structured data from websites, which can be used for a wide range of applications, like data mining, data processing or archiving.
[![GoDoc](https://godoc.org/github.com/gocolly/colly?status.svg)](https://pkg.go.dev/github.com/gocolly/colly/v2)
[![Backers on Open Collective](https://opencollective.com/colly/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/colly/sponsors/badge.svg)](#sponsors) [![build status](https://img.shields.io/travis/gocolly/colly/master.svg?style=flat-square)](https://travis-ci.org/gocolly/colly)
[![report card](https://img.shields.io/badge/report%20card-a%2B-ff3333.svg?style=flat-square)](http://goreportcard.com/report/gocolly/colly)
[![view examples](https://img.shields.io/badge/learn%20by-examples-0077b3.svg?style=flat-square)](https://github.com/gocolly/colly/tree/master/_examples)
[![Code Coverage](https://img.shields.io/codecov/c/github/gocolly/colly/master.svg)](https://codecov.io/github/gocolly/colly?branch=master)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fgocolly%2Fcolly.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fgocolly%2Fcolly?ref=badge_shield)
[![Twitter URL](https://img.shields.io/badge/twitter-follow-green.svg)](https://twitter.com/gocolly)
## Features
- Clean API
- Fast (>1k requests/sec on a single core)
- Manages request delays and maximum concurrency per domain
- Automatic cookie and session handling
- Sync/async/parallel scraping
- Caching
- Automatic encoding of non-unicode responses
- Robots.txt support
- Distributed scraping
- Configuration via environment variables
- Extensions
## Example
```go
package main

import (
	"fmt"

	"github.com/gocolly/colly/v2"
)

func main() {
	c := colly.NewCollector()
	// Find and visit all links
	c.OnHTML("a[href]", func(e *colly.HTMLElement) {
		e.Request.Visit(e.Attr("href"))
	})
	c.OnRequest(func(r *colly.Request) {
		fmt.Println("Visiting", r.URL)
	})
	c.Visit("http://go-colly.org/")
}
```
See [examples folder](https://github.com/gocolly/colly/tree/master/_examples) for more detailed examples.
## Installation
Add colly to your `go.mod` file:
```
module github.com/x/y
go 1.14
require (
github.com/gocolly/colly/v2 latest
)
```
## Bugs
Bugs or suggestions? Visit the [issue tracker](https://github.com/gocolly/colly/issues) or join `#colly` on freenode
## Other Projects Using Colly
Below is a list of public, open source projects that use Colly:
- [greenpeace/check-my-pages](https://github.com/greenpeace/check-my-pages) Scraping script to test the Spanish Greenpeace web archive.
- [altsab/gowap](https://github.com/altsab/gowap) Wappalyzer implementation in Go.
- [jesuiscamille/goquotes](https://github.com/jesuiscamille/goquotes) A quotes scraper, making your day a little better!
- [jivesearch/jivesearch](https://github.com/jivesearch/jivesearch) A search engine that doesn't track you.
- [Leagify/colly-draft-prospects](https://github.com/Leagify/colly-draft-prospects) A scraper for future NFL Draft prospects.
- [lucasepe/go-ps4](https://github.com/lucasepe/go-ps4) Search the PlayStation Store for your favorite PS4 games using the command line.
- [yringler/inside-chassidus-scraper](https://github.com/yringler/inside-chassidus-scraper) Scrapes Rabbi Paltiel's web site for lesson metadata.
- [gamedb/gamedb](https://github.com/gamedb/gamedb) A database of Steam games.
- [lawzava/scrape](https://github.com/lawzava/scrape) CLI for email scraping from any website.
- [eureka101v/WeiboSpiderGo](https://github.com/eureka101v/WeiboSpiderGo) A Sina Weibo (Chinese Twitter) scraper.
- [Go-phie/gophie](https://github.com/Go-phie/gophie) Search, Download and Stream movies from your terminal
- [imthaghost/goclone](https://github.com/imthaghost/goclone) Clone websites to your computer within seconds.
If you are using Colly in a project please send a pull request to add it to the list.
## Contributors
This project exists thanks to all the people who contribute. [[Contribute]](CONTRIBUTING.md).
<a href="https://github.com/gocolly/colly/graphs/contributors"><img src="https://opencollective.com/colly/contributors.svg?width=890" /></a>
## Backers
Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/colly#backer)]
<a href="https://opencollective.com/colly#backers" target="_blank"><img src="https://opencollective.com/colly/backers.svg?width=890"></a>
## Sponsors
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/colly#sponsor)]
<a href="https://opencollective.com/colly/sponsor/0/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/1/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/2/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/3/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/4/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/5/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/6/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/7/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/8/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/colly/sponsor/9/website" target="_blank"><img src="https://opencollective.com/colly/sponsor/9/avatar.svg"></a>
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fgocolly%2Fcolly.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fgocolly%2Fcolly?ref=badge_large)

View File

@ -1 +0,0 @@
2.1.0

File diff suppressed because it is too large

View File

@ -1,87 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"sync"
)
// Context provides a tiny layer for passing data between callbacks
type Context struct {
contextMap map[string]interface{}
lock *sync.RWMutex
}
// NewContext initializes a new Context instance
func NewContext() *Context {
return &Context{
contextMap: make(map[string]interface{}),
lock: &sync.RWMutex{},
}
}
// UnmarshalBinary decodes Context value to nil
// This function is used by request caching
func (c *Context) UnmarshalBinary(_ []byte) error {
return nil
}
// MarshalBinary encodes Context value
// This function is used by request caching
func (c *Context) MarshalBinary() (_ []byte, _ error) {
return nil, nil
}
// Put stores a value of any type in Context
func (c *Context) Put(key string, value interface{}) {
c.lock.Lock()
c.contextMap[key] = value
c.lock.Unlock()
}
// Get retrieves a string value from Context.
// Get returns an empty string if key not found
func (c *Context) Get(key string) string {
c.lock.RLock()
defer c.lock.RUnlock()
if v, ok := c.contextMap[key]; ok {
return v.(string)
}
return ""
}
// GetAny retrieves a value from Context.
// GetAny returns nil if key not found
func (c *Context) GetAny(key string) interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
if v, ok := c.contextMap[key]; ok {
return v
}
return nil
}
// ForEach iterates over the context's key-value pairs, calling fn for each
func (c *Context) ForEach(fn func(k string, v interface{}) interface{}) []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
ret := make([]interface{}, 0, len(c.contextMap))
for k, v := range c.contextMap {
ret = append(ret, fn(k, v))
}
return ret
}
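
A minimal usage sketch (not part of the deleted file) of how Context is typically used to pass data between callbacks; the key name and URL are illustrative:

```go
package main

import (
	"fmt"

	"github.com/gocolly/colly/v2"
)

func main() {
	c := colly.NewCollector()

	// Stash a value on the request's context before the request is sent.
	c.OnRequest(func(r *colly.Request) {
		r.Ctx.Put("label", "front-page")
	})

	// The same Context instance is available on the response.
	c.OnResponse(func(r *colly.Response) {
		fmt.Println("label:", r.Ctx.Get("label"), "status:", r.StatusCode)
	})

	c.Visit("http://go-colly.org/")
}
```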

View File

@ -1,13 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "debug",
srcs = [
"debug.go",
"logdebugger.go",
"webdebugger.go",
],
importmap = "peridot.resf.org/vendor/github.com/gocolly/colly/v2/debug",
importpath = "github.com/gocolly/colly/v2/debug",
visibility = ["//visibility:public"],
)

View File

@ -1,36 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package debug
// Event represents an action inside a collector
type Event struct {
// Type is the type of the event
Type string
// RequestID identifies the HTTP request of the Event
RequestID uint32
// CollectorID identifies the collector of the Event
CollectorID uint32
// Values contains the event's key-value pairs. Different types of events
// can return different key-value pairs
Values map[string]string
}
// Debugger is an interface for different types of debugging backends
type Debugger interface {
// Init initializes the backend
Init() error
// Event receives a new collector event.
Event(e *Event)
}
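
As an illustration (not part of the diff), a custom backend only needs to satisfy the two methods above. The package and type below are hypothetical; only debug.Event and debug.Debugger come from colly:

```go
// Package collystats is a hypothetical example package, not part of colly.
package collystats

import (
	"log"
	"sync/atomic"

	"github.com/gocolly/colly/v2/debug"
)

// CountingDebugger is an illustrative debug.Debugger that only counts events.
type CountingDebugger struct {
	events int32
}

// Init implements debug.Debugger; there is nothing to set up.
func (d *CountingDebugger) Init() error { return nil }

// Event implements debug.Debugger and logs a running event count.
func (d *CountingDebugger) Event(e *debug.Event) {
	n := atomic.AddInt32(&d.events, 1)
	log.Printf("event #%d: type=%s collector=%d request=%d",
		n, e.Type, e.CollectorID, e.RequestID)
}
```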

View File

@ -1,54 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package debug
import (
"io"
"log"
"os"
"sync/atomic"
"time"
)
// LogDebugger is the simplest debugger, which prints log messages to STDERR
type LogDebugger struct {
// Output is the log destination; anything that implements the io.Writer
// interface can be used. Leave it blank to use STDERR
Output io.Writer
// Prefix appears at the beginning of each generated log line
Prefix string
// Flag defines the logging properties.
Flag int
logger *log.Logger
counter int32
start time.Time
}
// Init initializes the LogDebugger
func (l *LogDebugger) Init() error {
l.counter = 0
l.start = time.Now()
if l.Output == nil {
l.Output = os.Stderr
}
l.logger = log.New(l.Output, l.Prefix, l.Flag)
return nil
}
// Event receives Collector events and prints them to STDERR
func (l *LogDebugger) Event(e *Event) {
i := atomic.AddInt32(&l.counter, 1)
l.logger.Printf("[%06d] %d [%6d - %s] %q (%s)\n", i, e.CollectorID, e.RequestID, e.Type, e.Values, time.Since(l.start))
}
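
For context (not part of the deleted file), a collector is normally wired to this debugger through the colly.Debugger collector option; the URL below is a placeholder:

```go
package main

import (
	"github.com/gocolly/colly/v2"
	"github.com/gocolly/colly/v2/debug"
)

func main() {
	// Attach a LogDebugger so every collector event is logged to STDERR.
	c := colly.NewCollector(
		colly.Debugger(&debug.LogDebugger{}),
	)
	c.Visit("http://go-colly.org/")
}
```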

View File

@ -1,153 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package debug
import (
"encoding/json"
"log"
"net/http"
"sync"
"time"
)
// WebDebugger is a web-based debugging frontend for colly
type WebDebugger struct {
// Address is the address of the web server. It is 127.0.0.1:7676 by default.
Address string
initialized bool
CurrentRequests map[uint32]requestInfo
RequestLog []requestInfo
sync.Mutex
}
type requestInfo struct {
URL string
Started time.Time
Duration time.Duration
ResponseStatus string
ID uint32
CollectorID uint32
}
// Init initializes the WebDebugger
func (w *WebDebugger) Init() error {
if w.initialized {
return nil
}
defer func() {
w.initialized = true
}()
if w.Address == "" {
w.Address = "127.0.0.1:7676"
}
w.RequestLog = make([]requestInfo, 0)
w.CurrentRequests = make(map[uint32]requestInfo)
http.HandleFunc("/", w.indexHandler)
http.HandleFunc("/status", w.statusHandler)
log.Println("Starting debug webserver on", w.Address)
go http.ListenAndServe(w.Address, nil)
return nil
}
// Event updates the debugger's status
func (w *WebDebugger) Event(e *Event) {
w.Lock()
defer w.Unlock()
switch e.Type {
case "request":
w.CurrentRequests[e.RequestID] = requestInfo{
URL: e.Values["url"],
Started: time.Now(),
ID: e.RequestID,
CollectorID: e.CollectorID,
}
case "response", "error":
r := w.CurrentRequests[e.RequestID]
r.Duration = time.Since(r.Started)
r.ResponseStatus = e.Values["status"]
w.RequestLog = append(w.RequestLog, r)
delete(w.CurrentRequests, e.RequestID)
}
}
func (w *WebDebugger) indexHandler(wr http.ResponseWriter, r *http.Request) {
wr.Write([]byte(`<!DOCTYPE html>
<html>
<head>
<title>Colly Debugger WebUI</title>
<script src="https://code.jquery.com/jquery-latest.min.js" type="text/javascript"></script>
<link rel="stylesheet" type="text/css" href="https://semantic-ui.com/dist/semantic.min.css">
</head>
<body>
<div class="ui inverted vertical masthead center aligned segment" id="menu">
<div class="ui tiny secondary inverted menu">
<a class="item" href="/"><b>Colly WebDebugger</b></a>
</div>
</div>
<div class="ui grid container">
<div class="row">
<div class="eight wide column">
<h1>Current Requests <span id="current_request_count"></span></h1>
<div id="current_requests" class="ui small feed"></div>
</div>
<div class="eight wide column">
<h1>Finished Requests <span id="request_log_count"></span></h1>
<div id="request_log" class="ui small feed"></div>
</div>
</div>
</div>
<script>
function curRequestTpl(url, started, collectorId) {
return '<div class="event"><div class="content"><div class="summary">' + url + '</div><div class="meta">Collector #' + collectorId + ' - ' + started + "</div></div></div>";
}
function requestLogTpl(url, duration, collectorId) {
return '<div class="event"><div class="content"><div class="summary">' + url + '</div><div class="meta">Collector #' + collectorId + ' - ' + (duration/1000000000) + "s</div></div></div>";
}
function fetchStatus() {
$.getJSON("/status", function(data) {
$("#current_requests").html("");
$("#request_log").html("");
$("#current_request_count").text('(' + Object.keys(data.CurrentRequests).length + ')');
$("#request_log_count").text('(' + data.RequestLog.length + ')');
for(var i in data.CurrentRequests) {
var r = data.CurrentRequests[i];
$("#current_requests").append(curRequestTpl(r.URL, r.Started, r.CollectorID));
}
for(var i in data.RequestLog.reverse()) {
var r = data.RequestLog[i];
$("#request_log").append(requestLogTpl(r.URL, r.Duration, r.CollectorID));
}
setTimeout(fetchStatus, 1000);
});
}
$(document).ready(function() {
fetchStatus();
});
</script>
</body>
</html>
`))
}
func (w *WebDebugger) statusHandler(wr http.ResponseWriter, r *http.Request) {
w.Lock()
jsonData, err := json.MarshalIndent(w, "", " ")
w.Unlock()
if err != nil {
panic(err)
}
wr.Write(jsonData)
}

View File

@ -1,23 +0,0 @@
module github.com/gocolly/colly/v2
go 1.12
require (
github.com/PuerkitoBio/goquery v1.5.1
github.com/andybalholm/cascadia v1.2.0 // indirect
github.com/antchfx/htmlquery v1.2.3
github.com/antchfx/xmlquery v1.2.4
github.com/antchfx/xpath v1.1.8 // indirect
github.com/gobwas/glob v0.2.3
github.com/gocolly/colly v1.2.0
github.com/golang/protobuf v1.4.2 // indirect
github.com/jawher/mow.cli v1.1.0
github.com/kennygrant/sanitize v1.2.4
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca
github.com/temoto/robotstxt v1.1.1
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 // indirect
golang.org/x/net v0.0.0-20200602114024-627f9648deb9
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b // indirect
google.golang.org/appengine v1.6.6
google.golang.org/protobuf v1.24.0 // indirect
)

View File

@ -1,134 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/PuerkitoBio/goquery v1.5.0 h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=
github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE=
github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/andybalholm/cascadia v1.2.0 h1:vuRCkM5Ozh/BfmsaTm26kbjm0mIOM3yS5Ek/F5h18aE=
github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY=
github.com/antchfx/htmlquery v1.0.0 h1:O5IXz8fZF3B3MW+B33MZWbTHBlYmcfw0BAxgErHuaMA=
github.com/antchfx/htmlquery v1.0.0/go.mod h1:MS9yksVSQXls00iXkiMqXr0J+umL/AmxXKuP28SUJM8=
github.com/antchfx/htmlquery v1.2.3 h1:sP3NFDneHx2stfNXCKbhHFo8XgNjCACnU/4AO5gWz6M=
github.com/antchfx/htmlquery v1.2.3/go.mod h1:B0ABL+F5irhhMWg54ymEZinzMSi0Kt3I2if0BLYa3V0=
github.com/antchfx/xmlquery v1.0.0 h1:YuEPqexGG2opZKNc9JU3Zw6zFXwC47wNcy6/F8oKsrM=
github.com/antchfx/xmlquery v1.0.0/go.mod h1:/+CnyD/DzHRnv2eRxrVbieRU/FIF6N0C+7oTtyUtCKk=
github.com/antchfx/xmlquery v1.2.4 h1:T/SH1bYdzdjTMoz2RgsfVKbM5uWh3gjDYYepFqQmFv4=
github.com/antchfx/xmlquery v1.2.4/go.mod h1:KQQuESaxSlqugE2ZBcM/qn+ebIpt+d+4Xx7YcSGAIrM=
github.com/antchfx/xpath v1.0.0 h1:Q5gFgh2O40VTSwMOVbFE7nFNRBu3tS21Tn0KAWeEjtk=
github.com/antchfx/xpath v1.0.0/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/antchfx/xpath v1.1.6/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/antchfx/xpath v1.1.8 h1:PcL6bIX42Px5usSx6xRYw/wjB3wYGkj0MJ9MBzEKVgk=
github.com/antchfx/xpath v1.1.8/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gocolly/colly v1.2.0 h1:qRz9YAn8FIH0qzgNUw+HT9UN7wm1oF9OBAilwEWpyrI=
github.com/gocolly/colly v1.2.0/go.mod h1:Hof5T3ZswNVsOHYmba1u03W65HDWgpV5HifSuueE0EA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/jawher/mow.cli v1.1.0 h1:NdtHXRc0CwZQ507wMvQ/IS+Q3W3x2fycn973/b8Zuk8=
github.com/jawher/mow.cli v1.1.0/go.mod h1:aNaQlc7ozF3vw6IJ2dHjp2ZFiA4ozMIYY6PyuRJwlUg=
github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=
github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/temoto/robotstxt v1.1.1 h1:Gh8RCs8ouX3hRSxxK7B1mO5RFByQ4CmJZDwgom++JaA=
github.com/temoto/robotstxt v1.1.1/go.mod h1:+1AmkuG3IYkh1kv0d2qEB9Le88ehNO0zwOr3ujewlOo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -1,131 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"strings"
"github.com/PuerkitoBio/goquery"
"golang.org/x/net/html"
)
// HTMLElement is the representation of an HTML tag.
type HTMLElement struct {
// Name is the name of the tag
Name string
Text string
attributes []html.Attribute
// Request is the request object of the element's HTML document
Request *Request
// Response is the Response object of the element's HTML document
Response *Response
// DOM is the goquery parsed DOM object of the page. DOM is relative
// to the current HTMLElement
DOM *goquery.Selection
// Index stores the position of the current element within all the elements matched by an OnHTML callback
Index int
}
// NewHTMLElementFromSelectionNode creates an HTMLElement from a goquery.Selection Node.
func NewHTMLElementFromSelectionNode(resp *Response, s *goquery.Selection, n *html.Node, idx int) *HTMLElement {
return &HTMLElement{
Name: n.Data,
Request: resp.Request,
Response: resp,
Text: goquery.NewDocumentFromNode(n).Text(),
DOM: s,
Index: idx,
attributes: n.Attr,
}
}
// Attr returns the selected attribute of an HTMLElement, or an empty string
// if no attribute is found
func (h *HTMLElement) Attr(k string) string {
for _, a := range h.attributes {
if a.Key == k {
return a.Val
}
}
return ""
}
// ChildText returns the concatenated and stripped text content of the matching
// elements.
func (h *HTMLElement) ChildText(goquerySelector string) string {
return strings.TrimSpace(h.DOM.Find(goquerySelector).Text())
}
// ChildTexts returns the stripped text content of all the matching
// elements.
func (h *HTMLElement) ChildTexts(goquerySelector string) []string {
var res []string
h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
res = append(res, strings.TrimSpace(s.Text()))
})
return res
}
// ChildAttr returns the stripped value of the first matching
// element's attribute.
func (h *HTMLElement) ChildAttr(goquerySelector, attrName string) string {
if attr, ok := h.DOM.Find(goquerySelector).Attr(attrName); ok {
return strings.TrimSpace(attr)
}
return ""
}
// ChildAttrs returns the stripped values of all the matching
// elements' attributes.
func (h *HTMLElement) ChildAttrs(goquerySelector, attrName string) []string {
var res []string
h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
if attr, ok := s.Attr(attrName); ok {
res = append(res, strings.TrimSpace(attr))
}
})
return res
}
// ForEach iterates over the elements matched by the first argument
// and calls the callback function on every HTMLElement match.
func (h *HTMLElement) ForEach(goquerySelector string, callback func(int, *HTMLElement)) {
i := 0
h.DOM.Find(goquerySelector).Each(func(_ int, s *goquery.Selection) {
for _, n := range s.Nodes {
callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i))
i++
}
})
}
// ForEachWithBreak iterates over the elements matched by the first argument
// and calls the callback function on every HTMLElement match.
// It is identical to ForEach except that it is possible to break
// out of the loop by returning false in the callback function.
func (h *HTMLElement) ForEachWithBreak(goquerySelector string, callback func(int, *HTMLElement) bool) {
i := 0
h.DOM.Find(goquerySelector).EachWithBreak(func(_ int, s *goquery.Selection) bool {
for _, n := range s.Nodes {
if callback(i, NewHTMLElementFromSelectionNode(h.Response, s, n, i)) {
i++
return true
}
}
return false
})
}
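
A brief usage sketch (not part of the deleted file) showing how these helpers are commonly combined in an OnHTML callback; the selectors and URL are illustrative:

```go
package main

import (
	"fmt"

	"github.com/gocolly/colly/v2"
)

func main() {
	c := colly.NewCollector()

	// For every article element, pull text and attributes via the helpers above.
	c.OnHTML("article", func(e *colly.HTMLElement) {
		title := e.ChildText("h2")           // stripped text of the first matching <h2>
		link := e.ChildAttr("a", "href")     // href attribute of the first matching <a>
		tags := e.ChildTexts("ul.tags > li") // text of every matching tag element
		fmt.Println(title, link, tags)
	})

	c.Visit("http://go-colly.org/")
}
```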

View File

@ -1,237 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"crypto/sha1"
"encoding/gob"
"encoding/hex"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"
"compress/gzip"
"github.com/gobwas/glob"
)
type httpBackend struct {
LimitRules []*LimitRule
Client *http.Client
lock *sync.RWMutex
}
type checkHeadersFunc func(statusCode int, header http.Header) bool
// LimitRule provides connection restrictions for domains.
// Both DomainRegexp and DomainGlob can be used to specify
// the included domain patterns, but at least one is required.
// There can be two kinds of limitations:
// - Parallelism: set a limit for the number of concurrent requests to matching domains
// - Delay: wait the specified amount of time between requests (parallelism is 1 in this case)
type LimitRule struct {
// DomainRegexp is a regular expression to match against domains
DomainRegexp string
// DomainGlob is a glob pattern to match against domains
DomainGlob string
// Delay is the duration to wait before creating a new request to the matching domains
Delay time.Duration
// RandomDelay is the extra randomized duration to wait added to Delay before creating a new request
RandomDelay time.Duration
// Parallelism is the maximum number of allowed concurrent requests to the matching domains
Parallelism int
waitChan chan bool
compiledRegexp *regexp.Regexp
compiledGlob glob.Glob
}
// Init initializes the private members of LimitRule
func (r *LimitRule) Init() error {
waitChanSize := 1
if r.Parallelism > 1 {
waitChanSize = r.Parallelism
}
r.waitChan = make(chan bool, waitChanSize)
hasPattern := false
if r.DomainRegexp != "" {
c, err := regexp.Compile(r.DomainRegexp)
if err != nil {
return err
}
r.compiledRegexp = c
hasPattern = true
}
if r.DomainGlob != "" {
c, err := glob.Compile(r.DomainGlob)
if err != nil {
return err
}
r.compiledGlob = c
hasPattern = true
}
if !hasPattern {
return ErrNoPattern
}
return nil
}
func (h *httpBackend) Init(jar http.CookieJar) {
rand.Seed(time.Now().UnixNano())
h.Client = &http.Client{
Jar: jar,
Timeout: 10 * time.Second,
}
h.lock = &sync.RWMutex{}
}
// Match checks that the domain parameter triggers the rule
func (r *LimitRule) Match(domain string) bool {
match := false
if r.compiledRegexp != nil && r.compiledRegexp.MatchString(domain) {
match = true
}
if r.compiledGlob != nil && r.compiledGlob.Match(domain) {
match = true
}
return match
}
func (h *httpBackend) GetMatchingRule(domain string) *LimitRule {
if h.LimitRules == nil {
return nil
}
h.lock.RLock()
defer h.lock.RUnlock()
for _, r := range h.LimitRules {
if r.Match(domain) {
return r
}
}
return nil
}
func (h *httpBackend) Cache(request *http.Request, bodySize int, checkHeadersFunc checkHeadersFunc, cacheDir string) (*Response, error) {
if cacheDir == "" || request.Method != "GET" {
return h.Do(request, bodySize, checkHeadersFunc)
}
sum := sha1.Sum([]byte(request.URL.String()))
hash := hex.EncodeToString(sum[:])
dir := path.Join(cacheDir, hash[:2])
filename := path.Join(dir, hash)
if file, err := os.Open(filename); err == nil {
resp := new(Response)
err := gob.NewDecoder(file).Decode(resp)
file.Close()
if resp.StatusCode < 500 {
return resp, err
}
}
resp, err := h.Do(request, bodySize, checkHeadersFunc)
if err != nil || resp.StatusCode >= 500 {
return resp, err
}
if _, err := os.Stat(dir); err != nil {
if err := os.MkdirAll(dir, 0750); err != nil {
return resp, err
}
}
file, err := os.Create(filename + "~")
if err != nil {
return resp, err
}
if err := gob.NewEncoder(file).Encode(resp); err != nil {
file.Close()
return resp, err
}
file.Close()
return resp, os.Rename(filename+"~", filename)
}
func (h *httpBackend) Do(request *http.Request, bodySize int, checkHeadersFunc checkHeadersFunc) (*Response, error) {
r := h.GetMatchingRule(request.URL.Host)
if r != nil {
r.waitChan <- true
defer func(r *LimitRule) {
randomDelay := time.Duration(0)
if r.RandomDelay != 0 {
randomDelay = time.Duration(rand.Int63n(int64(r.RandomDelay)))
}
time.Sleep(r.Delay + randomDelay)
<-r.waitChan
}(r)
}
res, err := h.Client.Do(request)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.Request != nil {
*request = *res.Request
}
if !checkHeadersFunc(res.StatusCode, res.Header) {
// closing res.Body (see defer above) without reading it aborts
// the download
return nil, ErrAbortedAfterHeaders
}
var bodyReader io.Reader = res.Body
if bodySize > 0 {
bodyReader = io.LimitReader(bodyReader, int64(bodySize))
}
contentEncoding := strings.ToLower(res.Header.Get("Content-Encoding"))
if !res.Uncompressed && (strings.Contains(contentEncoding, "gzip") || (contentEncoding == "" && strings.Contains(strings.ToLower(res.Header.Get("Content-Type")), "gzip")) || strings.HasSuffix(strings.ToLower(request.URL.Path), ".xml.gz")) {
bodyReader, err = gzip.NewReader(bodyReader)
if err != nil {
return nil, err
}
defer bodyReader.(*gzip.Reader).Close()
}
body, err := ioutil.ReadAll(bodyReader)
if err != nil {
return nil, err
}
return &Response{
StatusCode: res.StatusCode,
Body: body,
Headers: &res.Header,
}, nil
}
func (h *httpBackend) Limit(rule *LimitRule) error {
h.lock.Lock()
if h.LimitRules == nil {
h.LimitRules = make([]*LimitRule, 0, 8)
}
h.LimitRules = append(h.LimitRules, rule)
h.lock.Unlock()
return rule.Init()
}
func (h *httpBackend) Limits(rules []*LimitRule) error {
for _, r := range rules {
if err := h.Limit(r); err != nil {
return err
}
}
return nil
}
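
A minimal sketch (not in the diff) of how a LimitRule is normally registered on a collector via Collector.Limit; the glob pattern, delay, and URL are illustrative values:

```go
package main

import (
	"log"
	"time"

	"github.com/gocolly/colly/v2"
)

func main() {
	c := colly.NewCollector()

	// Throttle all domains: at most 2 concurrent requests,
	// with up to 5s of extra random delay between them.
	err := c.Limit(&colly.LimitRule{
		DomainGlob:  "*",
		Parallelism: 2,
		RandomDelay: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}

	c.Visit("http://go-colly.org/")
}
```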

View File

@ -1,37 +0,0 @@
package colly
import (
"net/http"
"net/http/httptrace"
"time"
)
// HTTPTrace provides a data structure for storing an HTTP trace.
type HTTPTrace struct {
start, connect time.Time
ConnectDuration time.Duration
FirstByteDuration time.Duration
}
// trace returns an httptrace.ClientTrace object to be used with an HTTP
// request via httptrace.WithClientTrace() that fills in the HTTPTrace.
func (ht *HTTPTrace) trace() *httptrace.ClientTrace {
trace := &httptrace.ClientTrace{
ConnectStart: func(network, addr string) { ht.connect = time.Now() },
ConnectDone: func(network, addr string, err error) {
ht.ConnectDuration = time.Since(ht.connect)
},
GetConn: func(hostPort string) { ht.start = time.Now() },
GotFirstResponseByte: func() {
ht.FirstByteDuration = time.Since(ht.start)
},
}
return trace
}
// WithTrace returns the given HTTP Request with this HTTPTrace added to its
// context.
func (ht *HTTPTrace) WithTrace(req *http.Request) *http.Request {
return req.WithContext(httptrace.WithClientTrace(req.Context(), ht.trace()))
}
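
For illustration only (not part of the diff): tracing is typically switched on through the collector's TraceHTTP field, and the resulting timings are read from Response.Trace; the URL is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/gocolly/colly/v2"
)

func main() {
	c := colly.NewCollector()
	c.TraceHTTP = true // ask the collector to attach an HTTPTrace to every request

	c.OnResponse(func(r *colly.Response) {
		// Trace is only populated when TraceHTTP is enabled.
		if r.Trace != nil {
			fmt.Printf("connect: %v, first byte: %v\n",
				r.Trace.ConnectDuration, r.Trace.FirstByteDuration)
		}
	})

	c.Visit("http://go-colly.org/")
}
```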

View File

@ -1,188 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync/atomic"
)
// Request is the representation of an HTTP request made by a Collector
type Request struct {
// URL is the parsed URL of the HTTP request
URL *url.URL
// Headers contains the Request's HTTP headers
Headers *http.Header
// Ctx is a context between a Request and a Response
Ctx *Context
// Depth is the number of the parents of the request
Depth int
// Method is the HTTP method of the request
Method string
// Body is the request body which is used on POST/PUT requests
Body io.Reader
// ResponseCharacterEncoding is the character encoding of the response body.
// Leave it blank to allow automatic character encoding detection for the response body.
// It is empty by default and can be set in the OnRequest callback.
ResponseCharacterEncoding string
// ID is the Unique identifier of the request
ID uint32
collector *Collector
abort bool
baseURL *url.URL
// ProxyURL is the proxy address that handles the request
ProxyURL string
}
type serializableRequest struct {
URL string
Method string
Depth int
Body []byte
ID uint32
Ctx map[string]interface{}
Headers http.Header
}
// New creates a new request with the context of the original request
func (r *Request) New(method, URL string, body io.Reader) (*Request, error) {
u, err := url.Parse(URL)
if err != nil {
return nil, err
}
return &Request{
Method: method,
URL: u,
Body: body,
Ctx: r.Ctx,
Headers: &http.Header{},
ID: atomic.AddUint32(&r.collector.requestCount, 1),
collector: r.collector,
}, nil
}
// Abort cancels the HTTP request when called in an OnRequest callback
func (r *Request) Abort() {
r.abort = true
}
// AbsoluteURL returns the resolved absolute URL of a URL chunk.
// AbsoluteURL returns an empty string if the URL chunk is a fragment or
// could not be parsed
func (r *Request) AbsoluteURL(u string) string {
if strings.HasPrefix(u, "#") {
return ""
}
var base *url.URL
if r.baseURL != nil {
base = r.baseURL
} else {
base = r.URL
}
absURL, err := base.Parse(u)
if err != nil {
return ""
}
absURL.Fragment = ""
if absURL.Scheme == "//" {
absURL.Scheme = r.URL.Scheme
}
return absURL.String()
}
// Visit continues Collector's collecting job by creating a
// request and preserves the Context of the previous request.
// Visit also calls the previously provided callbacks
func (r *Request) Visit(URL string) error {
return r.collector.scrape(r.AbsoluteURL(URL), "GET", r.Depth+1, nil, r.Ctx, nil, true)
}
// HasVisited checks if the provided URL has been visited
func (r *Request) HasVisited(URL string) (bool, error) {
return r.collector.HasVisited(URL)
}
// Post continues a collector job by creating a POST request and preserves the Context
// of the previous request.
// Post also calls the previously provided callbacks
func (r *Request) Post(URL string, requestData map[string]string) error {
return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, createFormReader(requestData), r.Ctx, nil, true)
}
// PostRaw starts a collector job by creating a POST request with raw binary data.
// PostRaw preserves the Context of the previous request
// and calls the previously provided callbacks
func (r *Request) PostRaw(URL string, requestData []byte) error {
return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, bytes.NewReader(requestData), r.Ctx, nil, true)
}
// PostMultipart starts a collector job by creating a multipart POST request
// with raw binary data. PostMultipart also calls the previously provided
// callbacks
func (r *Request) PostMultipart(URL string, requestData map[string][]byte) error {
boundary := randomBoundary()
hdr := http.Header{}
hdr.Set("Content-Type", "multipart/form-data; boundary="+boundary)
hdr.Set("User-Agent", r.collector.UserAgent)
return r.collector.scrape(r.AbsoluteURL(URL), "POST", r.Depth+1, createMultipartReader(boundary, requestData), r.Ctx, hdr, true)
}
// Retry submits HTTP request again with the same parameters
func (r *Request) Retry() error {
r.Headers.Del("Cookie")
return r.collector.scrape(r.URL.String(), r.Method, r.Depth, r.Body, r.Ctx, *r.Headers, false)
}
// Do submits the request
func (r *Request) Do() error {
return r.collector.scrape(r.URL.String(), r.Method, r.Depth, r.Body, r.Ctx, *r.Headers, !r.collector.AllowURLRevisit)
}
// Marshal serializes the Request
func (r *Request) Marshal() ([]byte, error) {
ctx := make(map[string]interface{})
if r.Ctx != nil {
r.Ctx.ForEach(func(k string, v interface{}) interface{} {
ctx[k] = v
return nil
})
}
var err error
var body []byte
if r.Body != nil {
body, err = ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
}
sr := &serializableRequest{
URL: r.URL.String(),
Method: r.Method,
Depth: r.Depth,
Body: body,
ID: r.ID,
Ctx: ctx,
}
if r.Headers != nil {
sr.Headers = *r.Headers
}
return json.Marshal(sr)
}
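
As a small usage sketch (not part of the deleted file), a follow-up POST is typically issued from inside a callback through the Request helpers above; the form selector, field name, and URL are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/gocolly/colly/v2"
)

func main() {
	c := colly.NewCollector()

	// Follow a search form by submitting a POST that keeps the previous request's context.
	c.OnHTML("form#search", func(e *colly.HTMLElement) {
		err := e.Request.Post(e.Attr("action"), map[string]string{
			"q": "colly", // hypothetical form field
		})
		if err != nil {
			fmt.Println("post failed:", err)
		}
	})

	c.Visit("http://go-colly.org/")
}
```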

View File

@ -1,115 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"bytes"
"fmt"
"io/ioutil"
"mime"
"net/http"
"strings"
"github.com/saintfish/chardet"
"golang.org/x/net/html/charset"
)
// Response is the representation of an HTTP response made by a Collector
type Response struct {
// StatusCode is the status code of the Response
StatusCode int
// Body is the content of the Response
Body []byte
// Ctx is a context between a Request and a Response
Ctx *Context
// Request is the Request object of the response
Request *Request
// Headers contains the Response's HTTP headers
Headers *http.Header
// Trace contains the HTTPTrace for the request. Will only be set by the
// collector if Collector.TraceHTTP is set to true.
Trace *HTTPTrace
}
// Save writes response body to disk
func (r *Response) Save(fileName string) error {
return ioutil.WriteFile(fileName, r.Body, 0644)
}
// FileName returns the sanitized file name parsed from "Content-Disposition"
// header or from URL
func (r *Response) FileName() string {
_, params, err := mime.ParseMediaType(r.Headers.Get("Content-Disposition"))
if fName, ok := params["filename"]; ok && err == nil {
return SanitizeFileName(fName)
}
if r.Request.URL.RawQuery != "" {
return SanitizeFileName(fmt.Sprintf("%s_%s", r.Request.URL.Path, r.Request.URL.RawQuery))
}
return SanitizeFileName(strings.TrimPrefix(r.Request.URL.Path, "/"))
}
func (r *Response) fixCharset(detectCharset bool, defaultEncoding string) error {
if len(r.Body) == 0 {
return nil
}
if defaultEncoding != "" {
tmpBody, err := encodeBytes(r.Body, "text/plain; charset="+defaultEncoding)
if err != nil {
return err
}
r.Body = tmpBody
return nil
}
contentType := strings.ToLower(r.Headers.Get("Content-Type"))
if strings.Contains(contentType, "image/") ||
strings.Contains(contentType, "video/") ||
strings.Contains(contentType, "audio/") ||
strings.Contains(contentType, "font/") {
// These MIME types should not have textual data.
return nil
}
if !strings.Contains(contentType, "charset") {
if !detectCharset {
return nil
}
d := chardet.NewTextDetector()
r, err := d.DetectBest(r.Body)
if err != nil {
return err
}
contentType = "text/plain; charset=" + r.Charset
}
if strings.Contains(contentType, "utf-8") || strings.Contains(contentType, "utf8") {
return nil
}
tmpBody, err := encodeBytes(r.Body, contentType)
if err != nil {
return err
}
r.Body = tmpBody
return nil
}
func encodeBytes(b []byte, contentType string) ([]byte, error) {
r, err := charset.NewReader(bytes.NewReader(b), contentType)
if err != nil {
return nil, err
}
return ioutil.ReadAll(r)
}
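
A short illustrative sketch (not part of the diff) of saving each response to disk with the helpers above; the collector setup and URL are placeholders:

```go
package main

import (
	"log"

	"github.com/gocolly/colly/v2"
)

func main() {
	c := colly.NewCollector()

	// Persist every response body under the sanitized file name derived above.
	c.OnResponse(func(r *colly.Response) {
		if err := r.Save(r.FileName()); err != nil {
			log.Println("save failed:", err)
		}
	})

	c.Visit("http://go-colly.org/")
}
```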

View File

@ -1,9 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "storage",
srcs = ["storage.go"],
importmap = "peridot.resf.org/vendor/github.com/gocolly/colly/v2/storage",
importpath = "github.com/gocolly/colly/v2/storage",
visibility = ["//visibility:public"],
)

View File

@ -1,128 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"net/http"
"net/http/cookiejar"
"net/url"
"strings"
"sync"
)
// Storage is an interface which handles Collector's internal data,
// like visited URLs and cookies.
// The default Storage of the Collector is the InMemoryStorage.
// Collector's storage can be changed by calling Collector.SetStorage()
// function.
type Storage interface {
// Init initializes the storage
Init() error
// Visited receives and stores a request ID that is visited by the Collector
Visited(requestID uint64) error
// IsVisited returns true if the request was visited before IsVisited
// is called
IsVisited(requestID uint64) (bool, error)
// Cookies retrieves stored cookies for a given host
Cookies(u *url.URL) string
// SetCookies stores cookies for a given host
SetCookies(u *url.URL, cookies string)
}
// InMemoryStorage is the default storage backend of colly.
// InMemoryStorage keeps cookies and visited URLs in memory
// without persisting data on the disk.
type InMemoryStorage struct {
visitedURLs map[uint64]bool
lock *sync.RWMutex
jar *cookiejar.Jar
}
// Init initializes InMemoryStorage
func (s *InMemoryStorage) Init() error {
if s.visitedURLs == nil {
s.visitedURLs = make(map[uint64]bool)
}
if s.lock == nil {
s.lock = &sync.RWMutex{}
}
if s.jar == nil {
var err error
s.jar, err = cookiejar.New(nil)
return err
}
return nil
}
// Visited implements Storage.Visited()
func (s *InMemoryStorage) Visited(requestID uint64) error {
s.lock.Lock()
s.visitedURLs[requestID] = true
s.lock.Unlock()
return nil
}
// IsVisited implements Storage.IsVisited()
func (s *InMemoryStorage) IsVisited(requestID uint64) (bool, error) {
s.lock.RLock()
visited := s.visitedURLs[requestID]
s.lock.RUnlock()
return visited, nil
}
// Cookies implements Storage.Cookies()
func (s *InMemoryStorage) Cookies(u *url.URL) string {
return StringifyCookies(s.jar.Cookies(u))
}
// SetCookies implements Storage.SetCookies()
func (s *InMemoryStorage) SetCookies(u *url.URL, cookies string) {
s.jar.SetCookies(u, UnstringifyCookies(cookies))
}
// Close implements Storage.Close()
func (s *InMemoryStorage) Close() error {
return nil
}
// StringifyCookies serializes list of http.Cookies to string
func StringifyCookies(cookies []*http.Cookie) string {
// Stringify cookies.
cs := make([]string, len(cookies))
for i, c := range cookies {
cs[i] = c.String()
}
return strings.Join(cs, "\n")
}
// UnstringifyCookies deserializes a cookie string to http.Cookies
func UnstringifyCookies(s string) []*http.Cookie {
h := http.Header{}
for _, c := range strings.Split(s, "\n") {
h.Add("Set-Cookie", c)
}
r := http.Response{Header: h}
return r.Cookies()
}
// ContainsCookie checks if a cookie name is represented in cookies
func ContainsCookie(cookies []*http.Cookie, name string) bool {
for _, c := range cookies {
if c.Name == name {
return true
}
}
return false
}
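
As a sketch (not part of the diff), an alternative backend is swapped in through Collector.SetStorage, as the doc comment above describes; here the in-memory default is installed explicitly and the cookie helpers are round-tripped. The cookie values and URL are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gocolly/colly/v2"
	"github.com/gocolly/colly/v2/storage"
)

func main() {
	c := colly.NewCollector()

	// Explicitly install the default in-memory backend; a persistent
	// implementation of storage.Storage could be plugged in the same way.
	if err := c.SetStorage(&storage.InMemoryStorage{}); err != nil {
		log.Fatal(err)
	}

	// The package-level helpers round-trip cookies through their string form.
	serialized := storage.StringifyCookies([]*http.Cookie{{Name: "session", Value: "abc"}})
	fmt.Println(storage.ContainsCookie(storage.UnstringifyCookies(serialized), "session")) // true

	c.Visit("http://go-colly.org/")
}
```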

View File

@ -1,218 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"errors"
"reflect"
"strings"
"github.com/PuerkitoBio/goquery"
)
// Unmarshal is a shorthand for colly.UnmarshalHTML
func (h *HTMLElement) Unmarshal(v interface{}) error {
return UnmarshalHTML(v, h.DOM, nil)
}
// UnmarshalWithMap is a shorthand for colly.UnmarshalHTML, extended to allow maps to be passed in.
func (h *HTMLElement) UnmarshalWithMap(v interface{}, structMap map[string]string) error {
return UnmarshalHTML(v, h.DOM, structMap)
}
// UnmarshalHTML declaratively extracts text or attributes to a struct from
// an HTML response using struct tags composed of CSS selectors.
// Allowed struct tags:
// - "selector" (required): CSS (goquery) selector of the desired data
// - "attr" (optional): Selects the matching element's attribute's value.
// Leave it blank or omit to get the text of the element.
//
// Example struct declaration:
//
// type Nested struct {
// String string `selector:"div > p"`
// Classes []string `selector:"li" attr:"class"`
// Struct *Nested `selector:"div > div"`
// }
//
// Supported types: struct, *struct, string, []string
func UnmarshalHTML(v interface{}, s *goquery.Selection, structMap map[string]string) error {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return errors.New("Invalid type or nil-pointer")
}
sv := rv.Elem()
st := reflect.TypeOf(v).Elem()
if structMap != nil {
for k, v := range structMap {
attrV := sv.FieldByName(k)
if !attrV.CanAddr() || !attrV.CanSet() {
continue
}
if err := unmarshalSelector(s, attrV, v); err != nil {
return err
}
}
} else {
for i := 0; i < sv.NumField(); i++ {
attrV := sv.Field(i)
if !attrV.CanAddr() || !attrV.CanSet() {
continue
}
if err := unmarshalAttr(s, attrV, st.Field(i)); err != nil {
return err
}
}
}
return nil
}
func unmarshalSelector(s *goquery.Selection, attrV reflect.Value, selector string) error {
// A selector of "-" specifies that the field should be ignored.
if selector == "-" {
return nil
}
htmlAttr := ""
// TODO support more types
switch attrV.Kind() {
case reflect.Slice:
if err := unmarshalSlice(s, selector, htmlAttr, attrV); err != nil {
return err
}
case reflect.String:
val := getDOMValue(s.Find(selector), htmlAttr)
attrV.Set(reflect.Indirect(reflect.ValueOf(val)))
case reflect.Struct:
if err := unmarshalStruct(s, selector, attrV); err != nil {
return err
}
case reflect.Ptr:
if err := unmarshalPtr(s, selector, attrV); err != nil {
return err
}
default:
return errors.New("Invalid type: " + attrV.String())
}
return nil
}
func unmarshalAttr(s *goquery.Selection, attrV reflect.Value, attrT reflect.StructField) error {
selector := attrT.Tag.Get("selector")
// A selector of "-" specifies that the field should be ignored.
if selector == "-" {
return nil
}
htmlAttr := attrT.Tag.Get("attr")
// TODO support more types
switch attrV.Kind() {
case reflect.Slice:
if err := unmarshalSlice(s, selector, htmlAttr, attrV); err != nil {
return err
}
case reflect.String:
val := getDOMValue(s.Find(selector), htmlAttr)
attrV.Set(reflect.Indirect(reflect.ValueOf(val)))
case reflect.Struct:
if err := unmarshalStruct(s, selector, attrV); err != nil {
return err
}
case reflect.Ptr:
if err := unmarshalPtr(s, selector, attrV); err != nil {
return err
}
default:
return errors.New("Invalid type: " + attrV.String())
}
return nil
}
func unmarshalStruct(s *goquery.Selection, selector string, attrV reflect.Value) error {
newS := s
if selector != "" {
newS = newS.Find(selector)
}
if newS.Nodes == nil {
return nil
}
v := reflect.New(attrV.Type())
err := UnmarshalHTML(v.Interface(), newS, nil)
if err != nil {
return err
}
attrV.Set(reflect.Indirect(v))
return nil
}
func unmarshalPtr(s *goquery.Selection, selector string, attrV reflect.Value) error {
newS := s
if selector != "" {
newS = newS.Find(selector)
}
if newS.Nodes == nil {
return nil
}
e := attrV.Type().Elem()
if e.Kind() != reflect.Struct {
return errors.New("Invalid slice type")
}
v := reflect.New(e)
err := UnmarshalHTML(v.Interface(), newS, nil)
if err != nil {
return err
}
attrV.Set(v)
return nil
}
func unmarshalSlice(s *goquery.Selection, selector, htmlAttr string, attrV reflect.Value) error {
if attrV.Pointer() == 0 {
v := reflect.MakeSlice(attrV.Type(), 0, 0)
attrV.Set(v)
}
switch attrV.Type().Elem().Kind() {
case reflect.String:
s.Find(selector).Each(func(_ int, s *goquery.Selection) {
val := getDOMValue(s, htmlAttr)
attrV.Set(reflect.Append(attrV, reflect.Indirect(reflect.ValueOf(val))))
})
case reflect.Ptr:
s.Find(selector).Each(func(_ int, innerSel *goquery.Selection) {
someVal := reflect.New(attrV.Type().Elem().Elem())
UnmarshalHTML(someVal.Interface(), innerSel, nil)
attrV.Set(reflect.Append(attrV, someVal))
})
case reflect.Struct:
s.Find(selector).Each(func(_ int, innerSel *goquery.Selection) {
someVal := reflect.New(attrV.Type().Elem())
UnmarshalHTML(someVal.Interface(), innerSel, nil)
attrV.Set(reflect.Append(attrV, reflect.Indirect(someVal)))
})
default:
return errors.New("Invalid slice type")
}
return nil
}
func getDOMValue(s *goquery.Selection, attr string) string {
if attr == "" {
return strings.TrimSpace(s.First().Text())
}
attrV, _ := s.Attr(attr)
return attrV
}
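
A minimal sketch of how the tag-driven unmarshalling above is typically used, via the `UnmarshalHTML` entry point that these helpers call recursively. The struct, the HTML snippet and the `github.com/gocolly/colly` import path are illustrative assumptions; only the `selector`/`attr` tags, the `"-"` skip marker and the three-argument `UnmarshalHTML(v, selection, nil)` call come from the code itself.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/PuerkitoBio/goquery"
	"github.com/gocolly/colly" // assumed import path for the package above
)

// Field tags drive unmarshalAttr: "selector" picks elements, "attr" reads an
// attribute instead of the element text, and "-" skips the field entirely.
type product struct {
	Name  string   `selector:"h1.title"`
	Image string   `selector:"img.cover" attr:"src"`
	Tags  []string `selector:"ul.tags li"`
	Notes string   `selector:"-"`
}

func main() {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(`
		<div>
		  <h1 class="title">Widget</h1>
		  <img class="cover" src="/widget.png">
		  <ul class="tags"><li>new</li><li>sale</li></ul>
		</div>`))
	if err != nil {
		panic(err)
	}

	var p product
	if err := colly.UnmarshalHTML(&p, doc.Selection, nil); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:Widget Image:/widget.png Tags:[new sale] Notes:}
}
```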

View File

@ -1,170 +0,0 @@
// Copyright 2018 Adam Tauber
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package colly
import (
"encoding/xml"
"strings"
"github.com/antchfx/htmlquery"
"github.com/antchfx/xmlquery"
"golang.org/x/net/html"
)
// XMLElement is the representation of an XML tag.
type XMLElement struct {
// Name is the name of the tag
Name string
Text string
attributes interface{}
// Request is the request object of the element's HTML document
Request *Request
// Response is the Response object of the element's HTML document
Response *Response
// DOM is the DOM object of the page. DOM is relative
// to the current XMLElement and is either an *html.Node or an *xmlquery.Node,
// depending on how the XMLElement was created.
DOM interface{}
isHTML bool
}
// NewXMLElementFromHTMLNode creates an XMLElement from an html.Node.
func NewXMLElementFromHTMLNode(resp *Response, s *html.Node) *XMLElement {
return &XMLElement{
Name: s.Data,
Request: resp.Request,
Response: resp,
Text: htmlquery.InnerText(s),
DOM: s,
attributes: s.Attr,
isHTML: true,
}
}
// NewXMLElementFromXMLNode creates an XMLElement from an xmlquery.Node.
func NewXMLElementFromXMLNode(resp *Response, s *xmlquery.Node) *XMLElement {
return &XMLElement{
Name: s.Data,
Request: resp.Request,
Response: resp,
Text: s.InnerText(),
DOM: s,
attributes: s.Attr,
isHTML: false,
}
}
// Attr returns the selected attribute of the XMLElement, or an empty string
// if the attribute is not found.
func (h *XMLElement) Attr(k string) string {
if h.isHTML {
for _, a := range h.attributes.([]html.Attribute) {
if a.Key == k {
return a.Val
}
}
} else {
for _, a := range h.attributes.([]xml.Attr) {
if a.Name.Local == k {
return a.Value
}
}
}
return ""
}
// ChildText returns the stripped text content of the first matching element.
func (h *XMLElement) ChildText(xpathQuery string) string {
if h.isHTML {
child := htmlquery.FindOne(h.DOM.(*html.Node), xpathQuery)
if child == nil {
return ""
}
return strings.TrimSpace(htmlquery.InnerText(child))
}
child := xmlquery.FindOne(h.DOM.(*xmlquery.Node), xpathQuery)
if child == nil {
return ""
}
return strings.TrimSpace(child.InnerText())
}
// ChildAttr returns the stripped value of the given attribute of the first
// matching element.
func (h *XMLElement) ChildAttr(xpathQuery, attrName string) string {
if h.isHTML {
child := htmlquery.FindOne(h.DOM.(*html.Node), xpathQuery)
if child != nil {
for _, attr := range child.Attr {
if attr.Key == attrName {
return strings.TrimSpace(attr.Val)
}
}
}
} else {
child := xmlquery.FindOne(h.DOM.(*xmlquery.Node), xpathQuery)
if child != nil {
for _, attr := range child.Attr {
if attr.Name.Local == attrName {
return strings.TrimSpace(attr.Value)
}
}
}
}
return ""
}
// ChildAttrs returns the stripped values of the given attribute for all
// matching elements.
func (h *XMLElement) ChildAttrs(xpathQuery, attrName string) []string {
var res []string
if h.isHTML {
for _, child := range htmlquery.Find(h.DOM.(*html.Node), xpathQuery) {
for _, attr := range child.Attr {
if attr.Key == attrName {
res = append(res, strings.TrimSpace(attr.Val))
}
}
}
} else {
xmlquery.FindEach(h.DOM.(*xmlquery.Node), xpathQuery, func(i int, child *xmlquery.Node) {
for _, attr := range child.Attr {
if attr.Name.Local == attrName {
res = append(res, strings.TrimSpace(attr.Value))
}
}
})
}
return res
}
// ChildTexts returns an array of strings corresponding to child elements that match the xpath query.
// Each item in the array is the stripped text content of the corresponding matching child element.
func (h *XMLElement) ChildTexts(xpathQuery string) []string {
texts := make([]string, 0)
if h.isHTML {
for _, child := range htmlquery.Find(h.DOM.(*html.Node), xpathQuery) {
texts = append(texts, strings.TrimSpace(htmlquery.InnerText(child)))
}
} else {
xmlquery.FindEach(h.DOM.(*xmlquery.Node), xpathQuery, func(i int, child *xmlquery.Node) {
texts = append(texts, strings.TrimSpace(child.InnerText()))
})
}
return texts
}
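
A short usage sketch for the XMLElement helpers above. The `Collector`/`OnXML` registration, the URL and the XPath expressions are colly's usual entry points and are assumed here (they are not part of this file); only the `ChildText`, `ChildAttr` and `ChildTexts` calls come from the code above.

```go
package main

import (
	"fmt"

	"github.com/gocolly/colly" // assumed import path
)

func main() {
	c := colly.NewCollector()

	// The element's DOM is an *html.Node here because the response is HTML;
	// for XML responses it would be an *xmlquery.Node, as described above.
	c.OnXML("//div[@class='post']", func(e *colly.XMLElement) {
		title := e.ChildText("./h2")       // stripped text of the first match
		link := e.ChildAttr("./a", "href") // attribute of the first match
		tags := e.ChildTexts("./ul/li")    // stripped text of every match
		fmt.Println(title, link, tags)
	})

	_ = c.Visit("https://example.com/") // illustrative URL
}
```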

View File

@ -1,16 +0,0 @@
language: go
sudo: false
matrix:
include:
- go: 1.8
- go: 1.9
- go: "1.10"
- go: 1.x
- go: tip
allow_failures:
- go: tip
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go vet .
- go test -v -race ./...

View File

@ -1,29 +0,0 @@
# This is the official list of gorilla/feeds authors for copyright purposes.
# Please keep the list sorted.
Dmitry Chestnykh <dmitry@codingrobots.com>
Eddie Scholtz <eascholtz@gmail.com>
Gabriel Simmer <bladesimmer@gmail.com>
Google LLC (https://opensource.google.com/)
honky <honky@defendtheplanet.net>
James Gregory <james@jagregory.com>
Jason Hall <imjasonh@gmail.com>
Jason Moiron <jmoiron@jmoiron.net>
Kamil Kisiel <kamil@kamilkisiel.net>
Kevin Stock <kevinstock@tantalic.com>
Markus Zimmermann <markus.zimmermann@nethead.at>
Matt Silverlock <matt@eatsleeprepeat.net>
Matthew Dawson <matthew@mjdsystems.ca>
Milan Aleksic <milanaleksic@gmail.com>
Milan Aleksić <milanaleksic@gmail.com>
nlimpid <jshuangzl@gmail.com>
Paul Petring <paul@defendtheplanet.net>
Sean Enck <enckse@users.noreply.github.com>
Sue Spence <virtuallysue@gmail.com>
Supermighty <ukiah@faction.com>
Toru Fukui <fukuimone@gmail.com>
Vabd <vabd@anon.acme>
Volker <lists.volker@gmail.com>
ZhiFeng Hu <hufeng1987@gmail.com>
weberc2 <weberc2@gmail.com>

View File

@ -1,16 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "feeds",
srcs = [
"atom.go",
"doc.go",
"feed.go",
"json.go",
"rss.go",
"uuid.go",
],
importmap = "peridot.resf.org/vendor/github.com/gorilla/feeds",
importpath = "github.com/gorilla/feeds",
visibility = ["//visibility:public"],
)

View File

@ -1,22 +0,0 @@
Copyright (c) 2013-2018 The Gorilla Feeds Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,185 +0,0 @@
## gorilla/feeds
[![GoDoc](https://godoc.org/github.com/gorilla/feeds?status.svg)](https://godoc.org/github.com/gorilla/feeds)
[![Build Status](https://travis-ci.org/gorilla/feeds.svg?branch=master)](https://travis-ci.org/gorilla/feeds)
feeds is a web feed generator library for creating RSS, Atom and JSON Feed output from Go applications.
### Goals
* Provide a simple interface to create both Atom & RSS 2.0 feeds
* Full support for [Atom][atom], [RSS 2.0][rss], and [JSON Feed Version 1][jsonfeed] spec elements
* Ability to modify particulars for each spec
[atom]: https://tools.ietf.org/html/rfc4287
[rss]: http://www.rssboard.org/rss-specification
[jsonfeed]: https://jsonfeed.org/version/1
### Usage
```go
package main
import (
"fmt"
"log"
"time"
"github.com/gorilla/feeds"
)
func main() {
now := time.Now()
feed := &feeds.Feed{
Title: "jmoiron.net blog",
Link: &feeds.Link{Href: "http://jmoiron.net/blog"},
Description: "discussion about tech, footie, photos",
Author: &feeds.Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
}
feed.Items = []*feeds.Item{
&feeds.Item{
Title: "Limiting Concurrency in Go",
Link: &feeds.Link{Href: "http://jmoiron.net/blog/limiting-concurrency-in-go/"},
Description: "A discussion on controlled parallelism in golang",
Author: &feeds.Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
},
&feeds.Item{
Title: "Logic-less Template Redux",
Link: &feeds.Link{Href: "http://jmoiron.net/blog/logicless-template-redux/"},
Description: "More thoughts on logicless templates",
Created: now,
},
&feeds.Item{
Title: "Idiomatic Code Reuse in Go",
Link: &feeds.Link{Href: "http://jmoiron.net/blog/idiomatic-code-reuse-in-go/"},
Description: "How to use interfaces <em>effectively</em>",
Created: now,
},
}
atom, err := feed.ToAtom()
if err != nil {
log.Fatal(err)
}
rss, err := feed.ToRss()
if err != nil {
log.Fatal(err)
}
json, err := feed.ToJSON()
if err != nil {
log.Fatal(err)
}
fmt.Println(atom, "\n", rss, "\n", json)
}
```
Outputs:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>jmoiron.net blog</title>
<link href="http://jmoiron.net/blog"></link>
<id>http://jmoiron.net/blog</id>
<updated>2013-01-16T03:26:01-05:00</updated>
<summary>discussion about tech, footie, photos</summary>
<entry>
<title>Limiting Concurrency in Go</title>
<link href="http://jmoiron.net/blog/limiting-concurrency-in-go/"></link>
<updated>2013-01-16T03:26:01-05:00</updated>
<id>tag:jmoiron.net,2013-01-16:/blog/limiting-concurrency-in-go/</id>
<summary type="html">A discussion on controlled parallelism in golang</summary>
<author>
<name>Jason Moiron</name>
<email>jmoiron@jmoiron.net</email>
</author>
</entry>
<entry>
<title>Logic-less Template Redux</title>
<link href="http://jmoiron.net/blog/logicless-template-redux/"></link>
<updated>2013-01-16T03:26:01-05:00</updated>
<id>tag:jmoiron.net,2013-01-16:/blog/logicless-template-redux/</id>
<summary type="html">More thoughts on logicless templates</summary>
<author></author>
</entry>
<entry>
<title>Idiomatic Code Reuse in Go</title>
<link href="http://jmoiron.net/blog/idiomatic-code-reuse-in-go/"></link>
<updated>2013-01-16T03:26:01-05:00</updated>
<id>tag:jmoiron.net,2013-01-16:/blog/idiomatic-code-reuse-in-go/</id>
<summary type="html">How to use interfaces &lt;em&gt;effectively&lt;/em&gt;</summary>
<author></author>
</entry>
</feed>
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0">
<channel>
<title>jmoiron.net blog</title>
<link>http://jmoiron.net/blog</link>
<description>discussion about tech, footie, photos</description>
<managingEditor>jmoiron@jmoiron.net (Jason Moiron)</managingEditor>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
<item>
<title>Limiting Concurrency in Go</title>
<link>http://jmoiron.net/blog/limiting-concurrency-in-go/</link>
<description>A discussion on controlled parallelism in golang</description>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
</item>
<item>
<title>Logic-less Template Redux</title>
<link>http://jmoiron.net/blog/logicless-template-redux/</link>
<description>More thoughts on logicless templates</description>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
</item>
<item>
<title>Idiomatic Code Reuse in Go</title>
<link>http://jmoiron.net/blog/idiomatic-code-reuse-in-go/</link>
<description>How to use interfaces &lt;em&gt;effectively&lt;/em&gt;</description>
<pubDate>2013-01-16T03:22:24-05:00</pubDate>
</item>
</channel>
</rss>
{
"version": "https://jsonfeed.org/version/1",
"title": "jmoiron.net blog",
"home_page_url": "http://jmoiron.net/blog",
"description": "discussion about tech, footie, photos",
"author": {
"name": "Jason Moiron"
},
"items": [
{
"id": "",
"url": "http://jmoiron.net/blog/limiting-concurrency-in-go/",
"title": "Limiting Concurrency in Go",
"summary": "A discussion on controlled parallelism in golang",
"date_published": "2013-01-16T03:22:24.530817846-05:00",
"author": {
"name": "Jason Moiron"
}
},
{
"id": "",
"url": "http://jmoiron.net/blog/logicless-template-redux/",
"title": "Logic-less Template Redux",
"summary": "More thoughts on logicless templates",
"date_published": "2013-01-16T03:22:24.530817846-05:00"
},
{
"id": "",
"url": "http://jmoiron.net/blog/idiomatic-code-reuse-in-go/",
"title": "Idiomatic Code Reuse in Go",
"summary": "How to use interfaces \u003cem\u003eeffectively\u003c/em\u003e",
"date_published": "2013-01-16T03:22:24.530817846-05:00"
}
]
}
```

View File

@ -1,169 +0,0 @@
package feeds
import (
"encoding/xml"
"fmt"
"net/url"
"time"
)
// Generates Atom feed as XML
const ns = "http://www.w3.org/2005/Atom"
type AtomPerson struct {
Name string `xml:"name,omitempty"`
Uri string `xml:"uri,omitempty"`
Email string `xml:"email,omitempty"`
}
type AtomSummary struct {
XMLName xml.Name `xml:"summary"`
Content string `xml:",chardata"`
Type string `xml:"type,attr"`
}
type AtomContent struct {
XMLName xml.Name `xml:"content"`
Content string `xml:",chardata"`
Type string `xml:"type,attr"`
}
type AtomAuthor struct {
XMLName xml.Name `xml:"author"`
AtomPerson
}
type AtomContributor struct {
XMLName xml.Name `xml:"contributor"`
AtomPerson
}
type AtomEntry struct {
XMLName xml.Name `xml:"entry"`
Xmlns string `xml:"xmlns,attr,omitempty"`
Title string `xml:"title"` // required
Updated string `xml:"updated"` // required
Id string `xml:"id"` // required
Category string `xml:"category,omitempty"`
Content *AtomContent
Rights string `xml:"rights,omitempty"`
Source string `xml:"source,omitempty"`
Published string `xml:"published,omitempty"`
Contributor *AtomContributor
Links []AtomLink // required if no child 'content' elements
Summary *AtomSummary // required if content has src or content is base64
Author *AtomAuthor // required if feed lacks an author
}
// Multiple links with different rel can coexist
type AtomLink struct {
//Atom 1.0 <link rel="enclosure" type="audio/mpeg" title="MP3" href="http://www.example.org/myaudiofile.mp3" length="1234" />
XMLName xml.Name `xml:"link"`
Href string `xml:"href,attr"`
Rel string `xml:"rel,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
Length string `xml:"length,attr,omitempty"`
}
type AtomFeed struct {
XMLName xml.Name `xml:"feed"`
Xmlns string `xml:"xmlns,attr"`
Title string `xml:"title"` // required
Id string `xml:"id"` // required
Updated string `xml:"updated"` // required
Category string `xml:"category,omitempty"`
Icon string `xml:"icon,omitempty"`
Logo string `xml:"logo,omitempty"`
Rights string `xml:"rights,omitempty"` // copyright used
Subtitle string `xml:"subtitle,omitempty"`
Link *AtomLink
Author *AtomAuthor `xml:"author,omitempty"`
Contributor *AtomContributor
Entries []*AtomEntry `xml:"entry"`
}
type Atom struct {
*Feed
}
func newAtomEntry(i *Item) *AtomEntry {
id := i.Id
// assume the description is html
s := &AtomSummary{Content: i.Description, Type: "html"}
if len(id) == 0 {
// if there's no id set, try to create one, either from data or just a uuid
if len(i.Link.Href) > 0 && (!i.Created.IsZero() || !i.Updated.IsZero()) {
dateStr := anyTimeFormat("2006-01-02", i.Updated, i.Created)
host, path := i.Link.Href, "/invalid.html"
if url, err := url.Parse(i.Link.Href); err == nil {
host, path = url.Host, url.Path
}
id = fmt.Sprintf("tag:%s,%s:%s", host, dateStr, path)
} else {
id = "urn:uuid:" + NewUUID().String()
}
}
var name, email string
if i.Author != nil {
name, email = i.Author.Name, i.Author.Email
}
link_rel := i.Link.Rel
if link_rel == "" {
link_rel = "alternate"
}
x := &AtomEntry{
Title: i.Title,
Links: []AtomLink{{Href: i.Link.Href, Rel: link_rel, Type: i.Link.Type}},
Id: id,
Updated: anyTimeFormat(time.RFC3339, i.Updated, i.Created),
Summary: s,
}
// if there's a content, assume it's html
if len(i.Content) > 0 {
x.Content = &AtomContent{Content: i.Content, Type: "html"}
}
if i.Enclosure != nil && link_rel != "enclosure" {
x.Links = append(x.Links, AtomLink{Href: i.Enclosure.Url, Rel: "enclosure", Type: i.Enclosure.Type, Length: i.Enclosure.Length})
}
if len(name) > 0 || len(email) > 0 {
x.Author = &AtomAuthor{AtomPerson: AtomPerson{Name: name, Email: email}}
}
return x
}
// create a new AtomFeed with a generic Feed struct's data
func (a *Atom) AtomFeed() *AtomFeed {
updated := anyTimeFormat(time.RFC3339, a.Updated, a.Created)
feed := &AtomFeed{
Xmlns: ns,
Title: a.Title,
Link: &AtomLink{Href: a.Link.Href, Rel: a.Link.Rel},
Subtitle: a.Description,
Id: a.Link.Href,
Updated: updated,
Rights: a.Copyright,
}
if a.Author != nil {
feed.Author = &AtomAuthor{AtomPerson: AtomPerson{Name: a.Author.Name, Email: a.Author.Email}}
}
for _, e := range a.Items {
feed.Entries = append(feed.Entries, newAtomEntry(e))
}
return feed
}
// FeedXml returns an XML-ready object for an Atom object
func (a *Atom) FeedXml() interface{} {
return a.AtomFeed()
}
// FeedXml returns an XML-ready object for an AtomFeed object
func (a *AtomFeed) FeedXml() interface{} {
return a
}

View File

@ -1,73 +0,0 @@
/*
Syndication (feed) generator library for golang.
Installing
go get github.com/gorilla/feeds
Feeds provides a simple, generic Feed interface with a generic Item object, as well as RSS-, Atom- and JSON Feed-specific RssFeed, AtomFeed and JSONFeed objects, which allow access to all of each spec's defined elements.
Examples
Create a Feed and some Items in that feed using the generic interfaces:
import (
"time"
. "github.com/gorilla/feeds"
)
now = time.Now()
feed := &Feed{
Title: "jmoiron.net blog",
Link: &Link{Href: "http://jmoiron.net/blog"},
Description: "discussion about tech, footie, photos",
Author: &Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
Copyright: "This work is copyright © Benjamin Button",
}
feed.Items = []*Item{
&Item{
Title: "Limiting Concurrency in Go",
Link: &Link{Href: "http://jmoiron.net/blog/limiting-concurrency-in-go/"},
Description: "A discussion on controlled parallelism in golang",
Author: &Author{Name: "Jason Moiron", Email: "jmoiron@jmoiron.net"},
Created: now,
},
&Item{
Title: "Logic-less Template Redux",
Link: &Link{Href: "http://jmoiron.net/blog/logicless-template-redux/"},
Description: "More thoughts on logicless templates",
Created: now,
},
&Item{
Title: "Idiomatic Code Reuse in Go",
Link: &Link{Href: "http://jmoiron.net/blog/idiomatic-code-reuse-in-go/"},
Description: "How to use interfaces <em>effectively</em>",
Created: now,
},
}
From here, you can output Atom, RSS, or JSON Feed versions of this feed easily
atom, err := feed.ToAtom()
rss, err := feed.ToRss()
json, err := feed.ToJSON()
You can also get access to the underlying objects that feeds uses to export its XML
atomFeed := (&Atom{Feed: feed}).AtomFeed()
rssFeed := (&Rss{Feed: feed}).RssFeed()
jsonFeed := (&JSON{Feed: feed}).JSONFeed()
From here, you can modify or add each syndication's specific fields before outputting
atomFeed.Subtitle = "plays the blues"
atom, err := ToXML(atomFeed)
rssFeed.Generator = "gorilla/feeds v1.0 (github.com/gorilla/feeds)"
rss, err := ToXML(rssFeed)
jsonFeed.NextUrl = "https://www.example.com/feed.json?page=2"
json, err := jsonFeed.ToJSON()
*/
package feeds

View File

@ -1,145 +0,0 @@
package feeds
import (
"encoding/json"
"encoding/xml"
"io"
"sort"
"time"
)
type Link struct {
Href, Rel, Type, Length string
}
type Author struct {
Name, Email string
}
type Image struct {
Url, Title, Link string
Width, Height int
}
type Enclosure struct {
Url, Length, Type string
}
type Item struct {
Title string
Link *Link
Source *Link
Author *Author
Description string // used as description in rss, summary in atom
Id string // used as guid in rss, id in atom
Updated time.Time
Created time.Time
Enclosure *Enclosure
Content string
}
type Feed struct {
Title string
Link *Link
Description string
Author *Author
Updated time.Time
Created time.Time
Id string
Subtitle string
Items []*Item
Copyright string
Image *Image
}
// add a new Item to a Feed
func (f *Feed) Add(item *Item) {
f.Items = append(f.Items, item)
}
// returns the first non-zero time formatted as a string or ""
func anyTimeFormat(format string, times ...time.Time) string {
for _, t := range times {
if !t.IsZero() {
return t.Format(format)
}
}
return ""
}
// interface used by ToXML to get an object suitable for exporting XML.
type XmlFeed interface {
FeedXml() interface{}
}
// turn a feed object (either a Feed, AtomFeed, or RssFeed) into xml
// returns an error if xml marshaling fails
func ToXML(feed XmlFeed) (string, error) {
x := feed.FeedXml()
data, err := xml.MarshalIndent(x, "", " ")
if err != nil {
return "", err
}
// strip empty line from default xml header
s := xml.Header[:len(xml.Header)-1] + string(data)
return s, nil
}
// WriteXML writes a feed object (either a Feed, AtomFeed, or RssFeed) as XML into
// the writer. Returns an error if XML marshaling fails.
func WriteXML(feed XmlFeed, w io.Writer) error {
x := feed.FeedXml()
// write default xml header, without the newline
if _, err := w.Write([]byte(xml.Header[:len(xml.Header)-1])); err != nil {
return err
}
e := xml.NewEncoder(w)
e.Indent("", " ")
return e.Encode(x)
}
// creates an Atom representation of this feed
func (f *Feed) ToAtom() (string, error) {
a := &Atom{f}
return ToXML(a)
}
// WriteAtom writes an Atom representation of this feed to the writer.
func (f *Feed) WriteAtom(w io.Writer) error {
return WriteXML(&Atom{f}, w)
}
// creates an Rss representation of this feed
func (f *Feed) ToRss() (string, error) {
r := &Rss{f}
return ToXML(r)
}
// WriteRss writes an RSS representation of this feed to the writer.
func (f *Feed) WriteRss(w io.Writer) error {
return WriteXML(&Rss{f}, w)
}
// ToJSON creates a JSON Feed representation of this feed
func (f *Feed) ToJSON() (string, error) {
j := &JSON{f}
return j.ToJSON()
}
// WriteJSON writes a JSON representation of this feed to the writer.
func (f *Feed) WriteJSON(w io.Writer) error {
j := &JSON{f}
feed := j.JSONFeed()
e := json.NewEncoder(w)
e.SetIndent("", " ")
return e.Encode(feed)
}
// Sort sorts the Items in the feed with the given less function.
func (f *Feed) Sort(less func(a, b *Item) bool) {
lessFunc := func(i, j int) bool {
return less(f.Items[i], f.Items[j])
}
sort.SliceStable(f.Items, lessFunc)
}
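
A small sketch tying the helpers above together: building a Feed, sorting its Items with Sort, and streaming RSS with WriteRss. The titles, URLs and timestamps are illustrative.

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/gorilla/feeds"
)

func main() {
	now := time.Now()
	feed := &feeds.Feed{
		Title:   "example feed",
		Link:    &feeds.Link{Href: "https://example.com/"},
		Created: now,
	}

	feed.Add(&feeds.Item{
		Title:   "older post",
		Link:    &feeds.Link{Href: "https://example.com/older"},
		Created: now.Add(-2 * time.Hour),
	})
	feed.Add(&feeds.Item{
		Title:   "newer post",
		Link:    &feeds.Link{Href: "https://example.com/newer"},
		Created: now,
	})

	// Newest first.
	feed.Sort(func(a, b *feeds.Item) bool { return a.Created.After(b.Created) })

	if err := feed.WriteRss(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```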

View File

@ -1,183 +0,0 @@
package feeds
import (
"encoding/json"
"strings"
"time"
)
const jsonFeedVersion = "https://jsonfeed.org/version/1"
// JSONAuthor represents the author of the feed or of an individual item
// in the feed
type JSONAuthor struct {
Name string `json:"name,omitempty"`
Url string `json:"url,omitempty"`
Avatar string `json:"avatar,omitempty"`
}
// JSONAttachment represents a related resource. Podcasts, for instance, would
// include an attachment that's an audio or video file.
type JSONAttachment struct {
Url string `json:"url,omitempty"`
MIMEType string `json:"mime_type,omitempty"`
Title string `json:"title,omitempty"`
Size int32 `json:"size,omitempty"`
Duration time.Duration `json:"duration_in_seconds,omitempty"`
}
// MarshalJSON implements the json.Marshaler interface.
// The Duration field is marshaled in seconds, all other fields are marshaled
// based upon the definitions in struct tags.
func (a *JSONAttachment) MarshalJSON() ([]byte, error) {
type EmbeddedJSONAttachment JSONAttachment
return json.Marshal(&struct {
Duration float64 `json:"duration_in_seconds,omitempty"`
*EmbeddedJSONAttachment
}{
EmbeddedJSONAttachment: (*EmbeddedJSONAttachment)(a),
Duration: a.Duration.Seconds(),
})
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// The Duration field is expected to be in seconds, all other field types
// match the struct definition.
func (a *JSONAttachment) UnmarshalJSON(data []byte) error {
type EmbeddedJSONAttachment JSONAttachment
var raw struct {
Duration float64 `json:"duration_in_seconds,omitempty"`
*EmbeddedJSONAttachment
}
raw.EmbeddedJSONAttachment = (*EmbeddedJSONAttachment)(a)
err := json.Unmarshal(data, &raw)
if err != nil {
return err
}
if raw.Duration > 0 {
nsec := int64(raw.Duration * float64(time.Second))
raw.EmbeddedJSONAttachment.Duration = time.Duration(nsec)
}
return nil
}
// JSONItem represents a single entry/post for the feed.
type JSONItem struct {
Id string `json:"id"`
Url string `json:"url,omitempty"`
ExternalUrl string `json:"external_url,omitempty"`
Title string `json:"title,omitempty"`
ContentHTML string `json:"content_html,omitempty"`
ContentText string `json:"content_text,omitempty"`
Summary string `json:"summary,omitempty"`
Image string `json:"image,omitempty"`
BannerImage string `json:"banner_image,omitempty"`
PublishedDate *time.Time `json:"date_published,omitempty"`
ModifiedDate *time.Time `json:"date_modified,omitempty"`
Author *JSONAuthor `json:"author,omitempty"`
Tags []string `json:"tags,omitempty"`
Attachments []JSONAttachment `json:"attachments,omitempty"`
}
// JSONHub describes an endpoint that can be used to subscribe to real-time
// notifications from the publisher of this feed.
type JSONHub struct {
Type string `json:"type"`
Url string `json:"url"`
}
// JSONFeed represents a syndication feed in the JSON Feed Version 1 format.
// Matching the specification found here: https://jsonfeed.org/version/1.
type JSONFeed struct {
Version string `json:"version"`
Title string `json:"title"`
HomePageUrl string `json:"home_page_url,omitempty"`
FeedUrl string `json:"feed_url,omitempty"`
Description string `json:"description,omitempty"`
UserComment string `json:"user_comment,omitempty"`
NextUrl string `json:"next_url,omitempty"`
Icon string `json:"icon,omitempty"`
Favicon string `json:"favicon,omitempty"`
Author *JSONAuthor `json:"author,omitempty"`
Expired *bool `json:"expired,omitempty"`
Hubs []*JSONHub `json:"hubs,omitempty"`
Items []*JSONItem `json:"items,omitempty"`
}
// JSON is used to convert a generic Feed to a JSONFeed.
type JSON struct {
*Feed
}
// ToJSON encodes f into a JSON string. Returns an error if marshalling fails.
func (f *JSON) ToJSON() (string, error) {
return f.JSONFeed().ToJSON()
}
// ToJSON encodes f into a JSON string. Returns an error if marshalling fails.
func (f *JSONFeed) ToJSON() (string, error) {
data, err := json.MarshalIndent(f, "", " ")
if err != nil {
return "", err
}
return string(data), nil
}
// JSONFeed creates a new JSONFeed with a generic Feed struct's data.
func (f *JSON) JSONFeed() *JSONFeed {
feed := &JSONFeed{
Version: jsonFeedVersion,
Title: f.Title,
Description: f.Description,
}
if f.Link != nil {
feed.HomePageUrl = f.Link.Href
}
if f.Author != nil {
feed.Author = &JSONAuthor{
Name: f.Author.Name,
}
}
for _, e := range f.Items {
feed.Items = append(feed.Items, newJSONItem(e))
}
return feed
}
func newJSONItem(i *Item) *JSONItem {
item := &JSONItem{
Id: i.Id,
Title: i.Title,
Summary: i.Description,
ContentHTML: i.Content,
}
if i.Link != nil {
item.Url = i.Link.Href
}
if i.Source != nil {
item.ExternalUrl = i.Source.Href
}
if i.Author != nil {
item.Author = &JSONAuthor{
Name: i.Author.Name,
}
}
if !i.Created.IsZero() {
item.PublishedDate = &i.Created
}
if !i.Updated.IsZero() {
item.ModifiedDate = &i.Updated
}
if i.Enclosure != nil && strings.HasPrefix(i.Enclosure.Type, "image/") {
item.Image = i.Enclosure.Url
}
return item
}
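
A sketch of the JSONAttachment round-trip defined above: Duration is marshalled as a plain number of seconds and converted back to a time.Duration on unmarshalling. The URL and values are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/gorilla/feeds"
)

func main() {
	a := feeds.JSONAttachment{
		Url:      "https://example.com/episode.mp3",
		MIMEType: "audio/mpeg",
		Duration: 90 * time.Second,
	}

	// MarshalJSON is defined on the pointer receiver, so marshal &a.
	b, err := json.Marshal(&a)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // duration_in_seconds is emitted as 90

	var back feeds.JSONAttachment
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Duration) // 1m30s
}
```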

Some files were not shown because too many files have changed in this diff.