Mirror of https://github.com/miniflux/v2.git
Make sure the scraper parses only HTML documents
parent aae3d75490
commit 3c3f397bf5
1 changed file with 8 additions and 3 deletions
@@ -6,6 +6,7 @@ package scraper
 
 import (
 	"errors"
+	"fmt"
 	"io"
 	"strings"
 
@@ -16,7 +17,7 @@ import (
 	"github.com/miniflux/miniflux/url"
 )
 
-// Fetch download a web page a returns relevant contents.
+// Fetch downloads a web page a returns relevant contents.
 func Fetch(websiteURL, rules string) (string, error) {
 	client := http.NewClient(websiteURL)
 	response, err := client.Get()
@@ -25,7 +26,11 @@ func Fetch(websiteURL, rules string) (string, error) {
 	}
 
 	if response.HasServerFailure() {
-		return "", errors.New("unable to download web page")
+		return "", errors.New("scraper: unable to download web page")
 	}
 
+	if !strings.Contains(response.ContentType, "text/html") {
+		return "", fmt.Errorf("scraper: this resource is not a HTML document (%s)", response.ContentType)
+	}
+
 	page, err := response.NormalizeBodyEncoding()
@@ -33,7 +38,7 @@ func Fetch(websiteURL, rules string) (string, error) {
 		return "", err
 	}
 
-	// The entry URL could be a redirect somewhere else.
+	// The entry URL could redirect somewhere else.
 	websiteURL = response.EffectiveURL
 
 	if rules == "" {
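For context, here is a minimal, self-contained sketch of the guard this commit introduces. It is illustrative only: isHTMLDocument and the surrounding main function are hypothetical names, not part of miniflux; the actual change performs the strings.Contains check inline inside Fetch, as shown in the diff above.

package main

import (
	"fmt"
	"strings"
)

// isHTMLDocument reports whether a Content-Type header value describes an
// HTML document. Substring matching keeps parameterized values such as
// "text/html; charset=utf-8" working, mirroring the strings.Contains check
// added by this commit. (Hypothetical helper, not part of miniflux.)
func isHTMLDocument(contentType string) bool {
	return strings.Contains(contentType, "text/html")
}

func main() {
	for _, ct := range []string{"text/html; charset=utf-8", "application/pdf"} {
		if !isHTMLDocument(ct) {
			fmt.Printf("skipping: this resource is not a HTML document (%s)\n", ct)
			continue
		}
		fmt.Printf("scraping resource with content type %q\n", ct)
	}
}

Checking for the substring rather than an exact match tolerates Content-Type headers that carry extra parameters such as a charset.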