1
0
Fork 0
mirror of https://github.com/miniflux/v2.git synced 2025-08-01 17:38:37 +00:00

Make sure the scraper parses only HTML documents

This commit is contained in:
Frédéric Guillot 2018-01-02 18:32:01 -08:00
parent aae3d75490
commit 3c3f397bf5

View file

@ -6,6 +6,7 @@ package scraper
import (
"errors"
"fmt"
"io"
"strings"
@ -16,7 +17,7 @@ import (
"github.com/miniflux/miniflux/url"
)
// Fetch download a web page a returns relevant contents.
// Fetch downloads a web page a returns relevant contents.
func Fetch(websiteURL, rules string) (string, error) {
client := http.NewClient(websiteURL)
response, err := client.Get()
@ -25,7 +26,11 @@ func Fetch(websiteURL, rules string) (string, error) {
}
if response.HasServerFailure() {
return "", errors.New("unable to download web page")
return "", errors.New("scraper: unable to download web page")
}
if !strings.Contains(response.ContentType, "text/html") {
return "", fmt.Errorf("scraper: this resource is not a HTML document (%s)", response.ContentType)
}
page, err := response.NormalizeBodyEncoding()
@ -33,7 +38,7 @@ func Fetch(websiteURL, rules string) (string, error) {
return "", err
}
// The entry URL could be a redirect somewhere else.
// The entry URL could redirect somewhere else.
websiteURL = response.EffectiveURL
if rules == "" {