
Do not crawl existing entry URLs

Frédéric Guillot committed 2018-01-19 18:43:27 -08:00
parent 09785df07f
commit 3b62f904d6
3 changed files with 23 additions and 8 deletions
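
The diff below gates crawling on a new storage call, f.store.EntryURLExists(f.userID, entry.URL). The storage-side change is presumably in one of the other changed files and is not shown here. As a rough sketch only, assuming a Postgres-style entries table with user_id and url columns and a db handle on Storage (none of which is taken from this commit), such a lookup could look like:

package storage

import "database/sql"

// Storage wraps the database handle (field name assumed for this sketch).
type Storage struct {
	db *sql.DB
}

// EntryURLExists reports whether an entry with this URL is already stored
// for the given user. Sketch only: table and column names are assumptions.
func (s *Storage) EntryURLExists(userID int64, entryURL string) bool {
	var count int
	query := `SELECT count(*) FROM entries WHERE user_id=$1 AND url=$2`
	s.db.QueryRow(query, userID, entryURL).Scan(&count)
	return count >= 1
}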


@@ -10,10 +10,13 @@ import (
 	"github.com/miniflux/miniflux/reader/rewrite"
 	"github.com/miniflux/miniflux/reader/sanitizer"
 	"github.com/miniflux/miniflux/reader/scraper"
+	"github.com/miniflux/miniflux/storage"
 )
 
 // FeedProcessor handles the processing of feed contents.
 type FeedProcessor struct {
+	userID       int64
+	store        *storage.Storage
 	feed         *model.Feed
 	scraperRules string
 	rewriteRules string
@@ -39,11 +42,15 @@ func (f *FeedProcessor) WithRewriteRules(rules string) {
 func (f *FeedProcessor) Process() {
 	for _, entry := range f.feed.Entries {
 		if f.crawler {
-			content, err := scraper.Fetch(entry.URL, f.scraperRules)
-			if err != nil {
-				logger.Error("[FeedProcessor] %v", err)
+			if f.store.EntryURLExists(f.userID, entry.URL) {
+				logger.Debug(`[FeedProcessor] Do not crawl existing entry URL: "%s"`, entry.URL)
 			} else {
-				entry.Content = content
+				content, err := scraper.Fetch(entry.URL, f.scraperRules)
+				if err != nil {
+					logger.Error("[FeedProcessor] %v", err)
+				} else {
+					entry.Content = content
+				}
 			}
 		}
 
@@ -53,6 +60,6 @@ func (f *FeedProcessor) Process() {
 }
 
 // NewFeedProcessor returns a new FeedProcessor.
-func NewFeedProcessor(feed *model.Feed) *FeedProcessor {
-	return &FeedProcessor{feed: feed, crawler: false}
+func NewFeedProcessor(userID int64, store *storage.Storage, feed *model.Feed) *FeedProcessor {
+	return &FeedProcessor{userID: userID, store: store, feed: feed, crawler: false}
 }
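
With the new constructor signature, callers must supply the user ID and a storage handle so that Process() can skip URLs that already exist for that user; only new URLs pay the cost of an HTTP fetch. A minimal caller-side sketch, assuming it lives in the same package as FeedProcessor; WithCrawler is an assumed setter mirroring WithRewriteRules, since only NewFeedProcessor, WithRewriteRules, and Process appear in this diff:

// Caller-side sketch. WithCrawler is assumed; the rest follows the diff above.
func processFeed(userID int64, store *storage.Storage, remoteFeed *model.Feed) {
	p := NewFeedProcessor(userID, store, remoteFeed) // new signature: user ID and store first
	p.WithCrawler(true)                              // assumed setter for the crawler flag
	p.Process()                                      // entry URLs already stored for this user are not re-crawled
}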