mirror of https://github.com/miniflux/v2.git synced 2025-08-11 17:51:01 +00:00

refactor(filter): parse and merge filters only once per refresh

Frédéric Guillot 2025-07-30 21:00:57 -07:00
parent bfd8cb3d22
commit 0c3e251884
3 changed files with 1539 additions and 279 deletions
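For context, here is a minimal, self-contained sketch of the pattern this refactor moves to: the block and keep rules are parsed and merged once per feed refresh, then reused for every entry, instead of being re-derived inside the per-entry loop. The Rule type, parseRules, and isBlocked below are hypothetical stand-ins for illustration only; the real filter package signatures appear in the diff that follows.

package main

import (
	"fmt"
	"strings"
)

// Rule is a hypothetical stand-in for a parsed filter rule.
type Rule struct {
	Field string
	Match string
}

// parseRules merges user-level and feed-level rule strings into one slice.
// Each line is expected to look like "Field=Match" (illustrative format only).
func parseRules(userRules, feedRules string) []Rule {
	var rules []Rule
	for _, line := range strings.Split(userRules+"\n"+feedRules, "\n") {
		field, match, ok := strings.Cut(strings.TrimSpace(line), "=")
		if !ok {
			continue
		}
		rules = append(rules, Rule{Field: field, Match: match})
	}
	return rules
}

// isBlocked reports whether a title matches any block rule.
func isBlocked(blockRules []Rule, title string) bool {
	for _, rule := range blockRules {
		if rule.Field == "EntryTitle" && strings.Contains(title, rule.Match) {
			return true
		}
	}
	return false
}

func main() {
	// Parse and merge the rules once per refresh...
	blockRules := parseRules("EntryTitle=[Sponsored]", "EntryTitle=Advertisement")

	// ...then reuse the already-parsed rules for every entry in the feed.
	for _, title := range []string{"Release notes", "[Sponsored] Gadget review"} {
		fmt.Printf("%q blocked=%v\n", title, isBlocked(blockRules, title))
	}
}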


@@ -37,7 +37,20 @@ func ProcessFeedEntries(store *storage.Storage, feed *model.Feed, userID int64,
 	parsedFeedURL, _ := url.Parse(feed.FeedURL)
 	parsedSiteURL, _ := url.Parse(feed.SiteURL)
-	// Process older entries first
+	blockRules := filter.ParseRules(user.BlockFilterEntryRules, feed.BlockFilterEntryRules)
+	allowRules := filter.ParseRules(user.KeepFilterEntryRules, feed.KeepFilterEntryRules)
+	slog.Debug("Filter rules",
+		slog.String("user_block_filter_rules", user.BlockFilterEntryRules),
+		slog.String("feed_block_filter_rules", feed.BlockFilterEntryRules),
+		slog.String("user_keep_filter_rules", user.KeepFilterEntryRules),
+		slog.String("feed_keep_filter_rules", feed.KeepFilterEntryRules),
+		slog.Any("block_rules", blockRules),
+		slog.Any("allow_rules", allowRules),
+		slog.Int64("user_id", user.ID),
+		slog.Int64("feed_id", feed.ID),
+	)
+	// Processing older entries first ensures that their creation timestamp is lower than newer entries.
 	for _, entry := range slices.Backward(feed.Entries) {
 		slog.Debug("Processing entry",
 			slog.Int64("user_id", user.ID),
@@ -48,7 +61,15 @@ func ProcessFeedEntries(store *storage.Storage, feed *model.Feed, userID int64,
 			slog.String("feed_url", feed.FeedURL),
 		)
-		if filter.IsBlockedEntry(feed, entry, user) || !filter.IsAllowedEntry(feed, entry, user) {
+		if filter.IsBlockedEntry(blockRules, allowRules, feed, entry) {
+			slog.Debug("Entry is blocked by filter rules",
+				slog.Int64("user_id", user.ID),
+				slog.String("entry_url", entry.URL),
+				slog.String("entry_hash", entry.Hash),
+				slog.String("entry_title", entry.Title),
+				slog.Int64("feed_id", feed.ID),
+				slog.String("feed_url", feed.FeedURL),
+			)
 			continue
 		}
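Read together with the first hunk, the old per-entry check `filter.IsBlockedEntry(feed, entry, user) || !filter.IsAllowedEntry(feed, entry, user)` collapses into a single call that receives the rules parsed once per refresh. Continuing the hypothetical sketch above (same Rule type, still not the real filter package), a combined predicate with the same shape as the removed two-call form might look like this, assuming an empty keep-rule list allows everything:

// Blocked if any block rule matches, or if keep rules exist and none match.
// Rule and matchesAny are hypothetical stand-ins for illustration only.
func isBlockedEntry(blockRules, allowRules []Rule, title string) bool {
	if matchesAny(blockRules, title) {
		return true
	}
	return len(allowRules) > 0 && !matchesAny(allowRules, title)
}

func matchesAny(rules []Rule, title string) bool {
	for _, rule := range rules {
		if strings.Contains(title, rule.Match) {
			return true
		}
	}
	return false
}

Folding the allow check into the block check keeps a single decision point inside the loop, which is what makes it possible to pass in both rule sets parsed only once per refresh.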