Mirror of https://github.com/miniflux/v2.git (synced 2025-08-26 18:21:01 +00:00)
refactor(reader): use time.Duration instead of minutes count
In general, a duration is the natural representation for a span of time. In several places an int is returned with no documentation of which unit it uses, so convert to time.Duration as early as possible.
Parent: 03021af53c
Commit: ed3bf59356

10 changed files with 144 additions and 104 deletions
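As a rough, self-contained sketch of the pattern this commit applies (the function names below are illustrative, not code from the diff): an accessor that used to return a bare minutes count returns a time.Duration instead, so the unit travels with the value and callers cannot silently mix up minutes and seconds.

package main

import (
    "fmt"
    "time"
)

// Before: the unit lives only in the function name, and every caller has to
// remember to multiply by time.Minute.
func pollingIntervalMinutes() int { return 60 }

// After: the unit is part of the type, so arithmetic, comparison and logging
// are unambiguous.
func pollingInterval() time.Duration { return 60 * time.Minute }

func main() {
    fmt.Println(time.Duration(pollingIntervalMinutes()) * time.Minute) // 1h0m0s
    fmt.Println(pollingInterval())                                     // 1h0m0s
}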
@@ -7,7 +7,9 @@ import (
 	"bytes"
 	"os"
 	"reflect"
+	"slices"
 	"testing"
+	"time"
 )
 
 func TestLogFileDefaultValue(t *testing.T) {

@@ -887,12 +889,22 @@ func TestSchedulerEntryFrequencyMaxInterval(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 30
+	expected := 30 * time.Minute
 	result := opts.SchedulerEntryFrequencyMaxInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected SCHEDULER_ENTRY_FREQUENCY_MAX_INTERVAL value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "SCHEDULER_ENTRY_FREQUENCY_MAX_INTERVAL"
+	})
+
+	expectedSerialized := 30
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
+	}
 }
 
 func TestDefaultSchedulerEntryFrequencyMinIntervalValue(t *testing.T) {

@@ -922,12 +934,22 @@ func TestSchedulerEntryFrequencyMinInterval(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 30
+	expected := 30 * time.Minute
 	result := opts.SchedulerEntryFrequencyMinInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected SCHEDULER_ENTRY_FREQUENCY_MIN_INTERVAL value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "SCHEDULER_ENTRY_FREQUENCY_MIN_INTERVAL"
+	})
+
+	expectedSerialized := 30
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
+	}
 }
 
 func TestDefaultSchedulerEntryFrequencyFactorValue(t *testing.T) {

@@ -992,12 +1014,22 @@ func TestSchedulerRoundRobin(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 15
+	expected := 15 * time.Minute
 	result := opts.SchedulerRoundRobinMinInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected SCHEDULER_ROUND_ROBIN_MIN_INTERVAL value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "SCHEDULER_ROUND_ROBIN_MIN_INTERVAL"
+	})
+
+	expectedSerialized := 15
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
+	}
 }
 
 func TestDefaultSchedulerRoundRobinMaxIntervalValue(t *testing.T) {

@@ -1027,12 +1059,22 @@ func TestSchedulerRoundRobinMaxInterval(t *testing.T) {
 		t.Fatalf(`Parsing failure: %v`, err)
 	}
 
-	expected := 150
+	expected := 150 * time.Minute
 	result := opts.SchedulerRoundRobinMaxInterval()
 
 	if result != expected {
 		t.Fatalf(`Unexpected SCHEDULER_ROUND_ROBIN_MAX_INTERVAL value, got %v instead of %v`, result, expected)
 	}
+
+	sorted := opts.SortedOptions(false)
+	i := slices.IndexFunc(sorted, func(opt *option) bool {
+		return opt.Key == "SCHEDULER_ROUND_ROBIN_MAX_INTERVAL"
+	})
+
+	expectedSerialized := 150
+	if got := sorted[i].Value; got != expectedSerialized {
+		t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
+	}
 }
 
 func TestPollingParsingErrorLimit(t *testing.T) {

@@ -32,11 +32,11 @@ const (
 	defaultForceRefreshInterval = 30
 	defaultBatchSize = 100
 	defaultPollingScheduler = "round_robin"
-	defaultSchedulerEntryFrequencyMinInterval = 5
-	defaultSchedulerEntryFrequencyMaxInterval = 24 * 60
+	defaultSchedulerEntryFrequencyMinInterval = 5 * time.Minute
+	defaultSchedulerEntryFrequencyMaxInterval = 24 * time.Hour
 	defaultSchedulerEntryFrequencyFactor = 1
-	defaultSchedulerRoundRobinMinInterval = 60
-	defaultSchedulerRoundRobinMaxInterval = 1440
+	defaultSchedulerRoundRobinMinInterval = 1 * time.Hour
+	defaultSchedulerRoundRobinMaxInterval = 24 * time.Hour
 	defaultPollingParsingErrorLimit = 3
 	defaultRunMigrations = false
 	defaultDatabaseURL = "user=postgres password=postgres dbname=miniflux2 sslmode=disable"

@@ -132,11 +132,11 @@ type options struct {
 	cleanupRemoveSessionsDays int
 	forceRefreshInterval int
 	batchSize int
-	schedulerEntryFrequencyMinInterval int
-	schedulerEntryFrequencyMaxInterval int
+	schedulerEntryFrequencyMinInterval time.Duration
+	schedulerEntryFrequencyMaxInterval time.Duration
 	schedulerEntryFrequencyFactor int
-	schedulerRoundRobinMinInterval int
-	schedulerRoundRobinMaxInterval int
+	schedulerRoundRobinMinInterval time.Duration
+	schedulerRoundRobinMaxInterval time.Duration
 	pollingFrequency int
 	pollingLimitPerHost int
 	pollingParsingErrorLimit int

@@ -422,13 +422,13 @@ func (o *options) PollingScheduler() string {
 	return o.pollingScheduler
 }
 
-// SchedulerEntryFrequencyMaxInterval returns the maximum interval in minutes for the entry frequency scheduler.
-func (o *options) SchedulerEntryFrequencyMaxInterval() int {
+// SchedulerEntryFrequencyMaxInterval returns the maximum interval for the entry frequency scheduler.
+func (o *options) SchedulerEntryFrequencyMaxInterval() time.Duration {
 	return o.schedulerEntryFrequencyMaxInterval
 }
 
-// SchedulerEntryFrequencyMinInterval returns the minimum interval in minutes for the entry frequency scheduler.
-func (o *options) SchedulerEntryFrequencyMinInterval() int {
+// SchedulerEntryFrequencyMinInterval returns the minimum interval for the entry frequency scheduler.
+func (o *options) SchedulerEntryFrequencyMinInterval() time.Duration {
 	return o.schedulerEntryFrequencyMinInterval
 }
 

@@ -437,11 +437,11 @@ func (o *options) SchedulerEntryFrequencyFactor() int {
 	return o.schedulerEntryFrequencyFactor
 }
 
-func (o *options) SchedulerRoundRobinMinInterval() int {
+func (o *options) SchedulerRoundRobinMinInterval() time.Duration {
 	return o.schedulerRoundRobinMinInterval
 }
 
-func (o *options) SchedulerRoundRobinMaxInterval() int {
+func (o *options) SchedulerRoundRobinMaxInterval() time.Duration {
 	return o.schedulerRoundRobinMaxInterval
 }
 

@@ -781,11 +781,11 @@ func (o *options) SortedOptions(redactSecret bool) []*option {
 		"MEDIA_PROXY_CUSTOM_URL": o.mediaProxyCustomURL,
 		"ROOT_URL": o.rootURL,
 		"RUN_MIGRATIONS": o.runMigrations,
-		"SCHEDULER_ENTRY_FREQUENCY_MAX_INTERVAL": o.schedulerEntryFrequencyMaxInterval,
-		"SCHEDULER_ENTRY_FREQUENCY_MIN_INTERVAL": o.schedulerEntryFrequencyMinInterval,
+		"SCHEDULER_ENTRY_FREQUENCY_MAX_INTERVAL": int(o.schedulerEntryFrequencyMaxInterval.Minutes()),
+		"SCHEDULER_ENTRY_FREQUENCY_MIN_INTERVAL": int(o.schedulerEntryFrequencyMinInterval.Minutes()),
 		"SCHEDULER_ENTRY_FREQUENCY_FACTOR": o.schedulerEntryFrequencyFactor,
-		"SCHEDULER_ROUND_ROBIN_MIN_INTERVAL": o.schedulerRoundRobinMinInterval,
-		"SCHEDULER_ROUND_ROBIN_MAX_INTERVAL": o.schedulerRoundRobinMaxInterval,
+		"SCHEDULER_ROUND_ROBIN_MIN_INTERVAL": int(o.schedulerRoundRobinMinInterval.Minutes()),
+		"SCHEDULER_ROUND_ROBIN_MAX_INTERVAL": int(o.schedulerRoundRobinMaxInterval.Minutes()),
 		"SCHEDULER_SERVICE": o.schedulerService,
 		"WATCHDOG": o.watchdog,
 		"WORKER_POOL_SIZE": o.workerPoolSize,

@@ -14,6 +14,7 @@ import (
 	"os"
 	"strconv"
 	"strings"
+	"time"
 )
 
 // parser handles configuration parsing.

@@ -150,15 +151,15 @@ func (p *parser) parseLines(lines []string) (err error) {
 		case "POLLING_SCHEDULER":
 			p.opts.pollingScheduler = strings.ToLower(parseString(value, defaultPollingScheduler))
 		case "SCHEDULER_ENTRY_FREQUENCY_MAX_INTERVAL":
-			p.opts.schedulerEntryFrequencyMaxInterval = parseInt(value, defaultSchedulerEntryFrequencyMaxInterval)
+			p.opts.schedulerEntryFrequencyMaxInterval = parseInterval(value, time.Minute, defaultSchedulerEntryFrequencyMaxInterval)
 		case "SCHEDULER_ENTRY_FREQUENCY_MIN_INTERVAL":
-			p.opts.schedulerEntryFrequencyMinInterval = parseInt(value, defaultSchedulerEntryFrequencyMinInterval)
+			p.opts.schedulerEntryFrequencyMinInterval = parseInterval(value, time.Minute, defaultSchedulerEntryFrequencyMinInterval)
 		case "SCHEDULER_ENTRY_FREQUENCY_FACTOR":
 			p.opts.schedulerEntryFrequencyFactor = parseInt(value, defaultSchedulerEntryFrequencyFactor)
 		case "SCHEDULER_ROUND_ROBIN_MIN_INTERVAL":
-			p.opts.schedulerRoundRobinMinInterval = parseInt(value, defaultSchedulerRoundRobinMinInterval)
+			p.opts.schedulerRoundRobinMinInterval = parseInterval(value, time.Minute, defaultSchedulerRoundRobinMinInterval)
 		case "SCHEDULER_ROUND_ROBIN_MAX_INTERVAL":
-			p.opts.schedulerRoundRobinMaxInterval = parseInt(value, defaultSchedulerRoundRobinMaxInterval)
+			p.opts.schedulerRoundRobinMaxInterval = parseInterval(value, time.Minute, defaultSchedulerRoundRobinMaxInterval)
 		case "MEDIA_PROXY_HTTP_CLIENT_TIMEOUT":
 			p.opts.mediaProxyHTTPClientTimeout = parseInt(value, defaultMediaProxyHTTPClientTimeout)
 		case "MEDIA_PROXY_MODE":
@@ -6,7 +6,6 @@ package model // import "miniflux.app/v2/internal/model"
 import (
 	"fmt"
 	"io"
-	"math"
 	"time"
 
 	"miniflux.app/v2/internal/config"

@@ -69,11 +68,11 @@ type Feed struct {
 	Entries Entries `json:"entries,omitempty"`
 
 	// Internal attributes (not exposed in the API and not persisted in the database)
-	TTL                    int    `json:"-"`
-	IconURL                string `json:"-"`
-	UnreadCount            int    `json:"-"`
-	ReadCount              int    `json:"-"`
-	NumberOfVisibleEntries int    `json:"-"`
+	TTL                    time.Duration `json:"-"`
+	IconURL                string        `json:"-"`
+	UnreadCount            int           `json:"-"`
+	ReadCount              int           `json:"-"`
+	NumberOfVisibleEntries int           `json:"-"`
 }
 
 type FeedCounters struct {

@@ -119,35 +118,33 @@ func (f *Feed) CheckedNow() {
 }
 
 // ScheduleNextCheck set "next_check_at" of a feed based on the scheduler selected from the configuration.
-func (f *Feed) ScheduleNextCheck(weeklyCount int, refreshDelayInMinutes int) int {
+func (f *Feed) ScheduleNextCheck(weeklyCount int, refreshDelay time.Duration) time.Duration {
 	// Default to the global config Polling Frequency.
-	intervalMinutes := config.Opts.SchedulerRoundRobinMinInterval()
+	interval := config.Opts.SchedulerRoundRobinMinInterval()
 
 	if config.Opts.PollingScheduler() == SchedulerEntryFrequency {
 		if weeklyCount <= 0 {
-			intervalMinutes = config.Opts.SchedulerEntryFrequencyMaxInterval()
+			interval = config.Opts.SchedulerEntryFrequencyMaxInterval()
 		} else {
-			intervalMinutes = int(math.Round(float64(7*24*60) / float64(weeklyCount*config.Opts.SchedulerEntryFrequencyFactor())))
-			intervalMinutes = min(intervalMinutes, config.Opts.SchedulerEntryFrequencyMaxInterval())
-			intervalMinutes = max(intervalMinutes, config.Opts.SchedulerEntryFrequencyMinInterval())
+			interval = (7 * 24 * time.Hour) / time.Duration(weeklyCount*config.Opts.SchedulerEntryFrequencyFactor())
+			interval = min(interval, config.Opts.SchedulerEntryFrequencyMaxInterval())
+			interval = max(interval, config.Opts.SchedulerEntryFrequencyMinInterval())
 		}
 	}
 
 	// Use the RSS TTL field, Retry-After, Cache-Control or Expires HTTP headers if defined.
-	if refreshDelayInMinutes > 0 && refreshDelayInMinutes > intervalMinutes {
-		intervalMinutes = refreshDelayInMinutes
-	}
+	interval = max(interval, refreshDelay)
 
 	// Limit the max interval value for misconfigured feeds.
 	switch config.Opts.PollingScheduler() {
 	case SchedulerRoundRobin:
-		intervalMinutes = min(intervalMinutes, config.Opts.SchedulerRoundRobinMaxInterval())
+		interval = min(interval, config.Opts.SchedulerRoundRobinMaxInterval())
 	case SchedulerEntryFrequency:
-		intervalMinutes = min(intervalMinutes, config.Opts.SchedulerEntryFrequencyMaxInterval())
+		interval = min(interval, config.Opts.SchedulerEntryFrequencyMaxInterval())
 	}
 
-	f.NextCheckAt = time.Now().Add(time.Minute * time.Duration(intervalMinutes))
-	return intervalMinutes
+	f.NextCheckAt = time.Now().Add(interval)
+	return interval
 }
 
 // FeedCreationRequest represents the request to create a feed.
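
Since Go 1.21 the built-in min and max work on any ordered type, including time.Duration, which is what lets the rewritten ScheduleNextCheck clamp the interval directly instead of converting back and forth through minutes. A small standalone illustration of the same clamping idea, using made-up bounds rather than miniflux configuration values:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Hypothetical bounds, standing in for the scheduler's min/max options.
    minInterval := 15 * time.Minute
    maxInterval := 24 * time.Hour

    // Derived interval, e.g. one week divided by the weekly entry count.
    weeklyCount := 3
    interval := (7 * 24 * time.Hour) / time.Duration(weeklyCount)

    // Clamp into [minInterval, maxInterval] with the built-in min/max.
    interval = max(min(interval, maxInterval), minInterval)

    fmt.Println(interval) // 24h0m0s
}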
@@ -67,11 +67,11 @@ func TestFeedCheckedNow(t *testing.T) {
 	}
 }
 
-func checkTargetInterval(t *testing.T, feed *Feed, targetInterval int, timeBefore time.Time, message string) {
-	if feed.NextCheckAt.Before(timeBefore.Add(time.Minute * time.Duration(targetInterval))) {
+func checkTargetInterval(t *testing.T, feed *Feed, targetInterval time.Duration, timeBefore time.Time, message string) {
+	if feed.NextCheckAt.Before(timeBefore.Add(targetInterval)) {
 		t.Errorf(`The next_check_at should be after timeBefore + %s`, message)
 	}
-	if feed.NextCheckAt.After(time.Now().Add(time.Minute * time.Duration(targetInterval))) {
+	if feed.NextCheckAt.After(time.Now().Add(targetInterval)) {
 		t.Errorf(`The next_check_at should be before now + %s`, message)
 	}
 }

@@ -188,7 +188,7 @@ func TestFeedScheduleNextCheckRoundRobinMinInterval(t *testing.T) {
 		t.Error(`The next_check_at must be set`)
 	}
 
-	expectedInterval := minInterval
+	expectedInterval := time.Duration(minInterval) * time.Minute
 	checkTargetInterval(t, feed, expectedInterval, timeBefore, "TestFeedScheduleNextCheckRoundRobinMinInterval")
 }
 

@@ -217,7 +217,7 @@ func TestFeedScheduleNextCheckEntryFrequencyMaxInterval(t *testing.T) {
 		t.Error(`The next_check_at must be set`)
 	}
 
-	targetInterval := maxInterval
+	targetInterval := time.Duration(maxInterval) * time.Minute
 	checkTargetInterval(t, feed, targetInterval, timeBefore, "entry frequency max interval")
 }
 

@@ -246,7 +246,7 @@ func TestFeedScheduleNextCheckEntryFrequencyMaxIntervalZeroWeeklyCount(t *testin
 		t.Error(`The next_check_at must be set`)
 	}
 
-	targetInterval := maxInterval
+	targetInterval := time.Duration(maxInterval) * time.Minute
 	checkTargetInterval(t, feed, targetInterval, timeBefore, "entry frequency max interval")
 }
 

@@ -275,7 +275,7 @@ func TestFeedScheduleNextCheckEntryFrequencyMinInterval(t *testing.T) {
 		t.Error(`The next_check_at must be set`)
 	}
 
-	targetInterval := minInterval
+	targetInterval := time.Duration(minInterval) * time.Minute
 	checkTargetInterval(t, feed, targetInterval, timeBefore, "entry frequency min interval")
 }
 

@@ -301,7 +301,7 @@ func TestFeedScheduleNextCheckEntryFrequencyFactor(t *testing.T) {
 		t.Error(`The next_check_at must be set`)
 	}
 
-	targetInterval := config.Opts.SchedulerEntryFrequencyMaxInterval() / factor
+	targetInterval := config.Opts.SchedulerEntryFrequencyMaxInterval() / time.Duration(factor)
 	checkTargetInterval(t, feed, targetInterval, timeBefore, "factor * count")
 }
 

@@ -326,17 +326,17 @@ func TestFeedScheduleNextCheckEntryFrequencySmallNewTTL(t *testing.T) {
 	// Use a very large weekly count to trigger the min interval
 	weeklyCount := largeWeeklyCount
 	// TTL is smaller than minInterval.
-	newTTL := minInterval / 2
+	newTTL := time.Duration(minInterval) * time.Minute / 2
 	feed.ScheduleNextCheck(weeklyCount, newTTL)
 
 	if feed.NextCheckAt.IsZero() {
 		t.Error(`The next_check_at must be set`)
 	}
 
-	targetInterval := minInterval
+	targetInterval := time.Duration(minInterval) * time.Minute
 	checkTargetInterval(t, feed, targetInterval, timeBefore, "entry frequency min interval")
 
-	if feed.NextCheckAt.Before(timeBefore.Add(time.Minute * time.Duration(newTTL))) {
+	if feed.NextCheckAt.Before(timeBefore.Add(newTTL)) {
 		t.Error(`The next_check_at should be after timeBefore + TTL`)
 	}
 }

@@ -362,7 +362,7 @@ func TestFeedScheduleNextCheckEntryFrequencyLargeNewTTL(t *testing.T) {
 	// Use a very large weekly count to trigger the min interval
 	weeklyCount := largeWeeklyCount
 	// TTL is larger than minInterval.
-	newTTL := minInterval * 2
+	newTTL := time.Duration(minInterval) * time.Minute * 2
 	feed.ScheduleNextCheck(weeklyCount, newTTL)
 
 	if feed.NextCheckAt.IsZero() {

@@ -9,7 +9,6 @@ import (
 	"fmt"
 	"io"
 	"log/slog"
-	"math"
 	"net"
 	"net/http"
 	"net/url"

@@ -54,18 +53,19 @@ func (r *ResponseHandler) ETag() string {
 	return r.httpResponse.Header.Get("ETag")
 }
 
-func (r *ResponseHandler) ExpiresInMinutes() int {
+func (r *ResponseHandler) Expires() time.Duration {
 	expiresHeaderValue := r.httpResponse.Header.Get("Expires")
 	if expiresHeaderValue != "" {
 		t, err := time.Parse(time.RFC1123, expiresHeaderValue)
 		if err == nil {
-			return int(math.Ceil(time.Until(t).Minutes()))
+			// This rounds up to the next minute by rounding down and just adding a minute.
+			return time.Until(t).Truncate(time.Minute) + time.Minute
 		}
 	}
 	return 0
 }
 
-func (r *ResponseHandler) CacheControlMaxAgeInMinutes() int {
+func (r *ResponseHandler) CacheControlMaxAge() time.Duration {
 	cacheControlHeaderValue := r.httpResponse.Header.Get("Cache-Control")
 	if cacheControlHeaderValue != "" {
 		for _, directive := range strings.Split(cacheControlHeaderValue, ",") {
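
The new Expires accessor rounds up to the next whole minute by truncating and then adding a minute, rather than calling math.Ceil on a float of minutes. A tiny standalone check of that behaviour, with sample durations picked purely for illustration:

package main

import (
    "fmt"
    "time"
)

// roundUpToMinute reproduces the rounding used above: truncate to the
// minute below, then add a full minute.
func roundUpToMinute(d time.Duration) time.Duration {
    return d.Truncate(time.Minute) + time.Minute
}

func main() {
    fmt.Println(roundUpToMinute(9*time.Minute + 30*time.Second)) // 10m0s
    fmt.Println(roundUpToMinute(10 * time.Minute))               // 11m0s: exact multiples gain a minute
}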
@@ -73,7 +73,7 @@ func (r *ResponseHandler) CacheControlMaxAgeInMinutes() int {
 			if strings.HasPrefix(directive, "max-age=") {
 				maxAge, err := strconv.Atoi(strings.TrimPrefix(directive, "max-age="))
 				if err == nil {
-					return int(math.Ceil(float64(maxAge) / 60))
+					return time.Duration(maxAge) * time.Second
 				}
 			}
 		}

@@ -81,17 +81,17 @@ func (r *ResponseHandler) CacheControlMaxAgeInMinutes() int {
 	return 0
 }
 
-func (r *ResponseHandler) ParseRetryDelay() int {
+func (r *ResponseHandler) ParseRetryDelay() time.Duration {
 	retryAfterHeaderValue := r.httpResponse.Header.Get("Retry-After")
 	if retryAfterHeaderValue != "" {
 		// First, try to parse as an integer (number of seconds)
 		if seconds, err := strconv.Atoi(retryAfterHeaderValue); err == nil {
-			return seconds
+			return time.Duration(seconds) * time.Second
 		}
 
 		// If not an integer, try to parse as an HTTP-date
 		if t, err := time.Parse(time.RFC1123, retryAfterHeaderValue); err == nil {
-			return int(time.Until(t).Seconds())
+			return time.Until(t).Truncate(time.Second)
 		}
 	}
 	return 0

@@ -72,7 +72,7 @@ func TestIsModified(t *testing.T) {
 func TestRetryDelay(t *testing.T) {
 	var testCases = map[string]struct {
 		RetryAfterHeader string
-		ExpectedDelay    int
+		ExpectedDelay    time.Duration
 	}{
 		"Empty header": {
 			RetryAfterHeader: "",

@@ -80,11 +80,11 @@ func TestRetryDelay(t *testing.T) {
 		},
 		"Integer value": {
 			RetryAfterHeader: "42",
-			ExpectedDelay:    42,
+			ExpectedDelay:    42 * time.Second,
 		},
 		"HTTP-date": {
 			RetryAfterHeader: time.Now().Add(42 * time.Second).Format(time.RFC1123),
-			ExpectedDelay:    41,
+			ExpectedDelay:    41 * time.Second,
 		},
 	}
 	for name, tc := range testCases {

@@ -105,20 +105,20 @@ func TestRetryDelay(t *testing.T) {
 
 func TestExpiresInMinutes(t *testing.T) {
 	var testCases = map[string]struct {
-		ExpiresHeader   string
-		ExpectedMinutes int
+		ExpiresHeader string
+		Expected      time.Duration
 	}{
 		"Empty header": {
-			ExpiresHeader:   "",
-			ExpectedMinutes: 0,
+			ExpiresHeader: "",
+			Expected:      0,
 		},
 		"Valid Expires header": {
-			ExpiresHeader:   time.Now().Add(10 * time.Minute).Format(time.RFC1123),
-			ExpectedMinutes: 10,
+			ExpiresHeader: time.Now().Add(10 * time.Minute).Format(time.RFC1123),
+			Expected:      10 * time.Minute,
 		},
 		"Invalid Expires header": {
-			ExpiresHeader:   "invalid-date",
-			ExpectedMinutes: 0,
+			ExpiresHeader: "invalid-date",
+			Expected:      0,
 		},
 	}
 	for name, tc := range testCases {

@@ -130,8 +130,8 @@ func TestExpiresInMinutes(t *testing.T) {
 				Header: header,
 			},
 		}
-		if tc.ExpectedMinutes != rh.ExpiresInMinutes() {
-			t.Errorf("Expected %d, got %d for scenario %q", tc.ExpectedMinutes, rh.ExpiresInMinutes(), name)
+		if tc.Expected != rh.Expires() {
+			t.Errorf("Expected %d, got %d for scenario %q", tc.Expected, rh.Expires(), name)
 		}
 	})
 }

@@ -140,23 +140,23 @@ func TestExpiresInMinutes(t *testing.T) {
 func TestCacheControlMaxAgeInMinutes(t *testing.T) {
 	var testCases = map[string]struct {
 		CacheControlHeader string
-		ExpectedMinutes    int
+		Expected           time.Duration
 	}{
 		"Empty header": {
 			CacheControlHeader: "",
-			ExpectedMinutes:    0,
+			Expected:           0,
 		},
 		"Valid max-age": {
 			CacheControlHeader: "max-age=600",
-			ExpectedMinutes:    10,
+			Expected:           10 * time.Minute,
 		},
 		"Invalid max-age": {
 			CacheControlHeader: "max-age=invalid",
-			ExpectedMinutes:    0,
+			Expected:           0,
 		},
 		"Multiple directives": {
 			CacheControlHeader: "no-cache, max-age=300",
-			ExpectedMinutes:    5,
+			Expected:           5 * time.Minute,
 		},
 	}
 	for name, tc := range testCases {

@@ -168,8 +168,8 @@ func TestCacheControlMaxAgeInMinutes(t *testing.T) {
 				Header: header,
 			},
 		}
-		if tc.ExpectedMinutes != rh.CacheControlMaxAgeInMinutes() {
-			t.Errorf("Expected %d, got %d for scenario %q", tc.ExpectedMinutes, rh.CacheControlMaxAgeInMinutes(), name)
+		if tc.Expected != rh.CacheControlMaxAge() {
+			t.Errorf("Expected %d, got %d for scenario %q", tc.Expected, rh.CacheControlMaxAge(), name)
 		}
 	})
 }

@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"errors"
 	"log/slog"
+	"time"
 
 	"miniflux.app/v2/internal/config"
 	"miniflux.app/v2/internal/integration"

@@ -208,7 +209,7 @@ func RefreshFeed(store *storage.Storage, userID, feedID int64, forceRefresh bool
 	}
 
 	weeklyEntryCount := 0
-	refreshDelayInMinutes := 0
+	var refreshDelay time.Duration
 	if config.Opts.PollingScheduler() == model.SchedulerEntryFrequency {
 		var weeklyCountErr error
 		weeklyEntryCount, weeklyCountErr = store.WeeklyFeedEntryCount(userID, feedID)

@@ -218,7 +219,7 @@ func RefreshFeed(store *storage.Storage, userID, feedID int64, forceRefresh bool
 	}
 
 	originalFeed.CheckedNow()
-	originalFeed.ScheduleNextCheck(weeklyEntryCount, refreshDelayInMinutes)
+	originalFeed.ScheduleNextCheck(weeklyEntryCount, refreshDelay)
 
 	requestBuilder := fetcher.NewRequestBuilder()
 	requestBuilder.WithUsernameAndPassword(originalFeed.Username, originalFeed.Password)

@@ -242,15 +243,14 @@ func RefreshFeed(store *storage.Storage, userID, feedID int64, forceRefresh bool
 	defer responseHandler.Close()
 
 	if responseHandler.IsRateLimited() {
-		retryDelayInSeconds := responseHandler.ParseRetryDelay()
-		refreshDelayInMinutes = retryDelayInSeconds / 60
-		calculatedNextCheckIntervalInMinutes := originalFeed.ScheduleNextCheck(weeklyEntryCount, refreshDelayInMinutes)
+		retryDelay := responseHandler.ParseRetryDelay()
+		calculatedNextCheckInterval := originalFeed.ScheduleNextCheck(weeklyEntryCount, retryDelay)
 
 		slog.Warn("Feed is rate limited",
 			slog.String("feed_url", originalFeed.FeedURL),
-			slog.Int("retry_delay_in_seconds", retryDelayInSeconds),
-			slog.Int("refresh_delay_in_minutes", refreshDelayInMinutes),
-			slog.Int("calculated_next_check_interval_in_minutes", calculatedNextCheckIntervalInMinutes),
+			slog.Int("retry_delay_in_seconds", int(retryDelay.Seconds())),
+			slog.Int("refresh_delay_in_minutes", int(refreshDelay.Minutes())),
+			slog.Int("calculated_next_check_interval_in_minutes", int(calculatedNextCheckInterval.Minutes())),
 			slog.Time("new_next_check_at", originalFeed.NextCheckAt),
 		)
 	}

@@ -316,22 +316,22 @@ func RefreshFeed(store *storage.Storage, userID, feedID int64, forceRefresh bool
 	// Use the RSS TTL value, or the Cache-Control or Expires HTTP headers if available.
 	// Otherwise, we use the default value from the configuration (min interval parameter).
 	feedTTLValue := updatedFeed.TTL
-	cacheControlMaxAgeValue := responseHandler.CacheControlMaxAgeInMinutes()
-	expiresValue := responseHandler.ExpiresInMinutes()
-	refreshDelayInMinutes = max(feedTTLValue, cacheControlMaxAgeValue, expiresValue)
+	cacheControlMaxAgeValue := responseHandler.CacheControlMaxAge()
+	expiresValue := responseHandler.Expires()
+	refreshDelay = max(feedTTLValue, cacheControlMaxAgeValue, expiresValue)
 
 	// Set the next check at with updated arguments.
-	calculatedNextCheckIntervalInMinutes := originalFeed.ScheduleNextCheck(weeklyEntryCount, refreshDelayInMinutes)
+	calculatedNextCheckInterval := originalFeed.ScheduleNextCheck(weeklyEntryCount, refreshDelay)
 
 	slog.Debug("Updated next check date",
 		slog.Int64("user_id", userID),
 		slog.Int64("feed_id", feedID),
 		slog.String("feed_url", originalFeed.FeedURL),
-		slog.Int("feed_ttl_minutes", feedTTLValue),
-		slog.Int("cache_control_max_age_in_minutes", cacheControlMaxAgeValue),
-		slog.Int("expires_in_minutes", expiresValue),
-		slog.Int("refresh_delay_in_minutes", refreshDelayInMinutes),
-		slog.Int("calculated_next_check_interval_in_minutes", calculatedNextCheckIntervalInMinutes),
+		slog.Int("feed_ttl_minutes", int(feedTTLValue.Minutes())),
+		slog.Int("cache_control_max_age_in_minutes", int(cacheControlMaxAgeValue.Minutes())),
+		slog.Int("expires_in_minutes", int(expiresValue.Minutes())),
+		slog.Int("refresh_delay_in_minutes", int(refreshDelay.Minutes())),
+		slog.Int("calculated_next_check_interval_in_minutes", int(calculatedNextCheckInterval.Minutes())),
 		slog.Time("new_next_check_at", originalFeed.NextCheckAt),
 	)
 

|
@ -55,7 +55,7 @@ func (r *rssAdapter) buildFeed(baseURL string) *model.Feed {
|
|||
// Get TTL if defined.
|
||||
if r.rss.Channel.TTL != "" {
|
||||
if ttl, err := strconv.Atoi(r.rss.Channel.TTL); err == nil {
|
||||
feed.TTL = ttl
|
||||
feed.TTL = time.Duration(ttl) * time.Minute
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -2151,7 +2151,7 @@ func TestParseFeedWithTTLField(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	if feed.TTL != 60 {
+	if feed.TTL != 60*time.Minute {
 		t.Errorf("Incorrect TTL, got: %d", feed.TTL)
 	}
 }