1
0
Fork 0
mirror of https://github.com/miniflux/v2.git synced 2025-08-26 18:21:01 +00:00

refactor(cli): use time.Duration for cleanup tasks

This commit is contained in:
gudvinr 2025-08-18 23:10:18 +03:00 committed by Frédéric Guillot
parent 7060ecc163
commit 983291c78b
8 changed files with 94 additions and 45 deletions

View file

@ -14,15 +14,15 @@ import (
) )
func runCleanupTasks(store *storage.Storage) { func runCleanupTasks(store *storage.Storage) {
nbSessions := store.CleanOldSessions(config.Opts.CleanupRemoveSessionsDays()) nbSessions := store.CleanOldSessions(config.Opts.CleanupRemoveSessionsInterval())
nbUserSessions := store.CleanOldUserSessions(config.Opts.CleanupRemoveSessionsDays()) nbUserSessions := store.CleanOldUserSessions(config.Opts.CleanupRemoveSessionsInterval())
slog.Info("Sessions cleanup completed", slog.Info("Sessions cleanup completed",
slog.Int64("application_sessions_removed", nbSessions), slog.Int64("application_sessions_removed", nbSessions),
slog.Int64("user_sessions_removed", nbUserSessions), slog.Int64("user_sessions_removed", nbUserSessions),
) )
startTime := time.Now() startTime := time.Now()
if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, config.Opts.CleanupArchiveReadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil { if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, config.Opts.CleanupArchiveReadInterval(), config.Opts.CleanupArchiveBatchSize()); err != nil {
slog.Error("Unable to archive read entries", slog.Any("error", err)) slog.Error("Unable to archive read entries", slog.Any("error", err))
} else { } else {
slog.Info("Archiving read entries completed", slog.Info("Archiving read entries completed",
@ -35,7 +35,7 @@ func runCleanupTasks(store *storage.Storage) {
} }
startTime = time.Now() startTime = time.Now()
if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, config.Opts.CleanupArchiveUnreadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil { if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, config.Opts.CleanupArchiveUnreadInterval(), config.Opts.CleanupArchiveBatchSize()); err != nil {
slog.Error("Unable to archive unread entries", slog.Any("error", err)) slog.Error("Unable to archive unread entries", slog.Any("error", err))
} else { } else {
slog.Info("Archiving unread entries completed", slog.Info("Archiving unread entries completed",

View file

@ -645,12 +645,22 @@ func TestDefaultCleanupArchiveReadDaysValue(t *testing.T) {
t.Fatalf(`Parsing failure: %v`, err) t.Fatalf(`Parsing failure: %v`, err)
} }
expected := 60 expected := 60 * 24 * time.Hour
result := opts.CleanupArchiveReadDays() result := opts.CleanupArchiveReadInterval()
if result != expected { if result != expected {
t.Fatalf(`Unexpected CLEANUP_ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected) t.Fatalf(`Unexpected CLEANUP_ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected)
} }
sorted := opts.SortedOptions(false)
i := slices.IndexFunc(sorted, func(opt *option) bool {
return opt.Key == "CLEANUP_ARCHIVE_READ_DAYS"
})
expectedSerialized := 60
if got := sorted[i].Value; got != expectedSerialized {
t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
}
} }
func TestCleanupArchiveReadDays(t *testing.T) { func TestCleanupArchiveReadDays(t *testing.T) {
@ -664,12 +674,22 @@ func TestCleanupArchiveReadDays(t *testing.T) {
t.Fatalf(`Parsing failure: %v`, err) t.Fatalf(`Parsing failure: %v`, err)
} }
expected := 7 expected := 7 * 24 * time.Hour
result := opts.CleanupArchiveReadDays() result := opts.CleanupArchiveReadInterval()
if result != expected { if result != expected {
t.Fatalf(`Unexpected CLEANUP_ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected) t.Fatalf(`Unexpected CLEANUP_ARCHIVE_READ_DAYS value, got %v instead of %v`, result, expected)
} }
sorted := opts.SortedOptions(false)
i := slices.IndexFunc(sorted, func(opt *option) bool {
return opt.Key == "CLEANUP_ARCHIVE_READ_DAYS"
})
expectedSerialized := 7
if got := sorted[i].Value; got != expectedSerialized {
t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
}
} }
func TestDefaultCleanupRemoveSessionsDaysValue(t *testing.T) { func TestDefaultCleanupRemoveSessionsDaysValue(t *testing.T) {
@ -681,12 +701,22 @@ func TestDefaultCleanupRemoveSessionsDaysValue(t *testing.T) {
t.Fatalf(`Parsing failure: %v`, err) t.Fatalf(`Parsing failure: %v`, err)
} }
expected := 30 expected := 30 * 24 * time.Hour
result := opts.CleanupRemoveSessionsDays() result := opts.CleanupRemoveSessionsInterval()
if result != expected { if result != expected {
t.Fatalf(`Unexpected CLEANUP_REMOVE_SESSIONS_DAYS value, got %v instead of %v`, result, expected) t.Fatalf(`Unexpected CLEANUP_REMOVE_SESSIONS_DAYS value, got %v instead of %v`, result, expected)
} }
sorted := opts.SortedOptions(false)
i := slices.IndexFunc(sorted, func(opt *option) bool {
return opt.Key == "CLEANUP_REMOVE_SESSIONS_DAYS"
})
expectedSerialized := 30
if got := sorted[i].Value; got != expectedSerialized {
t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
}
} }
func TestCleanupRemoveSessionsDays(t *testing.T) { func TestCleanupRemoveSessionsDays(t *testing.T) {
@ -699,12 +729,22 @@ func TestCleanupRemoveSessionsDays(t *testing.T) {
t.Fatalf(`Parsing failure: %v`, err) t.Fatalf(`Parsing failure: %v`, err)
} }
expected := 7 expected := 7 * 24 * time.Hour
result := opts.CleanupRemoveSessionsDays() result := opts.CleanupRemoveSessionsInterval()
if result != expected { if result != expected {
t.Fatalf(`Unexpected CLEANUP_REMOVE_SESSIONS_DAYS value, got %v instead of %v`, result, expected) t.Fatalf(`Unexpected CLEANUP_REMOVE_SESSIONS_DAYS value, got %v instead of %v`, result, expected)
} }
sorted := opts.SortedOptions(false)
i := slices.IndexFunc(sorted, func(opt *option) bool {
return opt.Key == "CLEANUP_REMOVE_SESSIONS_DAYS"
})
expectedSerialized := 7
if got := sorted[i].Value; got != expectedSerialized {
t.Fatalf(`Unexpected value in option output, got %q instead of %q`, got, expectedSerialized)
}
} }
func TestDefaultWorkerPoolSizeValue(t *testing.T) { func TestDefaultWorkerPoolSizeValue(t *testing.T) {

View file

@ -48,10 +48,10 @@ const (
defaultKeyFile = "" defaultKeyFile = ""
defaultCertDomain = "" defaultCertDomain = ""
defaultCleanupFrequency = 24 * time.Hour defaultCleanupFrequency = 24 * time.Hour
defaultCleanupArchiveReadDays = 60 defaultCleanupArchiveReadInterval = 60 * 24 * time.Hour
defaultCleanupArchiveUnreadDays = 180 defaultCleanupArchiveUnreadInterval = 180 * 24 * time.Hour
defaultCleanupArchiveBatchSize = 10000 defaultCleanupArchiveBatchSize = 10000
defaultCleanupRemoveSessionsDays = 30 defaultCleanupRemoveSessionsInterval = 30 * 24 * time.Hour
defaultMediaProxyHTTPClientTimeout = 120 * time.Second defaultMediaProxyHTTPClientTimeout = 120 * time.Second
defaultMediaProxyMode = "http-only" defaultMediaProxyMode = "http-only"
defaultMediaResourceTypes = "image" defaultMediaResourceTypes = "image"
@ -126,10 +126,10 @@ type options struct {
certDomain string certDomain string
certKeyFile string certKeyFile string
cleanupFrequencyInterval time.Duration cleanupFrequencyInterval time.Duration
cleanupArchiveReadDays int cleanupArchiveReadInterval time.Duration
cleanupArchiveUnreadDays int cleanupArchiveUnreadInterval time.Duration
cleanupArchiveBatchSize int cleanupArchiveBatchSize int
cleanupRemoveSessionsDays int cleanupRemoveSessionsInterval time.Duration
forceRefreshInterval time.Duration forceRefreshInterval time.Duration
batchSize int batchSize int
schedulerEntryFrequencyMinInterval time.Duration schedulerEntryFrequencyMinInterval time.Duration
@ -210,10 +210,10 @@ func NewOptions() *options {
certDomain: defaultCertDomain, certDomain: defaultCertDomain,
certKeyFile: defaultKeyFile, certKeyFile: defaultKeyFile,
cleanupFrequencyInterval: defaultCleanupFrequency, cleanupFrequencyInterval: defaultCleanupFrequency,
cleanupArchiveReadDays: defaultCleanupArchiveReadDays, cleanupArchiveReadInterval: defaultCleanupArchiveReadInterval,
cleanupArchiveUnreadDays: defaultCleanupArchiveUnreadDays, cleanupArchiveUnreadInterval: defaultCleanupArchiveUnreadInterval,
cleanupArchiveBatchSize: defaultCleanupArchiveBatchSize, cleanupArchiveBatchSize: defaultCleanupArchiveBatchSize,
cleanupRemoveSessionsDays: defaultCleanupRemoveSessionsDays, cleanupRemoveSessionsInterval: defaultCleanupRemoveSessionsInterval,
pollingFrequency: defaultPollingFrequency, pollingFrequency: defaultPollingFrequency,
forceRefreshInterval: defaultForceRefreshInterval, forceRefreshInterval: defaultForceRefreshInterval,
batchSize: defaultBatchSize, batchSize: defaultBatchSize,
@ -366,14 +366,14 @@ func (o *options) CleanupFrequency() time.Duration {
return o.cleanupFrequencyInterval return o.cleanupFrequencyInterval
} }
// CleanupArchiveReadDays returns the number of days after which marking read items as removed. // CleanupArchiveReadInterval returns the interval after which read items are marked as removed.
func (o *options) CleanupArchiveReadDays() int { func (o *options) CleanupArchiveReadInterval() time.Duration {
return o.cleanupArchiveReadDays return o.cleanupArchiveReadInterval
} }
// CleanupArchiveUnreadDays returns the number of days after which marking unread items as removed. // CleanupArchiveUnreadInterval returns the interval after which unread items are marked as removed.
func (o *options) CleanupArchiveUnreadDays() int { func (o *options) CleanupArchiveUnreadInterval() time.Duration {
return o.cleanupArchiveUnreadDays return o.cleanupArchiveUnreadInterval
} }
// CleanupArchiveBatchSize returns the number of entries to archive for each interval. // CleanupArchiveBatchSize returns the number of entries to archive for each interval.
@ -381,9 +381,9 @@ func (o *options) CleanupArchiveBatchSize() int {
return o.cleanupArchiveBatchSize return o.cleanupArchiveBatchSize
} }
// CleanupRemoveSessionsDays returns the number of days after which to remove sessions. // CleanupRemoveSessionsInterval returns the interval after which sessions are removed.
func (o *options) CleanupRemoveSessionsDays() int { func (o *options) CleanupRemoveSessionsInterval() time.Duration {
return o.cleanupRemoveSessionsDays return o.cleanupRemoveSessionsInterval
} }
// WorkerPoolSize returns the number of background worker. // WorkerPoolSize returns the number of background worker.
@ -723,9 +723,9 @@ func (o *options) SortedOptions(redactSecret bool) []*option {
"CERT_FILE": o.certFile, "CERT_FILE": o.certFile,
"CLEANUP_FREQUENCY_HOURS": int(o.cleanupFrequencyInterval.Hours()), "CLEANUP_FREQUENCY_HOURS": int(o.cleanupFrequencyInterval.Hours()),
"CLEANUP_ARCHIVE_BATCH_SIZE": o.cleanupArchiveBatchSize, "CLEANUP_ARCHIVE_BATCH_SIZE": o.cleanupArchiveBatchSize,
"CLEANUP_ARCHIVE_READ_DAYS": o.cleanupArchiveReadDays, "CLEANUP_ARCHIVE_READ_DAYS": int(o.cleanupArchiveReadInterval.Hours() / 24),
"CLEANUP_ARCHIVE_UNREAD_DAYS": o.cleanupArchiveUnreadDays, "CLEANUP_ARCHIVE_UNREAD_DAYS": int(o.cleanupArchiveUnreadInterval.Hours() / 24),
"CLEANUP_REMOVE_SESSIONS_DAYS": o.cleanupRemoveSessionsDays, "CLEANUP_REMOVE_SESSIONS_DAYS": int(o.cleanupRemoveSessionsInterval.Hours() / 24),
"CREATE_ADMIN": o.createAdmin, "CREATE_ADMIN": o.createAdmin,
"DATABASE_CONNECTION_LIFETIME": o.databaseConnectionLifetime, "DATABASE_CONNECTION_LIFETIME": o.databaseConnectionLifetime,
"DATABASE_MAX_CONNS": o.databaseMaxConns, "DATABASE_MAX_CONNS": o.databaseMaxConns,

View file

@ -129,13 +129,13 @@ func (p *parser) parseLines(lines []string) (err error) {
case "CLEANUP_FREQUENCY_HOURS": case "CLEANUP_FREQUENCY_HOURS":
p.opts.cleanupFrequencyInterval = parseInterval(value, time.Hour, defaultCleanupFrequency) p.opts.cleanupFrequencyInterval = parseInterval(value, time.Hour, defaultCleanupFrequency)
case "CLEANUP_ARCHIVE_READ_DAYS": case "CLEANUP_ARCHIVE_READ_DAYS":
p.opts.cleanupArchiveReadDays = parseInt(value, defaultCleanupArchiveReadDays) p.opts.cleanupArchiveReadInterval = parseInterval(value, 24*time.Hour, defaultCleanupArchiveReadInterval)
case "CLEANUP_ARCHIVE_UNREAD_DAYS": case "CLEANUP_ARCHIVE_UNREAD_DAYS":
p.opts.cleanupArchiveUnreadDays = parseInt(value, defaultCleanupArchiveUnreadDays) p.opts.cleanupArchiveUnreadInterval = parseInterval(value, 24*time.Hour, defaultCleanupArchiveUnreadInterval)
case "CLEANUP_ARCHIVE_BATCH_SIZE": case "CLEANUP_ARCHIVE_BATCH_SIZE":
p.opts.cleanupArchiveBatchSize = parseInt(value, defaultCleanupArchiveBatchSize) p.opts.cleanupArchiveBatchSize = parseInt(value, defaultCleanupArchiveBatchSize)
case "CLEANUP_REMOVE_SESSIONS_DAYS": case "CLEANUP_REMOVE_SESSIONS_DAYS":
p.opts.cleanupRemoveSessionsDays = parseInt(value, defaultCleanupRemoveSessionsDays) p.opts.cleanupRemoveSessionsInterval = parseInterval(value, 24*time.Hour, defaultCleanupRemoveSessionsInterval)
case "WORKER_POOL_SIZE": case "WORKER_POOL_SIZE":
p.opts.workerPoolSize = parseInt(value, defaultWorkerPoolSize) p.opts.workerPoolSize = parseInt(value, defaultWorkerPoolSize)
case "FORCE_REFRESH_INTERVAL": case "FORCE_REFRESH_INTERVAL":

View file

@ -24,7 +24,7 @@ func New(name, value string, isHTTPS bool, path string) *http.Cookie {
Path: basePath(path), Path: basePath(path),
Secure: isHTTPS, Secure: isHTTPS,
HttpOnly: true, HttpOnly: true,
Expires: time.Now().Add(time.Duration(config.Opts.CleanupRemoveSessionsDays()) * 24 * time.Hour), Expires: time.Now().Add(config.Opts.CleanupRemoveSessionsInterval()),
SameSite: http.SameSiteLaxMode, SameSite: http.SameSiteLaxMode,
} }
} }

View file

@ -392,9 +392,9 @@ func (s *Storage) RefreshFeedEntries(userID, feedID int64, entries model.Entries
return newEntries, nil return newEntries, nil
} }
// ArchiveEntries changes the status of entries to "removed" after the given number of days. // ArchiveEntries changes the status of entries to "removed" after the interval (24h minimum).
func (s *Storage) ArchiveEntries(status string, days, limit int) (int64, error) { func (s *Storage) ArchiveEntries(status string, interval time.Duration, limit int) (int64, error) {
if days < 0 || limit <= 0 { if interval < 0 || limit <= 0 {
return 0, nil return 0, nil
} }
@ -419,6 +419,8 @@ func (s *Storage) ArchiveEntries(status string, days, limit int) (int64, error)
) )
` `
days := max(int(interval/(24*time.Hour)), 1)
result, err := s.db.Exec(query, model.EntryStatusRemoved, status, fmt.Sprintf("%d days", days), limit) result, err := s.db.Exec(query, model.EntryStatusRemoved, status, fmt.Sprintf("%d days", days), limit)
if err != nil { if err != nil {
return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err) return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err)

View file

@ -7,6 +7,7 @@ import (
"crypto/rand" "crypto/rand"
"database/sql" "database/sql"
"fmt" "fmt"
"time"
"miniflux.app/v2/internal/model" "miniflux.app/v2/internal/model"
) )
@ -122,14 +123,17 @@ func (s *Storage) FlushAllSessions() (err error) {
return nil return nil
} }
// CleanOldSessions removes sessions older than specified days. // CleanOldSessions removes sessions older than the specified interval (24h minimum).
func (s *Storage) CleanOldSessions(days int) int64 { func (s *Storage) CleanOldSessions(interval time.Duration) int64 {
query := ` query := `
DELETE FROM DELETE FROM
sessions sessions
WHERE WHERE
created_at < now() - $1::interval created_at < now() - $1::interval
` `
days := max(int(interval/(24*time.Hour)), 1)
result, err := s.db.Exec(query, fmt.Sprintf("%d days", days)) result, err := s.db.Exec(query, fmt.Sprintf("%d days", days))
if err != nil { if err != nil {
return 0 return 0

View file

@ -7,6 +7,7 @@ import (
"crypto/rand" "crypto/rand"
"database/sql" "database/sql"
"fmt" "fmt"
"time"
"miniflux.app/v2/internal/model" "miniflux.app/v2/internal/model"
) )
@ -43,7 +44,6 @@ func (s *Storage) UserSessions(userID int64) (model.UserSessions, error) {
&session.UserAgent, &session.UserAgent,
&session.IP, &session.IP,
) )
if err != nil { if err != nil {
return nil, fmt.Errorf(`store: unable to fetch user session row: %v`, err) return nil, fmt.Errorf(`store: unable to fetch user session row: %v`, err)
} }
@ -164,14 +164,17 @@ func (s *Storage) RemoveUserSessionByID(userID, sessionID int64) error {
return nil return nil
} }
// CleanOldUserSessions removes user sessions older than specified days. // CleanOldUserSessions removes user sessions older than the specified interval (24h minimum).
func (s *Storage) CleanOldUserSessions(days int) int64 { func (s *Storage) CleanOldUserSessions(interval time.Duration) int64 {
query := ` query := `
DELETE FROM DELETE FROM
user_sessions user_sessions
WHERE WHERE
created_at < now() - $1::interval created_at < now() - $1::interval
` `
days := max(int(interval/(24*time.Hour)), 1)
result, err := s.db.Exec(query, fmt.Sprintf("%d days", days)) result, err := s.db.Exec(query, fmt.Sprintf("%d days", days))
if err != nil { if err != nil {
return 0 return 0