1
0
Fork 0
mirror of https://github.com/miniflux/v2.git synced 2025-08-26 18:21:01 +00:00

Implement structured logging using log/slog package

This commit is contained in:
Frédéric Guillot 2023-09-24 16:32:09 -07:00
parent 54cb8fa028
commit c0e954f19d
77 changed files with 1868 additions and 892 deletions

View file

@ -16,8 +16,7 @@ func askCredentials() (string, string) {
fd := int(os.Stdin.Fd())
if !term.IsTerminal(fd) {
fmt.Fprintf(os.Stderr, "This is not a terminal, exiting.\n")
os.Exit(1)
printErrorAndExit(fmt.Errorf("this is not a terminal, exiting"))
}
fmt.Print("Enter Username: ")

View file

@ -4,10 +4,10 @@
package cli // import "miniflux.app/v2/internal/cli"
import (
"log/slog"
"time"
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/logger"
"miniflux.app/v2/internal/metric"
"miniflux.app/v2/internal/model"
"miniflux.app/v2/internal/storage"
@ -16,13 +16,18 @@ import (
func runCleanupTasks(store *storage.Storage) {
nbSessions := store.CleanOldSessions(config.Opts.CleanupRemoveSessionsDays())
nbUserSessions := store.CleanOldUserSessions(config.Opts.CleanupRemoveSessionsDays())
logger.Info("[Sessions] Removed %d application sessions and %d user sessions", nbSessions, nbUserSessions)
slog.Info("Sessions cleanup completed",
slog.Int64("application_sessions_removed", nbSessions),
slog.Int64("user_sessions_removed", nbUserSessions),
)
startTime := time.Now()
if rowsAffected, err := store.ArchiveEntries(model.EntryStatusRead, config.Opts.CleanupArchiveReadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil {
logger.Error("[ArchiveReadEntries] %v", err)
slog.Error("Unable to archive read entries", slog.Any("error", err))
} else {
logger.Info("[ArchiveReadEntries] %d entries changed", rowsAffected)
slog.Info("Archiving read entries completed",
slog.Int64("read_entries_archived", rowsAffected),
)
if config.Opts.HasMetricsCollector() {
metric.ArchiveEntriesDuration.WithLabelValues(model.EntryStatusRead).Observe(time.Since(startTime).Seconds())
@ -31,9 +36,11 @@ func runCleanupTasks(store *storage.Storage) {
startTime = time.Now()
if rowsAffected, err := store.ArchiveEntries(model.EntryStatusUnread, config.Opts.CleanupArchiveUnreadDays(), config.Opts.CleanupArchiveBatchSize()); err != nil {
logger.Error("[ArchiveUnreadEntries] %v", err)
slog.Error("Unable to archive unread entries", slog.Any("error", err))
} else {
logger.Info("[ArchiveUnreadEntries] %d entries changed", rowsAffected)
slog.Info("Archiving unread entries completed",
slog.Int64("unread_entries_archived", rowsAffected),
)
if config.Opts.HasMetricsCollector() {
metric.ArchiveEntriesDuration.WithLabelValues(model.EntryStatusUnread).Observe(time.Since(startTime).Seconds())

View file

@ -6,11 +6,13 @@ package cli // import "miniflux.app/v2/internal/cli"
import (
"flag"
"fmt"
"io"
"log/slog"
"os"
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/database"
"miniflux.app/v2/internal/locale"
"miniflux.app/v2/internal/logger"
"miniflux.app/v2/internal/storage"
"miniflux.app/v2/internal/ui/static"
"miniflux.app/v2/internal/version"
@ -74,13 +76,13 @@ func Parse() {
if flagConfigFile != "" {
config.Opts, err = cfg.ParseFile(flagConfigFile)
if err != nil {
logger.Fatal("%v", err)
printErrorAndExit(err)
}
}
config.Opts, err = cfg.ParseEnvironmentVariables()
if err != nil {
logger.Fatal("%v", err)
printErrorAndExit(err)
}
if flagConfigDump {
@ -88,12 +90,27 @@ func Parse() {
return
}
if config.Opts.LogDateTime() {
logger.EnableDateTime()
if flagDebugMode {
config.Opts.SetLogLevel("debug")
}
if flagDebugMode || config.Opts.HasDebugMode() {
logger.EnableDebug()
logFile := config.Opts.LogFile()
var logFileHandler io.Writer
switch logFile {
case "stdout":
logFileHandler = os.Stdout
case "stderr":
logFileHandler = os.Stderr
default:
logFileHandler, err = os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
if err != nil {
printErrorAndExit(fmt.Errorf("unable to open log file: %v", err))
}
defer logFileHandler.(*os.File).Close()
}
if err := InitializeDefaultLogger(config.Opts.LogLevel(), logFileHandler, config.Opts.LogFormat(), config.Opts.LogDateTime()); err != nil {
printErrorAndExit(err)
}
if flagHealthCheck != "" {
@ -112,25 +129,23 @@ func Parse() {
}
if config.Opts.IsDefaultDatabaseURL() {
logger.Info("The default value for DATABASE_URL is used")
slog.Info("The default value for DATABASE_URL is used")
}
logger.Debug("Loading translations...")
if err := locale.LoadCatalogMessages(); err != nil {
logger.Fatal("Unable to load translations: %v", err)
printErrorAndExit(fmt.Errorf("unable to load translations: %v", err))
}
logger.Debug("Loading static assets...")
if err := static.CalculateBinaryFileChecksums(); err != nil {
logger.Fatal("Unable to calculate binary files checksum: %v", err)
printErrorAndExit(fmt.Errorf("unable to calculate binary file checksums: %v", err))
}
if err := static.GenerateStylesheetsBundles(); err != nil {
logger.Fatal("Unable to generate Stylesheet bundles: %v", err)
printErrorAndExit(fmt.Errorf("unable to generate stylesheets bundles: %v", err))
}
if err := static.GenerateJavascriptBundles(); err != nil {
logger.Fatal("Unable to generate Javascript bundles: %v", err)
printErrorAndExit(fmt.Errorf("unable to generate javascript bundles: %v", err))
}
db, err := database.NewConnectionPool(
@ -140,19 +155,19 @@ func Parse() {
config.Opts.DatabaseConnectionLifetime(),
)
if err != nil {
logger.Fatal("Unable to initialize database connection pool: %v", err)
printErrorAndExit(fmt.Errorf("unable to connect to database: %v", err))
}
defer db.Close()
store := storage.NewStorage(db)
if err := store.Ping(); err != nil {
logger.Fatal("Unable to connect to the database: %v", err)
printErrorAndExit(err)
}
if flagMigrate {
if err := database.Migrate(db); err != nil {
logger.Fatal(`%v`, err)
printErrorAndExit(err)
}
return
}
@ -180,12 +195,12 @@ func Parse() {
// Run migrations and start the daemon.
if config.Opts.RunMigrations() {
if err := database.Migrate(db); err != nil {
logger.Fatal(`%v`, err)
printErrorAndExit(err)
}
}
if err := database.IsSchemaUpToDate(db); err != nil {
logger.Fatal(`You must run the SQL migrations, %v`, err)
printErrorAndExit(err)
}
// Create admin user and start the daemon.
@ -205,3 +220,8 @@ func Parse() {
startDaemon(store)
}
// printErrorAndExit writes the given error to stderr and terminates the
// process with a non-zero exit code. It is the common fatal-error path
// for the CLI now that logger.Fatal has been removed.
func printErrorAndExit(err error) {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}

View file

@ -4,11 +4,9 @@
package cli // import "miniflux.app/v2/internal/cli"
import (
"fmt"
"os"
"log/slog"
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/logger"
"miniflux.app/v2/internal/model"
"miniflux.app/v2/internal/storage"
"miniflux.app/v2/internal/validator"
@ -26,17 +24,17 @@ func createAdmin(store *storage.Storage) {
}
if store.UserExists(userCreationRequest.Username) {
logger.Info(`User %q already exists, skipping creation`, userCreationRequest.Username)
slog.Info("Skipping admin user creation because it already exists",
slog.String("username", userCreationRequest.Username),
)
return
}
if validationErr := validator.ValidateUserCreationWithPassword(store, userCreationRequest); validationErr != nil {
fmt.Fprintf(os.Stderr, "%s\n", validationErr)
os.Exit(1)
printErrorAndExit(validationErr.Error())
}
if _, err := store.CreateUser(userCreationRequest); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
printErrorAndExit(err)
}
}

View file

@ -5,6 +5,7 @@ package cli // import "miniflux.app/v2/internal/cli"
import (
"context"
"log/slog"
"net/http"
"os"
"os/signal"
@ -13,7 +14,6 @@ import (
"miniflux.app/v2/internal/config"
httpd "miniflux.app/v2/internal/http/server"
"miniflux.app/v2/internal/logger"
"miniflux.app/v2/internal/metric"
"miniflux.app/v2/internal/storage"
"miniflux.app/v2/internal/systemd"
@ -21,7 +21,7 @@ import (
)
func startDaemon(store *storage.Storage) {
logger.Info("Starting daemon...")
slog.Debug("Starting daemon...")
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt)
@ -44,26 +44,25 @@ func startDaemon(store *storage.Storage) {
}
if systemd.HasNotifySocket() {
logger.Info("Sending readiness notification to Systemd")
slog.Debug("Sending readiness notification to Systemd")
if err := systemd.SdNotify(systemd.SdNotifyReady); err != nil {
logger.Error("Unable to send readiness notification to systemd: %v", err)
slog.Error("Unable to send readiness notification to systemd", slog.Any("error", err))
}
if config.Opts.HasWatchdog() && systemd.HasSystemdWatchdog() {
logger.Info("Activating Systemd watchdog")
slog.Debug("Activating Systemd watchdog")
go func() {
interval, err := systemd.WatchdogInterval()
if err != nil {
logger.Error("Unable to parse watchdog interval from systemd: %v", err)
slog.Error("Unable to get watchdog interval from systemd", slog.Any("error", err))
return
}
for {
err := store.Ping()
if err != nil {
logger.Error(`Systemd Watchdog: %v`, err)
if err := store.Ping(); err != nil {
slog.Error("Unable to ping database", slog.Any("error", err))
} else {
systemd.SdNotify(systemd.SdNotifyWatchdog)
}
@ -75,7 +74,7 @@ func startDaemon(store *storage.Storage) {
}
<-stop
logger.Info("Shutting down the process...")
slog.Debug("Shutting down the process")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@ -83,5 +82,5 @@ func startDaemon(store *storage.Storage) {
httpServer.Shutdown(ctx)
}
logger.Info("Process gracefully stopped")
slog.Debug("Process gracefully stopped")
}

View file

@ -5,7 +5,6 @@ package cli // import "miniflux.app/v2/internal/cli"
import (
"fmt"
"os"
"miniflux.app/v2/internal/storage"
)
@ -13,7 +12,6 @@ import (
func flushSessions(store *storage.Storage) {
fmt.Println("Flushing all sessions (disconnect users)")
if err := store.FlushAllSessions(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
printErrorAndExit(err)
}
}

View file

@ -4,11 +4,12 @@
package cli // import "miniflux.app/v2/internal/cli"
import (
"fmt"
"log/slog"
"net/http"
"time"
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/logger"
)
func doHealthCheck(healthCheckEndpoint string) {
@ -16,18 +17,18 @@ func doHealthCheck(healthCheckEndpoint string) {
healthCheckEndpoint = "http://" + config.Opts.ListenAddr() + config.Opts.BasePath() + "/healthcheck"
}
logger.Debug(`Executing health check on %s`, healthCheckEndpoint)
slog.Debug("Executing health check request", slog.String("endpoint", healthCheckEndpoint))
client := &http.Client{Timeout: 3 * time.Second}
resp, err := client.Get(healthCheckEndpoint)
if err != nil {
logger.Fatal(`Health check failure: %v`, err)
printErrorAndExit(fmt.Errorf(`health check failure: %v`, err))
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
logger.Fatal(`Health check failed with status code %d`, resp.StatusCode)
printErrorAndExit(fmt.Errorf(`health check failed with status code %d`, resp.StatusCode))
}
logger.Debug(`Health check is OK`)
slog.Debug(`Health check is passing`)
}

46
internal/cli/logger.go Normal file
View file

@ -0,0 +1,46 @@
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package cli // import "miniflux.app/v2/internal/cli"
import (
"io"
"log/slog"
)
func InitializeDefaultLogger(logLevel string, logFile io.Writer, logFormat string, logTime bool) error {
var programLogLevel = new(slog.LevelVar)
switch logLevel {
case "debug":
programLogLevel.Set(slog.LevelDebug)
case "info":
programLogLevel.Set(slog.LevelInfo)
case "warning":
programLogLevel.Set(slog.LevelWarn)
case "error":
programLogLevel.Set(slog.LevelError)
}
logHandlerOptions := &slog.HandlerOptions{Level: programLogLevel}
if !logTime {
logHandlerOptions.ReplaceAttr = func(groups []string, a slog.Attr) slog.Attr {
if a.Key == slog.TimeKey {
return slog.Attr{}
}
return a
}
}
var logger *slog.Logger
switch logFormat {
case "json":
logger = slog.New(slog.NewJSONHandler(logFile, logHandlerOptions))
default:
logger = slog.New(slog.NewTextHandler(logFile, logHandlerOptions))
}
slog.SetDefault(logger)
return nil
}

View file

@ -4,11 +4,11 @@
package cli // import "miniflux.app/v2/internal/cli"
import (
"log/slog"
"sync"
"time"
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/logger"
"miniflux.app/v2/internal/model"
feedHandler "miniflux.app/v2/internal/reader/handler"
"miniflux.app/v2/internal/storage"
@ -20,22 +20,39 @@ func refreshFeeds(store *storage.Storage) {
startTime := time.Now()
jobs, err := store.NewBatch(config.Opts.BatchSize())
if err != nil {
logger.Error("[Cronjob] %v", err)
slog.Error("Unable to fetch jobs from database", slog.Any("error", err))
}
nbJobs := len(jobs)
logger.Info("[Cronjob]] Created %d jobs from a batch size of %d", nbJobs, config.Opts.BatchSize())
slog.Info("Created a batch of feeds",
slog.Int("nb_jobs", nbJobs),
slog.Int("batch_size", config.Opts.BatchSize()),
)
var jobQueue = make(chan model.Job, nbJobs)
logger.Info("[Cronjob] Starting a pool of %d workers", config.Opts.WorkerPoolSize())
slog.Info("Starting a pool of workers",
slog.Int("nb_workers", config.Opts.WorkerPoolSize()),
)
for i := 0; i < config.Opts.WorkerPoolSize(); i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
for job := range jobQueue {
logger.Info("[Cronjob] Refreshing feed #%d for user #%d in worker #%d", job.FeedID, job.UserID, workerID)
slog.Info("Refreshing feed",
slog.Int64("feed_id", job.FeedID),
slog.Int64("user_id", job.UserID),
slog.Int("worker_id", workerID),
)
if err := feedHandler.RefreshFeed(store, job.UserID, job.FeedID, false); err != nil {
logger.Error("[Cronjob] Refreshing the feed #%d returned this error: %v", job.FeedID, err)
slog.Error("Unable to refresh feed",
slog.Int64("feed_id", job.FeedID),
slog.Int64("user_id", job.UserID),
slog.Any("error", err),
)
}
}
}(i)
@ -47,5 +64,9 @@ func refreshFeeds(store *storage.Storage) {
close(jobQueue)
wg.Wait()
logger.Info("[Cronjob] Refreshed %d feed(s) in %s", nbJobs, time.Since(startTime))
slog.Info("Refreshed a batch of feeds",
slog.Int("nb_feeds", nbJobs),
slog.String("duration", time.Since(startTime).String()),
)
}

View file

@ -5,7 +5,6 @@ package cli // import "miniflux.app/v2/internal/cli"
import (
"fmt"
"os"
"miniflux.app/v2/internal/model"
"miniflux.app/v2/internal/storage"
@ -16,27 +15,23 @@ func resetPassword(store *storage.Storage) {
username, password := askCredentials()
user, err := store.UserByUsername(username)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
printErrorAndExit(err)
}
if user == nil {
fmt.Fprintf(os.Stderr, "User not found!\n")
os.Exit(1)
printErrorAndExit(fmt.Errorf("user not found"))
}
userModificationRequest := &model.UserModificationRequest{
Password: &password,
}
if validationErr := validator.ValidateUserModification(store, user.ID, userModificationRequest); validationErr != nil {
fmt.Fprintf(os.Stderr, "%s\n", validationErr)
os.Exit(1)
printErrorAndExit(validationErr.Error())
}
user.Password = password
if err := store.UpdateUser(user); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
printErrorAndExit(err)
}
fmt.Println("Password changed!")

View file

@ -4,16 +4,16 @@
package cli // import "miniflux.app/v2/internal/cli"
import (
"log/slog"
"time"
"miniflux.app/v2/internal/config"
"miniflux.app/v2/internal/logger"
"miniflux.app/v2/internal/storage"
"miniflux.app/v2/internal/worker"
)
func runScheduler(store *storage.Storage, pool *worker.Pool) {
logger.Info(`Starting background scheduler...`)
slog.Debug(`Starting background scheduler...`)
go feedScheduler(
store,
@ -31,10 +31,12 @@ func runScheduler(store *storage.Storage, pool *worker.Pool) {
func feedScheduler(store *storage.Storage, pool *worker.Pool, frequency, batchSize int) {
for range time.Tick(time.Duration(frequency) * time.Minute) {
jobs, err := store.NewBatch(batchSize)
logger.Info("[Scheduler:Feed] Pushing %d jobs to the queue", len(jobs))
if err != nil {
logger.Error("[Scheduler:Feed] %v", err)
slog.Error("Unable to fetch jobs from database", slog.Any("error", err))
} else {
slog.Info("Created a batch of feeds",
slog.Int("nb_jobs", len(jobs)),
)
pool.Push(jobs)
}
}