mirror of
https://github.com/miniflux/v2.git
synced 2025-09-15 18:57:04 +00:00
Move internal packages to an internal folder
For reference: https://go.dev/doc/go1.4#internalpackages
This commit is contained in:
parent
c234903255
commit
168a870c02
433 changed files with 1121 additions and 1123 deletions
103
internal/storage/api_key.go
Normal file
103
internal/storage/api_key.go
Normal file
|
@ -0,0 +1,103 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// APIKeyExists checks if an API Key with the same description exists.
|
||||
func (s *Storage) APIKeyExists(userID int64, description string) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM api_keys WHERE user_id=$1 AND lower(description)=lower($2) LIMIT 1`
|
||||
s.db.QueryRow(query, userID, description).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// SetAPIKeyUsedTimestamp updates the last used date of an API Key.
|
||||
func (s *Storage) SetAPIKeyUsedTimestamp(userID int64, token string) error {
|
||||
query := `UPDATE api_keys SET last_used_at=now() WHERE user_id=$1 and token=$2`
|
||||
_, err := s.db.Exec(query, userID, token)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update last used date for API key: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// APIKeys returns all API Keys that belongs to the given user.
|
||||
func (s *Storage) APIKeys(userID int64) (model.APIKeys, error) {
|
||||
query := `
|
||||
SELECT
|
||||
id, user_id, token, description, last_used_at, created_at
|
||||
FROM
|
||||
api_keys
|
||||
WHERE
|
||||
user_id=$1
|
||||
ORDER BY description ASC
|
||||
`
|
||||
rows, err := s.db.Query(query, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch API Keys: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
apiKeys := make(model.APIKeys, 0)
|
||||
for rows.Next() {
|
||||
var apiKey model.APIKey
|
||||
if err := rows.Scan(
|
||||
&apiKey.ID,
|
||||
&apiKey.UserID,
|
||||
&apiKey.Token,
|
||||
&apiKey.Description,
|
||||
&apiKey.LastUsedAt,
|
||||
&apiKey.CreatedAt,
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch API Key row: %v`, err)
|
||||
}
|
||||
|
||||
apiKeys = append(apiKeys, &apiKey)
|
||||
}
|
||||
|
||||
return apiKeys, nil
|
||||
}
|
||||
|
||||
// CreateAPIKey inserts a new API key.
|
||||
func (s *Storage) CreateAPIKey(apiKey *model.APIKey) error {
|
||||
query := `
|
||||
INSERT INTO api_keys
|
||||
(user_id, token, description)
|
||||
VALUES
|
||||
($1, $2, $3)
|
||||
RETURNING
|
||||
id, created_at
|
||||
`
|
||||
err := s.db.QueryRow(
|
||||
query,
|
||||
apiKey.UserID,
|
||||
apiKey.Token,
|
||||
apiKey.Description,
|
||||
).Scan(
|
||||
&apiKey.ID,
|
||||
&apiKey.CreatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to create category: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveAPIKey deletes an API Key.
|
||||
func (s *Storage) RemoveAPIKey(userID, keyID int64) error {
|
||||
query := `DELETE FROM api_keys WHERE id = $1 AND user_id = $2`
|
||||
_, err := s.db.Exec(query, keyID, userID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to remove this API Key: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
282
internal/storage/category.go
Normal file
282
internal/storage/category.go
Normal file
|
@ -0,0 +1,282 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/lib/pq"
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// AnotherCategoryExists checks if another category exists with the same title.
|
||||
func (s *Storage) AnotherCategoryExists(userID, categoryID int64, title string) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM categories WHERE user_id=$1 AND id != $2 AND lower(title)=lower($3) LIMIT 1`
|
||||
s.db.QueryRow(query, userID, categoryID, title).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// CategoryTitleExists checks if the given category exists into the database.
|
||||
func (s *Storage) CategoryTitleExists(userID int64, title string) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM categories WHERE user_id=$1 AND lower(title)=lower($2) LIMIT 1`
|
||||
s.db.QueryRow(query, userID, title).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// CategoryIDExists checks if the given category exists into the database.
|
||||
func (s *Storage) CategoryIDExists(userID, categoryID int64) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM categories WHERE user_id=$1 AND id=$2`
|
||||
s.db.QueryRow(query, userID, categoryID).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Category returns a category from the database.
|
||||
func (s *Storage) Category(userID, categoryID int64) (*model.Category, error) {
|
||||
var category model.Category
|
||||
|
||||
query := `SELECT id, user_id, title, hide_globally FROM categories WHERE user_id=$1 AND id=$2`
|
||||
err := s.db.QueryRow(query, userID, categoryID).Scan(&category.ID, &category.UserID, &category.Title, &category.HideGlobally)
|
||||
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf(`store: unable to fetch category: %v`, err)
|
||||
default:
|
||||
return &category, nil
|
||||
}
|
||||
}
|
||||
|
||||
// FirstCategory returns the first category for the given user.
|
||||
func (s *Storage) FirstCategory(userID int64) (*model.Category, error) {
|
||||
query := `SELECT id, user_id, title, hide_globally FROM categories WHERE user_id=$1 ORDER BY title ASC LIMIT 1`
|
||||
|
||||
var category model.Category
|
||||
err := s.db.QueryRow(query, userID).Scan(&category.ID, &category.UserID, &category.Title, &category.HideGlobally)
|
||||
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf(`store: unable to fetch category: %v`, err)
|
||||
default:
|
||||
return &category, nil
|
||||
}
|
||||
}
|
||||
|
||||
// CategoryByTitle finds a category by the title.
|
||||
func (s *Storage) CategoryByTitle(userID int64, title string) (*model.Category, error) {
|
||||
var category model.Category
|
||||
|
||||
query := `SELECT id, user_id, title, hide_globally FROM categories WHERE user_id=$1 AND title=$2`
|
||||
err := s.db.QueryRow(query, userID, title).Scan(&category.ID, &category.UserID, &category.Title, &category.HideGlobally)
|
||||
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf(`store: unable to fetch category: %v`, err)
|
||||
default:
|
||||
return &category, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Categories returns all categories that belongs to the given user.
|
||||
func (s *Storage) Categories(userID int64) (model.Categories, error) {
|
||||
query := `SELECT id, user_id, title, hide_globally FROM categories WHERE user_id=$1 ORDER BY title ASC`
|
||||
rows, err := s.db.Query(query, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch categories: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
categories := make(model.Categories, 0)
|
||||
for rows.Next() {
|
||||
var category model.Category
|
||||
if err := rows.Scan(&category.ID, &category.UserID, &category.Title, &category.HideGlobally); err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch category row: %v`, err)
|
||||
}
|
||||
|
||||
categories = append(categories, &category)
|
||||
}
|
||||
|
||||
return categories, nil
|
||||
}
|
||||
|
||||
// CategoriesWithFeedCount returns all categories with the number of feeds.
|
||||
func (s *Storage) CategoriesWithFeedCount(userID int64) (model.Categories, error) {
|
||||
user, err := s.UserByID(userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
query := `
|
||||
SELECT
|
||||
c.id,
|
||||
c.user_id,
|
||||
c.title,
|
||||
c.hide_globally,
|
||||
(SELECT count(*) FROM feeds WHERE feeds.category_id=c.id) AS count,
|
||||
(SELECT count(*)
|
||||
FROM feeds
|
||||
JOIN entries ON (feeds.id = entries.feed_id)
|
||||
WHERE feeds.category_id = c.id AND entries.status = 'unread') AS count_unread
|
||||
FROM categories c
|
||||
WHERE
|
||||
user_id=$1
|
||||
`
|
||||
|
||||
if user.CategoriesSortingOrder == "alphabetical" {
|
||||
query = query + `
|
||||
ORDER BY
|
||||
c.title ASC
|
||||
`
|
||||
} else {
|
||||
query = query + `
|
||||
ORDER BY
|
||||
count_unread DESC,
|
||||
c.title ASC
|
||||
`
|
||||
}
|
||||
|
||||
rows, err := s.db.Query(query, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch categories: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
categories := make(model.Categories, 0)
|
||||
for rows.Next() {
|
||||
var category model.Category
|
||||
if err := rows.Scan(&category.ID, &category.UserID, &category.Title, &category.HideGlobally, &category.FeedCount, &category.TotalUnread); err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch category row: %v`, err)
|
||||
}
|
||||
|
||||
categories = append(categories, &category)
|
||||
}
|
||||
|
||||
return categories, nil
|
||||
}
|
||||
|
||||
// CreateCategory creates a new category.
|
||||
func (s *Storage) CreateCategory(userID int64, request *model.CategoryRequest) (*model.Category, error) {
|
||||
var category model.Category
|
||||
|
||||
query := `
|
||||
INSERT INTO categories
|
||||
(user_id, title)
|
||||
VALUES
|
||||
($1, $2)
|
||||
RETURNING
|
||||
id,
|
||||
user_id,
|
||||
title
|
||||
`
|
||||
err := s.db.QueryRow(
|
||||
query,
|
||||
userID,
|
||||
request.Title,
|
||||
).Scan(
|
||||
&category.ID,
|
||||
&category.UserID,
|
||||
&category.Title,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to create category %q: %v`, request.Title, err)
|
||||
}
|
||||
|
||||
return &category, nil
|
||||
}
|
||||
|
||||
// UpdateCategory updates an existing category.
|
||||
func (s *Storage) UpdateCategory(category *model.Category) error {
|
||||
query := `UPDATE categories SET title=$1, hide_globally = $2 WHERE id=$3 AND user_id=$4`
|
||||
_, err := s.db.Exec(
|
||||
query,
|
||||
category.Title,
|
||||
category.HideGlobally,
|
||||
category.ID,
|
||||
category.UserID,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update category: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveCategory deletes a category.
|
||||
func (s *Storage) RemoveCategory(userID, categoryID int64) error {
|
||||
query := `DELETE FROM categories WHERE id = $1 AND user_id = $2`
|
||||
result, err := s.db.Exec(query, categoryID, userID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to remove this category: %v`, err)
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to remove this category: %v`, err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return errors.New(`store: no category has been removed`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// delete the given categories, replacing those categories with the user's first
|
||||
// category on affected feeds
|
||||
func (s *Storage) RemoveAndReplaceCategoriesByName(userid int64, titles []string) error {
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
return errors.New("unable to begin transaction")
|
||||
}
|
||||
|
||||
titleParam := pq.Array(titles)
|
||||
var count int
|
||||
query := "SELECT count(*) FROM categories WHERE user_id = $1 and title != ANY($2)"
|
||||
err = tx.QueryRow(query, userid, titleParam).Scan(&count)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return errors.New("unable to retrieve category count")
|
||||
}
|
||||
if count < 1 {
|
||||
tx.Rollback()
|
||||
return errors.New("at least 1 category must remain after deletion")
|
||||
}
|
||||
|
||||
query = `
|
||||
WITH d_cats AS (SELECT id FROM categories WHERE user_id = $1 AND title = ANY($2))
|
||||
UPDATE feeds
|
||||
SET category_id =
|
||||
(SELECT id
|
||||
FROM categories
|
||||
WHERE user_id = $1 AND id NOT IN (SELECT id FROM d_cats)
|
||||
ORDER BY title ASC
|
||||
LIMIT 1)
|
||||
WHERE user_id = $1 AND category_id IN (SELECT id FROM d_cats)
|
||||
`
|
||||
_, err = tx.Exec(query, userid, titleParam)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("unable to replace categories: %v", err)
|
||||
}
|
||||
|
||||
query = "DELETE FROM categories WHERE user_id = $1 AND title = ANY($2)"
|
||||
_, err = tx.Exec(query, userid, titleParam)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("unable to delete categories: %v", err)
|
||||
}
|
||||
tx.Commit()
|
||||
return nil
|
||||
}
|
62
internal/storage/certificate_cache.go
Normal file
62
internal/storage/certificate_cache.go
Normal file
|
@ -0,0 +1,62 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
)
|
||||
|
||||
// Making sure that we're adhering to the autocert.Cache interface.
var _ autocert.Cache = (*CertificateCache)(nil)

// CertificateCache provides a SQL backend to the autocert cache.
// Certificate data is persisted in the acme_cache table.
type CertificateCache struct {
	// storage gives access to the underlying database handle.
	storage *Storage
}
|
||||
|
||||
// NewCertificateCache creates an cache instance that can be used with autocert.Cache.
|
||||
// It returns any errors that could happen while connecting to SQL.
|
||||
func NewCertificateCache(storage *Storage) *CertificateCache {
|
||||
return &CertificateCache{
|
||||
storage: storage,
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a certificate data for the specified key.
|
||||
// If there's no such key, Get returns ErrCacheMiss.
|
||||
func (c *CertificateCache) Get(ctx context.Context, key string) ([]byte, error) {
|
||||
query := `SELECT data::bytea FROM acme_cache WHERE key = $1`
|
||||
var data []byte
|
||||
err := c.storage.db.QueryRowContext(ctx, query, key).Scan(&data)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, autocert.ErrCacheMiss
|
||||
}
|
||||
|
||||
return data, err
|
||||
}
|
||||
|
||||
// Put stores the data in the cache under the specified key.
|
||||
func (c *CertificateCache) Put(ctx context.Context, key string, data []byte) error {
|
||||
query := `INSERT INTO acme_cache (key, data, updated_at) VALUES($1, $2::bytea, now())
|
||||
ON CONFLICT (key) DO UPDATE SET data = $2::bytea, updated_at = now()`
|
||||
_, err := c.storage.db.ExecContext(ctx, query, key, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes a certificate data from the cache under the specified key.
|
||||
// If there's no such key in the cache, Delete returns nil.
|
||||
func (c *CertificateCache) Delete(ctx context.Context, key string) error {
|
||||
query := `DELETE FROM acme_cache WHERE key = $1`
|
||||
_, err := c.storage.db.ExecContext(ctx, query, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
192
internal/storage/enclosure.go
Normal file
192
internal/storage/enclosure.go
Normal file
|
@ -0,0 +1,192 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// GetEnclosures returns all attachments for the given entry.
|
||||
func (s *Storage) GetEnclosures(entryID int64) (model.EnclosureList, error) {
|
||||
query := `
|
||||
SELECT
|
||||
id,
|
||||
user_id,
|
||||
entry_id,
|
||||
url,
|
||||
size,
|
||||
mime_type,
|
||||
media_progression
|
||||
FROM
|
||||
enclosures
|
||||
WHERE
|
||||
entry_id = $1
|
||||
ORDER BY id ASC
|
||||
`
|
||||
|
||||
rows, err := s.db.Query(query, entryID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch enclosures: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
enclosures := make(model.EnclosureList, 0)
|
||||
for rows.Next() {
|
||||
var enclosure model.Enclosure
|
||||
err := rows.Scan(
|
||||
&enclosure.ID,
|
||||
&enclosure.UserID,
|
||||
&enclosure.EntryID,
|
||||
&enclosure.URL,
|
||||
&enclosure.Size,
|
||||
&enclosure.MimeType,
|
||||
&enclosure.MediaProgression,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch enclosure row: %v`, err)
|
||||
}
|
||||
|
||||
enclosures = append(enclosures, &enclosure)
|
||||
}
|
||||
|
||||
return enclosures, nil
|
||||
}
|
||||
|
||||
func (s *Storage) GetEnclosure(enclosureID int64) (*model.Enclosure, error) {
|
||||
query := `
|
||||
SELECT
|
||||
id,
|
||||
user_id,
|
||||
entry_id,
|
||||
url,
|
||||
size,
|
||||
mime_type,
|
||||
media_progression
|
||||
FROM
|
||||
enclosures
|
||||
WHERE
|
||||
id = $1
|
||||
ORDER BY id ASC
|
||||
`
|
||||
|
||||
row := s.db.QueryRow(query, enclosureID)
|
||||
|
||||
var enclosure model.Enclosure
|
||||
err := row.Scan(
|
||||
&enclosure.ID,
|
||||
&enclosure.UserID,
|
||||
&enclosure.EntryID,
|
||||
&enclosure.URL,
|
||||
&enclosure.Size,
|
||||
&enclosure.MimeType,
|
||||
&enclosure.MediaProgression,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch enclosure row: %v`, err)
|
||||
}
|
||||
|
||||
return &enclosure, nil
|
||||
}
|
||||
|
||||
func (s *Storage) createEnclosure(tx *sql.Tx, enclosure *model.Enclosure) error {
|
||||
enclosureURL := strings.TrimSpace(enclosure.URL)
|
||||
if enclosureURL == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
query := `
|
||||
INSERT INTO enclosures
|
||||
(url, size, mime_type, entry_id, user_id, media_progression)
|
||||
VALUES
|
||||
($1, $2, $3, $4, $5, $6)
|
||||
ON CONFLICT (user_id, entry_id, md5(url)) DO NOTHING
|
||||
`
|
||||
_, err := tx.Exec(
|
||||
query,
|
||||
enclosureURL,
|
||||
enclosure.Size,
|
||||
enclosure.MimeType,
|
||||
enclosure.EntryID,
|
||||
enclosure.UserID,
|
||||
enclosure.MediaProgression,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to create enclosure: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateEnclosures synchronizes the stored enclosures of an entry with the
// given list: every enclosure in the list is upserted, then any previously
// stored enclosure whose URL is no longer present is deleted. No-op when
// the list is empty.
func (s *Storage) updateEnclosures(tx *sql.Tx, userID, entryID int64, enclosures model.EnclosureList) error {
	if len(enclosures) == 0 {
		return nil
	}

	// $1 and $2 are reserved for user_id/entry_id, so the first URL
	// placeholder is $3 (len(sqlValues)+1 on the first iteration).
	sqlValues := []any{userID, entryID}
	sqlPlaceholders := []string{}

	for _, enclosure := range enclosures {
		sqlPlaceholders = append(sqlPlaceholders, fmt.Sprintf(`$%d`, len(sqlValues)+1))
		sqlValues = append(sqlValues, strings.TrimSpace(enclosure.URL))

		if err := s.createEnclosure(tx, enclosure); err != nil {
			return err
		}
	}

	// Delete enclosures that disappeared from the feed item.
	query := `
		DELETE FROM enclosures
		WHERE
			user_id=$1 AND
			entry_id=$2 AND
			url NOT IN (%s)
	`

	query = fmt.Sprintf(query, strings.Join(sqlPlaceholders, `,`))

	_, err := tx.Exec(query, sqlValues...)
	if err != nil {
		return fmt.Errorf(`store: unable to delete old enclosures: %v`, err)
	}

	return nil
}
|
||||
|
||||
func (s *Storage) UpdateEnclosure(enclosure *model.Enclosure) error {
|
||||
query := `
|
||||
UPDATE
|
||||
enclosures
|
||||
SET
|
||||
url=$1,
|
||||
size=$2,
|
||||
mime_type=$3,
|
||||
entry_id=$4,
|
||||
user_id=$5,
|
||||
media_progression=$6
|
||||
WHERE
|
||||
id=$7
|
||||
`
|
||||
_, err := s.db.Exec(query,
|
||||
enclosure.URL,
|
||||
enclosure.Size,
|
||||
enclosure.MimeType,
|
||||
enclosure.EntryID,
|
||||
enclosure.UserID,
|
||||
enclosure.MediaProgression,
|
||||
enclosure.ID,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update enclosure #%d : %v`, enclosure.ID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
593
internal/storage/entry.go
Normal file
593
internal/storage/entry.go
Normal file
|
@ -0,0 +1,593 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"miniflux.app/v2/internal/crypto"
|
||||
"miniflux.app/v2/internal/logger"
|
||||
"miniflux.app/v2/internal/model"
|
||||
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
// CountAllEntries returns the number of entries for each status in the database.
|
||||
func (s *Storage) CountAllEntries() map[string]int64 {
|
||||
rows, err := s.db.Query(`SELECT status, count(*) FROM entries GROUP BY status`)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
results := make(map[string]int64)
|
||||
results[model.EntryStatusUnread] = 0
|
||||
results[model.EntryStatusRead] = 0
|
||||
results[model.EntryStatusRemoved] = 0
|
||||
|
||||
for rows.Next() {
|
||||
var status string
|
||||
var count int64
|
||||
|
||||
if err := rows.Scan(&status, &count); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
results[status] = count
|
||||
}
|
||||
|
||||
results["total"] = results[model.EntryStatusUnread] + results[model.EntryStatusRead] + results[model.EntryStatusRemoved]
|
||||
return results
|
||||
}
|
||||
|
||||
// CountUnreadEntries returns the number of unread entries.
|
||||
func (s *Storage) CountUnreadEntries(userID int64) int {
|
||||
builder := s.NewEntryQueryBuilder(userID)
|
||||
builder.WithStatus(model.EntryStatusUnread)
|
||||
builder.WithGloballyVisible()
|
||||
|
||||
n, err := builder.CountEntries()
|
||||
if err != nil {
|
||||
logger.Error(`store: unable to count unread entries for user #%d: %v`, userID, err)
|
||||
return 0
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
// NewEntryQueryBuilder returns a new EntryQueryBuilder scoped to the given user.
func (s *Storage) NewEntryQueryBuilder(userID int64) *EntryQueryBuilder {
	return NewEntryQueryBuilder(s, userID)
}
|
||||
|
||||
// UpdateEntryContent updates entry content and reading time, then rebuilds
// the entry's full-text search vector, all inside a single transaction.
func (s *Storage) UpdateEntryContent(entry *model.Entry) error {
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}

	query := `
		UPDATE
			entries
		SET
			content=$1, reading_time=$2
		WHERE
			id=$3 AND user_id=$4
	`
	_, err = tx.Exec(query, entry.Content, entry.ReadingTime, entry.ID, entry.UserID)
	if err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to update content of entry #%d: %v`, entry.ID, err)
	}

	// Rebuild the tsvector used for full-text search: title weighted 'A',
	// content 'B'; both truncated to 500000 characters.
	query = `
		UPDATE
			entries
		SET
			document_vectors = setweight(to_tsvector(left(coalesce(title, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce(content, ''), 500000)), 'B')
		WHERE
			id=$1 AND user_id=$2
	`
	_, err = tx.Exec(query, entry.ID, entry.UserID)
	if err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to update content of entry #%d: %v`, entry.ID, err)
	}

	return tx.Commit()
}
|
||||
|
||||
// createEntry adds a new entry (and its enclosures) inside the given
// transaction, filling in the generated ID and status on the model.
func (s *Storage) createEntry(tx *sql.Tx, entry *model.Entry) error {
	// Note: $1 (title) and $6 (content) are reused to build the full-text
	// search vector, so parameter positions matter here.
	query := `
		INSERT INTO entries
			(
				title,
				hash,
				url,
				comments_url,
				published_at,
				content,
				author,
				user_id,
				feed_id,
				reading_time,
				changed_at,
				document_vectors,
				tags
			)
		VALUES
			(
				$1,
				$2,
				$3,
				$4,
				$5,
				$6,
				$7,
				$8,
				$9,
				$10,
				now(),
				setweight(to_tsvector(left(coalesce($1, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce($6, ''), 500000)), 'B'),
				$11
			)
		RETURNING
			id, status
	`
	err := tx.QueryRow(
		query,
		entry.Title,
		entry.Hash,
		entry.URL,
		entry.CommentsURL,
		entry.Date,
		entry.Content,
		entry.Author,
		entry.UserID,
		entry.FeedID,
		entry.ReadingTime,
		pq.Array(removeDuplicates(entry.Tags)),
	).Scan(&entry.ID, &entry.Status)

	if err != nil {
		return fmt.Errorf(`store: unable to create entry %q (feed #%d): %v`, entry.URL, entry.FeedID, err)
	}

	// Persist enclosures in the same transaction, linking them to the
	// freshly created entry.
	for i := 0; i < len(entry.Enclosures); i++ {
		entry.Enclosures[i].EntryID = entry.ID
		entry.Enclosures[i].UserID = entry.UserID
		err := s.createEnclosure(tx, entry.Enclosures[i])
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// updateEntry updates an entry when a feed is refreshed.
// Note: we do not update the published date because some feeds do not contains any date,
// it default to time.Now() which could change the order of items on the history page.
func (s *Storage) updateEntry(tx *sql.Tx, entry *model.Entry) error {
	// $1 (title) and $4 (content) are reused to rebuild the full-text
	// search vector, so parameter positions matter here.
	query := `
		UPDATE
			entries
		SET
			title=$1,
			url=$2,
			comments_url=$3,
			content=$4,
			author=$5,
			reading_time=$6,
			document_vectors = setweight(to_tsvector(left(coalesce($1, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce($4, ''), 500000)), 'B'),
			tags=$10
		WHERE
			user_id=$7 AND feed_id=$8 AND hash=$9
		RETURNING
			id
	`
	err := tx.QueryRow(
		query,
		entry.Title,
		entry.URL,
		entry.CommentsURL,
		entry.Content,
		entry.Author,
		entry.ReadingTime,
		entry.UserID,
		entry.FeedID,
		entry.Hash,
		pq.Array(removeDuplicates(entry.Tags)),
	).Scan(&entry.ID)

	if err != nil {
		return fmt.Errorf(`store: unable to update entry %q: %v`, entry.URL, err)
	}

	// Re-link enclosures to this entry before synchronizing them.
	for _, enclosure := range entry.Enclosures {
		enclosure.UserID = entry.UserID
		enclosure.EntryID = entry.ID
	}

	return s.updateEnclosures(tx, entry.UserID, entry.ID, entry.Enclosures)
}
|
||||
|
||||
// entryExists checks if an entry already exists based on its hash when refreshing a feed.
|
||||
func (s *Storage) entryExists(tx *sql.Tx, entry *model.Entry) (bool, error) {
|
||||
var result bool
|
||||
|
||||
// Note: This query uses entries_feed_id_hash_key index (filtering on user_id is not necessary).
|
||||
err := tx.QueryRow(`SELECT true FROM entries WHERE feed_id=$1 AND hash=$2`, entry.FeedID, entry.Hash).Scan(&result)
|
||||
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
return result, fmt.Errorf(`store: unable to check if entry exists: %v`, err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetReadTime fetches the read time of an entry based on its hash, and the feed id and user id from the feed.
|
||||
// It's intended to be used on entries objects created by parsing a feed as they don't contain much information.
|
||||
// The feed param helps to scope the search to a specific user and feed in order to avoid hash clashes.
|
||||
func (s *Storage) GetReadTime(entry *model.Entry, feed *model.Feed) int {
|
||||
var result int
|
||||
s.db.QueryRow(
|
||||
`SELECT reading_time FROM entries WHERE user_id=$1 AND feed_id=$2 AND hash=$3`,
|
||||
feed.UserID,
|
||||
feed.ID,
|
||||
entry.Hash,
|
||||
).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// cleanupEntries deletes from the database entries marked as "removed" and not visible anymore in the feed.
|
||||
func (s *Storage) cleanupEntries(feedID int64, entryHashes []string) error {
|
||||
query := `
|
||||
DELETE FROM
|
||||
entries
|
||||
WHERE
|
||||
feed_id=$1
|
||||
AND
|
||||
id IN (SELECT id FROM entries WHERE feed_id=$2 AND status=$3 AND NOT (hash=ANY($4)))
|
||||
`
|
||||
if _, err := s.db.Exec(query, feedID, feedID, model.EntryStatusRemoved, pq.Array(entryHashes)); err != nil {
|
||||
return fmt.Errorf(`store: unable to cleanup entries: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshFeedEntries updates feed entries while refreshing a feed.
// Each entry is processed in its own transaction: existing entries are
// updated only when updateExistingEntries is true, new entries are created.
// Removed entries that disappeared from the feed are cleaned up
// asynchronously afterwards.
func (s *Storage) RefreshFeedEntries(userID, feedID int64, entries model.Entries, updateExistingEntries bool) (err error) {
	var entryHashes []string

	for _, entry := range entries {
		entry.UserID = userID
		entry.FeedID = feedID

		// One transaction per entry so a single bad entry does not abort
		// the whole refresh batch.
		tx, err := s.db.Begin()
		if err != nil {
			return fmt.Errorf(`store: unable to start transaction: %v`, err)
		}

		entryExists, err := s.entryExists(tx, entry)
		if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
			}
			return err
		}

		if entryExists {
			if updateExistingEntries {
				err = s.updateEntry(tx, entry)
			}
		} else {
			err = s.createEntry(tx, entry)
		}

		if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
			}
			return err
		}

		if err := tx.Commit(); err != nil {
			return fmt.Errorf(`store: unable to commit transaction: %v`, err)
		}

		// Remember every hash seen in this refresh; cleanupEntries uses the
		// list to delete removed entries no longer present in the feed.
		entryHashes = append(entryHashes, entry.Hash)
	}

	// Cleanup runs in the background; failures are logged, not returned.
	go func() {
		if err := s.cleanupEntries(feedID, entryHashes); err != nil {
			logger.Error(`store: feed #%d: %v`, feedID, err)
		}
	}()

	return nil
}
|
||||
|
||||
// ArchiveEntries changes the status of entries to "removed" after the given
// number of days, skipping starred and shared entries. At most limit entries
// are archived; it returns the number of affected rows. Negative days or a
// non-positive limit is a no-op.
func (s *Storage) ArchiveEntries(status string, days, limit int) (int64, error) {
	if days < 0 || limit <= 0 {
		return 0, nil
	}

	// days and limit are interpolated with %d via Sprintf below; both are
	// ints, so this cannot inject SQL. status goes through a placeholder.
	query := `
		UPDATE
			entries
		SET
			status='removed'
		WHERE
			id=ANY(SELECT id FROM entries WHERE status=$1 AND starred is false AND share_code='' AND created_at < now () - '%d days'::interval ORDER BY created_at ASC LIMIT %d)
	`

	result, err := s.db.Exec(fmt.Sprintf(query, days, limit), status)
	if err != nil {
		return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf(`store: unable to get the number of rows affected: %v`, err)
	}

	return count, nil
}
|
||||
|
||||
// SetEntriesStatus update the status of the given list of entries.
|
||||
func (s *Storage) SetEntriesStatus(userID int64, entryIDs []int64, status string) error {
|
||||
query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND id=ANY($3)`
|
||||
result, err := s.db.Exec(query, status, userID, pq.Array(entryIDs))
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update entries statuses %v: %v`, entryIDs, err)
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update these entries %v: %v`, entryIDs, err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return errors.New(`store: nothing has been updated`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) SetEntriesStatusCount(userID int64, entryIDs []int64, status string) (int, error) {
|
||||
if err := s.SetEntriesStatus(userID, entryIDs, status); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
query := `
|
||||
SELECT count(*)
|
||||
FROM entries e
|
||||
JOIN feeds f ON (f.id = e.feed_id)
|
||||
JOIN categories c ON (c.id = f.category_id)
|
||||
WHERE e.user_id = $1
|
||||
AND e.id = ANY($2)
|
||||
AND NOT f.hide_globally
|
||||
AND NOT c.hide_globally
|
||||
`
|
||||
row := s.db.QueryRow(query, userID, pq.Array(entryIDs))
|
||||
visible := 0
|
||||
if err := row.Scan(&visible); err != nil {
|
||||
return 0, fmt.Errorf(`store: unable to query entries visibility %v: %v`, entryIDs, err)
|
||||
}
|
||||
|
||||
return visible, nil
|
||||
}
|
||||
|
||||
// SetEntriesBookmarked update the bookmarked state for the given list of entries.
|
||||
func (s *Storage) SetEntriesBookmarkedState(userID int64, entryIDs []int64, starred bool) error {
|
||||
query := `UPDATE entries SET starred=$1, changed_at=now() WHERE user_id=$2 AND id=ANY($3)`
|
||||
result, err := s.db.Exec(query, starred, userID, pq.Array(entryIDs))
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update the bookmarked state %v: %v`, entryIDs, err)
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update these entries %v: %v`, entryIDs, err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return errors.New(`store: nothing has been updated`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToggleBookmark toggles entry bookmark value.
|
||||
func (s *Storage) ToggleBookmark(userID int64, entryID int64) error {
|
||||
query := `UPDATE entries SET starred = NOT starred, changed_at=now() WHERE user_id=$1 AND id=$2`
|
||||
result, err := s.db.Exec(query, userID, entryID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return errors.New(`store: nothing has been updated`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FlushHistory set all entries with the status "read" to "removed".
|
||||
func (s *Storage) FlushHistory(userID int64) error {
|
||||
query := `
|
||||
UPDATE
|
||||
entries
|
||||
SET
|
||||
status=$1,
|
||||
changed_at=now()
|
||||
WHERE
|
||||
user_id=$2 AND status=$3 AND starred is false AND share_code=''
|
||||
`
|
||||
_, err := s.db.Exec(query, model.EntryStatusRemoved, userID, model.EntryStatusRead)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to flush history: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkAllAsRead updates all user entries to the read status.
|
||||
func (s *Storage) MarkAllAsRead(userID int64) error {
|
||||
query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND status=$3`
|
||||
result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to mark all entries as read: %v`, err)
|
||||
}
|
||||
|
||||
count, _ := result.RowsAffected()
|
||||
logger.Debug("[Storage:MarkAllAsRead] %d items marked as read", count)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkGloballyVisibleFeedsAsRead updates all user entries to the read status.
|
||||
func (s *Storage) MarkGloballyVisibleFeedsAsRead(userID int64) error {
|
||||
query := `
|
||||
UPDATE
|
||||
entries
|
||||
SET
|
||||
status=$1,
|
||||
changed_at=now()
|
||||
FROM
|
||||
feeds
|
||||
WHERE
|
||||
entries.feed_id = feeds.id
|
||||
AND entries.user_id=$2
|
||||
AND entries.status=$3
|
||||
AND feeds.hide_globally=$4
|
||||
`
|
||||
result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to mark globally visible feeds as read: %v`, err)
|
||||
}
|
||||
|
||||
count, _ := result.RowsAffected()
|
||||
logger.Debug("[Storage:MarkGloballyVisibleFeedsAsRead] %d items marked as read", count)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkFeedAsRead updates all feed entries to the read status.
|
||||
func (s *Storage) MarkFeedAsRead(userID, feedID int64, before time.Time) error {
|
||||
query := `
|
||||
UPDATE
|
||||
entries
|
||||
SET
|
||||
status=$1,
|
||||
changed_at=now()
|
||||
WHERE
|
||||
user_id=$2 AND feed_id=$3 AND status=$4 AND published_at < $5
|
||||
`
|
||||
result, err := s.db.Exec(query, model.EntryStatusRead, userID, feedID, model.EntryStatusUnread, before)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to mark feed entries as read: %v`, err)
|
||||
}
|
||||
|
||||
count, _ := result.RowsAffected()
|
||||
logger.Debug("[Storage:MarkFeedAsRead] %d items marked as read", count)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkCategoryAsRead updates all category entries to the read status.
|
||||
func (s *Storage) MarkCategoryAsRead(userID, categoryID int64, before time.Time) error {
|
||||
query := `
|
||||
UPDATE
|
||||
entries
|
||||
SET
|
||||
status=$1,
|
||||
changed_at=now()
|
||||
WHERE
|
||||
user_id=$2
|
||||
AND
|
||||
status=$3
|
||||
AND
|
||||
published_at < $4
|
||||
AND
|
||||
feed_id IN (SELECT id FROM feeds WHERE user_id=$2 AND category_id=$5)
|
||||
`
|
||||
result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread, before, categoryID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to mark category entries as read: %v`, err)
|
||||
}
|
||||
|
||||
count, _ := result.RowsAffected()
|
||||
logger.Debug("[Storage:MarkCategoryAsRead] %d items marked as read", count)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EntryURLExists returns true if an entry with this URL already exists.
|
||||
func (s *Storage) EntryURLExists(feedID int64, entryURL string) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM entries WHERE feed_id=$1 AND url=$2`
|
||||
s.db.QueryRow(query, feedID, entryURL).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// EntryShareCode returns the share code of the provided entry.
|
||||
// It generates a new one if not already defined.
|
||||
func (s *Storage) EntryShareCode(userID int64, entryID int64) (shareCode string, err error) {
|
||||
query := `SELECT share_code FROM entries WHERE user_id=$1 AND id=$2`
|
||||
err = s.db.QueryRow(query, userID, entryID).Scan(&shareCode)
|
||||
if err != nil {
|
||||
err = fmt.Errorf(`store: unable to get share code for entry #%d: %v`, entryID, err)
|
||||
return
|
||||
}
|
||||
|
||||
if shareCode == "" {
|
||||
shareCode = crypto.GenerateRandomStringHex(20)
|
||||
|
||||
query = `UPDATE entries SET share_code = $1 WHERE user_id=$2 AND id=$3`
|
||||
_, err = s.db.Exec(query, shareCode, userID, entryID)
|
||||
if err != nil {
|
||||
err = fmt.Errorf(`store: unable to set share code for entry #%d: %v`, entryID, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnshareEntry removes the share code for the given entry.
|
||||
func (s *Storage) UnshareEntry(userID int64, entryID int64) (err error) {
|
||||
query := `UPDATE entries SET share_code='' WHERE user_id=$1 AND id=$2`
|
||||
_, err = s.db.Exec(query, userID, entryID)
|
||||
if err != nil {
|
||||
err = fmt.Errorf(`store: unable to remove share code for entry #%d: %v`, entryID, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// removeDuplicates returns a new slice with duplicate values removed,
// preserving the order of first occurrence.
//
// The constraint is generalized from `string | int` to any comparable type;
// existing call sites remain valid. The map and slice are pre-sized to avoid
// repeated growth.
func removeDuplicates[T comparable](sliceList []T) []T {
	seen := make(map[T]struct{}, len(sliceList))
	result := make([]T, 0, len(sliceList))
	for _, item := range sliceList {
		if _, found := seen[item]; !found {
			seen[item] = struct{}{}
			result = append(result, item)
		}
	}
	return result
}
|
174
internal/storage/entry_pagination_builder.go
Normal file
174
internal/storage/entry_pagination_builder.go
Normal file
|
@ -0,0 +1,174 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"miniflux.app/v2/internal/model"
|
||||
"miniflux.app/v2/internal/timer"
|
||||
)
|
||||
|
||||
// EntryPaginationBuilder is a builder for entry prev/next queries.
type EntryPaginationBuilder struct {
	store      *Storage      // database handle
	conditions []string      // SQL predicates, combined with AND
	args       []interface{} // positional arguments matching the conditions
	entryID    int64         // the entry whose neighbors are looked up
	order      string        // column used to sort the entry list
	direction  string        // when "desc", Entries swaps prev/next
}
|
||||
|
||||
// WithSearchQuery adds full-text search query to the condition.
|
||||
func (e *EntryPaginationBuilder) WithSearchQuery(query string) {
|
||||
if query != "" {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.document_vectors @@ plainto_tsquery($%d)", len(e.args)+1))
|
||||
e.args = append(e.args, query)
|
||||
}
|
||||
}
|
||||
|
||||
// WithStarred adds starred to the condition.
|
||||
func (e *EntryPaginationBuilder) WithStarred() {
|
||||
e.conditions = append(e.conditions, "e.starred is true")
|
||||
}
|
||||
|
||||
// WithFeedID adds feed_id to the condition.
|
||||
func (e *EntryPaginationBuilder) WithFeedID(feedID int64) {
|
||||
if feedID != 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.feed_id = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, feedID)
|
||||
}
|
||||
}
|
||||
|
||||
// WithCategoryID adds category_id to the condition.
|
||||
func (e *EntryPaginationBuilder) WithCategoryID(categoryID int64) {
|
||||
if categoryID != 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("f.category_id = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, categoryID)
|
||||
}
|
||||
}
|
||||
|
||||
// WithStatus adds status to the condition.
|
||||
func (e *EntryPaginationBuilder) WithStatus(status string) {
|
||||
if status != "" {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.status = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, status)
|
||||
}
|
||||
}
|
||||
|
||||
// WithGloballyVisible adds global visibility to the condition.
|
||||
func (e *EntryPaginationBuilder) WithGloballyVisible() {
|
||||
e.conditions = append(e.conditions, "not c.hide_globally")
|
||||
e.conditions = append(e.conditions, "not f.hide_globally")
|
||||
}
|
||||
|
||||
// Entries returns previous and next entries.
|
||||
func (e *EntryPaginationBuilder) Entries() (*model.Entry, *model.Entry, error) {
|
||||
tx, err := e.store.db.Begin()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("begin transaction for entry pagination: %v", err)
|
||||
}
|
||||
|
||||
prevID, nextID, err := e.getPrevNextID(tx)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
prevEntry, err := e.getEntry(tx, prevID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
nextEntry, err := e.getEntry(tx, nextID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
tx.Commit()
|
||||
|
||||
if e.direction == "desc" {
|
||||
return nextEntry, prevEntry, nil
|
||||
}
|
||||
|
||||
return prevEntry, nextEntry, nil
|
||||
}
|
||||
|
||||
// getPrevNextID returns the IDs of the entries immediately before and after
// e.entryID in the configured sort order. A zero ID means "no neighbor".
func (e *EntryPaginationBuilder) getPrevNextID(tx *sql.Tx) (prevID int64, nextID int64, err error) {
	defer timer.ExecutionTime(time.Now(), fmt.Sprintf("[EntryPaginationBuilder] %v, %v", e.conditions, e.args))

	// The CTE computes, for every entry matching the conditions, its
	// neighbors in the sort order via the lag/lead window functions; the
	// outer query then selects the row belonging to the current entry.
	// %[1]s = sort column, %[2]s = filter conditions, %[3]s = entry match.
	cte := `
		WITH entry_pagination AS (
			SELECT
				e.id,
				lag(e.id) over (order by e.%[1]s asc, e.id desc) as prev_id,
				lead(e.id) over (order by e.%[1]s asc, e.id desc) as next_id
			FROM entries AS e
			JOIN feeds AS f ON f.id=e.feed_id
			JOIN categories c ON c.id = f.category_id
			WHERE %[2]s
			ORDER BY e.%[1]s asc, e.id desc
		)
		SELECT prev_id, next_id FROM entry_pagination AS ep WHERE %[3]s;
	`

	subCondition := strings.Join(e.conditions, " AND ")
	finalCondition := fmt.Sprintf("ep.id = $%d", len(e.args)+1)
	query := fmt.Sprintf(cte, e.order, subCondition, finalCondition)
	e.args = append(e.args, e.entryID)

	var pID, nID sql.NullInt64
	err = tx.QueryRow(query, e.args...).Scan(&pID, &nID)
	switch {
	case err == sql.ErrNoRows:
		// The current entry does not match the conditions: no neighbors.
		return 0, 0, nil
	case err != nil:
		return 0, 0, fmt.Errorf("entry pagination: %v", err)
	}

	// NULL lag/lead means the entry is at the start/end of the list.
	if pID.Valid {
		prevID = pID.Int64
	}

	if nID.Valid {
		nextID = nID.Int64
	}

	return prevID, nextID, nil
}
|
||||
|
||||
func (e *EntryPaginationBuilder) getEntry(tx *sql.Tx, entryID int64) (*model.Entry, error) {
|
||||
var entry model.Entry
|
||||
|
||||
err := tx.QueryRow(`SELECT id, title FROM entries WHERE id = $1`, entryID).Scan(
|
||||
&entry.ID,
|
||||
&entry.Title,
|
||||
)
|
||||
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf("fetching sibling entry: %v", err)
|
||||
}
|
||||
|
||||
return &entry, nil
|
||||
}
|
||||
|
||||
// NewEntryPaginationBuilder returns a new EntryPaginationBuilder.
|
||||
func NewEntryPaginationBuilder(store *Storage, userID, entryID int64, order, direction string) *EntryPaginationBuilder {
|
||||
return &EntryPaginationBuilder{
|
||||
store: store,
|
||||
args: []interface{}{userID, "removed"},
|
||||
conditions: []string{"e.user_id = $1", "e.status <> $2"},
|
||||
entryID: entryID,
|
||||
order: order,
|
||||
direction: direction,
|
||||
}
|
||||
}
|
429
internal/storage/entry_query_builder.go
Normal file
429
internal/storage/entry_query_builder.go
Normal file
|
@ -0,0 +1,429 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/lib/pq"
|
||||
|
||||
"miniflux.app/v2/internal/model"
|
||||
"miniflux.app/v2/internal/timezone"
|
||||
)
|
||||
|
||||
// EntryQueryBuilder builds a SQL query to fetch entries.
type EntryQueryBuilder struct {
	store           *Storage      // database handle
	args            []interface{} // positional arguments matching the conditions
	conditions      []string      // SQL predicates, combined with AND
	sortExpressions []string      // ORDER BY expressions, applied in order
	limit           int           // LIMIT clause; 0 means no limit
	offset          int           // OFFSET clause; 0 means no offset
}
|
||||
|
||||
// WithSearchQuery adds full-text search query to the condition.
|
||||
func (e *EntryQueryBuilder) WithSearchQuery(query string) *EntryQueryBuilder {
|
||||
if query != "" {
|
||||
nArgs := len(e.args) + 1
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.document_vectors @@ plainto_tsquery($%d)", nArgs))
|
||||
e.args = append(e.args, query)
|
||||
|
||||
// 0.0000001 = 0.1 / (seconds_in_a_day)
|
||||
e.WithSorting(
|
||||
fmt.Sprintf("ts_rank(document_vectors, plainto_tsquery($%d)) - extract (epoch from now() - published_at)::float * 0.0000001", nArgs),
|
||||
"DESC",
|
||||
)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithStarred adds starred filter.
|
||||
func (e *EntryQueryBuilder) WithStarred(starred bool) *EntryQueryBuilder {
|
||||
if starred {
|
||||
e.conditions = append(e.conditions, "e.starred is true")
|
||||
} else {
|
||||
e.conditions = append(e.conditions, "e.starred is false")
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// BeforeDate adds a condition < published_at
|
||||
func (e *EntryQueryBuilder) BeforeDate(date time.Time) *EntryQueryBuilder {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.published_at < $%d", len(e.args)+1))
|
||||
e.args = append(e.args, date)
|
||||
return e
|
||||
}
|
||||
|
||||
// AfterDate adds a condition > published_at
|
||||
func (e *EntryQueryBuilder) AfterDate(date time.Time) *EntryQueryBuilder {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.published_at > $%d", len(e.args)+1))
|
||||
e.args = append(e.args, date)
|
||||
return e
|
||||
}
|
||||
|
||||
// BeforeEntryID adds a condition < entryID.
|
||||
func (e *EntryQueryBuilder) BeforeEntryID(entryID int64) *EntryQueryBuilder {
|
||||
if entryID != 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.id < $%d", len(e.args)+1))
|
||||
e.args = append(e.args, entryID)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// AfterEntryID adds a condition > entryID.
|
||||
func (e *EntryQueryBuilder) AfterEntryID(entryID int64) *EntryQueryBuilder {
|
||||
if entryID != 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.id > $%d", len(e.args)+1))
|
||||
e.args = append(e.args, entryID)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithEntryIDs filter by entry IDs.
|
||||
func (e *EntryQueryBuilder) WithEntryIDs(entryIDs []int64) *EntryQueryBuilder {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.id = ANY($%d)", len(e.args)+1))
|
||||
e.args = append(e.args, pq.Int64Array(entryIDs))
|
||||
return e
|
||||
}
|
||||
|
||||
// WithEntryID filter by entry ID.
|
||||
func (e *EntryQueryBuilder) WithEntryID(entryID int64) *EntryQueryBuilder {
|
||||
if entryID != 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.id = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, entryID)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithFeedID filter by feed ID.
|
||||
func (e *EntryQueryBuilder) WithFeedID(feedID int64) *EntryQueryBuilder {
|
||||
if feedID > 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.feed_id = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, feedID)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithCategoryID filter by category ID.
|
||||
func (e *EntryQueryBuilder) WithCategoryID(categoryID int64) *EntryQueryBuilder {
|
||||
if categoryID > 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("f.category_id = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, categoryID)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithStatus filter by entry status.
|
||||
func (e *EntryQueryBuilder) WithStatus(status string) *EntryQueryBuilder {
|
||||
if status != "" {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.status = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, status)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithStatuses filter by a list of entry statuses.
|
||||
func (e *EntryQueryBuilder) WithStatuses(statuses []string) *EntryQueryBuilder {
|
||||
if len(statuses) > 0 {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.status = ANY($%d)", len(e.args)+1))
|
||||
e.args = append(e.args, pq.StringArray(statuses))
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithTags filter by a list of entry tags.
|
||||
func (e *EntryQueryBuilder) WithTags(tags []string) *EntryQueryBuilder {
|
||||
if len(tags) > 0 {
|
||||
for _, cat := range tags {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("$%d = ANY(e.tags)", len(e.args)+1))
|
||||
e.args = append(e.args, cat)
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithoutStatus set the entry status that should not be returned.
|
||||
func (e *EntryQueryBuilder) WithoutStatus(status string) *EntryQueryBuilder {
|
||||
if status != "" {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.status <> $%d", len(e.args)+1))
|
||||
e.args = append(e.args, status)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithShareCode set the entry share code.
|
||||
func (e *EntryQueryBuilder) WithShareCode(shareCode string) *EntryQueryBuilder {
|
||||
e.conditions = append(e.conditions, fmt.Sprintf("e.share_code = $%d", len(e.args)+1))
|
||||
e.args = append(e.args, shareCode)
|
||||
return e
|
||||
}
|
||||
|
||||
// WithShareCodeNotEmpty adds a filter for non-empty share code.
|
||||
func (e *EntryQueryBuilder) WithShareCodeNotEmpty() *EntryQueryBuilder {
|
||||
e.conditions = append(e.conditions, "e.share_code <> ''")
|
||||
return e
|
||||
}
|
||||
|
||||
// WithSorting add a sort expression.
|
||||
func (e *EntryQueryBuilder) WithSorting(column, direction string) *EntryQueryBuilder {
|
||||
e.sortExpressions = append(e.sortExpressions, fmt.Sprintf("%s %s", column, direction))
|
||||
return e
|
||||
}
|
||||
|
||||
// WithLimit set the limit.
|
||||
func (e *EntryQueryBuilder) WithLimit(limit int) *EntryQueryBuilder {
|
||||
if limit > 0 {
|
||||
e.limit = limit
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithOffset set the offset.
|
||||
func (e *EntryQueryBuilder) WithOffset(offset int) *EntryQueryBuilder {
|
||||
if offset > 0 {
|
||||
e.offset = offset
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (e *EntryQueryBuilder) WithGloballyVisible() *EntryQueryBuilder {
|
||||
e.conditions = append(e.conditions, "not c.hide_globally")
|
||||
e.conditions = append(e.conditions, "not f.hide_globally")
|
||||
return e
|
||||
}
|
||||
|
||||
// CountEntries count the number of entries that match the condition.
|
||||
func (e *EntryQueryBuilder) CountEntries() (count int, err error) {
|
||||
query := `
|
||||
SELECT count(*)
|
||||
FROM entries e
|
||||
JOIN feeds f ON f.id = e.feed_id
|
||||
JOIN categories c ON c.id = f.category_id
|
||||
WHERE %s
|
||||
`
|
||||
condition := e.buildCondition()
|
||||
|
||||
err = e.store.db.QueryRow(fmt.Sprintf(query, condition), e.args...).Scan(&count)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("unable to count entries: %v", err)
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetEntry returns a single entry that match the condition.
|
||||
func (e *EntryQueryBuilder) GetEntry() (*model.Entry, error) {
|
||||
e.limit = 1
|
||||
entries, err := e.GetEntries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(entries) != 1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
entries[0].Enclosures, err = e.store.GetEnclosures(entries[0].ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return entries[0], nil
|
||||
}
|
||||
|
||||
// GetEntries returns a list of entries that match the condition.
// Each entry is returned with its feed, category, and icon metadata, and all
// timestamps are converted to the owning user's timezone.
func (e *EntryQueryBuilder) GetEntries() (model.Entries, error) {
	query := `
		SELECT
			e.id,
			e.user_id,
			e.feed_id,
			e.hash,
			e.published_at at time zone u.timezone,
			e.title,
			e.url,
			e.comments_url,
			e.author,
			e.share_code,
			e.content,
			e.status,
			e.starred,
			e.reading_time,
			e.created_at,
			e.changed_at,
			e.tags,
			f.title as feed_title,
			f.feed_url,
			f.site_url,
			f.checked_at,
			f.category_id, c.title as category_title,
			f.scraper_rules,
			f.rewrite_rules,
			f.crawler,
			f.user_agent,
			f.cookie,
			f.no_media_player,
			fi.icon_id,
			u.timezone
		FROM
			entries e
		LEFT JOIN
			feeds f ON f.id=e.feed_id
		LEFT JOIN
			categories c ON c.id=f.category_id
		LEFT JOIN
			feed_icons fi ON fi.feed_id=f.id
		LEFT JOIN
			users u ON u.id=e.user_id
		WHERE %s %s
	`

	condition := e.buildCondition()
	sorting := e.buildSorting()
	query = fmt.Sprintf(query, condition, sorting)

	rows, err := e.store.db.Query(query, e.args...)
	if err != nil {
		return nil, fmt.Errorf("unable to get entries: %v", err)
	}
	defer rows.Close()

	entries := make(model.Entries, 0)
	for rows.Next() {
		var entry model.Entry
		var iconID sql.NullInt64
		var tz string

		entry.Feed = &model.Feed{}
		entry.Feed.Category = &model.Category{}
		entry.Feed.Icon = &model.FeedIcon{}

		// The Scan targets must stay in the exact order of the SELECT list.
		err := rows.Scan(
			&entry.ID,
			&entry.UserID,
			&entry.FeedID,
			&entry.Hash,
			&entry.Date,
			&entry.Title,
			&entry.URL,
			&entry.CommentsURL,
			&entry.Author,
			&entry.ShareCode,
			&entry.Content,
			&entry.Status,
			&entry.Starred,
			&entry.ReadingTime,
			&entry.CreatedAt,
			&entry.ChangedAt,
			pq.Array(&entry.Tags),
			&entry.Feed.Title,
			&entry.Feed.FeedURL,
			&entry.Feed.SiteURL,
			&entry.Feed.CheckedAt,
			&entry.Feed.Category.ID,
			&entry.Feed.Category.Title,
			&entry.Feed.ScraperRules,
			&entry.Feed.RewriteRules,
			&entry.Feed.Crawler,
			&entry.Feed.UserAgent,
			&entry.Feed.Cookie,
			&entry.Feed.NoMediaPlayer,
			&iconID,
			&tz,
		)

		if err != nil {
			return nil, fmt.Errorf("unable to fetch entry row: %v", err)
		}

		// icon_id is NULL when the feed has no icon.
		if iconID.Valid {
			entry.Feed.Icon.IconID = iconID.Int64
		} else {
			entry.Feed.Icon.IconID = 0
		}

		// Make sure that timestamp fields contains timezone information (API)
		entry.Date = timezone.Convert(tz, entry.Date)
		entry.CreatedAt = timezone.Convert(tz, entry.CreatedAt)
		entry.ChangedAt = timezone.Convert(tz, entry.ChangedAt)
		entry.Feed.CheckedAt = timezone.Convert(tz, entry.Feed.CheckedAt)

		// Fill in the relation fields not present in the SELECT list.
		entry.Feed.ID = entry.FeedID
		entry.Feed.UserID = entry.UserID
		entry.Feed.Icon.FeedID = entry.FeedID
		entry.Feed.Category.UserID = entry.UserID
		entries = append(entries, &entry)
	}

	return entries, nil
}
|
||||
|
||||
// GetEntryIDs returns a list of entry IDs that match the condition.
|
||||
func (e *EntryQueryBuilder) GetEntryIDs() ([]int64, error) {
|
||||
query := `SELECT e.id FROM entries e LEFT JOIN feeds f ON f.id=e.feed_id WHERE %s %s`
|
||||
|
||||
condition := e.buildCondition()
|
||||
query = fmt.Sprintf(query, condition, e.buildSorting())
|
||||
|
||||
rows, err := e.store.db.Query(query, e.args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get entries: %v", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var entryIDs []int64
|
||||
for rows.Next() {
|
||||
var entryID int64
|
||||
|
||||
err := rows.Scan(&entryID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to fetch entry row: %v", err)
|
||||
}
|
||||
|
||||
entryIDs = append(entryIDs, entryID)
|
||||
}
|
||||
|
||||
return entryIDs, nil
|
||||
}
|
||||
|
||||
// buildCondition returns the WHERE clause body: all accumulated
// conditions joined with AND.
func (e *EntryQueryBuilder) buildCondition() string {
	return strings.Join(e.conditions, " AND ")
}
|
||||
|
||||
func (e *EntryQueryBuilder) buildSorting() string {
|
||||
var parts []string
|
||||
|
||||
if len(e.sortExpressions) > 0 {
|
||||
parts = append(parts, fmt.Sprintf(`ORDER BY %s`, strings.Join(e.sortExpressions, ", ")))
|
||||
}
|
||||
|
||||
if e.limit > 0 {
|
||||
parts = append(parts, fmt.Sprintf(`LIMIT %d`, e.limit))
|
||||
}
|
||||
|
||||
if e.offset > 0 {
|
||||
parts = append(parts, fmt.Sprintf(`OFFSET %d`, e.offset))
|
||||
}
|
||||
|
||||
return strings.Join(parts, " ")
|
||||
}
|
||||
|
||||
// NewEntryQueryBuilder returns a new EntryQueryBuilder.
|
||||
func NewEntryQueryBuilder(store *Storage, userID int64) *EntryQueryBuilder {
|
||||
return &EntryQueryBuilder{
|
||||
store: store,
|
||||
args: []interface{}{userID},
|
||||
conditions: []string{"e.user_id = $1"},
|
||||
}
|
||||
}
|
||||
|
||||
// NewAnonymousQueryBuilder returns a new EntryQueryBuilder suitable for anonymous users.
|
||||
func NewAnonymousQueryBuilder(store *Storage) *EntryQueryBuilder {
|
||||
return &EntryQueryBuilder{
|
||||
store: store,
|
||||
}
|
||||
}
|
449
internal/storage/feed.go
Normal file
449
internal/storage/feed.go
Normal file
|
@ -0,0 +1,449 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
|
||||
"miniflux.app/v2/internal/config"
|
||||
"miniflux.app/v2/internal/logger"
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
type byStateAndName struct{ f model.Feeds }
|
||||
|
||||
func (l byStateAndName) Len() int { return len(l.f) }
|
||||
func (l byStateAndName) Swap(i, j int) { l.f[i], l.f[j] = l.f[j], l.f[i] }
|
||||
func (l byStateAndName) Less(i, j int) bool {
|
||||
// disabled test first, since we don't care about errors if disabled
|
||||
if l.f[i].Disabled != l.f[j].Disabled {
|
||||
return l.f[j].Disabled
|
||||
}
|
||||
if l.f[i].ParsingErrorCount != l.f[j].ParsingErrorCount {
|
||||
return l.f[i].ParsingErrorCount > l.f[j].ParsingErrorCount
|
||||
}
|
||||
if l.f[i].UnreadCount != l.f[j].UnreadCount {
|
||||
return l.f[i].UnreadCount > l.f[j].UnreadCount
|
||||
}
|
||||
return l.f[i].Title < l.f[j].Title
|
||||
}
|
||||
|
||||
// FeedExists checks if the given feed exists.
|
||||
func (s *Storage) FeedExists(userID, feedID int64) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM feeds WHERE user_id=$1 AND id=$2`
|
||||
s.db.QueryRow(query, userID, feedID).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// FeedURLExists checks if feed URL already exists.
|
||||
func (s *Storage) FeedURLExists(userID int64, feedURL string) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM feeds WHERE user_id=$1 AND feed_url=$2`
|
||||
s.db.QueryRow(query, userID, feedURL).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// AnotherFeedURLExists checks if the user a duplicated feed.
|
||||
func (s *Storage) AnotherFeedURLExists(userID, feedID int64, feedURL string) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM feeds WHERE id <> $1 AND user_id=$2 AND feed_url=$3`
|
||||
s.db.QueryRow(query, feedID, userID, feedURL).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// CountAllFeeds returns the number of feeds in the database.
|
||||
func (s *Storage) CountAllFeeds() map[string]int64 {
|
||||
rows, err := s.db.Query(`SELECT disabled, count(*) FROM feeds GROUP BY disabled`)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
results := make(map[string]int64)
|
||||
results["enabled"] = 0
|
||||
results["disabled"] = 0
|
||||
|
||||
for rows.Next() {
|
||||
var disabled bool
|
||||
var count int64
|
||||
|
||||
if err := rows.Scan(&disabled, &count); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if disabled {
|
||||
results["disabled"] = count
|
||||
} else {
|
||||
results["enabled"] = count
|
||||
}
|
||||
}
|
||||
|
||||
results["total"] = results["disabled"] + results["enabled"]
|
||||
return results
|
||||
}
|
||||
|
||||
// CountFeeds returns the number of feeds that belongs to the given user.
|
||||
func (s *Storage) CountFeeds(userID int64) int {
|
||||
var result int
|
||||
err := s.db.QueryRow(`SELECT count(*) FROM feeds WHERE user_id=$1`, userID).Scan(&result)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// CountUserFeedsWithErrors returns the number of feeds with parsing errors that belong to the given user.
|
||||
func (s *Storage) CountUserFeedsWithErrors(userID int64) int {
|
||||
pollingParsingErrorLimit := config.Opts.PollingParsingErrorLimit()
|
||||
if pollingParsingErrorLimit <= 0 {
|
||||
pollingParsingErrorLimit = 1
|
||||
}
|
||||
query := `SELECT count(*) FROM feeds WHERE user_id=$1 AND parsing_error_count >= $2`
|
||||
var result int
|
||||
err := s.db.QueryRow(query, userID, pollingParsingErrorLimit).Scan(&result)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// CountAllFeedsWithErrors returns the number of feeds with parsing errors.
|
||||
func (s *Storage) CountAllFeedsWithErrors() int {
|
||||
pollingParsingErrorLimit := config.Opts.PollingParsingErrorLimit()
|
||||
if pollingParsingErrorLimit <= 0 {
|
||||
pollingParsingErrorLimit = 1
|
||||
}
|
||||
query := `SELECT count(*) FROM feeds WHERE parsing_error_count >= $1`
|
||||
var result int
|
||||
err := s.db.QueryRow(query, pollingParsingErrorLimit).Scan(&result)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Feeds returns all feeds that belongs to the given user.
|
||||
func (s *Storage) Feeds(userID int64) (model.Feeds, error) {
|
||||
builder := NewFeedQueryBuilder(s, userID)
|
||||
builder.WithSorting(model.DefaultFeedSorting, model.DefaultFeedSortingDirection)
|
||||
return builder.GetFeeds()
|
||||
}
|
||||
|
||||
func getFeedsSorted(builder *FeedQueryBuilder) (model.Feeds, error) {
|
||||
result, err := builder.GetFeeds()
|
||||
if err == nil {
|
||||
sort.Sort(byStateAndName{result})
|
||||
return result, nil
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
// FeedsWithCounters returns all feeds of the given user with counters of read and unread entries.
|
||||
func (s *Storage) FeedsWithCounters(userID int64) (model.Feeds, error) {
|
||||
builder := NewFeedQueryBuilder(s, userID)
|
||||
builder.WithCounters()
|
||||
builder.WithSorting(model.DefaultFeedSorting, model.DefaultFeedSortingDirection)
|
||||
return getFeedsSorted(builder)
|
||||
}
|
||||
|
||||
// Return read and unread count.
|
||||
func (s *Storage) FetchCounters(userID int64) (model.FeedCounters, error) {
|
||||
builder := NewFeedQueryBuilder(s, userID)
|
||||
builder.WithCounters()
|
||||
reads, unreads, err := builder.fetchFeedCounter()
|
||||
return model.FeedCounters{ReadCounters: reads, UnreadCounters: unreads}, err
|
||||
}
|
||||
|
||||
// FeedsByCategoryWithCounters returns all feeds of the given user/category with counters of read and unread entries.
|
||||
func (s *Storage) FeedsByCategoryWithCounters(userID, categoryID int64) (model.Feeds, error) {
|
||||
builder := NewFeedQueryBuilder(s, userID)
|
||||
builder.WithCategoryID(categoryID)
|
||||
builder.WithCounters()
|
||||
builder.WithSorting(model.DefaultFeedSorting, model.DefaultFeedSortingDirection)
|
||||
return getFeedsSorted(builder)
|
||||
}
|
||||
|
||||
// WeeklyFeedEntryCount returns the weekly entry count for a feed.
|
||||
func (s *Storage) WeeklyFeedEntryCount(userID, feedID int64) (int, error) {
|
||||
query := `
|
||||
SELECT
|
||||
count(*)
|
||||
FROM
|
||||
entries
|
||||
WHERE
|
||||
entries.user_id=$1 AND
|
||||
entries.feed_id=$2 AND
|
||||
entries.published_at BETWEEN (now() - interval '1 week') AND now();
|
||||
`
|
||||
|
||||
var weeklyCount int
|
||||
err := s.db.QueryRow(query, userID, feedID).Scan(&weeklyCount)
|
||||
|
||||
switch {
|
||||
case errors.Is(err, sql.ErrNoRows):
|
||||
return 0, nil
|
||||
case err != nil:
|
||||
return 0, fmt.Errorf(`store: unable to fetch weekly count for feed #%d: %v`, feedID, err)
|
||||
}
|
||||
|
||||
return weeklyCount, nil
|
||||
}
|
||||
|
||||
// FeedByID returns a feed by the ID.
|
||||
func (s *Storage) FeedByID(userID, feedID int64) (*model.Feed, error) {
|
||||
builder := NewFeedQueryBuilder(s, userID)
|
||||
builder.WithFeedID(feedID)
|
||||
feed, err := builder.GetFeed()
|
||||
|
||||
switch {
|
||||
case errors.Is(err, sql.ErrNoRows):
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf(`store: unable to fetch feed #%d: %v`, feedID, err)
|
||||
}
|
||||
|
||||
return feed, nil
|
||||
}
|
||||
|
||||
// CreateFeed creates a new feed.
//
// The feed row is inserted first (feed.ID is populated via RETURNING), then
// every entry attached to the feed is persisted in its own transaction:
// existence check, optional insert, commit. Because each entry commits
// independently, a failure on one entry leaves previously committed entries
// in place — TODO confirm this per-entry transaction granularity is
// intentional rather than one transaction for all entries.
func (s *Storage) CreateFeed(feed *model.Feed) error {
	// NOTE: this local shadows the database/sql package name within this function.
	sql := `
		INSERT INTO feeds (
			feed_url,
			site_url,
			title,
			category_id,
			user_id,
			etag_header,
			last_modified_header,
			crawler,
			user_agent,
			cookie,
			username,
			password,
			disabled,
			scraper_rules,
			rewrite_rules,
			blocklist_rules,
			keeplist_rules,
			ignore_http_cache,
			allow_self_signed_certificates,
			fetch_via_proxy,
			hide_globally,
			url_rewrite_rules,
			no_media_player
		)
		VALUES
			($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23)
		RETURNING
			id
	`
	// Argument order must match the column list above exactly.
	err := s.db.QueryRow(
		sql,
		feed.FeedURL,
		feed.SiteURL,
		feed.Title,
		feed.Category.ID,
		feed.UserID,
		feed.EtagHeader,
		feed.LastModifiedHeader,
		feed.Crawler,
		feed.UserAgent,
		feed.Cookie,
		feed.Username,
		feed.Password,
		feed.Disabled,
		feed.ScraperRules,
		feed.RewriteRules,
		feed.BlocklistRules,
		feed.KeeplistRules,
		feed.IgnoreHTTPCache,
		feed.AllowSelfSignedCertificates,
		feed.FetchViaProxy,
		feed.HideGlobally,
		feed.UrlRewriteRules,
		feed.NoMediaPlayer,
	).Scan(&feed.ID)
	if err != nil {
		return fmt.Errorf(`store: unable to create feed %q: %v`, feed.FeedURL, err)
	}

	for i := 0; i < len(feed.Entries); i++ {
		// Attach the freshly created feed/user IDs to each entry before insertion.
		feed.Entries[i].FeedID = feed.ID
		feed.Entries[i].UserID = feed.UserID

		tx, err := s.db.Begin()
		if err != nil {
			return fmt.Errorf(`store: unable to start transaction: %v`, err)
		}

		entryExists, err := s.entryExists(tx, feed.Entries[i])
		if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
			}
			return err
		}

		// Only insert entries that are not already present (dedup on re-import).
		if !entryExists {
			if err := s.createEntry(tx, feed.Entries[i]); err != nil {
				if rollbackErr := tx.Rollback(); rollbackErr != nil {
					return fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
				}
				return err
			}
		}

		if err := tx.Commit(); err != nil {
			return fmt.Errorf(`store: unable to commit transaction: %v`, err)
		}
	}

	return nil
}
|
||||
|
||||
// UpdateFeed updates an existing feed.
//
// All mutable columns are overwritten from the given model; the row is
// selected by both feed ID and user ID so a user cannot update another
// user's feed.
func (s *Storage) UpdateFeed(feed *model.Feed) (err error) {
	query := `
		UPDATE
			feeds
		SET
			feed_url=$1,
			site_url=$2,
			title=$3,
			category_id=$4,
			etag_header=$5,
			last_modified_header=$6,
			checked_at=$7,
			parsing_error_msg=$8,
			parsing_error_count=$9,
			scraper_rules=$10,
			rewrite_rules=$11,
			blocklist_rules=$12,
			keeplist_rules=$13,
			crawler=$14,
			user_agent=$15,
			cookie=$16,
			username=$17,
			password=$18,
			disabled=$19,
			next_check_at=$20,
			ignore_http_cache=$21,
			allow_self_signed_certificates=$22,
			fetch_via_proxy=$23,
			hide_globally=$24,
			url_rewrite_rules=$25,
			no_media_player=$26
		WHERE
			id=$27 AND user_id=$28
	`
	// Argument order must match the numbered placeholders above exactly.
	_, err = s.db.Exec(query,
		feed.FeedURL,
		feed.SiteURL,
		feed.Title,
		feed.Category.ID,
		feed.EtagHeader,
		feed.LastModifiedHeader,
		feed.CheckedAt,
		feed.ParsingErrorMsg,
		feed.ParsingErrorCount,
		feed.ScraperRules,
		feed.RewriteRules,
		feed.BlocklistRules,
		feed.KeeplistRules,
		feed.Crawler,
		feed.UserAgent,
		feed.Cookie,
		feed.Username,
		feed.Password,
		feed.Disabled,
		feed.NextCheckAt,
		feed.IgnoreHTTPCache,
		feed.AllowSelfSignedCertificates,
		feed.FetchViaProxy,
		feed.HideGlobally,
		feed.UrlRewriteRules,
		feed.NoMediaPlayer,
		feed.ID,
		feed.UserID,
	)

	if err != nil {
		return fmt.Errorf(`store: unable to update feed #%d (%s): %v`, feed.ID, feed.FeedURL, err)
	}

	return nil
}
|
||||
|
||||
// UpdateFeedError updates feed errors.
|
||||
func (s *Storage) UpdateFeedError(feed *model.Feed) (err error) {
|
||||
query := `
|
||||
UPDATE
|
||||
feeds
|
||||
SET
|
||||
parsing_error_msg=$1,
|
||||
parsing_error_count=$2,
|
||||
checked_at=$3,
|
||||
next_check_at=$4
|
||||
WHERE
|
||||
id=$5 AND user_id=$6
|
||||
`
|
||||
_, err = s.db.Exec(query,
|
||||
feed.ParsingErrorMsg,
|
||||
feed.ParsingErrorCount,
|
||||
feed.CheckedAt,
|
||||
feed.NextCheckAt,
|
||||
feed.ID,
|
||||
feed.UserID,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update feed error #%d (%s): %v`, feed.ID, feed.FeedURL, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveFeed removes a feed and all entries.
|
||||
// This operation can takes time if the feed has lot of entries.
|
||||
func (s *Storage) RemoveFeed(userID, feedID int64) error {
|
||||
rows, err := s.db.Query(`SELECT id FROM entries WHERE user_id=$1 AND feed_id=$2`, userID, feedID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to get user feed entries: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var entryID int64
|
||||
if err := rows.Scan(&entryID); err != nil {
|
||||
return fmt.Errorf(`store: unable to read user feed entry ID: %v`, err)
|
||||
}
|
||||
|
||||
logger.Debug(`[FEED DELETION] Deleting entry #%d of feed #%d for user #%d (%d GoRoutines)`, entryID, feedID, userID, runtime.NumGoroutine())
|
||||
|
||||
if _, err := s.db.Exec(`DELETE FROM entries WHERE id=$1 AND user_id=$2`, entryID, userID); err != nil {
|
||||
return fmt.Errorf(`store: unable to delete user feed entries #%d: %v`, entryID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s.db.Exec(`DELETE FROM feeds WHERE id=$1 AND user_id=$2`, feedID, userID); err != nil {
|
||||
return fmt.Errorf(`store: unable to delete feed #%d: %v`, feedID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetFeedErrors removes all feed errors.
|
||||
func (s *Storage) ResetFeedErrors() error {
|
||||
_, err := s.db.Exec(`UPDATE feeds SET parsing_error_count=0, parsing_error_msg=''`)
|
||||
return err
|
||||
}
|
307
internal/storage/feed_query_builder.go
Normal file
307
internal/storage/feed_query_builder.go
Normal file
|
@ -0,0 +1,307 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"miniflux.app/v2/internal/model"
|
||||
"miniflux.app/v2/internal/timezone"
|
||||
)
|
||||
|
||||
// FeedQueryBuilder builds a SQL query to fetch feeds.
type FeedQueryBuilder struct {
	store             *Storage      // storage backend used to run the query
	args              []interface{} // positional arguments matching conditions
	conditions        []string      // WHERE clauses joined with AND
	sortExpressions   []string      // ORDER BY expressions ("column direction")
	limit             int           // LIMIT value; 0 means no limit
	offset            int           // OFFSET value; 0 means no offset
	withCounters      bool          // when true, also fetch read/unread counters
	counterJoinFeeds  bool          // when true, the counter query joins the feeds table
	counterArgs       []interface{} // positional arguments for the counter query
	counterConditions []string      // WHERE clauses for the counter query
}
|
||||
|
||||
// NewFeedQueryBuilder returns a new FeedQueryBuilder.
|
||||
func NewFeedQueryBuilder(store *Storage, userID int64) *FeedQueryBuilder {
|
||||
return &FeedQueryBuilder{
|
||||
store: store,
|
||||
args: []interface{}{userID},
|
||||
conditions: []string{"f.user_id = $1"},
|
||||
counterArgs: []interface{}{userID, model.EntryStatusRead, model.EntryStatusUnread},
|
||||
counterConditions: []string{"e.user_id = $1", "e.status IN ($2, $3)"},
|
||||
}
|
||||
}
|
||||
|
||||
// WithCategoryID filter by category ID.
|
||||
func (f *FeedQueryBuilder) WithCategoryID(categoryID int64) *FeedQueryBuilder {
|
||||
if categoryID > 0 {
|
||||
f.conditions = append(f.conditions, fmt.Sprintf("f.category_id = $%d", len(f.args)+1))
|
||||
f.args = append(f.args, categoryID)
|
||||
f.counterConditions = append(f.counterConditions, fmt.Sprintf("f.category_id = $%d", len(f.counterArgs)+1))
|
||||
f.counterArgs = append(f.counterArgs, categoryID)
|
||||
f.counterJoinFeeds = true
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// WithFeedID filter by feed ID.
|
||||
func (f *FeedQueryBuilder) WithFeedID(feedID int64) *FeedQueryBuilder {
|
||||
if feedID > 0 {
|
||||
f.conditions = append(f.conditions, fmt.Sprintf("f.id = $%d", len(f.args)+1))
|
||||
f.args = append(f.args, feedID)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// WithCounters let the builder return feeds with counters of statuses of entries.
|
||||
func (f *FeedQueryBuilder) WithCounters() *FeedQueryBuilder {
|
||||
f.withCounters = true
|
||||
return f
|
||||
}
|
||||
|
||||
// WithSorting add a sort expression.
|
||||
func (f *FeedQueryBuilder) WithSorting(column, direction string) *FeedQueryBuilder {
|
||||
f.sortExpressions = append(f.sortExpressions, fmt.Sprintf("%s %s", column, direction))
|
||||
return f
|
||||
}
|
||||
|
||||
// WithLimit set the limit.
|
||||
func (f *FeedQueryBuilder) WithLimit(limit int) *FeedQueryBuilder {
|
||||
f.limit = limit
|
||||
return f
|
||||
}
|
||||
|
||||
// WithOffset set the offset.
|
||||
func (f *FeedQueryBuilder) WithOffset(offset int) *FeedQueryBuilder {
|
||||
f.offset = offset
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *FeedQueryBuilder) buildCondition() string {
|
||||
return strings.Join(f.conditions, " AND ")
|
||||
}
|
||||
|
||||
func (f *FeedQueryBuilder) buildCounterCondition() string {
|
||||
return strings.Join(f.counterConditions, " AND ")
|
||||
}
|
||||
|
||||
func (f *FeedQueryBuilder) buildSorting() string {
|
||||
var parts []string
|
||||
|
||||
if len(f.sortExpressions) > 0 {
|
||||
parts = append(parts, fmt.Sprintf(`ORDER BY %s`, strings.Join(f.sortExpressions, ", ")))
|
||||
}
|
||||
|
||||
if len(parts) > 0 {
|
||||
parts = append(parts, ", lower(f.title) ASC")
|
||||
}
|
||||
|
||||
if f.limit > 0 {
|
||||
parts = append(parts, fmt.Sprintf(`LIMIT %d`, f.limit))
|
||||
}
|
||||
|
||||
if f.offset > 0 {
|
||||
parts = append(parts, fmt.Sprintf(`OFFSET %d`, f.offset))
|
||||
}
|
||||
|
||||
return strings.Join(parts, " ")
|
||||
}
|
||||
|
||||
// GetFeed returns a single feed that match the condition.
|
||||
func (f *FeedQueryBuilder) GetFeed() (*model.Feed, error) {
|
||||
f.limit = 1
|
||||
feeds, err := f.GetFeeds()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(feeds) != 1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return feeds[0], nil
|
||||
}
|
||||
|
||||
// GetFeeds returns a list of feeds that match the condition.
//
// Timestamps are converted to the owning user's timezone, and when counters
// were requested (WithCounters) the read/unread counts are attached to each
// feed.
func (f *FeedQueryBuilder) GetFeeds() (model.Feeds, error) {
	var query = `
		SELECT
			f.id,
			f.feed_url,
			f.site_url,
			f.title,
			f.etag_header,
			f.last_modified_header,
			f.user_id,
			f.checked_at at time zone u.timezone,
			f.parsing_error_count,
			f.parsing_error_msg,
			f.scraper_rules,
			f.rewrite_rules,
			f.blocklist_rules,
			f.keeplist_rules,
			f.url_rewrite_rules,
			f.crawler,
			f.user_agent,
			f.cookie,
			f.username,
			f.password,
			f.ignore_http_cache,
			f.allow_self_signed_certificates,
			f.fetch_via_proxy,
			f.disabled,
			f.no_media_player,
			f.hide_globally,
			f.category_id,
			c.title as category_title,
			c.hide_globally as category_hidden,
			fi.icon_id,
			u.timezone
		FROM
			feeds f
		LEFT JOIN
			categories c ON c.id=f.category_id
		LEFT JOIN
			feed_icons fi ON fi.feed_id=f.id
		LEFT JOIN
			users u ON u.id=f.user_id
		WHERE %s
		%s
	`

	// Inject the WHERE clauses and the ORDER BY/LIMIT/OFFSET suffix.
	query = fmt.Sprintf(query, f.buildCondition(), f.buildSorting())

	rows, err := f.store.db.Query(query, f.args...)
	if err != nil {
		return nil, fmt.Errorf(`store: unable to fetch feeds: %w`, err)
	}
	defer rows.Close()

	// No-op (both maps nil) unless WithCounters() was called.
	readCounters, unreadCounters, err := f.fetchFeedCounter()
	if err != nil {
		return nil, err
	}

	feeds := make(model.Feeds, 0)
	for rows.Next() {
		var feed model.Feed
		var iconID sql.NullInt64
		var tz string
		feed.Category = &model.Category{}

		// Scan order must match the SELECT column list above exactly.
		err := rows.Scan(
			&feed.ID,
			&feed.FeedURL,
			&feed.SiteURL,
			&feed.Title,
			&feed.EtagHeader,
			&feed.LastModifiedHeader,
			&feed.UserID,
			&feed.CheckedAt,
			&feed.ParsingErrorCount,
			&feed.ParsingErrorMsg,
			&feed.ScraperRules,
			&feed.RewriteRules,
			&feed.BlocklistRules,
			&feed.KeeplistRules,
			&feed.UrlRewriteRules,
			&feed.Crawler,
			&feed.UserAgent,
			&feed.Cookie,
			&feed.Username,
			&feed.Password,
			&feed.IgnoreHTTPCache,
			&feed.AllowSelfSignedCertificates,
			&feed.FetchViaProxy,
			&feed.Disabled,
			&feed.NoMediaPlayer,
			&feed.HideGlobally,
			&feed.Category.ID,
			&feed.Category.Title,
			&feed.Category.HideGlobally,
			&iconID,
			&tz,
		)

		if err != nil {
			return nil, fmt.Errorf(`store: unable to fetch feeds row: %w`, err)
		}

		// Feeds without an icon get a zero IconID rather than a nil Icon.
		if iconID.Valid {
			feed.Icon = &model.FeedIcon{FeedID: feed.ID, IconID: iconID.Int64}
		} else {
			feed.Icon = &model.FeedIcon{FeedID: feed.ID, IconID: 0}
		}

		if readCounters != nil {
			if count, found := readCounters[feed.ID]; found {
				feed.ReadCount = count
			}
		}
		if unreadCounters != nil {
			if count, found := unreadCounters[feed.ID]; found {
				feed.UnreadCount = count
			}
		}

		feed.CheckedAt = timezone.Convert(tz, feed.CheckedAt)
		feed.Category.UserID = feed.UserID
		feeds = append(feeds, &feed)
	}

	return feeds, nil
}
|
||||
|
||||
func (f *FeedQueryBuilder) fetchFeedCounter() (unreadCounters map[int64]int, readCounters map[int64]int, err error) {
|
||||
if !f.withCounters {
|
||||
return nil, nil, nil
|
||||
}
|
||||
query := `
|
||||
SELECT
|
||||
e.feed_id,
|
||||
e.status,
|
||||
count(*)
|
||||
FROM
|
||||
entries e
|
||||
%s
|
||||
WHERE
|
||||
%s
|
||||
GROUP BY
|
||||
e.feed_id, e.status
|
||||
`
|
||||
join := ""
|
||||
if f.counterJoinFeeds {
|
||||
join = "LEFT JOIN feeds f ON f.id=e.feed_id"
|
||||
}
|
||||
query = fmt.Sprintf(query, join, f.buildCounterCondition())
|
||||
|
||||
rows, err := f.store.db.Query(query, f.counterArgs...)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf(`store: unable to fetch feed counts: %w`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
readCounters = make(map[int64]int)
|
||||
unreadCounters = make(map[int64]int)
|
||||
for rows.Next() {
|
||||
var feedID int64
|
||||
var status string
|
||||
var count int
|
||||
if err := rows.Scan(&feedID, &status, &count); err != nil {
|
||||
return nil, nil, fmt.Errorf(`store: unable to fetch feed counter row: %w`, err)
|
||||
}
|
||||
|
||||
if status == model.EntryStatusRead {
|
||||
readCounters[feedID] = count
|
||||
} else if status == model.EntryStatusUnread {
|
||||
unreadCounters[feedID] = count
|
||||
}
|
||||
}
|
||||
|
||||
return readCounters, unreadCounters, nil
|
||||
}
|
159
internal/storage/icon.go
Normal file
159
internal/storage/icon.go
Normal file
|
@ -0,0 +1,159 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// HasIcon checks if the given feed has an icon.
|
||||
func (s *Storage) HasIcon(feedID int64) bool {
|
||||
var result bool
|
||||
query := `SELECT true FROM feed_icons WHERE feed_id=$1`
|
||||
s.db.QueryRow(query, feedID).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// IconByID returns an icon by the ID.
|
||||
func (s *Storage) IconByID(iconID int64) (*model.Icon, error) {
|
||||
var icon model.Icon
|
||||
query := `SELECT id, hash, mime_type, content FROM icons WHERE id=$1`
|
||||
err := s.db.QueryRow(query, iconID).Scan(&icon.ID, &icon.Hash, &icon.MimeType, &icon.Content)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("Unable to fetch icon by hash: %v", err)
|
||||
}
|
||||
|
||||
return &icon, nil
|
||||
}
|
||||
|
||||
// IconByFeedID returns a feed icon.
|
||||
func (s *Storage) IconByFeedID(userID, feedID int64) (*model.Icon, error) {
|
||||
query := `
|
||||
SELECT
|
||||
icons.id,
|
||||
icons.hash,
|
||||
icons.mime_type,
|
||||
icons.content
|
||||
FROM icons
|
||||
LEFT JOIN feed_icons ON feed_icons.icon_id=icons.id
|
||||
LEFT JOIN feeds ON feeds.id=feed_icons.feed_id
|
||||
WHERE
|
||||
feeds.user_id=$1 AND feeds.id=$2
|
||||
LIMIT 1
|
||||
`
|
||||
var icon model.Icon
|
||||
err := s.db.QueryRow(query, userID, feedID).Scan(&icon.ID, &icon.Hash, &icon.MimeType, &icon.Content)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch icon: %v`, err)
|
||||
}
|
||||
|
||||
return &icon, nil
|
||||
}
|
||||
|
||||
// IconByHash returns an icon by the hash (checksum).
|
||||
func (s *Storage) IconByHash(icon *model.Icon) error {
|
||||
err := s.db.QueryRow(`SELECT id FROM icons WHERE hash=$1`, icon.Hash).Scan(&icon.ID)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf(`store: unable to fetch icon by hash: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateIcon creates a new icon.
|
||||
func (s *Storage) CreateIcon(icon *model.Icon) error {
|
||||
query := `
|
||||
INSERT INTO icons
|
||||
(hash, mime_type, content)
|
||||
VALUES
|
||||
($1, $2, $3)
|
||||
RETURNING
|
||||
id
|
||||
`
|
||||
err := s.db.QueryRow(
|
||||
query,
|
||||
icon.Hash,
|
||||
normalizeMimeType(icon.MimeType),
|
||||
icon.Content,
|
||||
).Scan(&icon.ID)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to create icon: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateFeedIcon creates an icon and associate the icon to the given feed.
|
||||
func (s *Storage) CreateFeedIcon(feedID int64, icon *model.Icon) error {
|
||||
err := s.IconByHash(icon)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if icon.ID == 0 {
|
||||
err := s.CreateIcon(icon)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
_, err = s.db.Exec(`INSERT INTO feed_icons (feed_id, icon_id) VALUES ($1, $2)`, feedID, icon.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to create feed icon: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Icons returns all icons that belongs to a user.
|
||||
func (s *Storage) Icons(userID int64) (model.Icons, error) {
|
||||
query := `
|
||||
SELECT
|
||||
icons.id,
|
||||
icons.hash,
|
||||
icons.mime_type,
|
||||
icons.content
|
||||
FROM icons
|
||||
LEFT JOIN feed_icons ON feed_icons.icon_id=icons.id
|
||||
LEFT JOIN feeds ON feeds.id=feed_icons.feed_id
|
||||
WHERE
|
||||
feeds.user_id=$1
|
||||
`
|
||||
rows, err := s.db.Query(query, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch icons: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var icons model.Icons
|
||||
for rows.Next() {
|
||||
var icon model.Icon
|
||||
err := rows.Scan(&icon.ID, &icon.Hash, &icon.MimeType, &icon.Content)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch icons row: %v`, err)
|
||||
}
|
||||
icons = append(icons, &icon)
|
||||
}
|
||||
|
||||
return icons, nil
|
||||
}
|
||||
|
||||
// normalizeMimeType lowercases the given MIME type and returns it when it is
// a supported image type; anything else falls back to "image/x-icon".
func normalizeMimeType(mimeType string) string {
	normalized := strings.ToLower(mimeType)
	switch normalized {
	case "image/png", "image/jpeg", "image/jpg", "image/webp", "image/svg+xml", "image/x-icon", "image/gif":
		return normalized
	default:
		return "image/x-icon"
	}
}
|
374
internal/storage/integration.go
Normal file
374
internal/storage/integration.go
Normal file
|
@ -0,0 +1,374 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// HasDuplicateFeverUsername checks if another user have the same Fever username.
|
||||
func (s *Storage) HasDuplicateFeverUsername(userID int64, feverUsername string) bool {
|
||||
query := `SELECT true FROM integrations WHERE user_id != $1 AND fever_username=$2`
|
||||
var result bool
|
||||
s.db.QueryRow(query, userID, feverUsername).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// HasDuplicateGoogleReaderUsername checks if another user have the same Google Reader username.
|
||||
func (s *Storage) HasDuplicateGoogleReaderUsername(userID int64, googleReaderUsername string) bool {
|
||||
query := `SELECT true FROM integrations WHERE user_id != $1 AND googlereader_username=$2`
|
||||
var result bool
|
||||
s.db.QueryRow(query, userID, googleReaderUsername).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// UserByFeverToken returns a user by using the Fever API token.
|
||||
func (s *Storage) UserByFeverToken(token string) (*model.User, error) {
|
||||
query := `
|
||||
SELECT
|
||||
users.id, users.is_admin, users.timezone
|
||||
FROM
|
||||
users
|
||||
LEFT JOIN
|
||||
integrations ON integrations.user_id=users.id
|
||||
WHERE
|
||||
integrations.fever_enabled='t' AND lower(integrations.fever_token)=lower($1)
|
||||
`
|
||||
|
||||
var user model.User
|
||||
err := s.db.QueryRow(query, token).Scan(&user.ID, &user.IsAdmin, &user.Timezone)
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf("store: unable to fetch user: %v", err)
|
||||
default:
|
||||
return &user, nil
|
||||
}
|
||||
}
|
||||
|
||||
// GoogleReaderUserCheckPassword validates the Google Reader hashed password.
|
||||
func (s *Storage) GoogleReaderUserCheckPassword(username, password string) error {
|
||||
var hash string
|
||||
|
||||
query := `
|
||||
SELECT
|
||||
googlereader_password
|
||||
FROM
|
||||
integrations
|
||||
WHERE
|
||||
integrations.googlereader_enabled='t' AND integrations.googlereader_username=$1
|
||||
`
|
||||
|
||||
err := s.db.QueryRow(query, username).Scan(&hash)
|
||||
if err == sql.ErrNoRows {
|
||||
return fmt.Errorf(`store: unable to find this user: %s`, username)
|
||||
} else if err != nil {
|
||||
return fmt.Errorf(`store: unable to fetch user: %v`, err)
|
||||
}
|
||||
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)); err != nil {
|
||||
return fmt.Errorf(`store: invalid password for "%s" (%v)`, username, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GoogleReaderUserGetIntegration returns part of the Google Reader parts of the integration struct.
|
||||
func (s *Storage) GoogleReaderUserGetIntegration(username string) (*model.Integration, error) {
|
||||
var integration model.Integration
|
||||
|
||||
query := `
|
||||
SELECT
|
||||
user_id,
|
||||
googlereader_enabled,
|
||||
googlereader_username,
|
||||
googlereader_password
|
||||
FROM
|
||||
integrations
|
||||
WHERE
|
||||
integrations.googlereader_enabled='t' AND integrations.googlereader_username=$1
|
||||
`
|
||||
|
||||
err := s.db.QueryRow(query, username).Scan(&integration.UserID, &integration.GoogleReaderEnabled, &integration.GoogleReaderUsername, &integration.GoogleReaderPassword)
|
||||
if err == sql.ErrNoRows {
|
||||
return &integration, fmt.Errorf(`store: unable to find this user: %s`, username)
|
||||
} else if err != nil {
|
||||
return &integration, fmt.Errorf(`store: unable to fetch user: %v`, err)
|
||||
}
|
||||
|
||||
return &integration, nil
|
||||
}
|
||||
|
||||
// Integration returns user integration settings.
//
// When the user has no integration row yet, a zero-valued Integration is
// returned with a nil error.
func (s *Storage) Integration(userID int64) (*model.Integration, error) {
	query := `
		SELECT
			user_id,
			pinboard_enabled,
			pinboard_token,
			pinboard_tags,
			pinboard_mark_as_unread,
			instapaper_enabled,
			instapaper_username,
			instapaper_password,
			fever_enabled,
			fever_username,
			fever_token,
			googlereader_enabled,
			googlereader_username,
			googlereader_password,
			wallabag_enabled,
			wallabag_only_url,
			wallabag_url,
			wallabag_client_id,
			wallabag_client_secret,
			wallabag_username,
			wallabag_password,
			notion_enabled,
			notion_token,
			notion_page_id,
			nunux_keeper_enabled,
			nunux_keeper_url,
			nunux_keeper_api_key,
			espial_enabled,
			espial_url,
			espial_api_key,
			espial_tags,
			readwise_enabled,
			readwise_api_key,
			pocket_enabled,
			pocket_access_token,
			pocket_consumer_key,
			telegram_bot_enabled,
			telegram_bot_token,
			telegram_bot_chat_id,
			linkding_enabled,
			linkding_url,
			linkding_api_key,
			linkding_tags,
			linkding_mark_as_unread,
			matrix_bot_enabled,
			matrix_bot_user,
			matrix_bot_password,
			matrix_bot_url,
			matrix_bot_chat_id,
			apprise_enabled,
			apprise_url,
			apprise_services_url
		FROM
			integrations
		WHERE
			user_id=$1
	`
	var integration model.Integration
	// Scan order must match the SELECT column list above exactly.
	err := s.db.QueryRow(query, userID).Scan(
		&integration.UserID,
		&integration.PinboardEnabled,
		&integration.PinboardToken,
		&integration.PinboardTags,
		&integration.PinboardMarkAsUnread,
		&integration.InstapaperEnabled,
		&integration.InstapaperUsername,
		&integration.InstapaperPassword,
		&integration.FeverEnabled,
		&integration.FeverUsername,
		&integration.FeverToken,
		&integration.GoogleReaderEnabled,
		&integration.GoogleReaderUsername,
		&integration.GoogleReaderPassword,
		&integration.WallabagEnabled,
		&integration.WallabagOnlyURL,
		&integration.WallabagURL,
		&integration.WallabagClientID,
		&integration.WallabagClientSecret,
		&integration.WallabagUsername,
		&integration.WallabagPassword,
		&integration.NotionEnabled,
		&integration.NotionToken,
		&integration.NotionPageID,
		&integration.NunuxKeeperEnabled,
		&integration.NunuxKeeperURL,
		&integration.NunuxKeeperAPIKey,
		&integration.EspialEnabled,
		&integration.EspialURL,
		&integration.EspialAPIKey,
		&integration.EspialTags,
		&integration.ReadwiseEnabled,
		&integration.ReadwiseAPIKey,
		&integration.PocketEnabled,
		&integration.PocketAccessToken,
		&integration.PocketConsumerKey,
		&integration.TelegramBotEnabled,
		&integration.TelegramBotToken,
		&integration.TelegramBotChatID,
		&integration.LinkdingEnabled,
		&integration.LinkdingURL,
		&integration.LinkdingAPIKey,
		&integration.LinkdingTags,
		&integration.LinkdingMarkAsUnread,
		&integration.MatrixBotEnabled,
		&integration.MatrixBotUser,
		&integration.MatrixBotPassword,
		&integration.MatrixBotURL,
		&integration.MatrixBotChatID,
		&integration.AppriseEnabled,
		&integration.AppriseURL,
		&integration.AppriseServicesURL,
	)
	switch {
	case err == sql.ErrNoRows:
		// Missing row: the caller gets defaults, not an error.
		return &integration, nil
	case err != nil:
		return &integration, fmt.Errorf(`store: unable to fetch integration row: %v`, err)
	default:
		return &integration, nil
	}
}
|
||||
|
||||
// UpdateIntegration saves user integration settings.
//
// All integration columns are written in a single UPDATE keyed on the
// user ID. NOTE: the ordinal placeholders ($1..$52) in the query must stay
// aligned with the argument list passed to Exec below; the SET order and the
// struct-field order are intentionally different, so edit both sides together.
func (s *Storage) UpdateIntegration(integration *model.Integration) error {
	query := `
		UPDATE
			integrations
		SET
			pinboard_enabled=$1,
			pinboard_token=$2,
			pinboard_tags=$3,
			pinboard_mark_as_unread=$4,
			instapaper_enabled=$5,
			instapaper_username=$6,
			instapaper_password=$7,
			fever_enabled=$8,
			fever_username=$9,
			fever_token=$10,
			wallabag_enabled=$11,
			wallabag_only_url=$12,
			wallabag_url=$13,
			wallabag_client_id=$14,
			wallabag_client_secret=$15,
			wallabag_username=$16,
			wallabag_password=$17,
			nunux_keeper_enabled=$18,
			nunux_keeper_url=$19,
			nunux_keeper_api_key=$20,
			pocket_enabled=$21,
			pocket_access_token=$22,
			pocket_consumer_key=$23,
			googlereader_enabled=$24,
			googlereader_username=$25,
			googlereader_password=$26,
			telegram_bot_enabled=$27,
			telegram_bot_token=$28,
			telegram_bot_chat_id=$29,
			espial_enabled=$30,
			espial_url=$31,
			espial_api_key=$32,
			espial_tags=$33,
			linkding_enabled=$34,
			linkding_url=$35,
			linkding_api_key=$36,
			linkding_tags=$37,
			linkding_mark_as_unread=$38,
			matrix_bot_enabled=$39,
			matrix_bot_user=$40,
			matrix_bot_password=$41,
			matrix_bot_url=$42,
			matrix_bot_chat_id=$43,
			notion_enabled=$44,
			notion_token=$45,
			notion_page_id=$46,
			readwise_enabled=$47,
			readwise_api_key=$48,
			apprise_enabled=$49,
			apprise_url=$50,
			apprise_services_url=$51
		WHERE
			user_id=$52
	`
	// Arguments in $1..$52 order (the last one is the WHERE key).
	_, err := s.db.Exec(
		query,
		integration.PinboardEnabled,
		integration.PinboardToken,
		integration.PinboardTags,
		integration.PinboardMarkAsUnread,
		integration.InstapaperEnabled,
		integration.InstapaperUsername,
		integration.InstapaperPassword,
		integration.FeverEnabled,
		integration.FeverUsername,
		integration.FeverToken,
		integration.WallabagEnabled,
		integration.WallabagOnlyURL,
		integration.WallabagURL,
		integration.WallabagClientID,
		integration.WallabagClientSecret,
		integration.WallabagUsername,
		integration.WallabagPassword,
		integration.NunuxKeeperEnabled,
		integration.NunuxKeeperURL,
		integration.NunuxKeeperAPIKey,
		integration.PocketEnabled,
		integration.PocketAccessToken,
		integration.PocketConsumerKey,
		integration.GoogleReaderEnabled,
		integration.GoogleReaderUsername,
		integration.GoogleReaderPassword,
		integration.TelegramBotEnabled,
		integration.TelegramBotToken,
		integration.TelegramBotChatID,
		integration.EspialEnabled,
		integration.EspialURL,
		integration.EspialAPIKey,
		integration.EspialTags,
		integration.LinkdingEnabled,
		integration.LinkdingURL,
		integration.LinkdingAPIKey,
		integration.LinkdingTags,
		integration.LinkdingMarkAsUnread,
		integration.MatrixBotEnabled,
		integration.MatrixBotUser,
		integration.MatrixBotPassword,
		integration.MatrixBotURL,
		integration.MatrixBotChatID,
		integration.NotionEnabled,
		integration.NotionToken,
		integration.NotionPageID,
		integration.ReadwiseEnabled,
		integration.ReadwiseAPIKey,
		integration.AppriseEnabled,
		integration.AppriseURL,
		integration.AppriseServicesURL,
		integration.UserID,
	)

	if err != nil {
		return fmt.Errorf(`store: unable to update integration row: %v`, err)
	}

	return nil
}
|
||||
|
||||
// HasSaveEntry returns true if the given user can save articles to third-parties.
|
||||
func (s *Storage) HasSaveEntry(userID int64) (result bool) {
|
||||
query := `
|
||||
SELECT
|
||||
true
|
||||
FROM
|
||||
integrations
|
||||
WHERE
|
||||
user_id=$1
|
||||
AND
|
||||
(pinboard_enabled='t' OR instapaper_enabled='t' OR wallabag_enabled='t' OR notion_enabled='t' OR nunux_keeper_enabled='t' OR espial_enabled='t' OR readwise_enabled='t' OR pocket_enabled='t' OR linkding_enabled='t' OR apprise_enabled='t')
|
||||
`
|
||||
if err := s.db.QueryRow(query, userID).Scan(&result); err != nil {
|
||||
result = false
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
81
internal/storage/job.go
Normal file
81
internal/storage/job.go
Normal file
|
@ -0,0 +1,81 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"miniflux.app/v2/internal/config"
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// NewBatch returns a series of jobs.
|
||||
func (s *Storage) NewBatch(batchSize int) (jobs model.JobList, err error) {
|
||||
pollingParsingErrorLimit := config.Opts.PollingParsingErrorLimit()
|
||||
query := `
|
||||
SELECT
|
||||
id,
|
||||
user_id
|
||||
FROM
|
||||
feeds
|
||||
WHERE
|
||||
disabled is false AND next_check_at < now() AND
|
||||
CASE WHEN $1 > 0 THEN parsing_error_count < $1 ELSE parsing_error_count >= 0 END
|
||||
ORDER BY next_check_at ASC LIMIT $2
|
||||
`
|
||||
return s.fetchBatchRows(query, pollingParsingErrorLimit, batchSize)
|
||||
}
|
||||
|
||||
// NewUserBatch returns a series of jobs but only for a given user.
|
||||
func (s *Storage) NewUserBatch(userID int64, batchSize int) (jobs model.JobList, err error) {
|
||||
// We do not take the error counter into consideration when the given
|
||||
// user refresh manually all his feeds to force a refresh.
|
||||
query := `
|
||||
SELECT
|
||||
id,
|
||||
user_id
|
||||
FROM
|
||||
feeds
|
||||
WHERE
|
||||
user_id=$1 AND disabled is false
|
||||
ORDER BY next_check_at ASC LIMIT %d
|
||||
`
|
||||
return s.fetchBatchRows(fmt.Sprintf(query, batchSize), userID)
|
||||
}
|
||||
|
||||
// NewCategoryBatch returns a series of jobs but only for a given category.
|
||||
func (s *Storage) NewCategoryBatch(userID int64, categoryID int64, batchSize int) (jobs model.JobList, err error) {
|
||||
// We do not take the error counter into consideration when the given
|
||||
// user refresh manually all his feeds to force a refresh.
|
||||
query := `
|
||||
SELECT
|
||||
id,
|
||||
user_id
|
||||
FROM
|
||||
feeds
|
||||
WHERE
|
||||
user_id=$1 AND category_id=$2 AND disabled is false
|
||||
ORDER BY next_check_at ASC LIMIT %d
|
||||
`
|
||||
return s.fetchBatchRows(fmt.Sprintf(query, batchSize), userID, categoryID)
|
||||
}
|
||||
|
||||
func (s *Storage) fetchBatchRows(query string, args ...interface{}) (jobs model.JobList, err error) {
|
||||
rows, err := s.db.Query(query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch batch of jobs: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var job model.Job
|
||||
if err := rows.Scan(&job.FeedID, &job.UserID); err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch job: %v`, err)
|
||||
}
|
||||
|
||||
jobs = append(jobs, job)
|
||||
}
|
||||
|
||||
return jobs, nil
|
||||
}
|
123
internal/storage/session.go
Normal file
123
internal/storage/session.go
Normal file
|
@ -0,0 +1,123 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"miniflux.app/v2/internal/crypto"
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// CreateAppSessionWithUserPrefs creates a new application session with the given user preferences.
|
||||
func (s *Storage) CreateAppSessionWithUserPrefs(userID int64) (*model.Session, error) {
|
||||
user, err := s.UserByID(userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
session := model.Session{
|
||||
ID: crypto.GenerateRandomString(32),
|
||||
Data: &model.SessionData{
|
||||
CSRF: crypto.GenerateRandomString(64),
|
||||
Theme: user.Theme,
|
||||
Language: user.Language,
|
||||
},
|
||||
}
|
||||
|
||||
return s.createAppSession(&session)
|
||||
}
|
||||
|
||||
// CreateAppSession creates a new application session.
|
||||
func (s *Storage) CreateAppSession() (*model.Session, error) {
|
||||
session := model.Session{
|
||||
ID: crypto.GenerateRandomString(32),
|
||||
Data: &model.SessionData{
|
||||
CSRF: crypto.GenerateRandomString(64),
|
||||
},
|
||||
}
|
||||
|
||||
return s.createAppSession(&session)
|
||||
}
|
||||
|
||||
func (s *Storage) createAppSession(session *model.Session) (*model.Session, error) {
|
||||
query := `INSERT INTO sessions (id, data) VALUES ($1, $2)`
|
||||
_, err := s.db.Exec(query, session.ID, session.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to create app session: %v`, err)
|
||||
}
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
// UpdateAppSessionField updates only one session field.
|
||||
func (s *Storage) UpdateAppSessionField(sessionID, field string, value interface{}) error {
|
||||
query := `
|
||||
UPDATE
|
||||
sessions
|
||||
SET
|
||||
data = jsonb_set(data, '{%s}', to_jsonb($1::text), true)
|
||||
WHERE
|
||||
id=$2
|
||||
`
|
||||
_, err := s.db.Exec(fmt.Sprintf(query, field), value, sessionID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update session field: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AppSession returns the given session.
|
||||
func (s *Storage) AppSession(id string) (*model.Session, error) {
|
||||
var session model.Session
|
||||
|
||||
query := "SELECT id, data FROM sessions WHERE id=$1"
|
||||
err := s.db.QueryRow(query, id).Scan(
|
||||
&session.ID,
|
||||
&session.Data,
|
||||
)
|
||||
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
return nil, fmt.Errorf(`store: session not found: %s`, id)
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf(`store: unable to fetch session: %v`, err)
|
||||
default:
|
||||
return &session, nil
|
||||
}
|
||||
}
|
||||
|
||||
// FlushAllSessions removes all sessions from the database.
|
||||
func (s *Storage) FlushAllSessions() (err error) {
|
||||
_, err = s.db.Exec(`DELETE FROM user_sessions`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = s.db.Exec(`DELETE FROM sessions`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanOldSessions removes sessions older than specified days.
|
||||
func (s *Storage) CleanOldSessions(days int) int64 {
|
||||
query := `
|
||||
DELETE FROM
|
||||
sessions
|
||||
WHERE
|
||||
id IN (SELECT id FROM sessions WHERE created_at < now() - interval '%d days')
|
||||
`
|
||||
result, err := s.db.Exec(fmt.Sprintf(query, days))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
n, _ := result.RowsAffected()
|
||||
return n
|
||||
}
|
44
internal/storage/storage.go
Normal file
44
internal/storage/storage.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Storage handles all operations related to the database.
type Storage struct {
	db *sql.DB // shared connection pool; *sql.DB is safe for concurrent use
}
|
||||
|
||||
// NewStorage returns a new Storage.
|
||||
func NewStorage(db *sql.DB) *Storage {
|
||||
return &Storage{db}
|
||||
}
|
||||
|
||||
// DatabaseVersion returns the version of the database which is in use.
|
||||
func (s *Storage) DatabaseVersion() string {
|
||||
var dbVersion string
|
||||
err := s.db.QueryRow(`SELECT current_setting('server_version')`).Scan(&dbVersion)
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
return dbVersion
|
||||
}
|
||||
|
||||
// Ping checks if the database connection works.
// The check is bounded by a 5-second timeout so a hung connection cannot
// block the caller indefinitely.
func (s *Storage) Ping() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	return s.db.PingContext(ctx)
}
|
||||
|
||||
// DBStats returns database connection-pool statistics
// (open/idle connections, wait counts, etc.).
func (s *Storage) DBStats() sql.DBStats {
	return s.db.Stats()
}
|
32
internal/storage/timezone.go
Normal file
32
internal/storage/timezone.go
Normal file
|
@ -0,0 +1,32 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Timezones returns all timezones supported by the database.
|
||||
func (s *Storage) Timezones() (map[string]string, error) {
|
||||
timezones := make(map[string]string)
|
||||
rows, err := s.db.Query(`SELECT name FROM pg_timezone_names() ORDER BY name ASC`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch timezones: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var timezone string
|
||||
if err := rows.Scan(&timezone); err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch timezones row: %v`, err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(timezone, "posix") && !strings.HasPrefix(timezone, "SystemV") && timezone != "localtime" {
|
||||
timezones[timezone] = timezone
|
||||
}
|
||||
}
|
||||
|
||||
return timezones, nil
|
||||
}
|
652
internal/storage/user.go
Normal file
652
internal/storage/user.go
Normal file
|
@ -0,0 +1,652 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"miniflux.app/v2/internal/crypto"
|
||||
"miniflux.app/v2/internal/logger"
|
||||
"miniflux.app/v2/internal/model"
|
||||
|
||||
"github.com/lib/pq"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// CountUsers returns the total number of users.
|
||||
func (s *Storage) CountUsers() int {
|
||||
var result int
|
||||
err := s.db.QueryRow(`SELECT count(*) FROM users`).Scan(&result)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// SetLastLogin updates the last login date of a user.
|
||||
func (s *Storage) SetLastLogin(userID int64) error {
|
||||
query := `UPDATE users SET last_login_at=now() WHERE id=$1`
|
||||
_, err := s.db.Exec(query, userID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to update last login date: %v`, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UserExists checks if a user exists by using the given username.
|
||||
func (s *Storage) UserExists(username string) bool {
|
||||
var result bool
|
||||
s.db.QueryRow(`SELECT true FROM users WHERE username=LOWER($1)`, username).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// AnotherUserExists checks if another user exists with the given username.
|
||||
func (s *Storage) AnotherUserExists(userID int64, username string) bool {
|
||||
var result bool
|
||||
s.db.QueryRow(`SELECT true FROM users WHERE id != $1 AND username=LOWER($2)`, userID, username).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// CreateUser creates a new user.
//
// The user row, a default "All" category, and an empty integrations row are
// inserted inside a single transaction so a partially-initialized account is
// never left behind.
func (s *Storage) CreateUser(userCreationRequest *model.UserCreationRequest) (*model.User, error) {
	// An empty password is allowed: presumably accounts authenticated via
	// google_id / openid_connect_id have no local password — confirm with callers.
	var hashedPassword string
	if userCreationRequest.Password != "" {
		var err error
		hashedPassword, err = crypto.HashPassword(userCreationRequest.Password)
		if err != nil {
			return nil, err
		}
	}

	// NOTE: the RETURNING column list must stay aligned, one-to-one and in
	// order, with the Scan destinations below.
	query := `
		INSERT INTO users
			(username, password, is_admin, google_id, openid_connect_id)
		VALUES
			(LOWER($1), $2, $3, $4, $5)
		RETURNING
			id,
			username,
			is_admin,
			language,
			theme,
			timezone,
			entry_direction,
			entries_per_page,
			keyboard_shortcuts,
			show_reading_time,
			entry_swipe,
			gesture_nav,
			stylesheet,
			google_id,
			openid_connect_id,
			display_mode,
			entry_order,
			default_reading_speed,
			cjk_reading_speed,
			default_home_page,
			categories_sorting_order,
			mark_read_on_view
	`

	tx, err := s.db.Begin()
	if err != nil {
		return nil, fmt.Errorf(`store: unable to start transaction: %v`, err)
	}

	var user model.User
	err = tx.QueryRow(
		query,
		userCreationRequest.Username,
		hashedPassword,
		userCreationRequest.IsAdmin,
		userCreationRequest.GoogleID,
		userCreationRequest.OpenIDConnectID,
	).Scan(
		&user.ID,
		&user.Username,
		&user.IsAdmin,
		&user.Language,
		&user.Theme,
		&user.Timezone,
		&user.EntryDirection,
		&user.EntriesPerPage,
		&user.KeyboardShortcuts,
		&user.ShowReadingTime,
		&user.EntrySwipe,
		&user.GestureNav,
		&user.Stylesheet,
		&user.GoogleID,
		&user.OpenIDConnectID,
		&user.DisplayMode,
		&user.EntryOrder,
		&user.DefaultReadingSpeed,
		&user.CJKReadingSpeed,
		&user.DefaultHomePage,
		&user.CategoriesSortingOrder,
		&user.MarkReadOnView,
	)
	if err != nil {
		tx.Rollback()
		return nil, fmt.Errorf(`store: unable to create user: %v`, err)
	}

	// Every account starts with a default category...
	_, err = tx.Exec(`INSERT INTO categories (user_id, title) VALUES ($1, $2)`, user.ID, "All")
	if err != nil {
		tx.Rollback()
		return nil, fmt.Errorf(`store: unable to create user default category: %v`, err)
	}

	// ...and an integrations row with default (column-default) settings.
	_, err = tx.Exec(`INSERT INTO integrations (user_id) VALUES ($1)`, user.ID)
	if err != nil {
		tx.Rollback()
		return nil, fmt.Errorf(`store: unable to create integration row: %v`, err)
	}

	if err := tx.Commit(); err != nil {
		return nil, fmt.Errorf(`store: unable to commit transaction: %v`, err)
	}

	return &user, nil
}
|
||||
|
||||
// UpdateUser updates a user.
//
// Two nearly-identical branches are used: when user.Password is non-empty
// the password is re-hashed and written along with the other columns;
// otherwise the password column is left untouched. The SET lists and the
// Exec argument lists of each branch must stay aligned placeholder-by-
// placeholder — edit both together.
func (s *Storage) UpdateUser(user *model.User) error {
	if user.Password != "" {
		hashedPassword, err := crypto.HashPassword(user.Password)
		if err != nil {
			return err
		}

		query := `
			UPDATE users SET
				username=LOWER($1),
				password=$2,
				is_admin=$3,
				theme=$4,
				language=$5,
				timezone=$6,
				entry_direction=$7,
				entries_per_page=$8,
				keyboard_shortcuts=$9,
				show_reading_time=$10,
				entry_swipe=$11,
				gesture_nav=$12,
				stylesheet=$13,
				google_id=$14,
				openid_connect_id=$15,
				display_mode=$16,
				entry_order=$17,
				default_reading_speed=$18,
				cjk_reading_speed=$19,
				default_home_page=$20,
				categories_sorting_order=$21,
				mark_read_on_view=$22
			WHERE
				id=$23
		`

		_, err = s.db.Exec(
			query,
			user.Username,
			hashedPassword,
			user.IsAdmin,
			user.Theme,
			user.Language,
			user.Timezone,
			user.EntryDirection,
			user.EntriesPerPage,
			user.KeyboardShortcuts,
			user.ShowReadingTime,
			user.EntrySwipe,
			user.GestureNav,
			user.Stylesheet,
			user.GoogleID,
			user.OpenIDConnectID,
			user.DisplayMode,
			user.EntryOrder,
			user.DefaultReadingSpeed,
			user.CJKReadingSpeed,
			user.DefaultHomePage,
			user.CategoriesSortingOrder,
			user.MarkReadOnView,
			user.ID,
		)
		if err != nil {
			return fmt.Errorf(`store: unable to update user: %v`, err)
		}
	} else {
		// Same update without the password column (placeholders shift by one).
		query := `
			UPDATE users SET
				username=LOWER($1),
				is_admin=$2,
				theme=$3,
				language=$4,
				timezone=$5,
				entry_direction=$6,
				entries_per_page=$7,
				keyboard_shortcuts=$8,
				show_reading_time=$9,
				entry_swipe=$10,
				gesture_nav=$11,
				stylesheet=$12,
				google_id=$13,
				openid_connect_id=$14,
				display_mode=$15,
				entry_order=$16,
				default_reading_speed=$17,
				cjk_reading_speed=$18,
				default_home_page=$19,
				categories_sorting_order=$20,
				mark_read_on_view=$21
			WHERE
				id=$22
		`

		_, err := s.db.Exec(
			query,
			user.Username,
			user.IsAdmin,
			user.Theme,
			user.Language,
			user.Timezone,
			user.EntryDirection,
			user.EntriesPerPage,
			user.KeyboardShortcuts,
			user.ShowReadingTime,
			user.EntrySwipe,
			user.GestureNav,
			user.Stylesheet,
			user.GoogleID,
			user.OpenIDConnectID,
			user.DisplayMode,
			user.EntryOrder,
			user.DefaultReadingSpeed,
			user.CJKReadingSpeed,
			user.DefaultHomePage,
			user.CategoriesSortingOrder,
			user.MarkReadOnView,
			user.ID,
		)

		if err != nil {
			return fmt.Errorf(`store: unable to update user: %v`, err)
		}
	}

	return nil
}
|
||||
|
||||
// UserLanguage returns the language of the given user.
|
||||
func (s *Storage) UserLanguage(userID int64) (language string) {
|
||||
err := s.db.QueryRow(`SELECT language FROM users WHERE id = $1`, userID).Scan(&language)
|
||||
if err != nil {
|
||||
return "en_US"
|
||||
}
|
||||
|
||||
return language
|
||||
}
|
||||
|
||||
// UserByID finds a user by the ID.
// Returns (nil, nil) when no user matches (see fetchUser).
//
// NOTE: the selected column list must stay aligned with the Scan
// destinations in fetchUser.
func (s *Storage) UserByID(userID int64) (*model.User, error) {
	query := `
		SELECT
			id,
			username,
			is_admin,
			theme,
			language,
			timezone,
			entry_direction,
			entries_per_page,
			keyboard_shortcuts,
			show_reading_time,
			entry_swipe,
			gesture_nav,
			last_login_at,
			stylesheet,
			google_id,
			openid_connect_id,
			display_mode,
			entry_order,
			default_reading_speed,
			cjk_reading_speed,
			default_home_page,
			categories_sorting_order,
			mark_read_on_view
		FROM
			users
		WHERE
			id = $1
	`
	return s.fetchUser(query, userID)
}
|
||||
|
||||
// UserByUsername finds a user by the username (lowercase comparison on the
// database side). Returns (nil, nil) when no user matches (see fetchUser).
//
// NOTE: the selected column list must stay aligned with the Scan
// destinations in fetchUser.
func (s *Storage) UserByUsername(username string) (*model.User, error) {
	query := `
		SELECT
			id,
			username,
			is_admin,
			theme,
			language,
			timezone,
			entry_direction,
			entries_per_page,
			keyboard_shortcuts,
			show_reading_time,
			entry_swipe,
			gesture_nav,
			last_login_at,
			stylesheet,
			google_id,
			openid_connect_id,
			display_mode,
			entry_order,
			default_reading_speed,
			cjk_reading_speed,
			default_home_page,
			categories_sorting_order,
			mark_read_on_view
		FROM
			users
		WHERE
			username=LOWER($1)
	`
	return s.fetchUser(query, username)
}
|
||||
|
||||
// UserByField finds a user by a field value.
// Returns (nil, nil) when no user matches (see fetchUser).
//
// The column name is quoted with pq.QuoteIdentifier before being formatted
// into the query, which prevents SQL injection through the field parameter.
// NOTE: the selected column list must stay aligned with the Scan
// destinations in fetchUser.
func (s *Storage) UserByField(field, value string) (*model.User, error) {
	query := `
		SELECT
			id,
			username,
			is_admin,
			theme,
			language,
			timezone,
			entry_direction,
			entries_per_page,
			keyboard_shortcuts,
			show_reading_time,
			entry_swipe,
			gesture_nav,
			last_login_at,
			stylesheet,
			google_id,
			openid_connect_id,
			display_mode,
			entry_order,
			default_reading_speed,
			cjk_reading_speed,
			default_home_page,
			categories_sorting_order,
			mark_read_on_view
		FROM
			users
		WHERE
			%s=$1
	`
	return s.fetchUser(fmt.Sprintf(query, pq.QuoteIdentifier(field)), value)
}
|
||||
|
||||
// AnotherUserWithFieldExists returns true if a user has the value set for the given field.
|
||||
func (s *Storage) AnotherUserWithFieldExists(userID int64, field, value string) bool {
|
||||
var result bool
|
||||
s.db.QueryRow(fmt.Sprintf(`SELECT true FROM users WHERE id <> $1 AND %s=$2`, pq.QuoteIdentifier(field)), userID, value).Scan(&result)
|
||||
return result
|
||||
}
|
||||
|
||||
// UserByAPIKey returns a User from an API Key token.
// Returns (nil, nil) when no API key matches (see fetchUser).
//
// NOTE: the selected column list must stay aligned with the Scan
// destinations in fetchUser.
func (s *Storage) UserByAPIKey(token string) (*model.User, error) {
	query := `
		SELECT
			u.id,
			u.username,
			u.is_admin,
			u.theme,
			u.language,
			u.timezone,
			u.entry_direction,
			u.entries_per_page,
			u.keyboard_shortcuts,
			u.show_reading_time,
			u.entry_swipe,
			u.gesture_nav,
			u.last_login_at,
			u.stylesheet,
			u.google_id,
			u.openid_connect_id,
			u.display_mode,
			u.entry_order,
			u.default_reading_speed,
			u.cjk_reading_speed,
			u.default_home_page,
			u.categories_sorting_order,
			u.mark_read_on_view
		FROM
			users u
		LEFT JOIN
			api_keys ON api_keys.user_id=u.id
		WHERE
			api_keys.token = $1
	`
	return s.fetchUser(query, token)
}
|
||||
|
||||
// fetchUser runs a user query and scans the single resulting row.
//
// The query's selected columns must match, in order, the Scan destinations
// below. When no row matches, (nil, nil) is returned — callers must check
// for a nil user, not only a non-nil error.
func (s *Storage) fetchUser(query string, args ...interface{}) (*model.User, error) {
	var user model.User
	err := s.db.QueryRow(query, args...).Scan(
		&user.ID,
		&user.Username,
		&user.IsAdmin,
		&user.Theme,
		&user.Language,
		&user.Timezone,
		&user.EntryDirection,
		&user.EntriesPerPage,
		&user.KeyboardShortcuts,
		&user.ShowReadingTime,
		&user.EntrySwipe,
		&user.GestureNav,
		&user.LastLoginAt,
		&user.Stylesheet,
		&user.GoogleID,
		&user.OpenIDConnectID,
		&user.DisplayMode,
		&user.EntryOrder,
		&user.DefaultReadingSpeed,
		&user.CJKReadingSpeed,
		&user.DefaultHomePage,
		&user.CategoriesSortingOrder,
		&user.MarkReadOnView,
	)

	if err == sql.ErrNoRows {
		// Not found is not an error for this helper.
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf(`store: unable to fetch user: %v`, err)
	}

	return &user, nil
}
|
||||
|
||||
// RemoveUser deletes a user.
//
// The user row and its integration settings are removed in one transaction.
// NOTE(review): feed rows are not deleted here — presumably handled by
// foreign-key cascade or by RemoveUserAsync; confirm before relying on it.
func (s *Storage) RemoveUser(userID int64) error {
	tx, err := s.db.Begin()
	if err != nil {
		return fmt.Errorf(`store: unable to start transaction: %v`, err)
	}

	if _, err := tx.Exec(`DELETE FROM users WHERE id=$1`, userID); err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to remove user #%d: %v`, userID, err)
	}

	if _, err := tx.Exec(`DELETE FROM integrations WHERE user_id=$1`, userID); err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to remove integration settings for user #%d: %v`, userID, err)
	}

	if err := tx.Commit(); err != nil {
		return fmt.Errorf(`store: unable to commit transaction: %v`, err)
	}

	return nil
}
|
||||
|
||||
// RemoveUserAsync deletes user data without locking the database.
//
// The work runs in a fire-and-forget goroutine: the caller gets no
// completion signal and failures are only logged.
func (s *Storage) RemoveUserAsync(userID int64) {
	go func() {
		// Delete the user's feeds first, one by one, via RemoveFeed.
		if err := s.deleteUserFeeds(userID); err != nil {
			logger.Error(`%v`, err)
			return
		}

		// Errors from the final deletions are intentionally ignored
		// (best-effort cleanup).
		s.db.Exec(`DELETE FROM users WHERE id=$1`, userID)
		s.db.Exec(`DELETE FROM integrations WHERE user_id=$1`, userID)

		logger.Debug(`[MASS DELETE] User #%d has been deleted (%d GoRoutines)`, userID, runtime.NumGoroutine())
	}()
}
|
||||
|
||||
func (s *Storage) deleteUserFeeds(userID int64) error {
|
||||
rows, err := s.db.Query(`SELECT id FROM feeds WHERE user_id=$1`, userID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to get user feeds: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var feedID int64
|
||||
rows.Scan(&feedID)
|
||||
|
||||
logger.Debug(`[USER DELETION] Deleting feed #%d for user #%d (%d GoRoutines)`, feedID, userID, runtime.NumGoroutine())
|
||||
|
||||
if err := s.RemoveFeed(userID, feedID); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Users returns all users, ordered by username.
//
// NOTE: the selected column list must stay aligned with the Scan
// destinations inside the loop.
func (s *Storage) Users() (model.Users, error) {
	query := `
		SELECT
			id,
			username,
			is_admin,
			theme,
			language,
			timezone,
			entry_direction,
			entries_per_page,
			keyboard_shortcuts,
			show_reading_time,
			entry_swipe,
			gesture_nav,
			last_login_at,
			stylesheet,
			google_id,
			openid_connect_id,
			display_mode,
			entry_order,
			default_reading_speed,
			cjk_reading_speed,
			default_home_page,
			categories_sorting_order,
			mark_read_on_view
		FROM
			users
		ORDER BY username ASC
	`
	rows, err := s.db.Query(query)
	if err != nil {
		return nil, fmt.Errorf(`store: unable to fetch users: %v`, err)
	}
	defer rows.Close()

	var users model.Users
	for rows.Next() {
		var user model.User
		err := rows.Scan(
			&user.ID,
			&user.Username,
			&user.IsAdmin,
			&user.Theme,
			&user.Language,
			&user.Timezone,
			&user.EntryDirection,
			&user.EntriesPerPage,
			&user.KeyboardShortcuts,
			&user.ShowReadingTime,
			&user.EntrySwipe,
			&user.GestureNav,
			&user.LastLoginAt,
			&user.Stylesheet,
			&user.GoogleID,
			&user.OpenIDConnectID,
			&user.DisplayMode,
			&user.EntryOrder,
			&user.DefaultReadingSpeed,
			&user.CJKReadingSpeed,
			&user.DefaultHomePage,
			&user.CategoriesSortingOrder,
			&user.MarkReadOnView,
		)

		if err != nil {
			return nil, fmt.Errorf(`store: unable to fetch users row: %v`, err)
		}

		users = append(users, &user)
	}

	return users, nil
}
|
||||
|
||||
// CheckPassword validate the hashed password.
|
||||
func (s *Storage) CheckPassword(username, password string) error {
|
||||
var hash string
|
||||
username = strings.ToLower(username)
|
||||
|
||||
err := s.db.QueryRow("SELECT password FROM users WHERE username=$1", username).Scan(&hash)
|
||||
if err == sql.ErrNoRows {
|
||||
return fmt.Errorf(`store: unable to find this user: %s`, username)
|
||||
} else if err != nil {
|
||||
return fmt.Errorf(`store: unable to fetch user: %v`, err)
|
||||
}
|
||||
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)); err != nil {
|
||||
return fmt.Errorf(`store: invalid password for "%s" (%v)`, username, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasPassword returns true if the given user has a password defined.
|
||||
func (s *Storage) HasPassword(userID int64) (bool, error) {
|
||||
var result bool
|
||||
query := `SELECT true FROM users WHERE id=$1 AND password <> ''`
|
||||
|
||||
err := s.db.QueryRow(query, userID).Scan(&result)
|
||||
if err == sql.ErrNoRows {
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
return false, fmt.Errorf(`store: unable to execute query: %v`, err)
|
||||
}
|
||||
|
||||
if result {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
182
internal/storage/user_session.go
Normal file
182
internal/storage/user_session.go
Normal file
|
@ -0,0 +1,182 @@
|
|||
// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package storage // import "miniflux.app/v2/internal/storage"
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"miniflux.app/v2/internal/crypto"
|
||||
"miniflux.app/v2/internal/model"
|
||||
)
|
||||
|
||||
// UserSessions returns the list of sessions for the given user.
|
||||
func (s *Storage) UserSessions(userID int64) (model.UserSessions, error) {
|
||||
query := `
|
||||
SELECT
|
||||
id,
|
||||
user_id,
|
||||
token,
|
||||
created_at,
|
||||
user_agent,
|
||||
ip
|
||||
FROM
|
||||
user_sessions
|
||||
WHERE
|
||||
user_id=$1 ORDER BY id DESC
|
||||
`
|
||||
rows, err := s.db.Query(query, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch user sessions: %v`, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var sessions model.UserSessions
|
||||
for rows.Next() {
|
||||
var session model.UserSession
|
||||
err := rows.Scan(
|
||||
&session.ID,
|
||||
&session.UserID,
|
||||
&session.Token,
|
||||
&session.CreatedAt,
|
||||
&session.UserAgent,
|
||||
&session.IP,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`store: unable to fetch user session row: %v`, err)
|
||||
}
|
||||
|
||||
sessions = append(sessions, &session)
|
||||
}
|
||||
|
||||
return sessions, nil
|
||||
}
|
||||
|
||||
// CreateUserSessionFromUsername creates a new user session.
|
||||
func (s *Storage) CreateUserSessionFromUsername(username, userAgent, ip string) (sessionID string, userID int64, err error) {
|
||||
token := crypto.GenerateRandomString(64)
|
||||
|
||||
tx, err := s.db.Begin()
|
||||
if err != nil {
|
||||
return "", 0, fmt.Errorf(`store: unable to start transaction: %v`, err)
|
||||
}
|
||||
|
||||
err = tx.QueryRow(`SELECT id FROM users WHERE username = LOWER($1)`, username).Scan(&userID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return "", 0, fmt.Errorf(`store: unable to fetch user ID: %v`, err)
|
||||
}
|
||||
|
||||
_, err = tx.Exec(
|
||||
`INSERT INTO user_sessions (token, user_id, user_agent, ip) VALUES ($1, $2, $3, $4)`,
|
||||
token,
|
||||
userID,
|
||||
userAgent,
|
||||
ip,
|
||||
)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return "", 0, fmt.Errorf(`store: unable to create user session: %v`, err)
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return "", 0, fmt.Errorf(`store: unable to commit transaction: %v`, err)
|
||||
}
|
||||
|
||||
return token, userID, nil
|
||||
}
|
||||
|
||||
// UserSessionByToken finds a session by the token.
|
||||
func (s *Storage) UserSessionByToken(token string) (*model.UserSession, error) {
|
||||
var session model.UserSession
|
||||
|
||||
query := `
|
||||
SELECT
|
||||
id,
|
||||
user_id,
|
||||
token,
|
||||
created_at,
|
||||
user_agent,
|
||||
ip
|
||||
FROM
|
||||
user_sessions
|
||||
WHERE
|
||||
token = $1
|
||||
`
|
||||
err := s.db.QueryRow(query, token).Scan(
|
||||
&session.ID,
|
||||
&session.UserID,
|
||||
&session.Token,
|
||||
&session.CreatedAt,
|
||||
&session.UserAgent,
|
||||
&session.IP,
|
||||
)
|
||||
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
return nil, nil
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf(`store: unable to fetch user session: %v`, err)
|
||||
default:
|
||||
return &session, nil
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveUserSessionByToken remove a session by using the token.
|
||||
func (s *Storage) RemoveUserSessionByToken(userID int64, token string) error {
|
||||
query := `DELETE FROM user_sessions WHERE user_id=$1 AND token=$2`
|
||||
result, err := s.db.Exec(query, userID, token)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to remove this user session: %v`, err)
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to remove this user session: %v`, err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
return fmt.Errorf(`store: nothing has been removed`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveUserSessionByID remove a session by using the ID.
|
||||
func (s *Storage) RemoveUserSessionByID(userID, sessionID int64) error {
|
||||
query := `DELETE FROM user_sessions WHERE user_id=$1 AND id=$2`
|
||||
result, err := s.db.Exec(query, userID, sessionID)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to remove this user session: %v`, err)
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf(`store: unable to remove this user session: %v`, err)
|
||||
}
|
||||
|
||||
if count != 1 {
|
||||
return fmt.Errorf(`store: nothing has been removed`)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanOldUserSessions removes user sessions older than specified days.
|
||||
func (s *Storage) CleanOldUserSessions(days int) int64 {
|
||||
query := `
|
||||
DELETE FROM
|
||||
user_sessions
|
||||
WHERE
|
||||
id IN (SELECT id FROM user_sessions WHERE created_at < now() - interval '%d days')
|
||||
`
|
||||
result, err := s.db.Exec(fmt.Sprintf(query, days))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
n, _ := result.RowsAffected()
|
||||
return n
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue