// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package readability // import "miniflux.app/v2/internal/reader/readability"

import (
	"bytes"
	"fmt"
	"os"
	"strings"
	"testing"

	"github.com/PuerkitoBio/goquery"
)

func TestBaseURL(t *testing.T) {
	html := `<html>
		<head><base href="https://example.org/"></head>
		<body><article>Some content</article></body>
	</html>`

	baseURL, _, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		t.Fatal(err)
	}

	if baseURL != "https://example.org/" {
		t.Errorf(`Unexpected base URL, got %q instead of "https://example.org/"`, baseURL)
	}
}

func TestMultipleBaseURL(t *testing.T) {
	// Only the first <base> element should win; the second URL is an arbitrary example.
	html := `<html>
		<head><base href="https://example.org/"><base href="https://example.com/"></head>
		<body><article>Some content</article></body>
	</html>`

	baseURL, _, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		t.Fatal(err)
	}

	if baseURL != "https://example.org/" {
		t.Errorf(`Unexpected base URL, got %q instead of "https://example.org/"`, baseURL)
	}
}

func TestRelativeBaseURL(t *testing.T) {
	// A relative href (the exact path is an arbitrary example) must not be reported as a base URL.
	html := `<html>
		<head><base href="/test/"></head>
		<body><article>Some content</article></body>
	</html>`

	baseURL, _, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		t.Fatal(err)
	}

	if baseURL != "" {
		t.Errorf(`Unexpected base URL, got %q`, baseURL)
	}
}

func TestWithoutBaseURL(t *testing.T) {
	html := `<html>
		<head><title>Test</title></head>
		<body><article>Some content</article></body>
	</html>`

	baseURL, _, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		t.Fatal(err)
	}

	if baseURL != "" {
		t.Errorf(`Unexpected base URL, got %q instead of ""`, baseURL)
	}
}

func TestRemoveStyleScript(t *testing.T) {
	// Fixture and expected markup are reconstructions; the original markup of this test was lost.
	html := `<html>
		<head>
			<title>Test</title>
			<style>body { font-size: 1em; }</style>
		</head>
		<body>
			<script type="text/javascript">console.log("hello");</script>
			<article><p>Some content</p></article>
		</body>
	</html>`
	want := `<div><p>Somecontent</p></div>`

	_, content, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		t.Fatal(err)
	}

	content = strings.ReplaceAll(content, "\n", "")
	content = strings.ReplaceAll(content, " ", "")
	content = strings.ReplaceAll(content, "\t", "")

	if content != want {
		t.Errorf(`Invalid content, got %s instead of %s`, content, want)
	}
}
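// ExampleExtractContent is a small usage sketch, not one of the original tests:
// it shows the call shape used throughout this file (reader in; base URL,
// extracted fragment, and error out). The fixture markup is assumed and no
// output is asserted, so the example is compiled but not verified.
func ExampleExtractContent() {
	html := `<html><head><base href="https://example.org/"></head><body><article><p>Some content.</p></article></body></html>`
	baseURL, content, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		panic(err)
	}
	fmt.Println(baseURL)
	_ = content // the extracted article fragment; its exact markup depends on the extractor
}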
func TestRemoveBlacklist(t *testing.T) {
	// Fixture and expected markup are reconstructions: the blacklisted class names
	// (popupbody, an -ad suffix, g-plus) are assumed from the removal rules exercised
	// elsewhere in this file; the original markup of this test was lost.
	html := `<html>
		<head><title>Test</title></head>
		<body>
			<article>
				<div class="popupbody">Some content</div>
				<div class="super-ad">Some other thing</div>
				<div class="g-plus">And more</div>
				<p>Valid!</p>
			</article>
		</body>
	</html>`
	want := `<div><p>Valid!</p></div>`

	_, content, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		t.Fatal(err)
	}

	content = strings.ReplaceAll(content, "\n", "")
	content = strings.ReplaceAll(content, " ", "")
	content = strings.ReplaceAll(content, "\t", "")

	if content != want {
		t.Errorf(`Invalid content, got %s instead of %s`, content, want)
	}
}
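// TestNestedSpanInCodeBlock verifies that span elements nested inside a code
// block survive extraction instead of being stripped.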

func TestNestedSpanInCodeBlock(t *testing.T) {
	// Fixture and expected markup are reconstructions; the original markup of this test was lost.
	html := `<html>
		<head><title>Test</title></head>
		<body>
			<article>
				<p>Some content</p>
				<pre>Code block with <span>nested span</span> <span># exit 1</span></pre>
			</article>
		</body>
	</html>`
	want := `<div><p>Some content</p><pre>Code block with <span>nested span</span> <span># exit 1</span></pre></div>`

	_, result, err := ExtractContent(strings.NewReader(html))
	if err != nil {
		t.Fatal(err)
	}

	if result != want {
		t.Errorf(`Invalid content, got %s instead of %s`, result, want)
	}
}
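// BenchmarkExtractContent runs the extractor repeatedly against two real-world
// fixtures from testdata/ (a GitHub page and a Wikipedia page) loaded once up front.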
func BenchmarkExtractContent(b *testing.B) {
	var testCases = map[string][]byte{
		"miniflux_github.html":    {},
		"miniflux_wikipedia.html": {},
	}

	for filename := range testCases {
		data, err := os.ReadFile("testdata/" + filename)
		if err != nil {
			b.Fatalf(`Unable to read file %q: %v`, filename, err)
		}
		testCases[filename] = data
	}

	for range b.N {
		for _, v := range testCases {
			ExtractContent(bytes.NewReader(v))
		}
	}
}

func TestGetClassWeight(t *testing.T) {
	// The class and id values in these fixtures are reconstructions: the original
	// attribute values were lost, so representative words matching the positive and
	// negative patterns named by each case were substituted.
	testCases := []struct {
		name     string
		html     string
		expected float32
	}{
		{
			name: "no class or id",
			html: `<div>
content
`,
			expected: 0,
		},
		{
			name:     "positive class only",
			html:     `<div class="article">content</div>`,
			expected: 25,
		},
		{
			name:     "negative class only",
			html:     `<div class="comment">content</div>`,
			expected: -25,
		},
		{
			name:     "positive id only",
			html:     `<div id="main">content</div>`,
			expected: 25,
		},
		{
			name:     "negative id only",
			html:     `<div id="sidebar">content</div>`,
			expected: -25,
		},
		{
			name:     "positive class and positive id",
			html:     `<div class="article" id="main">content</div>`,
			expected: 50,
		},
		{
			name:     "negative class and negative id",
			html:     `<div class="comment" id="sidebar">content</div>`,
			expected: -50,
		},
		{
			name:     "positive class and negative id",
			html:     `<div class="article" id="sidebar">content</div>`,
			expected: 0,
		},
		{
			name:     "negative class and positive id",
			html:     `<div class="comment" id="main">content</div>`,
			expected: 0,
		},
		{
			name:     "multiple positive classes",
			html:     `<div class="article content post">content</div>`,
			expected: 25,
		},
		{
			name:     "multiple negative classes",
			html:     `<div class="comment sidebar sponsor">content</div>`,
			expected: -25,
		},
		{
			name:     "mixed positive and negative classes",
			html:     `<div class="article comment">content</div>`,
			expected: -25, // negative takes precedence since it's checked first
		},
		{
			name:     "case insensitive class",
			html:     `<div class="ARTICLE">content</div>`,
			expected: 25,
		},
		{
			name:     "case insensitive id",
			html:     `<div id="MAIN">content</div>`,
			expected: 25,
		},
		{
			name:     "non-matching class and id",
			html:     `<div class="foo" id="bar">content</div>`,
			expected: 0,
		},
		{
			name:     "empty class and id",
			html:     `<div class="" id="">content</div>`,
			expected: 0,
		},
		{
			name:     "class with special characters",
			html:     `<div class="com-box">content</div>`,
			expected: -25, // matches com- in negative regex
		},
		{
			name:     "id with special characters",
			html:     `<div id="h-entry">content</div>`,
			expected: 25, // matches h-entry in positive regex
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			doc, err := goquery.NewDocumentFromReader(strings.NewReader(tc.html))
			if err != nil {
				t.Fatalf("Failed to parse HTML: %v", err)
			}

			selection := doc.Find("div").First()
			if selection.Length() == 0 {
				t.Fatal("No div element found in HTML")
			}

			result := getClassWeight(selection)
			if result != tc.expected {
				t.Errorf("Expected weight %f, got %f", tc.expected, result)
			}
		})
	}
}
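// The table above implies the scoring rule for getClassWeight: the weight starts
// at 0, and each of the class and id attributes contributes -25 if it matches the
// negative pattern (checked first) or +25 if it matches the positive pattern,
// giving values in {-50, -25, 0, 25, 50}. This is a reading of the test
// expectations, not a statement about the implementation.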
func TestGetClassWeightRegexPatterns(t *testing.T) {
	// Test specific regex patterns used in getClassWeight
	positiveWords := []string{"article", "body", "content", "entry", "hentry", "h-entry", "main", "page", "pagination", "post", "text", "blog", "story"}
	negativeWords := []string{"hid", "banner", "combx", "comment", "com-", "contact", "foot", "masthead", "media", "meta", "modal", "outbrain", "promo", "related", "scroll", "share", "shoutbox", "sidebar", "skyscraper", "sponsor", "shopping", "tags", "tool", "widget", "byline", "author", "dateline", "writtenby"}

	for _, word := range positiveWords {
		t.Run("positive_"+word, func(t *testing.T) {
			// The original fixture embedded the word in the div's class attribute;
			// the concatenation below reconstructs that (assumed form).
			html := `<div class="` + word + `">content</div>`
			doc, err := goquery.NewDocumentFromReader(strings.NewReader(html))
			if err != nil {
				t.Fatalf("Failed to parse HTML: %v", err)
			}

			selection := doc.Find("div").First()
			result := getClassWeight(selection)
			if result != 25 {
				t.Errorf("Expected positive weight 25 for word '%s', got %f", word, result)
			}
		})
	}

	for _, word := range negativeWords {
		t.Run("negative_"+word, func(t *testing.T) {
			html := `<div class="` + word + `">content</div>`
			doc, err := goquery.NewDocumentFromReader(strings.NewReader(html))
			if err != nil {
				t.Fatalf("Failed to parse HTML: %v", err)
			}

			selection := doc.Find("div").First()
			result := getClassWeight(selection)
			if result != -25 {
				t.Errorf("Expected negative weight -25 for word '%s', got %f", word, result)
			}
		})
	}
}

func TestRemoveUnlikelyCandidates(t *testing.T) {
	testCases := []struct {
		name     string
		html     string
		expected string
	}{
		{
			name: "removes elements with popupbody class",
			html: `
popup content
good content
`, expected: `
good content
`, }, { name: "removes elements with -ad in class", html: `
ad content
good content
`, expected: `
good content
`, }, { name: "removes elements with g-plus in class", html: `
social content
good content
`, expected: `
good content
`, }, { name: "removes elements with unlikely candidates in class", html: `
good content
`, expected: `
good content
`, }, { name: "preserves elements with unlikely candidates but also good candidates in class", html: `
good content
`, expected: `
good content
`, }, { name: "removes elements with unlikely candidates in id", html: `
good content
`, expected: `
good content
`, }, { name: "preserves elements with unlikely candidates but also good candidates in id", html: `
mixed content
good content
`, expected: `
mixed content
good content
`, }, { name: "preserves html and body tags", html: ``, expected: ``, }, { name: "preserves elements within code blocks", html: `
`, expected: `
`, }, { name: "preserves elements within pre tags", html: `
`, expected: `
`, }, { name: "case insensitive matching", html: `
good content
`, expected: `
good content
`, }, { name: "multiple unlikely patterns in single class", html: `
good content
`, expected: `
good content
`, }, { name: "elements without class or id are preserved", html: `
no attributes

paragraph

`, expected: `
no attributes

paragraph

`, }, { name: "removes nested unlikely elements", html: `

good content

`, expected: `

good content

`, }, { name: "comprehensive unlikely candidates test", html: `
combx
comment
community
cover-wrap
disqus
extra
foot
header
legends
remark
replies
rss
shoutbox
skyscraper
social
supplemental
ad-break
agegate
pager
yom-remote
good content
`, expected: `
good content
`, }, { name: "preserves good candidates that contain unlikely words", html: `
should be preserved
should be preserved
should be removed
`, expected: `
should be preserved
should be preserved
`, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { doc, err := goquery.NewDocumentFromReader(strings.NewReader(tc.html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } removeUnlikelyCandidates(doc) result, err := doc.Html() if err != nil { t.Fatalf("Failed to get HTML: %v", err) } // Normalize whitespace for comparison result = strings.TrimSpace(result) expected := strings.TrimSpace(tc.expected) if result != expected { t.Errorf("\nExpected:\n%s\n\nGot:\n%s", expected, result) } }) } } func TestRemoveUnlikelyCandidatesShouldRemoveFunction(t *testing.T) { // Test the internal shouldRemove function behavior through the public interface testCases := []struct { name string attr string attrType string // "class" or "id" expected bool // true if should be removed }{ // Special hardcoded cases {"popupbody in class", "popupbody", "class", true}, {"contains popupbody in class", "main-popupbody-content", "class", true}, {"ad suffix in class", "super-ad", "class", true}, {"ad in middle of class", "pre-ad-post", "class", true}, {"g-plus in class", "g-plus-share", "class", true}, {"contains g-plus in class", "social-g-plus-button", "class", true}, // Unlikely candidates regexp {"banner class", "banner", "class", true}, {"breadcrumbs class", "breadcrumbs", "class", true}, {"comment class", "comment", "class", true}, {"sidebar class", "sidebar", "class", true}, {"footer class", "footer", "class", true}, // Unlikely candidates with good candidates (should not be removed) {"banner with article", "banner article", "class", false}, {"comment with main", "comment main", "class", false}, {"sidebar with body", "sidebar body", "class", false}, {"footer with column", "footer column", "class", false}, {"menu with shadow", "menu shadow", "class", false}, // Case insensitive {"uppercase banner", "BANNER", "class", true}, {"mixed case comment", "Comment", "class", true}, {"uppercase with good", "BANNER ARTICLE", "class", false}, // ID attributes {"banner id", "banner", "id", true}, {"comment id", "comment", "id", true}, {"banner with article id", "banner article", "id", false}, // Good candidates only {"article class", "article", "class", false}, {"main class", "main", "class", false}, {"content class", "content", "class", false}, {"body class", "body", "class", false}, // No matches {"random class", "random-class", "class", false}, {"normal content", "normal-content", "class", false}, {"empty string", "", "class", false}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { var html string if tc.attrType == "class" { html = `
content
` } else { html = `
content
` } doc, err := goquery.NewDocumentFromReader(strings.NewReader(html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } // Count elements before removal beforeCount := doc.Find("div").Length() removeUnlikelyCandidates(doc) // Count elements after removal afterCount := doc.Find("div").Length() wasRemoved := beforeCount > afterCount if wasRemoved != tc.expected { t.Errorf("Expected element to be removed: %v, but was removed: %v", tc.expected, wasRemoved) } }) } } func TestRemoveUnlikelyCandidatesPreservation(t *testing.T) { testCases := []struct { name string html string description string }{ { name: "preserves html tag", html: `
content
`, description: "HTML tag should never be removed regardless of class", }, { name: "preserves body tag", html: `
content
`, description: "Body tag should never be removed regardless of class", }, { name: "preserves elements in pre tags", html: `
`, description: "Elements within pre tags should be preserved", }, { name: "preserves elements in code tags", html: `code`, description: "Elements within code tags should be preserved", }, { name: "preserves nested elements in code blocks", html: `
`, description: "Deeply nested elements in code blocks should be preserved", }, { name: "preserves elements in mixed code scenarios", html: `
code
`, description: "Multiple code block scenarios should work correctly", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { doc, err := goquery.NewDocumentFromReader(strings.NewReader(tc.html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } // Count specific elements before removal beforeHtml := doc.Find("html").Length() beforeBody := doc.Find("body").Length() beforePre := doc.Find("pre").Length() beforeCode := doc.Find("code").Length() removeUnlikelyCandidates(doc) // Count specific elements after removal afterHtml := doc.Find("html").Length() afterBody := doc.Find("body").Length() afterPre := doc.Find("pre").Length() afterCode := doc.Find("code").Length() // These elements should always be preserved if beforeHtml != afterHtml { t.Errorf("HTML elements were removed: before=%d, after=%d", beforeHtml, afterHtml) } if beforeBody != afterBody { t.Errorf("Body elements were removed: before=%d, after=%d", beforeBody, afterBody) } if beforePre != afterPre { t.Errorf("Pre elements were removed: before=%d, after=%d", beforePre, afterPre) } if beforeCode != afterCode { t.Errorf("Code elements were removed: before=%d, after=%d", beforeCode, afterCode) } // Verify that elements within code blocks are preserved if tc.name == "preserves elements in pre tags" || tc.name == "preserves elements in code tags" || tc.name == "preserves nested elements in code blocks" { spanInCode := doc.Find("pre span, code span, pre div, code div").Length() if spanInCode == 0 { t.Error("Elements within code blocks were incorrectly removed") } } }) } } func TestGetArticle(t *testing.T) { testCases := []struct { name string html string expected string }{ { name: "single top candidate", html: `

This is the main content.

`, expected: `

This is the main content.

`, }, { name: "top candidate with high-scoring sibling", html: `

Main content here.

`, expected: `

Main content here.

`, }, { name: "top candidate with low-scoring sibling", html: `

Main content here.

`, expected: `

Main content here.

`, }, { name: "paragraph with high link density", html: `

This is content.

Some text with many different links here.

`, expected: `

This is content.

Some text with many different links here.

`, }, { name: "paragraph with low link density and long content", html: `

This is content.

This is a very long paragraph with substantial content that should be included because it has enough text and minimal links. This paragraph contains meaningful information that readers would want to see. The content is substantial and valuable.

`, expected: `

This is content.

This is a very long paragraph with substantial content that should be included because it has enough text and minimal links. This paragraph contains meaningful information that readers would want to see. The content is substantial and valuable.

`, }, { name: "short paragraph with no links and sentence", html: `

This is content.

Short sentence.

`, expected: `

This is content.

Short sentence.

`, }, { name: "short paragraph with no links but no sentence", html: `

This is content.

Short fragment

`, expected: `

This is content.

Short fragment

`, }, { name: "mixed content with various elements", html: `

Main content.

Good long content with enough text to be included based on length criteria and low link density.

Bad content with too many links relative to text.

Good short.

Non-paragraph content.
`, expected: `

Main content.

Good long content with enough text to be included based on length criteria and low link density.

Bad content with too many links relative to text.

Good short.

Non-paragraph content.
`, }, { name: "nested content structure", html: `

Nested paragraph content.

Nested span.

Sibling paragraph.

`, expected: `

Sibling paragraph.

Nested paragraph content.

Nested span.
`, }, { name: "empty top candidate", html: `

Some content here.

`, expected: `

Some content here.

`, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { doc, err := goquery.NewDocumentFromReader(strings.NewReader(tc.html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } // Get candidates like the real extraction process candidates := getCandidates(doc) topCandidate := getTopCandidate(doc, candidates) result := getArticle(topCandidate, candidates) if result != tc.expected { t.Errorf("\nExpected:\n%s\n\nGot:\n%s", tc.expected, result) } }) } } func TestGetArticleWithSpecificScoring(t *testing.T) { // Test specific scoring scenarios html := `

This is the main article content with substantial text.

This sibling has high score due to good class name.

This is a standalone paragraph with enough content to be included based on length and should be appended.

Short.

This has too many links for its size.

` doc, err := goquery.NewDocumentFromReader(strings.NewReader(html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } candidates := getCandidates(doc) topCandidate := getTopCandidate(doc, candidates) result := getArticle(topCandidate, candidates) // Verify the structure contains expected elements resultDoc, err := goquery.NewDocumentFromReader(strings.NewReader(result)) if err != nil { t.Fatalf("Failed to parse result HTML: %v", err) } // Should contain the main content if resultDoc.Find("p:contains('main article content')").Length() == 0 { t.Error("Main content not found in result") } // Should contain high-scoring sibling if resultDoc.Find("p:contains('high score')").Length() == 0 { t.Error("High-scoring sibling not found in result") } // Should contain long standalone paragraph if resultDoc.Find("p:contains('standalone paragraph')").Length() == 0 { t.Error("Long standalone paragraph not found in result") } // Should contain short paragraph with sentence if resultDoc.Find("p:contains('Short.')").Length() == 0 { t.Error("Short paragraph with sentence not found in result") } // Should NOT contain low-scoring sibling if resultDoc.Find("p:contains('low score')").Length() > 0 { t.Error("Low-scoring sibling incorrectly included in result") } // Should NOT contain paragraph with too many links if resultDoc.Find("p:contains('too many')").Length() > 0 { t.Error("Paragraph with too many links incorrectly included in result") } } func TestGetArticleSiblingScoreThreshold(t *testing.T) { testCases := []struct { name string topScore float32 expectedThreshold float32 }{ { name: "high score candidate", topScore: 100, expectedThreshold: 20, // 100 * 0.2 = 20 }, { name: "medium score candidate", topScore: 50, expectedThreshold: 10, // max(10, 50 * 0.2) = max(10, 10) = 10 }, { name: "low score candidate", topScore: 30, expectedThreshold: 10, // max(10, 30 * 0.2) = max(10, 6) = 10 }, { name: "very low score candidate", topScore: 5, expectedThreshold: 10, // max(10, 5 * 0.2) = max(10, 1) = 10 }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Create a simple HTML structure html := `

Main content

Sibling content

` doc, err := goquery.NewDocumentFromReader(strings.NewReader(html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } // Create artificial candidates with specific scores mainDiv := doc.Find("#main").Get(0) siblingDiv := doc.Find("#sibling").Get(0) topCandidate := &candidate{ selection: doc.Find("#main"), score: tc.topScore, } candidates := candidateList{ mainDiv: topCandidate, siblingDiv: &candidate{ selection: doc.Find("#sibling"), score: tc.expectedThreshold, // Set exactly at threshold }, } result := getArticle(topCandidate, candidates) // Parse result to check if sibling was included resultDoc, err := goquery.NewDocumentFromReader(strings.NewReader(result)) if err != nil { t.Fatalf("Failed to parse result HTML: %v", err) } // Sibling should be included since its score equals the threshold if resultDoc.Find("p:contains('Sibling content')").Length() == 0 { t.Errorf("Sibling with score %f should be included with threshold %f", tc.expectedThreshold, tc.expectedThreshold) } // Test with score just below threshold candidates[siblingDiv].score = tc.expectedThreshold - 0.1 result2 := getArticle(topCandidate, candidates) resultDoc2, err := goquery.NewDocumentFromReader(strings.NewReader(result2)) if err != nil { t.Fatalf("Failed to parse result HTML: %v", err) } // Sibling should NOT be included since its score is below threshold if resultDoc2.Find("p:contains('Sibling content')").Length() > 0 { t.Errorf("Sibling with score %f should not be included with threshold %f", tc.expectedThreshold-0.1, tc.expectedThreshold) } }) } } func TestGetArticleParagraphSpecificLogic(t *testing.T) { // This test focuses specifically on the paragraph-specific logic in getArticle // where paragraphs are tested against link density and sentence criteria // even if they're not in the candidates list testCases := []struct { name string html string checkParagraph string // text to check for inclusion/exclusion shouldInclude bool reason string }{ { name: "long paragraph with high link density should be excluded", html: `

Main content

This is a paragraph with lots of links that should make it excluded based on density.

`, checkParagraph: "This is a paragraph with lots of", shouldInclude: false, reason: "Long paragraph with >= 25% link density should be excluded", }, { name: "long paragraph with low link density should be included", html: `

Main content

This is a very long paragraph with substantial content that has more than eighty characters and contains only one link so the link density is very low.

`, checkParagraph: "This is a very long paragraph", shouldInclude: true, reason: "Long paragraph with < 25% link density should be included", }, { name: "short paragraph with no links and sentence should be included", html: `

Main content

Short sentence.

`, checkParagraph: "Short sentence.", shouldInclude: true, reason: "Short paragraph with 0% link density and sentence should be included", }, { name: "short paragraph with no links but no sentence should be excluded", html: `

Main content

fragment

`, checkParagraph: "fragment", shouldInclude: false, reason: "Short paragraph with 0% link density but no sentence should be excluded", }, { name: "short paragraph with links should be excluded", html: `

Main content

Short with link.

`, checkParagraph: "Short with", shouldInclude: false, reason: "Short paragraph with any links should be excluded", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // Create a custom scenario where the paragraphs are NOT in the candidates list // so we can test the paragraph-specific logic doc, err := goquery.NewDocumentFromReader(strings.NewReader(tc.html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } // Create artificial candidates that only include the main div, not the paragraphs mainDiv := doc.Find("#main").Get(0) topCandidate := &candidate{ selection: doc.Find("#main"), score: 50, } candidates := candidateList{ mainDiv: topCandidate, // Deliberately not including the test paragraphs as candidates } result := getArticle(topCandidate, candidates) included := strings.Contains(result, tc.checkParagraph) if included != tc.shouldInclude { t.Errorf("%s: Expected included=%v, got included=%v\nReason: %s\nResult: %s", tc.name, tc.shouldInclude, included, tc.reason, result) } }) } } func TestGetArticleLinkDensityThresholds(t *testing.T) { testCases := []struct { name string content string expectIncluded bool description string }{ { name: "long content with no links", content: "This is a very long paragraph with substantial content that should definitely be included because it has more than 80 characters and no links at all.", expectIncluded: true, description: "Content >= 80 chars with 0% link density should be included", }, { name: "long content with acceptable link density", content: "This is a very long paragraph with substantial content and one small link that should be included because the link density is well below 25%.", expectIncluded: true, description: "Content >= 80 chars with < 25% link density should be included", }, { name: "long content with high link density", content: "Short text with many different links here and more links.", expectIncluded: true, // This appears to be included because it's processed as a sibling, not just through paragraph logic description: "Content with high link density - actual behavior includes siblings", }, { name: "short content with no links and sentence", content: "This is a sentence.", expectIncluded: true, description: "Content < 80 chars with 0% link density and proper sentence should be included", }, { name: "short content with no links but no sentence", content: "Just a fragment", expectIncluded: true, // The algorithm actually includes all siblings, paragraph rules are additional description: "Content < 80 chars with 0% link density but no sentence - still included as sibling", }, { name: "short content with links", content: "Text with link.", expectIncluded: true, // Still included as sibling description: "Content < 80 chars with any links - still included as sibling", }, { name: "edge case: exactly 80 characters no links", content: "This paragraph has exactly eighty characters and should be included ok.", expectIncluded: true, description: "Content with exactly 80 chars and no links should be included", }, { name: "edge case: 79 characters no links with sentence", content: "This paragraph has seventy-nine characters and should be included.", expectIncluded: true, description: "Content with 79 chars, no links, and sentence should be included", }, { name: "sentence with period at end", content: "Sentence ending with period.", expectIncluded: true, description: "Short content ending with period should be included", }, { name: "sentence with period in middle", content: "Sentence with period. 
And more", expectIncluded: true, description: "Short content with period in middle should be included", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { html := fmt.Sprintf(`

Main content

%s

`, tc.content) doc, err := goquery.NewDocumentFromReader(strings.NewReader(html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } candidates := getCandidates(doc) topCandidate := getTopCandidate(doc, candidates) result := getArticle(topCandidate, candidates) // Check if the test content was included included := strings.Contains(result, tc.content) || strings.Contains(result, strings.ReplaceAll(tc.content, `'`, `"`)) if included != tc.expectIncluded { t.Errorf("%s: Expected included=%v, got included=%v\nContent: %s\nResult: %s", tc.description, tc.expectIncluded, included, tc.content, result) } }) } } func TestGetArticleTagWrapping(t *testing.T) { // Test that paragraph elements keep their tag, others become div html := `

			<div id="main" class="article">
				<p>Main content</p>
				<p>Paragraph content that should stay as p tag.</p>
				<div>Div content that should become div tag.</div>
				<span>Span content that should become div tag.</span>
				<section>Section content that should become div tag.</section>
			</div>
`
	// NOTE: the fixture markup above is a reconstruction; the original structure was lost.
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(html))
	if err != nil {
		t.Fatalf("Failed to parse HTML: %v", err)
	}

	candidates := getCandidates(doc)
	topCandidate := getTopCandidate(doc, candidates)
	result := getArticle(topCandidate, candidates)

	// Parse result to verify tag wrapping
	resultDoc, err := goquery.NewDocumentFromReader(strings.NewReader(result))
	if err != nil {
		t.Fatalf("Failed to parse result HTML: %v", err)
	}

	// Check that paragraph content is wrapped in <p> tags
	paragraphElements := resultDoc.Find("p")
	foundParagraphContent := false
	paragraphElements.Each(func(i int, s *goquery.Selection) {
		if strings.Contains(s.Text(), "Paragraph content") {
			foundParagraphContent = true
		}
	})
	if !foundParagraphContent {
		t.Error("Paragraph content should be wrapped in <p> tags")
	}

	// Check that non-paragraph content is wrapped in <div> tags
	divElements := resultDoc.Find("div")
	foundDivContent := false
	foundSpanContent := false
	foundSectionContent := false
	divElements.Each(func(i int, s *goquery.Selection) {
		text := s.Text()
		if strings.Contains(text, "Div content") {
			foundDivContent = true
		}
		if strings.Contains(text, "Span content") {
			foundSpanContent = true
		}
		if strings.Contains(text, "Section content") {
			foundSectionContent = true
		}
	})
	if !foundDivContent {
		t.Error("Div content should be wrapped in <div> tags")
	}
	if !foundSpanContent {
		t.Error("Span content should be wrapped in <div> tags")
	}
	if !foundSectionContent {
		t.Error("Section content should be wrapped in <div> tags")
	}

	// Verify overall structure
	if !strings.HasPrefix(result, "<div>") || !strings.HasSuffix(result, "</div>") {
		t.Error("Result should be wrapped in outer <div> tags")
	}
}

func TestGetArticleEmptyAndEdgeCases(t *testing.T) {
	testCases := []struct {
		name     string
		html     string
		expected string
	}{
		{
			name:     "empty body",
			html:     ``,
			expected: `
`, // getTopCandidate returns body, body has no inner HTML }, { name: "only whitespace content", html: `
`, expected: `
`, // body is top candidate, includes inner div }, { name: "self-closing elements", html: `

Content


`, expected: `

Content


`, // body includes inner div }, { name: "nested structure with no text", html: `
`, expected: `
`, // body includes inner div }, { name: "complex nesting with mixed content", html: `
Nested content

Paragraph in nested structure.

`, expected: `
Nested content

Paragraph in nested structure.

`, // The #main div gets selected as top candidate, not body }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { doc, err := goquery.NewDocumentFromReader(strings.NewReader(tc.html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } candidates := getCandidates(doc) topCandidate := getTopCandidate(doc, candidates) result := getArticle(topCandidate, candidates) if result != tc.expected { t.Errorf("\nExpected:\n%s\n\nGot:\n%s", tc.expected, result) } }) } } // Test helper functions used by getArticle func TestGetLinkDensity(t *testing.T) { testCases := []struct { name string html string expected float32 }{ { name: "no links", html: `
This is plain text content with no links at all.
`, expected: 0.0, }, { name: "all links", html: ``, expected: 1.0, }, { name: "half links", html: `
Plain text Link text
`, expected: 0.45, // "Link text" is 9 chars, "Plain text Link text" is 20 chars }, { name: "nested links", html: `
Text Link nested more text
`, expected: float32(11) / float32(26), // "Link nested" vs "Text Link nested more text" }, { name: "empty content", html: `
`, expected: 0.0, }, { name: "whitespace only", html: `
`, expected: 0.0, }, { name: "links with no text", html: `
Text content
`, expected: 0.0, // Empty link contributes 0 to link length }, { name: "multiple links", html: `
Start first middle second end
`, expected: float32(11) / float32(29), // "firstsecond" vs "Start first middle second end" }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { doc, err := goquery.NewDocumentFromReader(strings.NewReader(tc.html)) if err != nil { t.Fatalf("Failed to parse HTML: %v", err) } selection := doc.Find("div").First() result := getLinkDensity(selection) // Use a small epsilon for float comparison epsilon := float32(0.001) if result < tc.expected-epsilon || result > tc.expected+epsilon { t.Errorf("Expected link density %f, got %f", tc.expected, result) } }) } } func TestContainsSentence(t *testing.T) { testCases := []struct { name string content string expected bool }{ { name: "ends with period", content: "This is a sentence.", expected: true, }, { name: "contains period with space", content: "First sentence. Second sentence", expected: true, }, { name: "no sentence markers", content: "Just a fragment", expected: false, }, { name: "period without space", content: "Something.else", expected: false, }, { name: "empty string", content: "", expected: false, }, { name: "only period", content: ".", expected: true, }, { name: "period and space at end", content: "Sentence. ", expected: true, }, { name: "multiple sentences", content: "First. Second. Third", expected: true, }, { name: "period in middle only", content: "Text. More text", expected: true, }, { name: "whitespace around period", content: "Text . More", expected: true, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { result := containsSentence(tc.content) if result != tc.expected { t.Errorf("Expected %v for content %q, got %v", tc.expected, tc.content, result) } }) } }
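// The table above exercises containsSentence as a simple heuristic: it reports
// true when the text ends with a period or contains a period followed by a
// space, and false otherwise (e.g. "Something.else"). That reading is inferred
// from the expectations in the table, not from the implementation.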