
Add FeedIcon API call and update dependencies

Frédéric Guillot 2017-12-16 11:25:18 -08:00
parent 231ebf2daa
commit 27196589fb
262 changed files with 83830 additions and 30061 deletions
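
The hunks visible in this excerpt are from the vendored golang.org/x/text packages; the FeedIcon API call mentioned in the commit title is not itself shown here. As a rough sketch of how such a feed-icon lookup can be used from a Go client, assuming a Client.FeedIcon method, a FeedIcon struct with MimeType and Data fields, and the miniflux.app/client import path (all illustrative assumptions, not taken from this diff):

package main

import (
    "fmt"
    "log"

    miniflux "miniflux.app/client" // import path assumed for illustration
)

func main() {
    // Placeholder endpoint and credentials.
    client := miniflux.New("https://miniflux.example.org", "admin", "secret")

    // Assumed method: fetch the icon associated with feed ID 42.
    icon, err := client.FeedIcon(42)
    if err != nil {
        log.Fatalf("unable to fetch feed icon: %v", err)
    }

    // Assumed fields: the icon comes back as a MIME type plus encoded image data.
    fmt.Printf("icon type: %s, %d bytes of data\n", icon.MimeType, len(icon.Data))
}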


@@ -16,7 +16,7 @@ func TestLinking(t *testing.T) {
    compact := getSize(t, `display.English.Languages().Name(language.English)`)
    if d := base - compact; d < 1.5*1024*1024 {
-        t.Errorf("size(base)-size(compact) was %d; want > 1.5MB", base, compact)
+        t.Errorf("size(base) - size(compact) = %d - %d = was %d; want > 1.5MB", base, compact, d)
    }
}


@@ -371,9 +371,6 @@ func TestTag(t *testing.T) {
        {"sr-Latn", "sr-Latn-ME", "srpskohrvatski (Crna Gora)"},
        // Double script and region
        {"nl", "en-Cyrl-BE", "Engels (Cyrillisch, België)"},
-        // Canonical equivalents.
-        {"ro", "ro-MD", "moldovenească"},
-        {"ro", "mo", "moldovenească"},
    }
    for _, tt := range tests {
        t.Run(tt.dict+"/"+tt.tag, func(t *testing.T) {
@@ -445,9 +442,6 @@ func TestLanguage(t *testing.T) {
        {"en", "af-NA", "Afrikaans"},
        {"en", "zu-BR", "Zulu"},
        {"agq", "zh-Hant", "|[language: zh-Hant]"},
-        // Canonical equivalents.
-        {"ro", "ro-MD", "moldovenească"},
-        {"ro", "mo", "moldovenească"},
        {"en", "sh", "Serbo-Croatian"},
        {"en", "sr-Latn", "Serbo-Croatian"},
        {"en", "sr", "Serbian"},
@@ -540,8 +534,6 @@ func TestRegion(t *testing.T) {
        {"nl", "NL", "Nederland"},
        {"en", "US", "United States"},
        {"en", "ZZ", "Unknown Region"},
-        {"en", "UM", "U.S. Outlying Islands"},
-        {"en-GB", "UM", "U.S. Outlying Islands"},
        {"en-GB", "NL", "Netherlands"},
        // Canonical equivalents
        {"en", "UK", "United Kingdom"},
@@ -628,9 +620,6 @@ func TestSelf(t *testing.T) {
        {"sr-Latn-ME", "srpskohrvatski"},
        {"sr-Cyrl-ME", "српски"},
        {"sr-NL", "српски"},
-        // Canonical equivalents.
-        {"ro-MD", "moldovenească"},
-        {"mo", "moldovenească"},
        // NOTE: kk is defined, but in Cyrillic script. For China, Arab is the
        // dominant script. We do not have data for kk-Arab and we chose to not
        // fall back in such cases.
@@ -644,6 +633,27 @@
    }
}
+func TestEquivalence(t *testing.T) {
+    testCases := []struct {
+        desc string
+        namer Namer
+    }{
+        {"Self", Self},
+        {"Tags", Tags(language.Romanian)},
+        {"Languages", Languages(language.Romanian)},
+        {"Scripts", Scripts(language.Romanian)},
+    }
+    for _, tc := range testCases {
+        t.Run(tc.desc, func(t *testing.T) {
+            ro := tc.namer.Name(language.Raw.MustParse("ro-MD"))
+            mo := tc.namer.Name(language.Raw.MustParse("mo"))
+            if ro != mo {
+                t.Errorf("%q != %q", ro, mo)
+            }
+        })
+    }
+}
func TestDictionaryLang(t *testing.T) {
    tests := []struct {
        d *Dictionary
@@ -693,7 +703,6 @@ func TestDictionaryScript(t *testing.T) {
        name string
    }{
        {English, "Cyrl", "Cyrillic"},
-        {Portuguese, "Gujr", "gujerati"},
        {EuropeanPortuguese, "Gujr", "guzerate"},
    }
    for i, test := range tests {

File diff suppressed because it is too large.


@@ -1497,8 +1497,14 @@ func (b *builder) writeMatchData() {
    if desired == supported && desired == "*_*_*" {
        continue
    }
-    if desired != supported { // (Weird but correct.)
-        log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
+    if desired != supported {
+        // This is now supported by CLDR, but only one case, which
+        // should already be covered by paradigm locales. For instance,
+        // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in
+        // testdata/CLDRLocaleMatcherTest.txt tests this.
+        if supported != "en_*_GB" {
+            log.Fatalf("not supported: desired=%q; supported=%q", desired, supported)
+        }
        continue
    }
    ri := regionIntelligibility{


@@ -49,7 +49,7 @@ func main() {
    defer func() {
        buf := &bytes.Buffer{}
-        if _, err = w.WriteGo(buf, "language"); err != nil {
+        if _, err = w.WriteGo(buf, "language", ""); err != nil {
            log.Fatalf("Error formatting file index.go: %v", err)
}

File diff suppressed because it is too large.


@@ -299,6 +299,26 @@ func (t Tag) String() string {
    return string(buf[:t.genCoreBytes(buf[:])])
}
+// MarshalText implements encoding.TextMarshaler.
+func (t Tag) MarshalText() (text []byte, err error) {
+    if t.str != "" {
+        text = append(text, t.str...)
+    } else if t.script == 0 && t.region == 0 {
+        text = append(text, t.lang.String()...)
+    } else {
+        buf := [maxCoreSize]byte{}
+        text = buf[:t.genCoreBytes(buf[:])]
+    }
+    return text, nil
+}
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (t *Tag) UnmarshalText(text []byte) error {
+    tag, err := Raw.Parse(string(text))
+    *t = tag
+    return err
+}
// Base returns the base language of the language tag. If the base language is
// unspecified, an attempt will be made to infer it from the context.
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
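
The MarshalText and UnmarshalText methods added above make language.Tag satisfy encoding.TextMarshaler and encoding.TextUnmarshaler, so tags round-trip through text-based encoders such as encoding/json. A minimal usage sketch (the Feed struct and its field names are purely illustrative):

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "golang.org/x/text/language"
)

// Feed is only an illustrative struct; it is not part of the patch.
type Feed struct {
    Title string       `json:"title"`
    Lang  language.Tag `json:"lang"`
}

func main() {
    in := Feed{Title: "Example", Lang: language.MustParse("en-US")}

    // json.Marshal uses Tag.MarshalText, so the tag is encoded as the string "en-US".
    b, err := json.Marshal(in)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(b)) // {"title":"Example","lang":"en-US"}

    // json.Unmarshal uses (*Tag).UnmarshalText, which parses via Raw.Parse.
    var out Feed
    if err := json.Unmarshal(b, &out); err != nil {
        log.Fatal(err)
    }
    fmt.Println(out.Lang) // en-US
}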


@@ -91,11 +91,11 @@ func TestCompactIndex(t *testing.T) {
        {"ca-ES-valencia-u-co-phonebk", 1, true},
        {"ca-ES-valencia-u-co-phonebk-va-posix", 0, false},
        {"x-klingon", 0, false},
-        {"en-US", 229, true},
+        {"en-US", 232, true},
        {"en-US-u-va-posix", 2, true},
-        {"en", 133, true},
-        {"en-u-co-phonebk", 133, true},
-        {"en-001", 134, true},
+        {"en", 136, true},
+        {"en-u-co-phonebk", 136, true},
+        {"en-001", 137, true},
        {"sh", 0, false}, // We don't normalize.
    }
    for _, tt := range tests {
@@ -106,6 +106,39 @@ func TestCompactIndex(t *testing.T) {
    }
}
+func TestMarshal(t *testing.T) {
+    testCases := []string{
+        // TODO: these values will change with each CLDR update. This issue
+        // will be solved if we decide to fix the indexes.
+        "und",
+        "ca-ES-valencia",
+        "ca-ES-valencia-u-va-posix",
+        "ca-ES-valencia-u-co-phonebk",
+        "ca-ES-valencia-u-co-phonebk-va-posix",
+        "x-klingon",
+        "en-US",
+        "en-US-u-va-posix",
+        "en",
+        "en-u-co-phonebk",
+        "en-001",
+        "sh",
+    }
+    for _, tc := range testCases {
+        var tag Tag
+        err := tag.UnmarshalText([]byte(tc))
+        if err != nil {
+            t.Errorf("UnmarshalText(%q): unexpected error: %v", tc, err)
+        }
+        b, err := tag.MarshalText()
+        if err != nil {
+            t.Errorf("MarshalText(%q): unexpected error: %v", tc, err)
+        }
+        if got := string(b); got != tc {
+            t.Errorf("%s: got %q; want %q", tc, got, tc)
+        }
+    }
+}
func TestBase(t *testing.T) {
    tests := []struct {
        loc, lang string

File diff suppressed because it is too large.