mirror of https://github.com/miniflux/v2.git

First commit

This commit is contained in:
Frédéric Guillot 2017-11-19 21:10:04 -08:00
commit 8ffb773f43
2121 changed files with 1118910 additions and 0 deletions

15
vendor/github.com/tdewolff/parse/buffer/buffer.go generated vendored Normal file

@@ -0,0 +1,15 @@
/*
Package buffer contains buffer and wrapper types for byte slices. It is useful for writing lexers or other high-performance byte slice handling.
The `Reader` and `Writer` types implement the `io.Reader` and `io.Writer` interfaces, respectively, and provide a thinner and faster interface than `bytes.Buffer`.
The `Lexer` type is useful for building lexers because it keeps track of the start and end position of a byte selection, and shifts the bytes whenever a valid token is found.
The `StreamLexer` does the same, but keeps a buffer pool so that it reads a limited amount at a time, allowing parsing of streaming sources.
*/
package buffer // import "github.com/tdewolff/parse/buffer"
// defaultBufSize specifies the default initial length of internal buffers.
var defaultBufSize = 4096
// MinBuf specifies the default initial length of internal buffers.
// Solely here to support old versions of parse.
var MinBuf = defaultBufSize
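
The package doc above describes a scan-and-shift workflow. A minimal sketch (not part of the vendored code; the token rule is made up for illustration) of driving the Lexer that way:

package main

import (
	"fmt"

	"github.com/tdewolff/parse/buffer"
)

func main() {
	z := buffer.NewLexerBytes([]byte("hello world"))
	// Advance while we see lowercase letters; the appended NULL stops the loop.
	for c := z.Peek(0); 'a' <= c && c <= 'z'; c = z.Peek(0) {
		z.Move(1)
	}
	fmt.Printf("%s\n", z.Shift()) // prints "hello"
}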

153
vendor/github.com/tdewolff/parse/buffer/lexer.go generated vendored Normal file

@@ -0,0 +1,153 @@
package buffer // import "github.com/tdewolff/parse/buffer"
import (
"io"
"io/ioutil"
)
var nullBuffer = []byte{0}
// Lexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
// It reads all input into memory up front; Shift and Skip move past consumed data.
type Lexer struct {
buf []byte
pos int // index in buf
start int // index in buf
err error
restore func()
}
// NewLexer returns a new Lexer for a given io.Reader, and uses ioutil.ReadAll to read it into a byte slice.
// If the io.Reader implements Bytes, that is used instead.
// It will append a NULL at the end of the buffer.
func NewLexer(r io.Reader) *Lexer {
var b []byte
if r != nil {
if buffer, ok := r.(interface {
Bytes() []byte
}); ok {
b = buffer.Bytes()
} else {
var err error
b, err = ioutil.ReadAll(r)
if err != nil {
return &Lexer{
buf: []byte{0},
err: err,
}
}
}
}
return NewLexerBytes(b)
}
// NewLexerBytes returns a new Lexer for a given byte slice, and appends NULL at the end.
// To avoid reallocation, make sure the capacity has room for one more byte.
func NewLexerBytes(b []byte) *Lexer {
z := &Lexer{
buf: b,
}
n := len(b)
if n == 0 {
z.buf = nullBuffer
} else if b[n-1] != 0 {
// Append NULL to buffer, but try to avoid reallocation
if cap(b) > n {
// Overwrite next byte but restore when done
b = b[:n+1]
c := b[n]
b[n] = 0
z.buf = b
z.restore = func() {
b[n] = c
}
} else {
z.buf = append(b, 0)
}
}
return z
}
// Restore restores the byte past the end of the buffer that was replaced by NULL.
func (z *Lexer) Restore() {
if z.restore != nil {
z.restore()
z.restore = nil
}
}
// Err returns the error returned from io.Reader or io.EOF when the end has been reached.
func (z *Lexer) Err() error {
if z.err != nil {
return z.err
} else if z.pos >= len(z.buf)-1 {
return io.EOF
}
return nil
}
// Peek returns the ith byte relative to the end position.
// Peek returns 0 when an error has occurred; Err returns the error.
func (z *Lexer) Peek(pos int) byte {
pos += z.pos
return z.buf[pos]
}
// PeekRune returns the rune and rune length of the ith byte relative to the end position.
func (z *Lexer) PeekRune(pos int) (rune, int) {
// from unicode/utf8
c := z.Peek(pos)
if c < 0xC0 || z.Peek(pos+1) == 0 {
return rune(c), 1
} else if c < 0xE0 || z.Peek(pos+2) == 0 {
return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
} else if c < 0xF0 || z.Peek(pos+3) == 0 {
return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
}
return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
}
// Move advances the position.
func (z *Lexer) Move(n int) {
z.pos += n
}
// Pos returns a mark to which the position can be rewound.
func (z *Lexer) Pos() int {
return z.pos - z.start
}
// Rewind rewinds the position to the given position.
func (z *Lexer) Rewind(pos int) {
z.pos = z.start + pos
}
// Lexeme returns the bytes of the current selection.
func (z *Lexer) Lexeme() []byte {
return z.buf[z.start:z.pos]
}
// Skip collapses the position to the end of the selection.
func (z *Lexer) Skip() {
z.start = z.pos
}
// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
func (z *Lexer) Shift() []byte {
b := z.buf[z.start:z.pos]
z.start = z.pos
return b
}
// Offset returns the byte position in the buffer.
func (z *Lexer) Offset() int {
return z.pos
}
// Bytes returns the underlying buffer.
func (z *Lexer) Bytes() []byte {
return z.buf
}
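
The capacity trick in NewLexerBytes above can be surprising: when the input slice has spare capacity, the byte just past len(b) in the backing array is overwritten with NULL until Restore is called. A small hedged sketch (not from the repo) demonstrating that behavior:

package main

import (
	"fmt"

	"github.com/tdewolff/parse/buffer"
)

func main() {
	backing := []byte("abcX")
	b := backing[:3] // len 3, cap 4; backing[3] is 'X'
	z := buffer.NewLexerBytes(b)
	fmt.Println(backing[3]) // 0: 'X' was temporarily replaced by the NULL sentinel
	z.Restore()
	fmt.Println(backing[3] == 'X') // true: the original byte is back
}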

91
vendor/github.com/tdewolff/parse/buffer/lexer_test.go generated vendored Normal file

@@ -0,0 +1,91 @@
package buffer // import "github.com/tdewolff/parse/buffer"
import (
"bytes"
"io"
"testing"
"github.com/tdewolff/test"
)
func TestLexer(t *testing.T) {
s := `Lorem ipsum dolor sit amet, consectetur adipiscing elit.`
z := NewLexer(bytes.NewBufferString(s))
test.T(t, z.err, nil, "buffer has no error")
test.T(t, z.Err(), nil, "buffer is at EOF but must not return EOF until we reach that")
test.That(t, z.Pos() == 0, "buffer must start at position 0")
test.That(t, z.Peek(0) == 'L', "first character must be 'L'")
test.That(t, z.Peek(1) == 'o', "second character must be 'o'")
z.Move(1)
test.That(t, z.Peek(0) == 'o', "must be 'o' at position 1")
test.That(t, z.Peek(1) == 'r', "must be 'r' at position 1")
z.Rewind(6)
test.That(t, z.Peek(0) == 'i', "must be 'i' at position 6")
test.That(t, z.Peek(1) == 'p', "must be 'p' at position 7")
test.Bytes(t, z.Lexeme(), []byte("Lorem "), "buffered string must now read 'Lorem ' when at position 6")
test.Bytes(t, z.Shift(), []byte("Lorem "), "shift must return the buffered string")
test.That(t, z.Pos() == 0, "after shifting position must be 0")
test.That(t, z.Peek(0) == 'i', "must be 'i' at position 0 after shifting")
test.That(t, z.Peek(1) == 'p', "must be 'p' at position 1 after shifting")
test.T(t, z.Err(), nil, "error must be nil at this point")
z.Move(len(s) - len("Lorem ") - 1)
test.T(t, z.Err(), nil, "error must be nil just before the end of the buffer")
z.Skip()
test.That(t, z.Pos() == 0, "after skipping position must be 0")
z.Move(1)
test.T(t, z.Err(), io.EOF, "error must be EOF when past the buffer")
z.Move(-1)
test.T(t, z.Err(), nil, "error must be nil just before the end of the buffer, even when it has been past the buffer")
}
func TestLexerRunes(t *testing.T) {
z := NewLexer(bytes.NewBufferString("aæ†\U00100000"))
r, n := z.PeekRune(0)
test.That(t, n == 1, "first character must be length 1")
test.That(t, r == 'a', "first character must be rune 'a'")
r, n = z.PeekRune(1)
test.That(t, n == 2, "second character must be length 2")
test.That(t, r == 'æ', "second character must be rune 'æ'")
r, n = z.PeekRune(3)
test.That(t, n == 3, "fourth character must be length 3")
test.That(t, r == '†', "fourth character must be rune '†'")
r, n = z.PeekRune(6)
test.That(t, n == 4, "seventh character must be length 4")
test.That(t, r == '\U00100000', "seventh character must be rune '\U00100000'")
}
func TestLexerBadRune(t *testing.T) {
z := NewLexer(bytes.NewBufferString("\xF0")) // expect four byte rune
r, n := z.PeekRune(0)
test.T(t, n, 1, "length")
test.T(t, r, rune(0xF0), "rune")
}
func TestLexerZeroLen(t *testing.T) {
z := NewLexer(test.NewPlainReader(bytes.NewBufferString("")))
test.That(t, z.Peek(0) == 0, "first character must yield error")
}
func TestLexerEmptyReader(t *testing.T) {
z := NewLexer(test.NewEmptyReader())
test.That(t, z.Peek(0) == 0, "first character must yield error")
test.T(t, z.Err(), io.EOF, "error must be EOF")
test.That(t, z.Peek(0) == 0, "second peek must also yield error")
}
func TestLexerErrorReader(t *testing.T) {
z := NewLexer(test.NewErrorReader(0))
test.That(t, z.Peek(0) == 0, "first character must yield error")
test.T(t, z.Err(), test.ErrPlain, "error must be ErrPlain")
test.That(t, z.Peek(0) == 0, "second peek must also yield error")
}
func TestLexerBytes(t *testing.T) {
b := []byte{'t', 'e', 's', 't'}
z := NewLexerBytes(b)
test.That(t, z.Peek(4) == 0, "fifth character must yield NULL")
}
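
One property the tests above rely on: thanks to the NULL sentinel, scanning loops need no explicit bounds check, and Err distinguishes a clean EOF from a real reader error. A minimal sketch (not part of the vendored code):

package main

import (
	"fmt"
	"io"

	"github.com/tdewolff/parse/buffer"
)

func main() {
	z := buffer.NewLexerBytes([]byte("ab"))
	for z.Peek(0) != 0 { // stops at the appended NULL
		z.Move(1)
	}
	fmt.Println(z.Err() == io.EOF) // Output: true
}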

44
vendor/github.com/tdewolff/parse/buffer/reader.go generated vendored Normal file

@@ -0,0 +1,44 @@
package buffer // import "github.com/tdewolff/parse/buffer"
import "io"
// Reader implements an io.Reader over a byte slice.
type Reader struct {
buf []byte
pos int
}
// NewReader returns a new Reader for a given byte slice.
func NewReader(buf []byte) *Reader {
return &Reader{
buf: buf,
}
}
// Read reads bytes into the given byte slice and returns the number of bytes read and an error if one occurred.
func (r *Reader) Read(b []byte) (n int, err error) {
if len(b) == 0 {
return 0, nil
}
if r.pos >= len(r.buf) {
return 0, io.EOF
}
n = copy(b, r.buf[r.pos:])
r.pos += n
return
}
// Bytes returns the underlying byte slice.
func (r *Reader) Bytes() []byte {
return r.buf
}
// Reset resets the position of the read pointer to the beginning of the underlying byte slice.
func (r *Reader) Reset() {
r.pos = 0
}
// Len returns the length of the buffer.
func (r *Reader) Len() int {
return len(r.buf)
}
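
Since Reset only rewinds the read pointer, the same Reader can be drained repeatedly without reallocating. A minimal sketch (not part of the vendored code):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/tdewolff/parse/buffer"
)

func main() {
	r := buffer.NewReader([]byte("Lorem ipsum"))
	first, _ := ioutil.ReadAll(r)
	r.Reset() // rewind to the start; the backing slice is reused as-is
	second, _ := ioutil.ReadAll(r)
	fmt.Println(string(first), string(second))
	// Output: Lorem ipsum Lorem ipsum
}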

49
vendor/github.com/tdewolff/parse/buffer/reader_test.go generated vendored Normal file

@@ -0,0 +1,49 @@
package buffer // import "github.com/tdewolff/parse/buffer"
import (
"bytes"
"fmt"
"io"
"testing"
"github.com/tdewolff/test"
)
func TestReader(t *testing.T) {
s := []byte("abcde")
r := NewReader(s)
test.Bytes(t, r.Bytes(), s, "reader must return bytes stored")
buf := make([]byte, 3)
n, err := r.Read(buf)
test.T(t, err, nil, "error")
test.That(t, n == 3, "first read must read 3 characters")
test.Bytes(t, buf, []byte("abc"), "first read must match 'abc'")
n, err = r.Read(buf)
test.T(t, err, nil, "error")
test.That(t, n == 2, "second read must read 2 characters")
test.Bytes(t, buf[:n], []byte("de"), "second read must match 'de'")
n, err = r.Read(buf)
test.T(t, err, io.EOF, "error")
test.That(t, n == 0, "third read must read 0 characters")
n, err = r.Read(nil)
test.T(t, err, nil, "error")
test.That(t, n == 0, "read to nil buffer must return 0 characters read")
r.Reset()
n, err = r.Read(buf)
test.T(t, err, nil, "error")
test.That(t, n == 3, "read after reset must read 3 characters")
test.Bytes(t, buf, []byte("abc"), "read after reset must match 'abc'")
}
func ExampleNewReader() {
r := NewReader([]byte("Lorem ipsum"))
w := &bytes.Buffer{}
io.Copy(w, r)
fmt.Println(w.String())
// Output: Lorem ipsum
}

223
vendor/github.com/tdewolff/parse/buffer/streamlexer.go generated vendored Normal file

@@ -0,0 +1,223 @@
package buffer // import "github.com/tdewolff/parse/buffer"
import (
"io"
)
type block struct {
buf []byte
next int // index in pool plus one
active bool
}
type bufferPool struct {
pool []block
head int // index in pool plus one
tail int // index in pool plus one
pos int // byte pos in tail
}
func (z *bufferPool) swap(oldBuf []byte, size int) []byte {
// find new buffer that can be reused
swap := -1
for i := 0; i < len(z.pool); i++ {
if !z.pool[i].active && size <= cap(z.pool[i].buf) {
swap = i
break
}
}
if swap == -1 { // no free buffer found for reuse
if z.tail == 0 && z.pos >= len(oldBuf) && size <= cap(oldBuf) { // but we can reuse the current buffer!
z.pos -= len(oldBuf)
return oldBuf[:0]
}
// allocate new
z.pool = append(z.pool, block{make([]byte, 0, size), 0, true})
swap = len(z.pool) - 1
}
newBuf := z.pool[swap].buf
// put current buffer into pool
z.pool[swap] = block{oldBuf, 0, true}
if z.head != 0 {
z.pool[z.head-1].next = swap + 1
}
z.head = swap + 1
if z.tail == 0 {
z.tail = swap + 1
}
return newBuf[:0]
}
func (z *bufferPool) free(n int) {
z.pos += n
// move the tail over to next buffers
for z.tail != 0 && z.pos >= len(z.pool[z.tail-1].buf) {
z.pos -= len(z.pool[z.tail-1].buf)
newTail := z.pool[z.tail-1].next
z.pool[z.tail-1].active = false // after this, any thread may pick up the inactive buffer, so it can't be used anymore
z.tail = newTail
}
if z.tail == 0 {
z.head = 0
}
}
// StreamLexer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.
// It keeps data in-memory until Free, taking a byte length, is called to move beyond the data.
type StreamLexer struct {
r io.Reader
err error
pool bufferPool
buf []byte
start int // index in buf
pos int // index in buf
prevStart int
free int
}
// NewStreamLexer returns a new StreamLexer for a given io.Reader with a 4kB estimated buffer size.
// If the io.Reader implements Bytes, that buffer is used instead.
func NewStreamLexer(r io.Reader) *StreamLexer {
return NewStreamLexerSize(r, defaultBufSize)
}
// NewStreamLexerSize returns a new StreamLexer for a given io.Reader and estimated required buffer size.
// If the io.Reader implements Bytes, that buffer is used instead.
func NewStreamLexerSize(r io.Reader, size int) *StreamLexer {
// if reader has the bytes in memory already, use that instead
if buffer, ok := r.(interface {
Bytes() []byte
}); ok {
return &StreamLexer{
err: io.EOF,
buf: buffer.Bytes(),
}
}
return &StreamLexer{
r: r,
buf: make([]byte, 0, size),
}
}
func (z *StreamLexer) read(pos int) byte {
if z.err != nil {
return 0
}
// free unused bytes
z.pool.free(z.free)
z.free = 0
// get new buffer
c := cap(z.buf)
p := pos - z.start + 1
if 2*p > c { // if the token is larger than half the buffer, increase buffer size
c = 2*c + p
}
d := len(z.buf) - z.start
buf := z.pool.swap(z.buf[:z.start], c)
copy(buf[:d], z.buf[z.start:]) // copy the left-overs (unfinished token) from the old buffer
// read in new data for the rest of the buffer
var n int
for pos-z.start >= d && z.err == nil {
n, z.err = z.r.Read(buf[d:cap(buf)])
d += n
}
pos -= z.start
z.pos -= z.start
z.start, z.buf = 0, buf[:d]
if pos >= d {
return 0
}
return z.buf[pos]
}
// Err returns the error returned from io.Reader. It may still return valid bytes for a while though.
func (z *StreamLexer) Err() error {
if z.err == io.EOF && z.pos < len(z.buf) {
return nil
}
return z.err
}
// Free frees up bytes of length n from previously shifted tokens.
// Each call to Shift should eventually be followed by a call to Free with a length returned by ShiftLen.
func (z *StreamLexer) Free(n int) {
z.free += n
}
// Peek returns the ith byte relative to the end position and possibly does an allocation.
// Peek returns zero when an error has occurred; Err returns the error.
// TODO: inline function
func (z *StreamLexer) Peek(pos int) byte {
pos += z.pos
if uint(pos) < uint(len(z.buf)) { // uint for BCE
return z.buf[pos]
}
return z.read(pos)
}
// PeekRune returns the rune and rune length of the ith byte relative to the end position.
func (z *StreamLexer) PeekRune(pos int) (rune, int) {
// from unicode/utf8
c := z.Peek(pos)
if c < 0xC0 {
return rune(c), 1
} else if c < 0xE0 {
return rune(c&0x1F)<<6 | rune(z.Peek(pos+1)&0x3F), 2
} else if c < 0xF0 {
return rune(c&0x0F)<<12 | rune(z.Peek(pos+1)&0x3F)<<6 | rune(z.Peek(pos+2)&0x3F), 3
}
return rune(c&0x07)<<18 | rune(z.Peek(pos+1)&0x3F)<<12 | rune(z.Peek(pos+2)&0x3F)<<6 | rune(z.Peek(pos+3)&0x3F), 4
}
// Move advances the position.
func (z *StreamLexer) Move(n int) {
z.pos += n
}
// Pos returns a mark to which the position can be rewound.
func (z *StreamLexer) Pos() int {
return z.pos - z.start
}
// Rewind rewinds the position to the given position.
func (z *StreamLexer) Rewind(pos int) {
z.pos = z.start + pos
}
// Lexeme returns the bytes of the current selection.
func (z *StreamLexer) Lexeme() []byte {
return z.buf[z.start:z.pos]
}
// Skip collapses the position to the end of the selection.
func (z *StreamLexer) Skip() {
z.start = z.pos
}
// Shift returns the bytes of the current selection and collapses the position to the end of the selection.
// It also returns the number of bytes we moved since the last call to Shift. This can be used in calls to Free.
func (z *StreamLexer) Shift() []byte {
if z.pos > len(z.buf) { // make sure we peeked at least as much as we shift
z.read(z.pos - 1)
}
b := z.buf[z.start:z.pos]
z.start = z.pos
return b
}
// ShiftLen returns the number of bytes moved since the last call to ShiftLen. This can be used in calls to Free because it takes into account multiple Shifts or Skips.
func (z *StreamLexer) ShiftLen() int {
n := z.start - z.prevStart
z.prevStart = z.start
return n
}
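
The Shift/ShiftLen/Free contract above keeps the pool from growing without bound: every shifted token is eventually released. A hedged sketch of that cycle (not part of the vendored code; the tiny 4-byte buffer is deliberate, to force the pool machinery to engage):

package main

import (
	"fmt"
	"strings"

	"github.com/tdewolff/parse/buffer"
)

func main() {
	// strings.Reader has no Bytes method, so reads go through the buffer pool.
	z := buffer.NewStreamLexerSize(strings.NewReader("Lorem ipsum"), 4)
	for z.Peek(0) != ' ' && z.Err() == nil {
		z.Move(1)
	}
	token := z.Shift()
	fmt.Printf("%s\n", token) // prints "Lorem"
	z.Free(z.ShiftLen()) // release the shifted bytes back to the pool
}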

148
vendor/github.com/tdewolff/parse/buffer/streamlexer_test.go generated vendored Normal file

@@ -0,0 +1,148 @@
package buffer // import "github.com/tdewolff/parse/buffer"
import (
"bytes"
"io"
"testing"
"github.com/tdewolff/test"
)
func TestBufferPool(t *testing.T) {
z := &bufferPool{}
lorem := []byte("Lorem ipsum")
dolor := []byte("dolor sit amet")
consectetur := []byte("consectetur adipiscing elit")
// set lorem as first buffer and get new dolor buffer
b := z.swap(lorem, len(dolor))
test.That(t, len(b) == 0)
test.That(t, cap(b) == len(dolor))
b = append(b, dolor...)
// free first buffer so it will be reused
z.free(len(lorem))
b = z.swap(b, len(lorem))
b = b[:len(lorem)]
test.Bytes(t, b, lorem)
b = z.swap(b, len(consectetur))
b = append(b, consectetur...)
// free in advance to reuse the same buffer
z.free(len(dolor) + len(lorem) + len(consectetur))
test.That(t, z.head == 0)
b = z.swap(b, len(consectetur))
b = b[:len(consectetur)]
test.Bytes(t, b, consectetur)
// free in advance but request larger buffer
z.free(len(consectetur))
b = z.swap(b, len(consectetur)+1)
b = append(b, consectetur...)
b = append(b, '.')
test.That(t, cap(b) == len(consectetur)+1)
}
func TestStreamLexer(t *testing.T) {
s := `Lorem ipsum dolor sit amet, consectetur adipiscing elit.`
z := NewStreamLexer(bytes.NewBufferString(s))
test.T(t, z.err, io.EOF, "buffer must be fully in memory")
test.T(t, z.Err(), nil, "buffer is at EOF but must not return EOF until we reach that")
test.That(t, z.Pos() == 0, "buffer must start at position 0")
test.That(t, z.Peek(0) == 'L', "first character must be 'L'")
test.That(t, z.Peek(1) == 'o', "second character must be 'o'")
z.Move(1)
test.That(t, z.Peek(0) == 'o', "must be 'o' at position 1")
test.That(t, z.Peek(1) == 'r', "must be 'r' at position 1")
z.Rewind(6)
test.That(t, z.Peek(0) == 'i', "must be 'i' at position 6")
test.That(t, z.Peek(1) == 'p', "must be 'p' at position 7")
test.Bytes(t, z.Lexeme(), []byte("Lorem "), "buffered string must now read 'Lorem ' when at position 6")
test.Bytes(t, z.Shift(), []byte("Lorem "), "shift must return the buffered string")
test.That(t, z.ShiftLen() == len("Lorem "), "shifted length must equal last shift")
test.That(t, z.Pos() == 0, "after shifting position must be 0")
test.That(t, z.Peek(0) == 'i', "must be 'i' at position 0 after shifting")
test.That(t, z.Peek(1) == 'p', "must be 'p' at position 1 after shifting")
test.T(t, z.Err(), nil, "error must be nil at this point")
z.Move(len(s) - len("Lorem ") - 1)
test.T(t, z.Err(), nil, "error must be nil just before the end of the buffer")
z.Skip()
test.That(t, z.Pos() == 0, "after skipping position must be 0")
z.Move(1)
test.T(t, z.Err(), io.EOF, "error must be EOF when past the buffer")
z.Move(-1)
test.T(t, z.Err(), nil, "error must be nil just before the end of the buffer, even when it has been past the buffer")
z.Free(0) // has already been tested
}
func TestStreamLexerShift(t *testing.T) {
s := `Lorem ipsum dolor sit amet, consectetur adipiscing elit.`
z := NewStreamLexerSize(test.NewPlainReader(bytes.NewBufferString(s)), 5)
z.Move(len("Lorem "))
test.Bytes(t, z.Shift(), []byte("Lorem "), "shift must return the buffered string")
test.That(t, z.ShiftLen() == len("Lorem "), "shifted length must equal last shift")
}
func TestStreamLexerSmall(t *testing.T) {
s := `abcdefghijklm`
z := NewStreamLexerSize(test.NewPlainReader(bytes.NewBufferString(s)), 4)
test.That(t, z.Peek(8) == 'i', "first character must be 'i' at position 8")
z = NewStreamLexerSize(test.NewPlainReader(bytes.NewBufferString(s)), 4)
test.That(t, z.Peek(12) == 'm', "first character must be 'm' at position 12")
z = NewStreamLexerSize(test.NewPlainReader(bytes.NewBufferString(s)), 0)
test.That(t, z.Peek(4) == 'e', "first character must be 'e' at position 4")
z = NewStreamLexerSize(test.NewPlainReader(bytes.NewBufferString(s)), 13)
test.That(t, z.Peek(13) == 0, "must yield error at position 13")
}
func TestStreamLexerSingle(t *testing.T) {
z := NewStreamLexer(test.NewInfiniteReader())
test.That(t, z.Peek(0) == '.')
test.That(t, z.Peek(1) == '.')
test.That(t, z.Peek(3) == '.', "required two successful reads")
}
func TestStreamLexerRunes(t *testing.T) {
z := NewStreamLexer(bytes.NewBufferString("aæ†\U00100000"))
r, n := z.PeekRune(0)
test.That(t, n == 1, "first character must be length 1")
test.That(t, r == 'a', "first character must be rune 'a'")
r, n = z.PeekRune(1)
test.That(t, n == 2, "second character must be length 2")
test.That(t, r == 'æ', "second character must be rune 'æ'")
r, n = z.PeekRune(3)
test.That(t, n == 3, "fourth character must be length 3")
test.That(t, r == '†', "fourth character must be rune '†'")
r, n = z.PeekRune(6)
test.That(t, n == 4, "seventh character must be length 4")
test.That(t, r == '\U00100000', "seventh character must be rune '\U00100000'")
}
func TestStreamLexerBadRune(t *testing.T) {
z := NewStreamLexer(bytes.NewBufferString("\xF0")) // expect four byte rune
r, n := z.PeekRune(0)
test.T(t, n, 4, "length")
test.T(t, r, rune(0), "rune")
}
func TestStreamLexerZeroLen(t *testing.T) {
z := NewStreamLexer(test.NewPlainReader(bytes.NewBufferString("")))
test.That(t, z.Peek(0) == 0, "first character must yield error")
}
func TestStreamLexerEmptyReader(t *testing.T) {
z := NewStreamLexer(test.NewEmptyReader())
test.That(t, z.Peek(0) == 0, "first character must yield error")
test.T(t, z.Err(), io.EOF, "error must be EOF")
test.That(t, z.Peek(0) == 0, "second peek must also yield error")
}

41
vendor/github.com/tdewolff/parse/buffer/writer.go generated vendored Normal file

@@ -0,0 +1,41 @@
package buffer // import "github.com/tdewolff/parse/buffer"
// Writer implements an io.Writer over a byte slice.
type Writer struct {
buf []byte
}
// NewWriter returns a new Writer for a given byte slice.
func NewWriter(buf []byte) *Writer {
return &Writer{
buf: buf,
}
}
// Write writes bytes from the given byte slice and returns the number of bytes written and an error if one occurred. When err != nil, n == 0.
func (w *Writer) Write(b []byte) (int, error) {
n := len(b)
end := len(w.buf)
if end+n > cap(w.buf) {
buf := make([]byte, end, 2*cap(w.buf)+n)
copy(buf, w.buf)
w.buf = buf
}
w.buf = w.buf[:end+n]
return copy(w.buf[end:], b), nil
}
// Len returns the length of the underlying byte slice.
func (w *Writer) Len() int {
return len(w.buf)
}
// Bytes returns the underlying byte slice.
func (w *Writer) Bytes() []byte {
return w.buf
}
// Reset empties and reuses the current buffer. Subsequent writes will overwrite the buffer, so any reference to the underlying slice is invalidated after this call.
func (w *Writer) Reset() {
w.buf = w.buf[:0]
}
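
Because Writer satisfies io.Writer and grows its slice on demand, it can stand in wherever an io.Writer is accepted. A minimal sketch (not part of the vendored code):

package main

import (
	"fmt"

	"github.com/tdewolff/parse/buffer"
)

func main() {
	w := buffer.NewWriter(make([]byte, 0, 4)) // deliberately small capacity
	fmt.Fprintf(w, "Lorem %s", "ipsum")       // grows the buffer past its capacity
	fmt.Println(string(w.Bytes()))
	// Output: Lorem ipsum
}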

46
vendor/github.com/tdewolff/parse/buffer/writer_test.go generated vendored Normal file

@@ -0,0 +1,46 @@
package buffer // import "github.com/tdewolff/parse/buffer"
import (
"fmt"
"testing"
"github.com/tdewolff/test"
)
func TestWriter(t *testing.T) {
w := NewWriter(make([]byte, 0, 3))
test.That(t, w.Len() == 0, "buffer must initially have zero length")
n, _ := w.Write([]byte("abc"))
test.That(t, n == 3, "first write must write 3 characters")
test.Bytes(t, w.Bytes(), []byte("abc"), "first write must match 'abc'")
test.That(t, w.Len() == 3, "buffer must have length 3 after first write")
n, _ = w.Write([]byte("def"))
test.That(t, n == 3, "second write must write 3 characters")
test.Bytes(t, w.Bytes(), []byte("abcdef"), "second write must match 'abcdef'")
w.Reset()
test.Bytes(t, w.Bytes(), []byte(""), "reset must match ''")
n, _ = w.Write([]byte("ghijkl"))
test.That(t, n == 6, "third write must write 6 characters")
test.Bytes(t, w.Bytes(), []byte("ghijkl"), "third write must match 'ghijkl'")
}
func ExampleNewWriter() {
w := NewWriter(make([]byte, 0, 11)) // initial buffer capacity is 11
w.Write([]byte("Lorem ipsum"))
fmt.Println(string(w.Bytes()))
// Output: Lorem ipsum
}
func ExampleWriter_Reset() {
w := NewWriter(make([]byte, 0, 11)) // initial buffer capacity is 11
w.Write([]byte("garbage that will be overwritten")) // does reallocation
w.Reset()
w.Write([]byte("Lorem ipsum"))
fmt.Println(string(w.Bytes()))
// Output: Lorem ipsum
}