|
@@ -9,14 +9,15 @@ import (
|
|
|
"sync"
|
|
|
"sync/atomic"
|
|
|
"testing"
|
|
|
+ "time"
|
|
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
"github.com/stretchr/testify/require"
|
|
|
)
|
|
|
|
|
|
const (
|
|
|
- halfChunkSize = ChunkSize / 2
|
|
|
- quaterChunkSize = ChunkSize / 4
|
|
|
+ halfChunkSize = chunkSize / 2
|
|
|
+ quaterChunkSize = chunkSize / 4
|
|
|
)
|
|
|
|
|
|
// erraticReader is a test reader that simulates a slow read and can fail after reading a certain number of bytes
|
|
@@ -74,7 +75,7 @@ func (r *blockingReader) Read(p []byte) (n int, err error) {
|
|
|
// generateSourceData generates a byte slice with 4.5 chunks of data
|
|
|
func generateSourceData(t *testing.T, size int64) ([]byte, *bytes.Reader) {
|
|
|
// We use small chunks for tests, let's check the ChunkSize just in case
|
|
|
- assert.GreaterOrEqual(t, ChunkSize, 20, "ChunkSize required for tests must be greater than 10 bytes")
|
|
|
+	assert.GreaterOrEqual(t, chunkSize, 20, "chunkSize used in tests must be at least 20 bytes")
|
|
|
|
|
|
// Create a byte slice with 4 chunks of ChunkSize
|
|
|
source := make([]byte, size)
|
|
@@ -88,7 +89,7 @@ func generateSourceData(t *testing.T, size int64) ([]byte, *bytes.Reader) {
|
|
|
// TestAsyncBufferRead tests reading from AsyncBuffer using readAt method which is base for all other methods
|
|
|
func TestAsyncBufferReadAt(t *testing.T) {
|
|
|
// Let's use source buffer which is 4.5 chunks long
|
|
|
- source, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
|
|
|
+ source, bytesReader := generateSourceData(t, int64(chunkSize*4)+halfChunkSize)
|
|
|
asyncBuffer := FromReader(bytesReader)
|
|
|
defer asyncBuffer.Close()
|
|
|
|
|
@@ -118,30 +119,30 @@ func TestAsyncBufferReadAt(t *testing.T) {
|
|
|
assert.Equal(t, target, source[quaterChunkSize:len(source)-quaterChunkSize])
|
|
|
|
|
|
// Let's read some data from the middle of the stream < chunk size
|
|
|
- target = make([]byte, ChunkSize/4)
|
|
|
- n, err = asyncBuffer.readAt(target, ChunkSize+ChunkSize/4)
|
|
|
+ target = make([]byte, chunkSize/4)
|
|
|
+ n, err = asyncBuffer.readAt(target, chunkSize+chunkSize/4)
|
|
|
require.NoError(t, err)
|
|
|
assert.Equal(t, quaterChunkSize, n)
|
|
|
- assert.Equal(t, target, source[ChunkSize+quaterChunkSize:ChunkSize+quaterChunkSize*2])
|
|
|
+ assert.Equal(t, target, source[chunkSize+quaterChunkSize:chunkSize+quaterChunkSize*2])
|
|
|
|
|
|
// Let's read some data from the latest half chunk
|
|
|
target = make([]byte, quaterChunkSize)
|
|
|
- n, err = asyncBuffer.readAt(target, ChunkSize*4+quaterChunkSize)
|
|
|
+ n, err = asyncBuffer.readAt(target, chunkSize*4+quaterChunkSize)
|
|
|
require.NoError(t, err)
|
|
|
assert.Equal(t, quaterChunkSize, n)
|
|
|
- assert.Equal(t, target, source[ChunkSize*4+quaterChunkSize:ChunkSize*4+halfChunkSize])
|
|
|
+ assert.Equal(t, target, source[chunkSize*4+quaterChunkSize:chunkSize*4+halfChunkSize])
|
|
|
|
|
|
// Let's try to read more data then available in the stream
|
|
|
- target = make([]byte, ChunkSize*2)
|
|
|
- n, err = asyncBuffer.readAt(target, ChunkSize*4)
|
|
|
+ target = make([]byte, chunkSize*2)
|
|
|
+ n, err = asyncBuffer.readAt(target, chunkSize*4)
|
|
|
require.Error(t, err)
|
|
|
assert.Equal(t, err, io.EOF)
|
|
|
- assert.Equal(t, ChunkSize/2, n)
|
|
|
- assert.Equal(t, target[:ChunkSize/2], source[ChunkSize*4:]) // We read only last half chunk
|
|
|
+ assert.Equal(t, chunkSize/2, n)
|
|
|
+ assert.Equal(t, target[:chunkSize/2], source[chunkSize*4:]) // We read only last half chunk
|
|
|
|
|
|
// Let's try to read data beyond the end of the stream
|
|
|
- target = make([]byte, ChunkSize*2)
|
|
|
- n, err = asyncBuffer.readAt(target, ChunkSize*5)
|
|
|
+ target = make([]byte, chunkSize*2)
|
|
|
+ n, err = asyncBuffer.readAt(target, chunkSize*5)
|
|
|
require.Error(t, err)
|
|
|
assert.Equal(t, err, io.EOF)
|
|
|
assert.Equal(t, 0, n)
|
|
@@ -177,7 +178,7 @@ func TestAsyncBufferReadAtSmallBuffer(t *testing.T) {
|
|
|
}
|
|
|
|
|
|
func TestAsyncBufferReader(t *testing.T) {
|
|
|
- source, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
|
|
|
+ source, bytesReader := generateSourceData(t, int64(chunkSize*4)+halfChunkSize)
|
|
|
|
|
|
// Create an AsyncBuffer with the byte slice
|
|
|
asyncBuffer := FromReader(bytesReader)
|
|
@@ -186,7 +187,7 @@ func TestAsyncBufferReader(t *testing.T) {
|
|
|
// Let's wait for all chunks to be read
|
|
|
size, err := asyncBuffer.Wait()
|
|
|
require.NoError(t, err, "AsyncBuffer failed to wait for all chunks")
|
|
|
- assert.Equal(t, int64(ChunkSize*4+halfChunkSize), size)
|
|
|
+ assert.Equal(t, int64(chunkSize*4+halfChunkSize), size)
|
|
|
|
|
|
reader := asyncBuffer.Reader()
|
|
|
|
|
@@ -194,34 +195,34 @@ func TestAsyncBufferReader(t *testing.T) {
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
// Read the first two chunks
|
|
|
- twoChunks := make([]byte, ChunkSize*2)
|
|
|
+ twoChunks := make([]byte, chunkSize*2)
|
|
|
n, err := reader.Read(twoChunks)
|
|
|
require.NoError(t, err)
|
|
|
- assert.Equal(t, ChunkSize*2, n)
|
|
|
- assert.Equal(t, source[:ChunkSize*2], twoChunks)
|
|
|
+ assert.Equal(t, chunkSize*2, n)
|
|
|
+ assert.Equal(t, source[:chunkSize*2], twoChunks)
|
|
|
|
|
|
// Seek to the last chunk + 10 bytes
|
|
|
- pos, err := reader.Seek(ChunkSize*3+5, io.SeekStart)
|
|
|
+ pos, err := reader.Seek(chunkSize*3+5, io.SeekStart)
|
|
|
require.NoError(t, err)
|
|
|
- assert.Equal(t, int64(ChunkSize*3+5), pos)
|
|
|
+ assert.Equal(t, int64(chunkSize*3+5), pos)
|
|
|
|
|
|
// Read the next 10 bytes
|
|
|
smallSlice := make([]byte, 10)
|
|
|
n, err = reader.Read(smallSlice)
|
|
|
require.NoError(t, err)
|
|
|
assert.Equal(t, 10, n)
|
|
|
- assert.Equal(t, source[ChunkSize*3+5:ChunkSize*3+5+10], smallSlice)
|
|
|
+ assert.Equal(t, source[chunkSize*3+5:chunkSize*3+5+10], smallSlice)
|
|
|
|
|
|
// Seek -10 bytes from the current position
|
|
|
pos, err = reader.Seek(-10, io.SeekCurrent)
|
|
|
require.NoError(t, err)
|
|
|
- assert.Equal(t, int64(ChunkSize*3+5), pos)
|
|
|
+ assert.Equal(t, int64(chunkSize*3+5), pos)
|
|
|
|
|
|
// Read data again
|
|
|
n, err = reader.Read(smallSlice)
|
|
|
require.NoError(t, err)
|
|
|
assert.Equal(t, 10, n)
|
|
|
- assert.Equal(t, source[ChunkSize*3+5:ChunkSize*3+5+10], smallSlice)
|
|
|
+ assert.Equal(t, source[chunkSize*3+5:chunkSize*3+5+10], smallSlice)
|
|
|
|
|
|
// Seek -10 bytes from end of the stream
|
|
|
pos, err = reader.Seek(-10, io.SeekEnd)
|
|
@@ -245,7 +246,7 @@ func TestAsyncBufferReader(t *testing.T) {
|
|
|
|
|
|
// TestAsyncBufferClose tests closing the AsyncBuffer
|
|
|
func TestAsyncBufferClose(t *testing.T) {
|
|
|
- _, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
|
|
|
+ _, bytesReader := generateSourceData(t, int64(chunkSize*4)+halfChunkSize)
|
|
|
|
|
|
// Create an AsyncBuffer with the byte slice
|
|
|
asyncBuffer := FromReader(bytesReader)
|
|
@@ -273,8 +274,8 @@ func TestAsyncBufferClose(t *testing.T) {
|
|
|
// which would fail somewhere
|
|
|
func TestAsyncBufferReadAtErrAtSomePoint(t *testing.T) {
|
|
|
// Let's use source buffer which is 4.5 chunks long
|
|
|
- source, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
|
|
|
- slowReader := &erraticReader{reader: *bytesReader, failAt: ChunkSize*3 + 5} // fails at last chunk
|
|
|
+ source, bytesReader := generateSourceData(t, int64(chunkSize*4)+halfChunkSize)
|
|
|
+ slowReader := &erraticReader{reader: *bytesReader, failAt: chunkSize*3 + 5} // fails at last chunk
|
|
|
asyncBuffer := FromReader(slowReader)
|
|
|
defer asyncBuffer.Close()
|
|
|
|
|
@@ -298,7 +299,7 @@ func TestAsyncBufferReadAtErrAtSomePoint(t *testing.T) {
|
|
|
|
|
|
// Let's read something, but when error occurs
|
|
|
target = make([]byte, halfChunkSize)
|
|
|
- _, err = asyncBuffer.readAt(target, ChunkSize*3)
|
|
|
+ _, err = asyncBuffer.readAt(target, chunkSize*3)
|
|
|
require.Error(t, err, "simulated read failure")
|
|
|
}
|
|
|
|
|
@@ -306,7 +307,7 @@ func TestAsyncBufferReadAtErrAtSomePoint(t *testing.T) {
|
|
|
// with full = false
|
|
|
func TestAsyncBufferReadAsync(t *testing.T) {
|
|
|
// Let's use source buffer which is 4.5 chunks long
|
|
|
- source, bytesReader := generateSourceData(t, int64(ChunkSize)*3)
|
|
|
+ source, bytesReader := generateSourceData(t, int64(chunkSize)*3)
|
|
|
blockingReader := newBlockingReader(*bytesReader)
|
|
|
asyncBuffer := FromReader(blockingReader)
|
|
|
defer asyncBuffer.Close()
|
|
@@ -316,34 +317,34 @@ func TestAsyncBufferReadAsync(t *testing.T) {
|
|
|
|
|
|
// Let's try to read first two chunks, however,
|
|
|
// we know that only the first chunk is available
|
|
|
- target := make([]byte, ChunkSize*2)
|
|
|
+ target := make([]byte, chunkSize*2)
|
|
|
n, err := asyncBuffer.readAt(target, 0)
|
|
|
require.NoError(t, err)
|
|
|
- assert.Equal(t, ChunkSize, n)
|
|
|
- assert.Equal(t, target[:ChunkSize], source[:ChunkSize])
|
|
|
+ assert.Equal(t, chunkSize, n)
|
|
|
+ assert.Equal(t, target[:chunkSize], source[:chunkSize])
|
|
|
|
|
|
blockingReader.flushNextChunk() // unlock reader to allow read second chunk
|
|
|
- asyncBuffer.WaitFor(ChunkSize + 1) // wait for the second chunk to be available
|
|
|
+ asyncBuffer.WaitFor(chunkSize + 1) // wait for the second chunk to be available
|
|
|
|
|
|
- target = make([]byte, ChunkSize*2)
|
|
|
+ target = make([]byte, chunkSize*2)
|
|
|
n, err = asyncBuffer.readAt(target, 0)
|
|
|
require.NoError(t, err)
|
|
|
- assert.Equal(t, ChunkSize*2, n)
|
|
|
- assert.Equal(t, target, source[:ChunkSize*2])
|
|
|
+ assert.Equal(t, chunkSize*2, n)
|
|
|
+ assert.Equal(t, target, source[:chunkSize*2])
|
|
|
|
|
|
blockingReader.flush() // Flush the rest of the data
|
|
|
asyncBuffer.Wait()
|
|
|
|
|
|
// Try to read near end of the stream, EOF
|
|
|
- target = make([]byte, ChunkSize)
|
|
|
- n, err = asyncBuffer.readAt(target, ChunkSize*3-1)
|
|
|
+ target = make([]byte, chunkSize)
|
|
|
+ n, err = asyncBuffer.readAt(target, chunkSize*3-1)
|
|
|
require.ErrorIs(t, err, io.EOF)
|
|
|
assert.Equal(t, 1, n)
|
|
|
- assert.Equal(t, target[0], source[ChunkSize*3-1])
|
|
|
+ assert.Equal(t, target[0], source[chunkSize*3-1])
|
|
|
|
|
|
// Try to read beyond the end of the stream == eof
|
|
|
- target = make([]byte, ChunkSize)
|
|
|
- n, err = asyncBuffer.readAt(target, ChunkSize*3)
|
|
|
+ target = make([]byte, chunkSize)
|
|
|
+ n, err = asyncBuffer.readAt(target, chunkSize*3)
|
|
|
require.ErrorIs(t, io.EOF, err)
|
|
|
assert.Equal(t, 0, n)
|
|
|
}
|
|
@@ -359,3 +360,66 @@ func TestAsyncBufferReadAllCompability(t *testing.T) {
|
|
|
require.NoError(t, err)
|
|
|
require.Len(t, b, len(source))
|
|
|
}
|
|
|
+
|
|
|
+func TestAsyncBufferThreshold(t *testing.T) {
|
|
|
+ _, bytesReader := generateSourceData(t, int64(pauseThreshold)*2)
|
|
|
+ asyncBuffer := FromReader(bytesReader)
|
|
|
+ defer asyncBuffer.Close()
|
|
|
+
|
|
|
+ target := make([]byte, chunkSize)
|
|
|
+ n, err := asyncBuffer.readAt(target, 0)
|
|
|
+ require.NoError(t, err)
|
|
|
+ assert.Equal(t, chunkSize, n)
|
|
|
+
|
|
|
+ // Ensure that buffer hits the pause threshold
|
|
|
+ require.Eventually(t, func() bool {
|
|
|
+ return asyncBuffer.len.Load() >= pauseThreshold
|
|
|
+ }, 300*time.Millisecond, 10*time.Millisecond)
|
|
|
+
|
|
|
+ // Ensure that buffer never reaches the end of the stream
|
|
|
+ require.Never(t, func() bool {
|
|
|
+ return asyncBuffer.len.Load() >= pauseThreshold*2-1
|
|
|
+ }, 300*time.Millisecond, 10*time.Millisecond)
|
|
|
+
|
|
|
+ // Let's hit the pause threshold
|
|
|
+ target = make([]byte, pauseThreshold)
|
|
|
+ n, err = asyncBuffer.readAt(target, 0)
|
|
|
+ require.NoError(t, err)
|
|
|
+ require.Equal(t, pauseThreshold, n)
|
|
|
+
|
|
|
+ // Ensure that buffer never reaches the end of the stream
|
|
|
+ require.Never(t, func() bool {
|
|
|
+ return asyncBuffer.len.Load() >= pauseThreshold*2-1
|
|
|
+ }, 300*time.Millisecond, 10*time.Millisecond)
|
|
|
+
|
|
|
+	// Let's read past the pause threshold, which unpauses the reader
|
|
|
+ target = make([]byte, pauseThreshold+1)
|
|
|
+ n, err = asyncBuffer.readAt(target, 0)
|
|
|
+ require.NoError(t, err)
|
|
|
+ require.Equal(t, pauseThreshold, n)
|
|
|
+
|
|
|
+ // It usually returns only pauseThreshold bytes because this exact operation unpauses the reader,
|
|
|
+	// but since the initial offset is before the threshold, data beyond the threshold may not be available yet.
|
|
|
+ assert.GreaterOrEqual(t, pauseThreshold, n)
|
|
|
+
|
|
|
+ // Ensure that buffer hits the end of the stream
|
|
|
+ require.Eventually(t, func() bool {
|
|
|
+ return asyncBuffer.len.Load() >= pauseThreshold*2
|
|
|
+ }, 300*time.Millisecond, 10*time.Millisecond)
|
|
|
+}
|
|
|
+
|
|
|
+func TestAsyncBufferThresholdInstantBeyondAccess(t *testing.T) {
|
|
|
+ _, bytesReader := generateSourceData(t, int64(pauseThreshold)*2)
|
|
|
+ asyncBuffer := FromReader(bytesReader)
|
|
|
+ defer asyncBuffer.Close()
|
|
|
+
|
|
|
+ target := make([]byte, chunkSize)
|
|
|
+ n, err := asyncBuffer.readAt(target, pauseThreshold+1)
|
|
|
+ require.NoError(t, err)
|
|
|
+ assert.Equal(t, chunkSize, n)
|
|
|
+
|
|
|
+ // Ensure that buffer hits the end of the stream
|
|
|
+ require.Eventually(t, func() bool {
|
|
|
+ return asyncBuffer.len.Load() >= pauseThreshold*2
|
|
|
+ }, 300*time.Millisecond, 10*time.Millisecond)
|
|
|
+}
|