improve tests (#36)
leeym authored Mar 4, 2024
1 parent 310e1c4 commit ac5e8cd
Showing 9 changed files with 395 additions and 355 deletions.
5 changes: 5 additions & 0 deletions Makefile
@@ -0,0 +1,5 @@
all: test

test:
	docker-compose up -d
	ETCD_ENDPOINTS="127.0.0.1:2379" REDIS_ADDR="127.0.0.1:6379" ZOOKEEPER_ENDPOINTS="127.0.0.1" CONSUL_ADDR="127.0.0.1:8500" AWS_ADDR="127.0.0.1:8000" MEMCACHED_ADDR="127.0.0.1:11211" go test -race -v -failfast
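For reference, here is a minimal sketch of how the addresses exported by the `test` target above could be read on the Go side. The helper name and fallback values are illustrative assumptions, not code from this commit:

```go
// Hypothetical sketch: reading the backend addresses that `make test` exports.
// The helper and variable names are illustrative, not the repository's identifiers.
package limiters_test

import "os"

// envOr returns the named environment variable, falling back to a default
// matching the docker-compose services started by `make test`.
func envOr(name, fallback string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return fallback
}

var (
	redisAddr     = envOr("REDIS_ADDR", "127.0.0.1:6379")
	memcachedAddr = envOr("MEMCACHED_ADDR", "127.0.0.1:11211")
	etcdEndpoints = envOr("ETCD_ENDPOINTS", "127.0.0.1:2379")
)
```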
3 changes: 1 addition & 2 deletions README.md
@@ -142,8 +142,7 @@ If memcached exists already and it is okay to handle burst traffic caused by une

Run tests locally:
```bash
docker-compose up -d # start etcd, Redis, memcached, zookeeper, consul, and localstack
ETCD_ENDPOINTS="127.0.0.1:2379" REDIS_ADDR="127.0.0.1:6379" ZOOKEEPER_ENDPOINTS="127.0.0.1" CONSUL_ADDR="127.0.0.1:8500" AWS_ADDR="127.0.0.1:8000" MEMCACHED_ADDR="127.0.0.1:11211" go test -race -v
make
```

Run [Drone](https://drone.io) CI tests locally:
116 changes: 62 additions & 54 deletions concurrent_buffer_test.go
@@ -11,93 +11,101 @@ import (
l "github.com/mennanov/limiters"
)

func (s *LimitersTestSuite) concurrentBuffers(capacity int64, ttl time.Duration, clock l.Clock) []*l.ConcurrentBuffer {
var buffers []*l.ConcurrentBuffer
for _, locker := range s.lockers(true) {
for _, b := range s.concurrentBufferBackends(ttl, clock) {
buffers = append(buffers, l.NewConcurrentBuffer(locker, b, capacity, s.logger))
func (s *LimitersTestSuite) concurrentBuffers(capacity int64, ttl time.Duration, clock l.Clock) map[string]*l.ConcurrentBuffer {
buffers := make(map[string]*l.ConcurrentBuffer)
for lockerName, locker := range s.lockers(true) {
for bName, b := range s.concurrentBufferBackends(ttl, clock) {
buffers[lockerName+":"+bName] = l.NewConcurrentBuffer(locker, b, capacity, s.logger)
}
}

return buffers
}

func (s *LimitersTestSuite) concurrentBufferBackends(ttl time.Duration, clock l.Clock) []l.ConcurrentBufferBackend {
return []l.ConcurrentBufferBackend{
l.NewConcurrentBufferInMemory(l.NewRegistry(), ttl, clock),
l.NewConcurrentBufferRedis(s.redisClient, uuid.New().String(), ttl, clock),
l.NewConcurrentBufferMemcached(s.memcacheClient, uuid.New().String(), ttl, clock),
func (s *LimitersTestSuite) concurrentBufferBackends(ttl time.Duration, clock l.Clock) map[string]l.ConcurrentBufferBackend {
return map[string]l.ConcurrentBufferBackend{
"ConcurrentBufferInMemory": l.NewConcurrentBufferInMemory(l.NewRegistry(), ttl, clock),
"ConcurrentBufferRedis": l.NewConcurrentBufferRedis(s.redisClient, uuid.New().String(), ttl, clock),
"ConcurrentBufferMemcached": l.NewConcurrentBufferMemcached(s.memcacheClient, uuid.New().String(), ttl, clock),
}
}

func (s *LimitersTestSuite) TestConcurrentBufferNoOverflow() {
clock := newFakeClock()
capacity := int64(10)
ttl := time.Second
for _, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
wg := sync.WaitGroup{}
for i := int64(0); i < capacity; i++ {
wg.Add(1)
go func(i int64, buffer *l.ConcurrentBuffer) {
defer wg.Done()
key := fmt.Sprintf("key%d", i)
s.NoError(buffer.Limit(context.TODO(), key))
s.NoError(buffer.Done(context.TODO(), key))
}(i, buffer)
}
wg.Wait()
s.NoError(buffer.Limit(context.TODO(), "last"))
s.NoError(buffer.Done(context.TODO(), "last"))
for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
s.Run(name, func() {
wg := sync.WaitGroup{}
for i := int64(0); i < capacity; i++ {
wg.Add(1)
go func(i int64, buffer *l.ConcurrentBuffer) {
defer wg.Done()
key := fmt.Sprintf("key%d", i)
s.NoError(buffer.Limit(context.TODO(), key))
s.NoError(buffer.Done(context.TODO(), key))
}(i, buffer)
}
wg.Wait()
s.NoError(buffer.Limit(context.TODO(), "last"))
s.NoError(buffer.Done(context.TODO(), "last"))
})
}
}

func (s *LimitersTestSuite) TestConcurrentBufferOverflow() {
clock := newFakeClock()
capacity := int64(3)
ttl := time.Second
for _, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
mu := sync.Mutex{}
var errors []error
wg := sync.WaitGroup{}
for i := int64(0); i <= capacity; i++ {
wg.Add(1)
go func(i int64, buffer *l.ConcurrentBuffer) {
defer wg.Done()
if err := buffer.Limit(context.TODO(), fmt.Sprintf("key%d", i)); err != nil {
mu.Lock()
errors = append(errors, err)
mu.Unlock()
}
}(i, buffer)
}
wg.Wait()
s.Equal([]error{l.ErrLimitExhausted}, errors)
for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
s.Run(name, func() {
mu := sync.Mutex{}
var errors []error
wg := sync.WaitGroup{}
for i := int64(0); i <= capacity; i++ {
wg.Add(1)
go func(i int64, buffer *l.ConcurrentBuffer) {
defer wg.Done()
if err := buffer.Limit(context.TODO(), fmt.Sprintf("key%d", i)); err != nil {
mu.Lock()
errors = append(errors, err)
mu.Unlock()
}
}(i, buffer)
}
wg.Wait()
s.Equal([]error{l.ErrLimitExhausted}, errors)
})
}
}

func (s *LimitersTestSuite) TestConcurrentBufferExpiredKeys() {
clock := newFakeClock()
capacity := int64(2)
ttl := time.Second
for _, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
s.Require().NoError(buffer.Limit(context.TODO(), "key1"))
clock.Sleep(ttl / 2)
s.Require().NoError(buffer.Limit(context.TODO(), "key2"))
clock.Sleep(ttl / 2)
// No error is expected (even though the following request overflows the capacity) as the first key has
// already expired by this time.
s.NoError(buffer.Limit(context.TODO(), "key3"))
for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
s.Run(name, func() {
s.Require().NoError(buffer.Limit(context.TODO(), "key1"))
clock.Sleep(ttl / 2)
s.Require().NoError(buffer.Limit(context.TODO(), "key2"))
clock.Sleep(ttl / 2)
// No error is expected (even though the following request overflows the capacity) as the first key has
// already expired by this time.
s.NoError(buffer.Limit(context.TODO(), "key3"))
})
}
}

func (s *LimitersTestSuite) TestConcurrentBufferDuplicateKeys() {
clock := newFakeClock()
capacity := int64(2)
ttl := time.Second
for _, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
s.Require().NoError(buffer.Limit(context.TODO(), "key1"))
s.Require().NoError(buffer.Limit(context.TODO(), "key2"))
// No error is expected as it should just update the timestamp of the existing key.
s.NoError(buffer.Limit(context.TODO(), "key1"))
for name, buffer := range s.concurrentBuffers(capacity, ttl, clock) {
s.Run(name, func() {
s.Require().NoError(buffer.Limit(context.TODO(), "key1"))
s.Require().NoError(buffer.Limit(context.TODO(), "key2"))
// No error is expected as it should just update the timestamp of the existing key.
s.NoError(buffer.Limit(context.TODO(), "key1"))
})
}
}
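The refactor above follows a common testify pattern: backends are keyed by name in a map and each iteration runs under `s.Run`, so a failing assertion reports which backend broke instead of an anonymous loop index. A self-contained sketch of that pattern (the suite and backend types here are placeholders, not the repository's):

```go
// Minimal sketch of the named-subtest pattern this commit applies.
package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type backend interface{ Ping() error }

type fakeBackend struct{}

func (fakeBackend) Ping() error { return nil }

type ExampleSuite struct{ suite.Suite }

// backends returns the implementations under test, keyed by a human-readable name.
func (s *ExampleSuite) backends() map[string]backend {
	return map[string]backend{
		"InMemory": fakeBackend{},
		"Redis":    fakeBackend{}, // stand-in; a real suite would return a Redis-backed implementation
	}
}

func (s *ExampleSuite) TestPing() {
	for name, b := range s.backends() {
		// Each backend runs as a named subtest, e.g. "TestPing/Redis".
		s.Run(name, func() {
			s.NoError(b.Ping())
		})
	}
}

func TestExampleSuite(t *testing.T) { suite.Run(t, new(ExampleSuite)) }
```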
80 changes: 42 additions & 38 deletions fixedwindow_test.go
@@ -10,20 +10,20 @@ import (
)

// fixedWindows returns all the possible FixedWindow combinations.
func (s *LimitersTestSuite) fixedWindows(capacity int64, rate time.Duration, clock l.Clock) []*l.FixedWindow {
var windows []*l.FixedWindow
for _, inc := range s.fixedWindowIncrementers() {
windows = append(windows, l.NewFixedWindow(capacity, rate, inc, clock))
func (s *LimitersTestSuite) fixedWindows(capacity int64, rate time.Duration, clock l.Clock) map[string]*l.FixedWindow {
windows := make(map[string]*l.FixedWindow)
for name, inc := range s.fixedWindowIncrementers() {
windows[name] = l.NewFixedWindow(capacity, rate, inc, clock)
}
return windows
}

func (s *LimitersTestSuite) fixedWindowIncrementers() []l.FixedWindowIncrementer {
return []l.FixedWindowIncrementer{
l.NewFixedWindowInMemory(),
l.NewFixedWindowRedis(s.redisClient, uuid.New().String()),
l.NewFixedWindowMemcached(s.memcacheClient, uuid.New().String()),
l.NewFixedWindowDynamoDB(s.dynamodbClient, uuid.New().String(), s.dynamoDBTableProps),
func (s *LimitersTestSuite) fixedWindowIncrementers() map[string]l.FixedWindowIncrementer {
return map[string]l.FixedWindowIncrementer{
"FixedWindowInMemory": l.NewFixedWindowInMemory(),
"FixedWindowRedis": l.NewFixedWindowRedis(s.redisClient, uuid.New().String()),
"FixedWindowMemcached": l.NewFixedWindowMemcached(s.memcacheClient, uuid.New().String()),
"FixedWindowDynamoDB": l.NewFixedWindowDynamoDB(s.dynamodbClient, uuid.New().String(), s.dynamoDBTableProps),
}
}

@@ -60,40 +60,44 @@ var fixedWindowTestCases = []struct {
func (s *LimitersTestSuite) TestFixedWindowFakeClock() {
clock := newFakeClockWithTime(time.Date(2019, 8, 30, 0, 0, 0, 0, time.UTC))
for _, testCase := range fixedWindowTestCases {
for _, bucket := range s.fixedWindows(testCase.capacity, testCase.rate, clock) {
clock.reset()
miss := 0
for i := 0; i < testCase.requestCount; i++ {
// No pause for the first request.
if i > 0 {
clock.Sleep(testCase.requestRate)
}
if _, err := bucket.Limit(context.TODO()); err != nil {
s.Equal(l.ErrLimitExhausted, err)
miss++
for name, bucket := range s.fixedWindows(testCase.capacity, testCase.rate, clock) {
s.Run(name, func() {
clock.reset()
miss := 0
for i := 0; i < testCase.requestCount; i++ {
// No pause for the first request.
if i > 0 {
clock.Sleep(testCase.requestRate)
}
if _, err := bucket.Limit(context.TODO()); err != nil {
s.Equal(l.ErrLimitExhausted, err)
miss++
}
}
}
s.Equal(testCase.missExpected, miss, testCase)
s.Equal(testCase.missExpected, miss, testCase)
})
}
}
}

func (s *LimitersTestSuite) TestFixedWindowOverflow() {
clock := newFakeClockWithTime(time.Date(2019, 8, 30, 0, 0, 0, 0, time.UTC))
for _, bucket := range s.fixedWindows(2, time.Second, clock) {
clock.reset()
w, err := bucket.Limit(context.TODO())
s.Require().NoError(err)
s.Equal(time.Duration(0), w)
w, err = bucket.Limit(context.TODO())
s.Require().NoError(err)
s.Equal(time.Duration(0), w)
w, err = bucket.Limit(context.TODO())
s.Require().Equal(l.ErrLimitExhausted, err)
s.Equal(time.Second, w)
clock.Sleep(time.Second)
w, err = bucket.Limit(context.TODO())
s.Require().NoError(err)
s.Equal(time.Duration(0), w)
for name, bucket := range s.fixedWindows(2, time.Second, clock) {
s.Run(name, func() {
clock.reset()
w, err := bucket.Limit(context.TODO())
s.Require().NoError(err)
s.Equal(time.Duration(0), w)
w, err = bucket.Limit(context.TODO())
s.Require().NoError(err)
s.Equal(time.Duration(0), w)
w, err = bucket.Limit(context.TODO())
s.Require().Equal(l.ErrLimitExhausted, err)
s.Equal(time.Second, w)
clock.Sleep(time.Second)
w, err = bucket.Limit(context.TODO())
s.Require().NoError(err)
s.Equal(time.Duration(0), w)
})
}
}
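Outside the test suite, the FixedWindow limiter exercised above would be used roughly as follows. This is a hedged sketch built only from constructors visible in this diff, plus a real-time clock that is assumed to be exposed as `l.NewSystemClock`:

```go
// Usage sketch for FixedWindow; the system clock constructor is an assumption,
// the other identifiers appear in the tests above.
package main

import (
	"context"
	"fmt"
	"time"

	l "github.com/mennanov/limiters"
)

func main() {
	// Allow 2 requests per second, backed by the in-memory incrementer.
	window := l.NewFixedWindow(2, time.Second, l.NewFixedWindowInMemory(), l.NewSystemClock())
	for i := 0; i < 3; i++ {
		wait, err := window.Limit(context.Background())
		if err == l.ErrLimitExhausted {
			fmt.Printf("request %d rejected, retry after %s\n", i, wait)
			continue
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("request %d allowed\n", i)
	}
}
```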