Skip to content

Commit

Permalink
feat(config): Fix storage config naming and clean up code
Browse files Browse the repository at this point in the history
  • Loading branch information
skhokhlov committed May 19, 2023
1 parent b22a19e commit bacf86d
Show file tree
Hide file tree
Showing 12 changed files with 52 additions and 51 deletions.
16 changes: 8 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -296,14 +296,14 @@ Here are some examples of conditions you can use:


### Storage
| Parameter | Description | Default |
|:---------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------|:-----------|
| `storage` | Storage configuration | `{}` |
| `storage.path` | Path to persist the data in. Only supported for types `sqlite` and `postgres`. | `""` |
| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres`. | `"memory"` |
| `storage.caching` | Whether to use write-through caching. Improves loading time for large dashboards. <br />Only supported if `storage.type` is `sqlite` or `postgres` | `false` |
| `storage.maximumNumberOfResults` | The maximum number of results that an endpoint can have | `100` |
| `storage.maximumNumberOfEvents` | The maximum number of events that an endpoint can have | `50` |
| Parameter | Description | Default |
|:------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------|:-----------|
| `storage` | Storage configuration | `{}` |
| `storage.path` | Path to persist the data in. Only supported for types `sqlite` and `postgres`. | `""` |
| `storage.type` | Type of storage. Valid types: `memory`, `sqlite`, `postgres`. | `"memory"` |
| `storage.caching` | Whether to use write-through caching. Improves loading time for large dashboards. <br />Only supported if `storage.type` is `sqlite` or `postgres` | `false` |
| `storage.maximum-number-of-results` | The maximum number of results that an endpoint can have | `100` |
| `storage.maximum-number-of-events` | The maximum number of events that an endpoint can have | `50` |

The results for each endpoint health check as well as the data for uptime and the past events must be persisted
so that they can be displayed on the dashboard. These parameters allow you to configure the storage in question.
Expand Down
4 changes: 2 additions & 2 deletions config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -303,8 +303,8 @@ func TestParseAndValidateConfigBytes(t *testing.T) {
storage:
type: sqlite
path: %s
maximumNumberOfResults: 10
maximumNumberOfEvents: 5
maximum-number-of-results: 10
maximum-number-of-events: 5
maintenance:
enabled: true
start: 00:00
Expand Down
2 changes: 1 addition & 1 deletion config/ui/ui.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@ package ui
import (
"bytes"
"errors"
"github.com/TwiN/gatus/v5/storage"
"html/template"

"github.com/TwiN/gatus/v5/storage"
static "github.com/TwiN/gatus/v5/web"
)

Expand Down
4 changes: 2 additions & 2 deletions controller/handler/endpoint_status.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ var (
// Due to how intensive this operation can be on the storage, this function leverages a cache.
func EndpointStatuses(cfg *config.Config) http.HandlerFunc {
return func(writer http.ResponseWriter, r *http.Request) {
page, pageSize := extractPageAndPageSizeFromRequest(r, cfg)
page, pageSize := extractPageAndPageSizeFromRequest(r, cfg.Storage.MaximumNumberOfResults)
value, exists := cache.Get(fmt.Sprintf("endpoint-status-%d-%d", page, pageSize))
var data []byte
if !exists {
Expand Down Expand Up @@ -100,7 +100,7 @@ func getEndpointStatusesFromRemoteInstances(remoteConfig *remote.Config) ([]*cor
// EndpointStatus retrieves a single core.EndpointStatus by group and endpoint name
func EndpointStatus(cfg *config.Config) http.HandlerFunc {
return func(writer http.ResponseWriter, r *http.Request) {
page, pageSize := extractPageAndPageSizeFromRequest(r, cfg)
page, pageSize := extractPageAndPageSizeFromRequest(r, cfg.Storage.MaximumNumberOfResults)
vars := mux.Vars(r)
endpointStatus, err := store.Get().GetEndpointStatusByKey(vars["key"], paging.NewEndpointStatusParams().WithResults(page, pageSize).WithEvents(1, cfg.Storage.MaximumNumberOfEvents))
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion controller/handler/endpoint_status_test.go
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
package handler

import (
"github.com/TwiN/gatus/v5/storage"
"net/http"
"net/http/httptest"
"testing"
"time"

"github.com/TwiN/gatus/v5/config"
"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store"
"github.com/TwiN/gatus/v5/watchdog"
)
Expand Down
7 changes: 3 additions & 4 deletions controller/handler/util.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package handler

import (
"github.com/TwiN/gatus/v5/config"
"net/http"
"strconv"
)
Expand All @@ -14,7 +13,7 @@ const (
DefaultPageSize = 20
)

func extractPageAndPageSizeFromRequest(r *http.Request, cfg *config.Config) (page int, pageSize int) {
func extractPageAndPageSizeFromRequest(r *http.Request, maximumNumberOfResults int) (page int, pageSize int) {
var err error
if pageParameter := r.URL.Query().Get("page"); len(pageParameter) == 0 {
page = DefaultPage
Expand All @@ -34,8 +33,8 @@ func extractPageAndPageSizeFromRequest(r *http.Request, cfg *config.Config) (pag
if err != nil {
pageSize = DefaultPageSize
}
if pageSize > cfg.Storage.MaximumNumberOfResults {
pageSize = cfg.Storage.MaximumNumberOfResults
if pageSize > maximumNumberOfResults {
pageSize = maximumNumberOfResults
} else if pageSize < 1 {
pageSize = DefaultPageSize
}
Expand Down
7 changes: 4 additions & 3 deletions controller/handler/util_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@ package handler

import (
"fmt"
"github.com/TwiN/gatus/v5/config"
"github.com/TwiN/gatus/v5/storage"
"net/http"
"testing"

"github.com/TwiN/gatus/v5/config"
"github.com/TwiN/gatus/v5/storage"
)

func TestExtractPageAndPageSizeFromRequest(t *testing.T) {
Expand Down Expand Up @@ -62,7 +63,7 @@ func TestExtractPageAndPageSizeFromRequest(t *testing.T) {
for _, scenario := range scenarios {
t.Run("page-"+scenario.Page+"-pageSize-"+scenario.PageSize, func(t *testing.T) {
request, _ := http.NewRequest("GET", fmt.Sprintf("/api/v1/statuses?page=%s&pageSize=%s", scenario.Page, scenario.PageSize), http.NoBody)
actualPage, actualPageSize := extractPageAndPageSizeFromRequest(request, cfg)
actualPage, actualPageSize := extractPageAndPageSizeFromRequest(request, cfg.Storage.MaximumNumberOfResults)
if actualPage != scenario.ExpectedPage {
t.Errorf("expected %d, got %d", scenario.ExpectedPage, actualPage)
}
Expand Down
4 changes: 2 additions & 2 deletions storage/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,10 +32,10 @@ type Config struct {
Caching bool `yaml:"caching,omitempty"`

// MaximumNumberOfResults is the maximum number of results that an endpoint can have
MaximumNumberOfResults int `yaml:"maximumNumberOfResults,omitempty"`
MaximumNumberOfResults int `yaml:"maximum-number-of-results,omitempty"`

// MaximumNumberOfEvents is the maximum number of events that an endpoint can have
MaximumNumberOfEvents int `yaml:"maximumNumberOfEvents,omitempty"`
MaximumNumberOfEvents int `yaml:"maximum-number-of-events,omitempty"`
}

// ValidateAndSetDefaults validates the configuration and sets the default values (if applicable)
Expand Down
2 changes: 1 addition & 1 deletion storage/store/memory/util_test.go
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
package memory

import (
"github.com/TwiN/gatus/v5/storage"
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)

Expand Down
23 changes: 10 additions & 13 deletions storage/store/sql/sql.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,10 @@ const (
uptimeRetention = 7 * 24 * time.Hour

cacheTTL = 10 * time.Minute

eventsAboveMaximumCleanUpThreshold = 10

resultsAboveMaximumCleanUpThreshold = 10
)

var (
Expand All @@ -59,11 +63,6 @@ type Store struct {
maximumNumberOfResults int
// maximumNumberOfEvents is the maximum number of events that an endpoint can have
maximumNumberOfEvents int

// eventsCleanUpThreshold is a maximum number of events before triggering a clean up
eventsCleanUpThreshold int
// resultsCleanUpThreshold is a maximum number of results before triggering a clean up
resultsCleanUpThreshold int
}

// NewStore initializes the database and creates the schema if it doesn't already exist in the path specified
Expand All @@ -75,12 +74,10 @@ func NewStore(driver, path string, caching bool, maximumNumberOfResults int, max
return nil, ErrPathNotSpecified
}
store := &Store{
driver: driver,
path: path,
maximumNumberOfResults: maximumNumberOfResults,
maximumNumberOfEvents: maximumNumberOfEvents,
resultsCleanUpThreshold: maximumNumberOfResults + 10,
eventsCleanUpThreshold: maximumNumberOfEvents + 10,
driver: driver,
path: path,
maximumNumberOfResults: maximumNumberOfResults,
maximumNumberOfEvents: maximumNumberOfEvents,
}
var err error
if store.db, err = sql.Open(driver, path); err != nil {
Expand Down Expand Up @@ -309,7 +306,7 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
// Clean up old events if there's more than twice the maximum number of events
// This lets us both keep the table clean without impacting performance too much
// (since we're only deleting MaximumNumberOfEvents at a time instead of 1)
if numberOfEvents > int64(s.eventsCleanUpThreshold) {
if numberOfEvents > int64(s.maximumNumberOfEvents+eventsAboveMaximumCleanUpThreshold) {
if err = s.deleteOldEndpointEvents(tx, endpointID); err != nil {
log.Printf("[sql][Insert] Failed to delete old events for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
}
Expand All @@ -326,7 +323,7 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
if err != nil {
log.Printf("[sql][Insert] Failed to retrieve total number of results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
} else {
if numberOfResults > int64(s.resultsCleanUpThreshold) {
if numberOfResults > int64(s.maximumNumberOfResults+resultsAboveMaximumCleanUpThreshold) {
if err = s.deleteOldEndpointResults(tx, endpointID); err != nil {
log.Printf("[sql][Insert] Failed to delete old results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
}
Expand Down
23 changes: 13 additions & 10 deletions storage/store/sql/sql_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -154,15 +154,18 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
defer store.Close()
for i := 0; i < store.resultsCleanUpThreshold+store.eventsCleanUpThreshold; i++ {
resultsCleanUpThreshold := store.maximumNumberOfResults + resultsAboveMaximumCleanUpThreshold
eventsCleanUpThreshold := store.maximumNumberOfEvents + eventsAboveMaximumCleanUpThreshold
for i := 0; i < resultsCleanUpThreshold+eventsCleanUpThreshold; i++ {
store.Insert(&testEndpoint, &testSuccessfulResult)
store.Insert(&testEndpoint, &testUnsuccessfulResult)
ss, _ := store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, storage.DefaultMaximumNumberOfResults*5).WithEvents(1, storage.DefaultMaximumNumberOfEvents*5))
if len(ss.Results) > store.resultsCleanUpThreshold+1 {
t.Errorf("number of results shouldn't have exceeded %d, reached %d", store.resultsCleanUpThreshold, len(ss.Results))

if len(ss.Results) > resultsCleanUpThreshold+1 {
t.Errorf("number of results shouldn't have exceeded %d, reached %d", resultsCleanUpThreshold, len(ss.Results))
}
if len(ss.Events) > store.eventsCleanUpThreshold+1 {
t.Errorf("number of events shouldn't have exceeded %d, reached %d", store.eventsCleanUpThreshold, len(ss.Events))
if len(ss.Events) > eventsCleanUpThreshold+1 {
t.Errorf("number of events shouldn't have exceeded %d, reached %d", eventsCleanUpThreshold, len(ss.Events))
}
}
store.Clear()
Expand Down Expand Up @@ -390,7 +393,7 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoints")
_, _ = store.db.Exec("drop table endpoints")
// And now we'll try to insert something in our broken schema
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
Expand Down Expand Up @@ -419,7 +422,7 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_events")
_, _ = store.db.Exec("drop table endpoint_events")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, because this should silently fails, got", err.Error())
}
Expand All @@ -435,7 +438,7 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_results")
_, _ = store.db.Exec("drop table endpoint_results")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
Expand All @@ -451,7 +454,7 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_result_conditions")
_, _ = store.db.Exec("drop table endpoint_result_conditions")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
Expand All @@ -464,7 +467,7 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_uptimes")
_, _ = store.db.Exec("drop table endpoint_uptimes")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, because this should silently fails, got", err.Error())
}
Expand Down
9 changes: 5 additions & 4 deletions storage/store/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,10 +95,11 @@ func Initialize(cfg *storage.Config) error {
if cfg == nil {
// This only happens in tests
log.Println("[store][Initialize] nil storage config passed as parameter. This should only happen in tests. Defaulting to an empty config.")
cfg = &storage.Config{
MaximumNumberOfResults: storage.DefaultMaximumNumberOfResults,
MaximumNumberOfEvents: storage.DefaultMaximumNumberOfEvents,
}
cfg = &storage.Config{}
}
if cfg.MaximumNumberOfResults == 0 && cfg.MaximumNumberOfEvents == 0 {
cfg.MaximumNumberOfResults = storage.DefaultMaximumNumberOfResults
cfg.MaximumNumberOfEvents = storage.DefaultMaximumNumberOfEvents
}
if len(cfg.Path) == 0 && cfg.Type != storage.TypePostgres {
log.Printf("[store][Initialize] Creating storage provider of type=%s", cfg.Type)
Expand Down

0 comments on commit bacf86d

Please sign in to comment.