fix(event-db): lint checks now pass
stevenj committed Dec 18, 2023
1 parent 4e467a2 commit 290925d
Showing 23 changed files with 920 additions and 994 deletions.
83 changes: 10 additions & 73 deletions .sqlfluff
@@ -1,87 +1,24 @@
[sqlfluff]

# cspell: words templater cpus ctes capitalisation organisation
# cspell: words capitalisation

# Supported dialects https://docs.sqlfluff.com/en/stable/dialects.html
# Or run 'sqlfluff dialects'
# We do not set a global SQL Dialect, so that we can support multiple
# dialects in the same way. The dialect must be specified in a .sqlfluff file with the SQL files.
[sqlfluff]
dialect = postgres

# One of [raw|jinja|python|placeholder]
templater = raw

# Comma separated list of rules to exclude, or None
# See https://docs.sqlfluff.com/en/stable/configuration.html#enabling-and-disabling-rules
# AM04 (ambiguous.column_count) and ST06 (structure.column_order) are
# two of the more controversial rules included to illustrate usage.
# exclude_rules = ambiguous.column_count, structure.column_order

# The standard max_line_length is 80 in line with the convention of
# other tools and several style guides. Many projects however prefer
# something a little longer.
# Set to zero or negative to disable checks.
large_file_skip_char_limit = 0
max_line_length = 120

# CPU processes to use while linting.
# The default is "single threaded" to allow easy debugging, but this
# is often undesirable at scale.
# If positive, just implies number of processes.
# If negative or zero, implies number_of_cpus - specified_number.
# e.g. -1 means use all processors but one. 0 means all cpus.
processes = 0

# If using the dbt templater, we recommend setting the project dir.
[sqlfluff:templater:dbt]
project_dir = ./

[sqlfluff:indentation]
# While implicit indents are not enabled by default. Many of the
# SQLFluff maintainers do use them in their projects.
#allow_implicit_indents = True
tab_space_size = 2

indent_unit = space
tab_space_size = 4
indented_joins = True
indented_ctes = True
indented_using_on = True
indented_on_contents = True
indented_then = True
indented_then_contents = True
# allow_implicit_indents = False
template_blocks_indent = True

# The default configuration for aliasing rules is "consistent"
# which will auto-detect the setting from the rest of the file. This
# is less desirable in a new project and you may find this (slightly
# more strict) setting more useful.
[sqlfluff:rules:aliasing.table]
aliasing = explicit
[sqlfluff:rules:aliasing.column]
aliasing = explicit
[sqlfluff:rules:aliasing.length]
min_alias_length = 3
[sqlfluff:rules:layout.long_lines]
ignore_comment_lines = True
ignore_comment_clauses = True

# The default configuration for capitalisation rules is "consistent"
# which will auto-detect the setting from the rest of the file. This
# is less desirable in a new project and you may find this (slightly
# more strict) setting more useful.
# Typically we find users rely on syntax highlighting rather than
# capitalisation to distinguish between keywords and identifiers.
# Clearly, if your organisation has already settled on uppercase
# formatting for any of these syntax elements then set them to "upper".
# See https://stackoverflow.com/questions/608196/why-should-i-capitalize-my-sql-keywords-is-there-a-good-reason
[sqlfluff:rules:capitalisation.keywords]
capitalisation_policy = upper
[sqlfluff:rules:capitalisation.identifiers]
capitalisation_policy = lower
extended_capitalisation_policy = lower
[sqlfluff:rules:capitalisation.functions]
extended_capitalisation_policy = lower
extended_capitalisation_policy = upper
[sqlfluff:rules:capitalisation.literals]
capitalisation_policy = lower
[sqlfluff:rules:capitalisation.types]
extended_capitalisation_policy = upper
[sqlfluff:rules:layout.long_lines]
ignore_comment_clauses = false
ignore_comment_lines = false
[sqlfluff:rules:capitalisation.types]
extended_capitalisation_policy = upper
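
Note on the configuration above: the new policy settles capitalisation explicitly, keywords, functions and types upper-case, identifiers and literals lower-case, with explicit table and column aliases of at least three characters. That is why calls such as pg_read_file and gen_random_uuid are rewritten in upper case in the migration diffs below. As a hedged sketch (not part of this commit), a query conforming to the updated rules would look like:

    -- Illustrative only: upper-case keywords, lower-case identifiers and
    -- literals, explicit alias of at least three characters.
    SELECT jst.name,
        jst.schema
    FROM json_schema_type AS jst
    WHERE jst.type = 'config';
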
2 changes: 1 addition & 1 deletion Earthfile
@@ -32,7 +32,7 @@ repo-docs:

SAVE ARTIFACT /repo repo

repo-config:
repo-config-2:
# Create artifacts of config file we need to refer to in builders.
FROM scratch

15 changes: 2 additions & 13 deletions catalyst-gateway/event-db/Earthfile
@@ -12,32 +12,21 @@ builder:
DO github.com/input-output-hk/catalyst-ci/earthly/postgresql:fix/postgres-builders+BUILDER \
--sqlfluff_cfg=./../../+repo-config/repo/.sqlfluff

COPY ./../../+repo-config-2/repo/.sqlfluff .

# check if the sqlfiles are properly formatted and pass lint quality checks.
# CI target : true
check:
FROM +builder

DO github.com/input-output-hk/catalyst-ci/earthly/postgresql:fix/postgres-builders+CHECK

# Internal: build-sqlfluff is a target to build the necessary `sqlfluff` tool container.
# CI target : false
# This allows us to run sqlfluff commands locally without needing to install it locally.
# It does not need to be run directly, it is used by the `format` target.
build-sqlfluff:
BUILD github.com/input-output-hk/catalyst-ci/earthly/postgresql:fix/postgres-builders+sqlfluff-image

# format all SQL files in the current project. Local developers tool.
# CI target : false
format:
LOCALLY

# This is a trick. Earthly can NOT build and run a container in the same invocation.
# Because this target is running LOCALLY, and we know earthly must be installed, we can
# call earthly locally to build and publish the local target we need to run the sqlfluff linter.
# By recursively running earthly, we sidestep the limitation which prevents us from building
# a container and running it in the same invocation.
RUN earthly +build-sqlfluff

DO github.com/input-output-hk/catalyst-ci/earthly/postgresql:fix/postgres-builders+FORMAT --src=$(echo ${PWD}/../../)

# build an event db docker image.
50 changes: 25 additions & 25 deletions catalyst-gateway/event-db/migrations/V1__config_tables.sql
@@ -7,10 +7,10 @@

-- Version of the schema (Used by Refinery to manage migrations.).
CREATE TABLE IF NOT EXISTS refinery_schema_history (
version INTEGER NOT NULL PRIMARY KEY,
name VARCHAR(255),
applied_on VARCHAR(255),
checksum VARCHAR(255)
version INTEGER NOT NULL PRIMARY KEY,
name VARCHAR(255),
applied_on VARCHAR(255),
checksum VARCHAR(255)
);

COMMENT ON TABLE refinery_schema_history IS
@@ -36,15 +36,15 @@ Managed by the `refinery` cli tool.';
-- * Dashes, symbols or upper case should not be used.
-- Catalyst Event Database
CREATE TABLE json_schema_type (
id UUID PRIMARY KEY,
type TEXT NOT NULL,
name TEXT NOT NULL,
schema JSONB NOT NULL
id UUID PRIMARY KEY,
type TEXT NOT NULL,
name TEXT NOT NULL,
schema JSONB NOT NULL
);

CREATE INDEX json_schema_type_idx ON json_schema_type ("type");
CREATE UNIQUE INDEX json_schema_type_name_idx ON json_schema_type (
"type", "name"
"type", "name"
);

COMMENT ON TABLE json_schema_type IS
@@ -71,14 +71,14 @@ Must match the `name` component of the $id URI inside the schema.';
-- Config Table
-- This table is looked up with three keys, `id`, `id2` and `id3`
CREATE TABLE config (
row_id SERIAL PRIMARY KEY,
id VARCHAR NOT NULL,
id2 VARCHAR NOT NULL,
id3 VARCHAR NOT NULL,
value JSONB NULL,
value_schema UUID,

FOREIGN KEY (value_schema) REFERENCES json_schema_type (id) ON DELETE CASCADE
row_id SERIAL PRIMARY KEY,
id VARCHAR NOT NULL,
id2 VARCHAR NOT NULL,
id3 VARCHAR NOT NULL,
value JSONB NULL,
value_schema UUID,

FOREIGN KEY (value_schema) REFERENCES json_schema_type (id) ON DELETE CASCADE
);

-- id+id2+id3 must be unique, they are a combined key.
@@ -123,12 +123,12 @@ at the app level to allow for querying groups of data.';
INSERT INTO json_schema_type (id, type, name, schema)
VALUES
(
'd899cd44-3513-487b-ab46-fdca662a724d', -- Fix the Schema ID so that it is consistent.
'config',
'dbsync',
(SELECT jsonb FROM pg_read_file('../json_schemas/config/dbsync.json')),
'62d614c0-97a7-41ec-a976-91294b8f4384', -- Fix the Schema ID so that it is consistent.
'config',
'registration',
(SELECT jsonb FROM pg_read_file('../json_schemas/config/registration.json'))
'd899cd44-3513-487b-ab46-fdca662a724d', -- Fix the Schema ID so that it is consistent.
'config',
'dbsync',
(SELECT jsonb FROM PG_READ_FILE('../json_schemas/config/dbsync.json')),
'62d614c0-97a7-41ec-a976-91294b8f4384', -- Fix the Schema ID so that it is consistent.
'config',
'registration',
(SELECT jsonb FROM PG_READ_FILE('../json_schemas/config/registration.json'))
);
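
As a hedged sketch (not part of this commit), the three-key lookup described in the comment on the config table above might be used as follows; the key values here are hypothetical:

    -- Illustrative only: id, id2 and id3 form the combined lookup key.
    SELECT cfg.value
    FROM config AS cfg
    WHERE cfg.id = 'dbsync'
        AND cfg.id2 = ''
        AND cfg.id3 = '';
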
76 changes: 38 additions & 38 deletions catalyst-gateway/event-db/migrations/V2__event_tables.sql
@@ -20,15 +20,15 @@
-- -------------------------------------------------------------------------------------------------

CREATE TABLE event_type (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
name TEXT NOT NULL,
description_schema UUID NOT NULL,
data_schema UUID NOT NULL,

FOREIGN KEY (description_schema) REFERENCES json_schema_type (
id
) ON DELETE CASCADE,
FOREIGN KEY (data_schema) REFERENCES json_schema_type (id) ON DELETE CASCADE
id UUID PRIMARY KEY DEFAULT GEN_RANDOM_UUID(),
name TEXT NOT NULL,
description_schema UUID NOT NULL,
data_schema UUID NOT NULL,

FOREIGN KEY (description_schema) REFERENCES json_schema_type (
id
) ON DELETE CASCADE,
FOREIGN KEY (data_schema) REFERENCES json_schema_type (id) ON DELETE CASCADE
);

CREATE UNIQUE INDEX event_type_name_idx ON event_type (name);
@@ -59,50 +59,50 @@ VALUES
INSERT INTO json_schema_type (id, type, name, schema)
VALUES
(
'd899cd44-3513-487b-ab46-fdca662a724d', -- From the schema file.
'event_description',
'multiline_text',
(
SELECT jsonb
FROM pg_read_file('../json_schemas/event/description/multiline_text.json')
)
'd899cd44-3513-487b-ab46-fdca662a724d', -- From the schema file.
'event_description',
'multiline_text',
(
SELECT jsonb
FROM PG_READ_FILE('../json_schemas/event/description/multiline_text.json')
)
),
(
'9c5df318-fa9a-4310-80fa-490f46d1cc43', -- From the schema file.
'event_data',
'catalyst_v1',
(
SELECT jsonb
FROM pg_read_file('../json_schemas/event/description/catalyst_v1.json')
)
'9c5df318-fa9a-4310-80fa-490f46d1cc43', -- From the schema file.
'event_data',
'catalyst_v1',
(
SELECT jsonb
FROM PG_READ_FILE('../json_schemas/event/description/catalyst_v1.json')
)
);

-- Define a Catalyst V1 Event.

INSERT INTO event_type (name, description_schema, data_schema)
VALUES
(
'Catalyst V1',
'd899cd44-3513-487b-ab46-fdca662a724d',
'9c5df318-fa9a-4310-80fa-490f46d1cc43'
'Catalyst V1',
'd899cd44-3513-487b-ab46-fdca662a724d',
'9c5df318-fa9a-4310-80fa-490f46d1cc43'
);

-- -------------------------------------------------------------------------------------------------

-- Event Table - Defines each voting or decision event
CREATE TABLE "event" (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
-- The Organizer/Administrator of this event.
-- Update once RBAC is defined, as Organizer is an RBAC Role.
organizer TEXT NOT NULL,
type UUID REFERENCES event_type (id),
name TEXT NOT NULL,
description JSONB NOT NULL,
start_time TIMESTAMP,
backing_start TIMESTAMP,
backing_end TIMESTAMP,
end_time TIMESTAMP,
data JSONB NOT NULL
id UUID PRIMARY KEY DEFAULT GEN_RANDOM_UUID(),
-- The Organizer/Administrator of this event.
-- Update once RBAC is defined, as Organizer is an RBAC Role.
organizer TEXT NOT NULL,
type UUID REFERENCES event_type (id),
name TEXT NOT NULL,
description JSONB NOT NULL,
start_time TIMESTAMP,
backing_start TIMESTAMP,
backing_end TIMESTAMP,
end_time TIMESTAMP,
data JSONB NOT NULL
);

CREATE UNIQUE INDEX event_name_idx ON event (name);
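
As a hedged sketch (not part of this commit), resolving an event's type through the foreign key defined above might look like:

    -- Illustrative only: join event to event_type via the type foreign key.
    SELECT evt.name,
        etype.name AS type_name
    FROM "event" AS evt
    INNER JOIN event_type AS etype ON evt.type = etype.id;
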
