Provide the "Initial tables" alembic migration
Since we have already been using the database for quite some time, we
create an initial migration which takes an empty database to the schema
as of commit 6a764f1. That commit was the most recent version deployed
in Red Hat's staging environment.

To make it convenient to work with migrations, we also create a `tox`
environment, `alembic`, which invokes the new script `alembic.run`; one
can modify that script to execute whatever Alembic commands are needed.
The committed copy takes no actions, but records the initial commands
used to create the first migration:

    alembic stamp head
    alembic revision --autogenerate -m "Initial tables"

We also update the `alembic.ini` file to assume a `localhost` PostgreSQL
database, and provide a Pbench Server configuration file that works in
the Alembic environment.
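
To apply the resulting migration to the assumed local PostgreSQL
database, one could, for example, edit `alembic.run` to run:

    alembic upgrade head

and invoke it through the new `tox` environment.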
portante committed Jan 16, 2023
1 parent aacf782 commit 3684812
Showing 5 changed files with 243 additions and 1 deletion.
2 changes: 1 addition & 1 deletion lib/pbench/server/database/alembic.ini
@@ -35,7 +35,7 @@ script_location = alembic
# are written from script.py.mako
# output_encoding = utf-8

-sqlalchemy.url = driver://user:pass@localhost/dbname
+sqlalchemy.url = postgresql://pbench:pbench@localhost:5432/pbench


[post_write_hooks]
13 changes: 13 additions & 0 deletions lib/pbench/server/database/alembic.run
@@ -0,0 +1,13 @@
#!/bin/bash -e

# The alembic command expects to run in our database directory.
cd lib/pbench/server/database

# Our Alembic environment requires a Pbench Server config file.
export _PBENCH_SERVER_CONFIG=$(pwd)/pbench-server.cfg

# This command had to be run before the initial migration could be created.
#alembic stamp head

# Then the initial migration was autogenerated against an empty database.
#alembic revision --autogenerate -m "Initial tables"
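
For reference, the same bootstrap steps can also be driven from Python
through Alembic's command API; this is a minimal sketch, assuming it is
run from the database directory with `_PBENCH_SERVER_CONFIG` already
exported:

    from alembic import command
    from alembic.config import Config

    # Load the alembic.ini configured in this commit.
    cfg = Config("alembic.ini")

    # Record the current head as the database's baseline revision, as
    # "alembic stamp head" does on the command line.
    command.stamp(cfg, "head")

    # Autogenerate the initial migration by diffing the model metadata
    # against the (empty) database.
    command.revision(cfg, message="Initial tables", autogenerate=True)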
169 changes: 169 additions & 0 deletions lib/pbench/server/database/alembic/versions/274bab07e3f8_initial_tables.py
@@ -0,0 +1,169 @@
"""Initial tables
Revision ID: 274bab07e3f8
Revises: 62eddcec4817
Create Date: 2023-01-16 05:54:33.496244
Since we are adding Alembic migrations after we have already been using our
database in various contexts, this "Initial tables" migration describes how to
bring an empty database up to the state of the database as of commit 6a764f154.
That commit was the latest working version of the Pbench Server deployed in Red
Hat's staging environment.
"""
from alembic import op
import sqlalchemy as sa

from pbench.server.database.models import TZDateTime

# revision identifiers, used by Alembic.
revision = "274bab07e3f8"
down_revision = "62eddcec4817"
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "audit",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("root_id", sa.Integer(), nullable=True),
        sa.Column("name", sa.String(length=128), nullable=True),
        sa.Column(
            "operation",
            sa.Enum("CREATE", "READ", "UPDATE", "DELETE", name="operationcode"),
            nullable=False,
        ),
        sa.Column(
            "object_type",
            sa.Enum("DATASET", "CONFIG", "NONE", "TEMPLATE", "TOKEN", name="audittype"),
            nullable=True,
        ),
        sa.Column("object_id", sa.String(length=128), nullable=True),
        sa.Column("object_name", sa.String(length=256), nullable=True),
        sa.Column("user_id", sa.String(length=128), nullable=True),
        sa.Column("user_name", sa.String(length=256), nullable=True),
        sa.Column(
            "status",
            sa.Enum("BEGIN", "SUCCESS", "FAILURE", "WARNING", name="auditstatus"),
            nullable=False,
        ),
        sa.Column(
            "reason",
            sa.Enum("PERMISSION", "INTERNAL", "CONSISTENCY", name="auditreason"),
            nullable=True,
        ),
        sa.Column("attributes", sa.JSON(), nullable=True),
        sa.Column("timestamp", TZDateTime(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "datasets",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("owner_id", sa.String(length=255), nullable=False),
        sa.Column("access", sa.String(length=255), nullable=False),
        sa.Column("resource_id", sa.String(length=255), nullable=False),
        sa.Column("uploaded", TZDateTime(), nullable=False),
        sa.Column("created", TZDateTime(), nullable=True),
        sa.Column(
            "state",
            sa.Enum(
                "UPLOADING",
                "UPLOADED",
                "INDEXING",
                "INDEXED",
                "DELETING",
                "DELETED",
                name="states",
            ),
            nullable=False,
        ),
        sa.Column("transition", TZDateTime(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("resource_id"),
    )
    op.create_table(
        "serverconfig",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("key", sa.String(length=255), nullable=False),
        sa.Column("value", sa.JSON(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_serverconfig_key"), "serverconfig", ["key"], unique=True)
    op.create_table(
        "templates",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("idxname", sa.String(length=255), nullable=False),
        sa.Column("template_name", sa.String(length=255), nullable=False),
        sa.Column("file", sa.String(length=255), nullable=False),
        sa.Column("mtime", sa.DateTime(), nullable=False),
        sa.Column("template_pattern", sa.String(length=255), nullable=False),
        sa.Column("index_template", sa.String(length=225), nullable=False),
        sa.Column("settings", sa.JSON(), nullable=False),
        sa.Column("mappings", sa.JSON(), nullable=False),
        sa.Column("version", sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("idxname"),
        sa.UniqueConstraint("name"),
        sa.UniqueConstraint("template_name"),
    )
    op.create_table(
        "users",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("username", sa.String(length=255), nullable=False),
        sa.Column("first_name", sa.String(length=255), nullable=False),
        sa.Column("last_name", sa.String(length=255), nullable=False),
        sa.Column("password", sa.LargeBinary(length=128), nullable=False),
        sa.Column("registered_on", sa.DateTime(), nullable=False),
        sa.Column("email", sa.String(length=255), nullable=False),
        sa.Column("role", sa.Enum("ADMIN", name="roles"), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("email"),
        sa.UniqueConstraint("username"),
    )
    op.create_table(
        "active_tokens",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("token", sa.String(length=500), nullable=False),
        sa.Column("created", sa.DateTime(), nullable=False),
        sa.Column("user_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_active_tokens_token"), "active_tokens", ["token"], unique=True
    )
    op.create_table(
        "dataset_metadata",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("key", sa.String(length=255), nullable=False),
        sa.Column("value", sa.JSON(), nullable=True),
        sa.Column("dataset_ref", sa.Integer(), nullable=False),
        sa.Column("user_id", sa.String(length=255), nullable=True),
        sa.ForeignKeyConstraint(
            ["dataset_ref"],
            ["datasets.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(
        op.f("ix_dataset_metadata_key"), "dataset_metadata", ["key"], unique=False
    )
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f("ix_dataset_metadata_key"), table_name="dataset_metadata")
    op.drop_table("dataset_metadata")
    op.drop_index(op.f("ix_active_tokens_token"), table_name="active_tokens")
    op.drop_table("active_tokens")
    op.drop_table("users")
    op.drop_table("templates")
    op.drop_index(op.f("ix_serverconfig_key"), table_name="serverconfig")
    op.drop_table("serverconfig")
    op.drop_table("datasets")
    op.drop_table("audit")
    # ### end Alembic commands ###
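
The `TZDateTime` type imported by this migration is the project's own
timezone-aware column type. As an illustration only (not the project's
actual definition), such a type is typically built on SQLAlchemy's
`TypeDecorator`:

    from datetime import timezone

    import sqlalchemy as sa

    class TZDateTime(sa.types.TypeDecorator):
        """Store datetimes as naive UTC; return them timezone-aware."""

        impl = sa.DateTime
        cache_ok = True

        def process_bind_param(self, value, dialect):
            # Normalize an aware datetime to naive UTC before storing it.
            if value is not None and value.tzinfo is not None:
                value = value.astimezone(timezone.utc).replace(tzinfo=None)
            return value

        def process_result_value(self, value, dialect):
            # Reattach UTC on the way out so callers always see aware values.
            if value is not None:
                value = value.replace(tzinfo=timezone.utc)
            return value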
53 changes: 53 additions & 0 deletions lib/pbench/server/database/pbench-server.cfg
@@ -0,0 +1,53 @@
[DEFAULT]
# The values here override those in pbench-server-default.cfg.
install-dir = ../../../../server

###########################################################################
## Deployment section
###########################################################################
[pbench-server]
pbench-top-dir = /var/tmp/pbench
pbench-backup-dir = %(pbench-top-dir)s/pbench.archive.backup
environment = alembic
realhost = localhost
max-unpacked-age = 36500
maximum-dataset-retention-days = 36500
default-dataset-retention-days = 730
# Override the roles this pbench server takes on -- omit pbench-prep.
roles = pbench-maintenance, pbench-results, pbench-backup

[Indexing]
index_prefix = alembic-pbench
bulk_action_count = 2000

[elasticsearch]
host = localhost
port = 9200

[database]
uri = postgresql://pbench:pbench@localhost:5432/pbench

# User authentication section used when authenticating users with an OIDC
# identity provider.
[authentication]

# URL of the OIDC auth server
server_url = http://localhost:8090

# Realm name that is used for the authentication with OIDC
realm = pbench

# Client entity name requesting OIDC to authenticate a user
client = pbench-client

# Client secret if the above client is not public
secret = <keycloak_secret>

###########################################################################
# crontab roles

###########################################################################
# The rest will come from the default config file.
[config]
path = %(install-dir)s/lib/config
files = pbench-server-default.cfg
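
The `%(pbench-top-dir)s` and `%(install-dir)s` references above rely on
Python's `configparser` interpolation; a minimal sketch of how such a
value resolves, assuming standard `configparser` behavior:

    from configparser import ConfigParser
    from textwrap import dedent

    cp = ConfigParser()
    cp.read_string(dedent("""\
        [DEFAULT]
        install-dir = ../../../../server

        [pbench-server]
        pbench-top-dir = /var/tmp/pbench
        pbench-backup-dir = %(pbench-top-dir)s/pbench.archive.backup
        """))
    # The %(...)s reference expands when the value is read:
    print(cp["pbench-server"]["pbench-backup-dir"])
    # -> /var/tmp/pbench/pbench.archive.backup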
7 changes: 7 additions & 0 deletions tox.ini
@@ -40,3 +40,10 @@ basepython = python3.6
deps =
-r{toxinidir}/agent/requirements.txt
-r{toxinidir}/agent/test-requirements.txt

[testenv:alembic]
description = Runs the alembic commands in `alembic.run`
deps =
-r{toxinidir}/server/requirements.txt
commands =
{toxinidir}/lib/pbench/server/database/alembic.run
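
With this environment defined, the script runs from the repository root
via, e.g.:

    tox -e alembic

Whatever Alembic commands are uncommented in `alembic.run` are then
executed against the configured database.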
