Skip to content

Commit

Permalink
Apply black formatting
Browse files Browse the repository at this point in the history
  • Loading branch information
arikfr committed Dec 10, 2019
1 parent 0385b6f commit db18d20
Show file tree
Hide file tree
Showing 208 changed files with 11,231 additions and 8,141 deletions.
8 changes: 4 additions & 4 deletions migrations/versions/0f740a081d20_inline_tags.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,20 +14,20 @@


# revision identifiers, used by Alembic.
revision = "0f740a081d20"
down_revision = "a92d92aa678e"
branch_labels = None
depends_on = None


def upgrade():
tags_regex = re.compile('^([\w\s]+):|#([\w-]+)', re.I | re.U)
tags_regex = re.compile("^([\w\s]+):|#([\w-]+)", re.I | re.U)
connection = op.get_bind()

dashboards = connection.execute("SELECT id, name FROM dashboards")

update_query = text("UPDATE dashboards SET tags = :tags WHERE id = :id")

for dashboard in dashboards:
tags = compact(flatten(tags_regex.findall(dashboard[1])))
if tags:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,18 +10,15 @@


# revision identifiers, used by Alembic.
revision = "1daa601d3ae5"
down_revision = "969126bd800f"
branch_labels = None
depends_on = None


def upgrade():
    """Add the nullable ``disabled_at`` timestamp column to ``users``.

    The column uses a timezone-aware DateTime; NULL means the user is
    not disabled.
    """
    op.add_column("users", sa.Column("disabled_at", sa.DateTime(True), nullable=True))


def downgrade():
    """Drop the ``disabled_at`` column added by ``upgrade()``."""
    op.drop_column("users", "disabled_at")
24 changes: 14 additions & 10 deletions migrations/versions/5ec5c84ba61e_.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,24 +12,28 @@


# revision identifiers, used by Alembic.
revision = "5ec5c84ba61e"
down_revision = "7671dca4e604"
branch_labels = None
depends_on = None


def upgrade():
    """Add a full-text ``search_vector`` column to ``queries``.

    Creates the tsvector column, a GIN index over it, and a database
    trigger (via sqlalchemy-searchable) that keeps the vector in sync
    with the name/description/query columns.
    """
    conn = op.get_bind()
    op.add_column("queries", sa.Column("search_vector", su.TSVectorType()))
    op.create_index(
        "ix_queries_search_vector",
        "queries",
        ["search_vector"],
        unique=False,
        postgresql_using="gin",
    )
    # Trigger repopulates search_vector whenever the source columns change.
    ss.sync_trigger(conn, "queries", "search_vector", ["name", "description", "query"])


def downgrade():
    """Remove the search trigger, its GIN index, and the vector column."""
    conn = op.get_bind()

    # Drop in reverse order of creation: trigger first, then index, then column.
    ss.drop_trigger(conn, "queries", "search_vector")
    op.drop_index("ix_queries_search_vector", table_name="queries")
    op.drop_column("queries", "search_vector")
115 changes: 66 additions & 49 deletions migrations/versions/640888ce445d_.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,93 +15,110 @@


# revision identifiers, used by Alembic.
revision = "640888ce445d"
down_revision = "71477dadd6ef"
branch_labels = None
depends_on = None


def upgrade():
    """Convert ``queries.schedule`` from a string column to a JSON dict.

    Strategy: stash the old string values in a temporary ``old_schedule``
    column, recreate ``schedule`` as a (Pseudo)JSON dict, translate each
    old value into the new dict shape, then drop the temporary column.
    """
    # Copy "schedule" column into "old_schedule" column
    op.add_column(
        "queries", sa.Column("old_schedule", sa.String(length=10), nullable=True)
    )

    queries = table(
        "queries",
        sa.Column("schedule", sa.String(length=10)),
        sa.Column("old_schedule", sa.String(length=10)),
    )

    op.execute(queries.update().values({"old_schedule": queries.c.schedule}))

    # Recreate "schedule" column as a dict type
    op.drop_column("queries", "schedule")
    op.add_column(
        "queries",
        sa.Column(
            "schedule",
            MutableDict.as_mutable(PseudoJSON),
            nullable=False,
            server_default=json.dumps({}),
        ),
    )

    # Move over values from old_schedule
    queries = table(
        "queries",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
        sa.Column("old_schedule", sa.String(length=10)),
    )

    conn = op.get_bind()
    for query in conn.execute(queries.select()):
        schedule_json = {
            "interval": None,
            "until": None,
            "day_of_week": None,
            "time": None,
        }

        if query.old_schedule is not None:
            # A value containing ":" was a time-of-day (daily run);
            # otherwise it was an interval in seconds.
            if ":" in query.old_schedule:
                schedule_json["interval"] = 86400
                schedule_json["time"] = query.old_schedule
            else:
                schedule_json["interval"] = query.old_schedule

        conn.execute(
            queries.update()
            .where(queries.c.id == query.id)
            .values(schedule=MutableDict(schedule_json))
        )

    op.drop_column("queries", "old_schedule")

def downgrade():
    """Convert ``queries.schedule`` back from a JSON dict to a string.

    Mirrors ``upgrade()``: stash the dicts in ``old_schedule``, recreate
    ``schedule`` as a short string, extract either the interval or the
    time-of-day from each dict, then drop the temporary column.
    """
    op.add_column(
        "queries",
        sa.Column(
            "old_schedule",
            MutableDict.as_mutable(PseudoJSON),
            nullable=False,
            server_default=json.dumps({}),
        ),
    )

    queries = table(
        "queries",
        sa.Column("schedule", MutableDict.as_mutable(PseudoJSON)),
        sa.Column("old_schedule", MutableDict.as_mutable(PseudoJSON)),
    )

    op.execute(queries.update().values({"old_schedule": queries.c.schedule}))

    op.drop_column("queries", "schedule")
    op.add_column("queries", sa.Column("schedule", sa.String(length=10), nullable=True))

    queries = table(
        "queries",
        sa.Column("id", sa.Integer, primary_key=True),
        sa.Column("schedule", sa.String(length=10)),
        sa.Column("old_schedule", MutableDict.as_mutable(PseudoJSON)),
    )

    conn = op.get_bind()
    for query in conn.execute(queries.select()):
        # Daily schedules (interval <= 86400) were stored as a time string;
        # anything else keeps the raw interval value.
        scheduleValue = query.old_schedule["interval"]
        if scheduleValue <= 86400:
            scheduleValue = query.old_schedule["time"]

        conn.execute(
            queries.update()
            .where(queries.c.id == query.id)
            .values(schedule=scheduleValue)
        )

    op.drop_column("queries", "old_schedule")
Original file line number Diff line number Diff line change
Expand Up @@ -12,26 +12,36 @@
# revision identifiers, used by Alembic.
from sqlalchemy.exc import ProgrammingError

revision = "65fc9ede4746"
down_revision = None
branch_labels = None
depends_on = None


def upgrade():
    """Add ``is_draft`` flags to queries and dashboards.

    Tolerates databases where an older migration already created the
    columns: in that case it prints instructions for stamping the
    revision and exits instead of failing.
    """
    try:
        op.add_column(
            "queries", sa.Column("is_draft", sa.Boolean, default=True, index=True)
        )
        op.add_column(
            "dashboards", sa.Column("is_draft", sa.Boolean, default=True, index=True)
        )
        # Only queries still carrying the default name are considered drafts.
        op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
        op.execute("UPDATE dashboards SET is_draft = false")
    except ProgrammingError as e:
        # The columns might exist if you ran the old migrations.
        if 'column "is_draft" of relation "queries" already exists' in str(e):
            print(
                "Can't run this migration as you already have is_draft columns, please run:"
            )
            print(
                "./manage.py db stamp {} # you might need to alter the command to match your environment.".format(
                    revision
                )
            )
        exit()


def downgrade():
    """Drop the ``is_draft`` columns from both tables."""
    op.drop_column("queries", "is_draft")
    op.drop_column("dashboards", "is_draft")
30 changes: 17 additions & 13 deletions migrations/versions/6b5be7e0a0ef_.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@


# revision identifiers, used by Alembic.
revision = "6b5be7e0a0ef"
down_revision = "5ec5c84ba61e"
branch_labels = None
depends_on = None

def upgrade():
    """Include the numeric ``id`` column in the queries search vector."""
    conn = op.get_bind()

    metadata = sa.MetaData(bind=conn)
    queries = sa.Table("queries", metadata, autoload=True)

    # id is an integer; cast it to text so it can participate in the tsvector.
    @ss.vectorizer(queries.c.id)
    def integer_vectorizer(column):
        return sa.func.cast(column, sa.Text)

    ss.sync_trigger(
        conn,
        "queries",
        "search_vector",
        ["id", "name", "description", "query"],
        metadata=metadata,
    )


def downgrade():
    """Restore the previous trigger/index that excluded ``id``."""
    conn = op.get_bind()
    ss.drop_trigger(conn, "queries", "search_vector")
    op.drop_index("ix_queries_search_vector", table_name="queries")
    op.create_index(
        "ix_queries_search_vector",
        "queries",
        ["search_vector"],
        unique=False,
        postgresql_using="gin",
    )
    # Re-sync on the original three columns only (no id).
    ss.sync_trigger(conn, "queries", "search_vector", ["name", "description", "query"])
10 changes: 6 additions & 4 deletions migrations/versions/71477dadd6ef_favorites_unique_constraint.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,17 @@


# revision identifiers, used by Alembic.
revision = "71477dadd6ef"
down_revision = "0f740a081d20"
branch_labels = None
depends_on = None


def upgrade():
    """Ensure a user can favorite a given object at most once."""
    op.create_unique_constraint(
        "unique_favorite", "favorites", ["object_type", "object_id", "user_id"]
    )


def downgrade():
    """Drop the uniqueness guarantee on favorites."""
    op.drop_constraint("unique_favorite", "favorites", type_="unique")
Loading

0 comments on commit db18d20

Please sign in to comment.