[graphql] Replace Int with BigInt for overflow #26757

Open · wants to merge 3 commits into base: master
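For context: the GraphQL spec defines Int as a signed 32-bit integer, so any value above 2,147,483,647 (for example a 64-bit event-log storage id) cannot pass through an Int field, while graphene's BigInt scalar carries the full value. The sketch below is not part of this PR and its type and field names are made up; it only illustrates the failure mode the swap avoids.

import graphene


class ExampleEvent(graphene.ObjectType):
    # With graphene.Int this field would fail to serialize once ids pass
    # 2**31 - 1 (the exact failure mode depends on the graphene/graphql-core
    # version); graphene.BigInt has no 32-bit cap.
    storage_id = graphene.NonNull(graphene.BigInt)


class Query(graphene.ObjectType):
    latest_event = graphene.Field(ExampleEvent)

    def resolve_latest_event(self, info):
        return ExampleEvent(storage_id=3_000_000_000)  # past the Int ceiling


schema = graphene.Schema(query=Query)
print(schema.execute("{ latestEvent { storageId } }").data)
# Expected: {'latestEvent': {'storageId': 3000000000}}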
Some generated files are not rendered by default.

10 changes: 6 additions & 4 deletions js_modules/dagster-ui/packages/ui-core/src/graphql/types.ts

@@ -28,7 +28,7 @@


class GrapheneAssetCheckEvaluationTargetMaterializationData(graphene.ObjectType):
storageId = graphene.NonNull(graphene.Int)
storageId = graphene.NonNull(graphene.BigInt)
runId = graphene.NonNull(graphene.String)
timestamp = graphene.NonNull(graphene.Float)

@@ -82,8 +82,8 @@ class GraphenePartitionedAssetConditionEvaluationNode(graphene.ObjectType):
startTimestamp = graphene.Field(graphene.Float)
endTimestamp = graphene.Field(graphene.Float)

numTrue = graphene.NonNull(graphene.Int)
numCandidates = graphene.Field(graphene.Int)
numTrue = graphene.NonNull(graphene.Int) # Can't have BigInt number of conditions
numCandidates = graphene.Field(graphene.Int) # Same

childUniqueIds = non_null_list(graphene.String)

@@ -216,8 +216,8 @@ class GrapheneAutomationConditionEvaluationNode(graphene.ObjectType):
startTimestamp = graphene.Field(graphene.Float)
endTimestamp = graphene.Field(graphene.Float)

numTrue = graphene.NonNull(graphene.Int)
numCandidates = graphene.Field(graphene.Int)
numTrue = graphene.NonNull(graphene.Int) # Same
numCandidates = graphene.Field(graphene.Int) # Same

isPartitioned = graphene.NonNull(graphene.Boolean)

@@ -252,7 +252,7 @@ class GrapheneAssetConditionEvaluationRecord(graphene.ObjectType):
timestamp = graphene.NonNull(graphene.Float)

assetKey = graphene.NonNull(GrapheneAssetKey)
numRequested = graphene.NonNull(graphene.Int)
numRequested = graphene.NonNull(graphene.Int) # Same

startTimestamp = graphene.Field(graphene.Float)
endTimestamp = graphene.Field(graphene.Float)
@@ -283,8 +283,8 @@ class GrapheneAssetNode(graphene.ObjectType):
partitionKeys = non_null_list(graphene.String)
partitionKeysByDimension = graphene.Field(
non_null_list(GrapheneDimensionPartitionKeys),
startIdx=graphene.Int(),
endIdx=graphene.Int(),
startIdx=graphene.Int(), # Used to select a range of time partitions; shouldn't go into billions
endIdx=graphene.Int(), # Same
)
repository = graphene.NonNull(lambda: external.GrapheneRepository)
required_resources = non_null_list(GrapheneResourceRequirement)
@@ -211,9 +211,9 @@ def create_graphene_auto_materialize_rules_with_rule_evaluations(
class GrapheneAutoMaterializeAssetEvaluationRecord(graphene.ObjectType):
id = graphene.NonNull(graphene.ID)
evaluationId = graphene.NonNull(graphene.ID)
numRequested = graphene.NonNull(graphene.Int)
numSkipped = graphene.NonNull(graphene.Int)
numDiscarded = graphene.NonNull(graphene.Int)
numRequested = graphene.NonNull(graphene.Int) # Makes no sense to have a BigInt quantity
numSkipped = graphene.NonNull(graphene.Int) # Same
numDiscarded = graphene.NonNull(graphene.Int) # Same
rulesWithRuleEvaluations = non_null_list(GrapheneAutoMaterializeRuleWithRuleEvaluations)
timestamp = graphene.NonNull(graphene.Float)
runIds = non_null_list(graphene.String)
@@ -33,7 +33,9 @@ def __init__(self, description: str, decision_type: AutoMaterializeDecisionType)

class GrapheneAutoMaterializePolicy(graphene.ObjectType):
policyType = graphene.NonNull(graphene.Enum.from_enum(AutoMaterializePolicyType))
maxMaterializationsPerMinute = graphene.Int()
maxMaterializationsPerMinute = (
graphene.Int()
) # I assume you have other problems if you need a BigInt limit here
rules = non_null_list(GrapheneAutoMaterializeRule)

class Meta:
@@ -320,8 +320,8 @@ class Meta:
)
partitionNames = graphene.List(graphene.NonNull(graphene.String))
isValidSerialization = graphene.NonNull(graphene.Boolean)
numPartitions = graphene.Field(graphene.Int)
numCancelable = graphene.NonNull(graphene.Int)
numPartitions = graphene.Field(graphene.Int) # BigInt partitions would mean a problem
numCancelable = graphene.NonNull(graphene.Int) # Same
fromFailure = graphene.NonNull(graphene.Boolean)
reexecutionSteps = graphene.List(graphene.NonNull(graphene.String))
assetSelection = graphene.List(graphene.NonNull(GrapheneAssetKey))
@@ -345,14 +345,15 @@ class Meta:
partitionSet = graphene.Field("dagster_graphql.schema.partition_sets.GraphenePartitionSet")
runs = graphene.Field(
non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneRun"),
limit=graphene.Int(),
limit=graphene.Int(), # Don't see this needing BigInt
)
unfinishedRuns = graphene.Field(
non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneRun"),
limit=graphene.Int(),
limit=graphene.Int(), # Same
)
cancelableRuns = graphene.Field(
non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneRun"), limit=graphene.Int()
non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneRun"),
limit=graphene.Int(), # Same
)
error = graphene.Field(GraphenePythonError)
partitionStatuses = graphene.Field(
@@ -746,7 +747,7 @@ class Meta:


class GrapheneBackfillPolicy(graphene.ObjectType):
maxPartitionsPerRun = graphene.Field(graphene.Int())
maxPartitionsPerRun = graphene.Field(graphene.Int()) # BigInt is too many partitions
description = graphene.NonNull(graphene.String)
policyType = graphene.NonNull(GrapheneBackfillPolicyType)

24 changes: 16 additions & 8 deletions python_modules/dagster-graphql/dagster_graphql/schema/instance.py
@@ -114,7 +114,9 @@ class GraphenePendingConcurrencyStep(graphene.ObjectType):
stepKey = graphene.NonNull(graphene.String)
enqueuedTimestamp = graphene.NonNull(graphene.Float)
assignedTimestamp = graphene.Float()
priority = graphene.Int()
priority = (
graphene.BigInt()
) # Maybe somebody will spam hold down the 9 key on their keyboard ¯\_(ツ)_/¯
Reviewer comment (Member): "we actually enforce this in Python, see https://github.com/dagster-io/dagster/pull/25172/files" (a hypothetical sketch of that kind of enforcement follows after this hunk).

class Meta:
name = "PendingConcurrencyStep"
@@ -133,14 +135,18 @@ def __init__(self, pending_step_info: PendingStepInfo):

class GrapheneConcurrencyKeyInfo(graphene.ObjectType):
concurrencyKey = graphene.NonNull(graphene.String)
slotCount = graphene.NonNull(graphene.Int)
slotCount = graphene.NonNull(
graphene.Int
) # I'm guessing lots of other things will go wrong if you have such massive slot counts that you need BigInt
claimedSlots = non_null_list(GrapheneClaimedConcurrencySlot)
pendingSteps = non_null_list(GraphenePendingConcurrencyStep)
activeSlotCount = graphene.NonNull(graphene.Int)
activeSlotCount = graphene.NonNull(graphene.Int) # Same
activeRunIds = non_null_list(graphene.String)
pendingStepCount = graphene.NonNull(graphene.Int)
pendingStepCount = graphene.NonNull(
graphene.Int
) # Again, if you have so many pending steps that you need BigInt, you have bigger problems
pendingStepRunIds = non_null_list(graphene.String)
assignedStepCount = graphene.NonNull(graphene.Int)
assignedStepCount = graphene.NonNull(graphene.Int) # Same
assignedStepRunIds = non_null_list(graphene.String)

class Meta:
@@ -195,7 +201,9 @@ def resolve_assignedStepRunIds(self, graphene_info: ResolveInfo):


class GrapheneRunQueueConfig(graphene.ObjectType):
maxConcurrentRuns = graphene.NonNull(graphene.Int)
maxConcurrentRuns = graphene.NonNull(
graphene.Int
) # I'm guessing you won't have so many concurrent runs that you need BigInt (without causing other issues)
tagConcurrencyLimitsYaml = graphene.String()
isOpConcurrencyAware = graphene.Boolean()

Expand Down Expand Up @@ -235,8 +243,8 @@ class GrapheneInstance(graphene.ObjectType):
hasInfo = graphene.NonNull(graphene.Boolean)
autoMaterializePaused = graphene.NonNull(graphene.Boolean)
supportsConcurrencyLimits = graphene.NonNull(graphene.Boolean)
minConcurrencyLimitValue = graphene.NonNull(graphene.Int)
maxConcurrencyLimitValue = graphene.NonNull(graphene.Int)
minConcurrencyLimitValue = graphene.NonNull(graphene.Int) # Same as above
maxConcurrencyLimitValue = graphene.NonNull(graphene.Int) # Same
concurrencyLimits = non_null_list(GrapheneConcurrencyKeyInfo)
concurrencyLimit = graphene.Field(
graphene.NonNull(GrapheneConcurrencyKeyInfo),
@@ -248,7 +248,9 @@ class GrapheneInstigationTick(graphene.ObjectType):
dynamicPartitionsRequestResults = non_null_list(GrapheneDynamicPartitionsRequestResult)
endTimestamp = graphene.Field(graphene.Float)
requestedAssetKeys = non_null_list(GrapheneAssetKey)
requestedAssetMaterializationCount = graphene.NonNull(graphene.Int)
requestedAssetMaterializationCount = graphene.NonNull(
graphene.Int
) # Can't see why this could go into BigInt range, unless it's cumulative?
requestedMaterializationsForAssets = non_null_list(GrapheneRequestedMaterializationsForAsset)
autoMaterializeAssetEvaluationId = graphene.Field(graphene.ID)
instigationType = graphene.NonNull(GrapheneInstigationType)
@@ -538,23 +540,25 @@ class GrapheneInstigationState(graphene.ObjectType):
non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneRun"),
limit=graphene.Int(),
)
runsCount = graphene.NonNull(graphene.Int)
runsCount = graphene.NonNull(graphene.Int) # Shouldn't go into billions on a single account
tick = graphene.Field(
graphene.NonNull(GrapheneInstigationTick),
tickId=graphene.NonNull(graphene.ID),
)
ticks = graphene.Field(
non_null_list(GrapheneInstigationTick),
dayRange=graphene.Int(),
dayOffset=graphene.Int(),
dayRange=graphene.Int(), # Days can't go into BigInt range
dayOffset=graphene.Int(), # Same
limit=graphene.Int(),
cursor=graphene.String(),
statuses=graphene.List(graphene.NonNull(GrapheneInstigationTickStatus)),
beforeTimestamp=graphene.Float(),
afterTimestamp=graphene.Float(),
)
nextTick = graphene.Field(GrapheneDryRunInstigationTick)
runningCount = graphene.NonNull(graphene.Int) # remove with cron scheduler
runningCount = graphene.NonNull(
graphene.Int
) # remove with cron scheduler # Shouldn't go into billions on a single account

hasStartPermission = graphene.NonNull(graphene.Boolean)
hasStopPermission = graphene.NonNull(graphene.Boolean)
@@ -167,7 +167,9 @@ class Meta:


class GrapheneExecutionStepUpForRetryEvent(graphene.ObjectType):
secondsToWait = graphene.Field(graphene.Int)
secondsToWait = graphene.Field(
graphene.Int
) # Guess nobody would be waiting for more than 2.1 billion seconds/68 years

class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneErrorEvent)
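Quick check of the arithmetic in the secondsToWait comment above: the Int ceiling of 2**31 - 1 seconds works out to roughly 68 years.

print((2**31 - 1) / (60 * 60 * 24 * 365))  # ~68.1 years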
@@ -309,7 +311,7 @@ class Meta:
externalUrl = graphene.String()
externalStdoutUrl = graphene.String()
externalStderrUrl = graphene.String()
pid = graphene.Int()
pid = graphene.Int() # pids don't go into the billions
# legacy name for compute log file key... required for back-compat reasons, but has been
# renamed to fileKey for newer versions of the Dagster UI
logKey = graphene.NonNull(graphene.String)
@@ -1,6 +1,6 @@
import graphene


class GrapheneCursor(graphene.Int, graphene.Scalar):
class GrapheneCursor(graphene.BigInt, graphene.Scalar):
class Meta:
name = "Cursor"
@@ -124,7 +124,7 @@ class Meta:

class GraphenePartitionStatusCounts(graphene.ObjectType):
runStatus = graphene.NonNull(GrapheneRunStatus)
count = graphene.NonNull(graphene.Int)
count = graphene.NonNull(graphene.Int) # Can't have a BigInt number of partitions

class Meta:
name = "PartitionStatusCounts"
@@ -148,10 +148,10 @@ class Meta:
name = "AssetPartitionsStatusCounts"

assetKey = graphene.NonNull(GrapheneAssetKey)
numPartitionsTargeted = graphene.NonNull(graphene.Int)
numPartitionsInProgress = graphene.NonNull(graphene.Int)
numPartitionsMaterialized = graphene.NonNull(graphene.Int)
numPartitionsFailed = graphene.NonNull(graphene.Int)
numPartitionsTargeted = graphene.NonNull(graphene.Int) # Same
numPartitionsInProgress = graphene.NonNull(graphene.Int) # Same
numPartitionsMaterialized = graphene.NonNull(graphene.Int) # Same
numPartitionsFailed = graphene.NonNull(graphene.Int) # Same


class GrapheneUnpartitionedAssetStatus(graphene.ObjectType):
@@ -237,7 +237,7 @@ class GraphenePartition(graphene.ObjectType):
non_null_list(GrapheneRun),
filter=graphene.Argument(GrapheneRunsFilter),
cursor=graphene.String(),
limit=graphene.Int(),
limit=graphene.Int(), # I guess BigInt number of runs would be unreasonable? Not sure on this one
)
status = graphene.Field(GrapheneRunStatus)

@@ -332,7 +332,7 @@ class GraphenePartitionSet(graphene.ObjectType):
partitionsOrError = graphene.Field(
graphene.NonNull(GraphenePartitionsOrError),
cursor=graphene.String(),
limit=graphene.Int(),
limit=graphene.Int(), # Can't have BigInt number of partitions
reverse=graphene.Boolean(),
)
partition = graphene.Field(GraphenePartition, partition_name=graphene.NonNull(graphene.String))
@@ -342,7 +342,7 @@ class GraphenePartitionSet(graphene.ObjectType):
backfills = graphene.Field(
non_null_list(GraphenePartitionBackfill),
cursor=graphene.String(),
limit=graphene.Int(),
limit=graphene.Int(), # Can't have BigInt number of backfills
)

class Meta: