Commit be9fc3c

Enable mypy lintrunner, Part 2 (codegen/*, docs/*)

Summary: Enable mypy for codegen/* and docs/*.

Test Plan: Lintrunner CI; internal CI.

1 parent: c86b39d

8 files changed (+66 −72 lines)
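To reproduce the Test Plan locally, one option is to run only the MYPY linter through lintrunner. A minimal sketch, assuming lintrunner is installed and supports the --take and --all-files flags as in pytorch's tooling:

    # hedged sketch; the lintrunner flag names are assumptions
    import subprocess

    # Run only the MYPY linter across all files covered by .lintrunner.toml
    subprocess.run(["lintrunner", "--take", "MYPY", "--all-files"], check=False)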

.lintrunner.toml (+2 −2)

@@ -292,9 +292,9 @@ include_patterns = [
     # TODO(https://github.com/pytorch/executorch/issues/7441): Gradually start enabling all folders.
     # 'backends/**/*.py',
     'build/**/*.py',
-    # 'codegen/**/*.py',
+    'codegen/**/*.py',
     # 'devtools/**/*.py',
-    # 'docs/**/*.py',
+    'docs/**/*.py',
     # 'examples/**/*.py',
     # 'exir/**/*.py',
     # 'extension/**/*.py',
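The two newly enabled entries are ordinary recursive globs. A small sketch of what they match, using pathlib globbing as a stand-in for lintrunner's own pattern matching (run from the repo root):

    from pathlib import Path

    # '**' matches any number of directory levels, so these patterns cover
    # every .py file anywhere under codegen/ and docs/.
    for pattern in ("codegen/**/*.py", "docs/**/*.py"):
        matches = list(Path(".").glob(pattern))
        print(pattern, "->", len(matches), "files")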

.mypy.ini (+21 −9)

@@ -10,9 +10,6 @@ local_partial_types = True
 enable_error_code = possibly-undefined
 warn_unused_ignores = False
 
-# TODO(https://github.com/pytorch/executorch/issues/7441): Remove this
-# disable_error_code = import-untyped
-
 files =
     backends,
     codegen,
@@ -31,35 +28,50 @@ mypy_path = executorch
 [mypy-executorch.codegen.*]
 follow_untyped_imports = True
 
-[mypy-executorch.extension.*]
+[mypy-executorch.devtools.*]
 follow_untyped_imports = True
 
 [mypy-executorch.exir.*]
 follow_untyped_imports = True
 
+[mypy-executorch.extension.*]
+follow_untyped_imports = True
+
 [mypy-executorch.kernels.*]
 follow_untyped_imports = True
 
 [mypy-executorch.runtime.*]
 follow_untyped_imports = True
 
+[mypy-requests.*]
+follow_untyped_imports = True
+
 [mypy-torchgen.*]
 follow_untyped_imports = True
 
-[mypy-setuptools.*]
+[mypy-buck_util]
 ignore_missing_imports = True
 
-[mypy-buck_util]
+[mypy-docutils.*]
 ignore_missing_imports = True
 
-[mypy-tomllib]
+[mypy-pandas]
 ignore_missing_imports = True
 
-[mypy-zstd]
+[mypy-ruamel]
+ignore_missing_imports = True
+
+[mypy-setuptools.*]
+ignore_missing_imports = True
+
+[mypy-sphinx.*]
+ignore_missing_imports = True
+
+[mypy-tomllib]
 ignore_missing_imports = True
 
 [mypy-yaml]
 ignore_missing_imports = True
 
-[mypy-ruamel]
+[mypy-zstd]
 ignore_missing_imports = True
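Note the two kinds of per-module sections above: follow_untyped_imports = True makes mypy analyze the dependency's source even though it ships no type information, while ignore_missing_imports = True silences unresolved-import errors and treats the module as Any. To check a single file against this config from Python, a minimal sketch using mypy's programmatic entry point (the paths are assumptions):

    from mypy import api

    # api.run returns (stdout, stderr, exit_status)
    stdout, stderr, exit_status = api.run(
        ["--config-file", ".mypy.ini", "docs/source/conf.py"]
    )
    print(stdout or stderr, f"exit={exit_status}")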

docs/source/conf.py (+4 −3)

@@ -22,8 +22,9 @@
 import glob
 import os
 import sys
+from typing import Any
 
-import pytorch_sphinx_theme
+import pytorch_sphinx_theme  # type: ignore[import-not-found]
 
 # To let us import ./custom_directives.py
 sys.path.insert(0, os.path.abspath("."))
@@ -103,7 +104,7 @@
 
 myst_heading_anchors = 4
 
-sphinx_gallery_conf = {
+sphinx_gallery_conf: dict[str, Any] = {
     "examples_dirs": ["tutorials_source"],
     "ignore_pattern": "template_tutorial.py",
     "gallery_dirs": ["tutorials"],
@@ -197,7 +198,7 @@
     SupportedDevices,
     SupportedProperties,
 )
-from docutils.parsers import rst
+from docutils.parsers import rst  # type: ignore[import-untyped]
 
 # Register custom directives
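The two ignore codes differ: import-not-found suppresses mypy's "Cannot find implementation or library stub" error, while import-untyped suppresses "module is installed, but missing library stubs or py.typed marker". A minimal sketch reusing the modules from the diff (guarded, since they may not be installed wherever this runs):

    try:
        import pytorch_sphinx_theme  # type: ignore[import-not-found]
        from docutils.parsers import rst  # type: ignore[import-untyped]
    except ImportError:
        pass  # only the ignore codes matter for this sketch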

docs/source/custom_directives.py (+2 −2)

@@ -102,7 +102,7 @@ class SupportedDevices(BaseShield):
     required_arguments = 1
     final_argument_whitespace = True
 
-    def run(self) -> List[nodes.Node]:
+    def run(self, params, alt, _) -> List[nodes.Node]:
         devices = _parse_devices(self.arguments[0])
         alt = f"This feature supports the following devices: {devices}"
         params = {
@@ -121,7 +121,7 @@ class SupportedProperties(BaseShield):
     required_arguments = 1
     final_argument_whitespace = True
 
-    def run(self) -> List[nodes.Node]:
+    def run(self, params, alt, _) -> List[nodes.Node]:
         properties = _parse_properties(self.arguments[0])
         alt = f"This API supports the following properties: {properties}"
         params = {

docs/source/executorch_custom_versions.py (+1 −1)

@@ -24,7 +24,7 @@
     "pytorch.txt",
 ]
 
-variables = {}
+variables: dict[str, str] = {}
 
 
 def read_version_files():
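The annotation is needed because mypy cannot infer a value type for an empty literal: a bare variables = {} fails with "Need type annotation for 'variables'" (error code var-annotated). A minimal sketch (the key/value pair is hypothetical):

    variables: dict[str, str] = {}  # annotation tells mypy the element types
    variables["PYTORCH_VERSION"] = "2.x"  # now checked as str -> str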

docs/source/tutorials_source/devtools-integration-tutorial.py (+5 −4)

@@ -232,7 +232,7 @@ def forward(self, x):
 # Via EventBlocks
 for event in event_block.events:
     if event.name == "native_call_addmm.out":
-        print(event.name, event.perf_data.raw)
+        print(event.name, event.perf_data.raw if event.perf_data else "")
 
 # Via Dataframe
 df = event_block.to_dataframe()
@@ -264,11 +264,12 @@ def forward(self, x):
 df = df[df.event_name == "native_call_convolution.out"]
 if len(df) > 0:
     slowest = df.loc[df["p50"].idxmax()]
-    print(slowest.event_name)
+    assert slowest
+    print(slowest.name)
     print()
-    pp.pprint(slowest.stack_traces)
+    pp.pprint(slowest.stack_traces if slowest.stack_traces else "")
     print()
-    pp.pprint(slowest.module_hierarchy)
+    pp.pprint(slowest.module_hierarchy if slowest.module_hierarchy else "")
 
 ######################################################################
 # If a user wants the total runtime of a module, they can use
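The guards added here are all instances of the same pattern: narrowing an Optional value before attribute access, which silences mypy's union-attr errors. A self-contained sketch (PerfData is a stand-in, not the real devtools class):

    from typing import Optional


    class PerfData:
        raw: list[float] = [1.0, 2.0]


    def describe(perf_data: Optional[PerfData]) -> str:
        # The truthiness check narrows Optional[PerfData] to PerfData for mypy.
        return str(perf_data.raw) if perf_data else ""


    print(describe(PerfData()))  # "[1.0, 2.0]"
    print(describe(None))        # ""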

docs/source/tutorials_source/export-to-executorch-tutorial.py (+30 −51)

@@ -65,7 +65,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return self.relu(a)
 
 
-example_args = (torch.randn(1, 3, 256, 256),)
+example_args: tuple[torch.Tensor] = (torch.randn(1, 3, 256, 256),)
 aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True)
 print(aten_dialect)
 
@@ -100,8 +100,11 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         return x + y
 
 
-example_args = (torch.randn(3, 3), torch.randn(3, 3))
-aten_dialect: ExportedProgram = export(Basic(), example_args, strict=True)
+example_args_2: tuple[torch.Tensor, torch.Tensor] = (
+    torch.randn(3, 3),
+    torch.randn(3, 3),
+)
+aten_dialect = export(Basic(), example_args_2, strict=True)
 
 # Works correctly
 print(aten_dialect.module()(torch.ones(3, 3), torch.ones(3, 3)))
@@ -118,20 +121,11 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 
 from torch.export import Dim
 
-
-class Basic(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
-        return x + y
-
-
-example_args = (torch.randn(3, 3), torch.randn(3, 3))
+example_args_2 = (torch.randn(3, 3), torch.randn(3, 3))
 dim1_x = Dim("dim1_x", min=1, max=10)
 dynamic_shapes = {"x": {1: dim1_x}, "y": {1: dim1_x}}
-aten_dialect: ExportedProgram = export(
-    Basic(), example_args, dynamic_shapes=dynamic_shapes, strict=True
+aten_dialect = export(
+    Basic(), example_args_2, dynamic_shapes=dynamic_shapes, strict=True
 )
 print(aten_dialect)
 
@@ -207,13 +201,13 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 )
 
 quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())
-prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer)
+prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer)  # type: ignore[arg-type]
 # calibrate with a sample dataset
 converted_graph = convert_pt2e(prepared_graph)
 print("Quantized Graph")
 print(converted_graph)
 
-aten_dialect: ExportedProgram = export(converted_graph, example_args, strict=True)
+aten_dialect = export(converted_graph, example_args, strict=True)
 print("ATen Dialect Graph")
 print(aten_dialect)
 
@@ -243,7 +237,7 @@ def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 from executorch.exir import EdgeProgramManager, to_edge
 
 example_args = (torch.randn(1, 3, 256, 256),)
-aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True)
+aten_dialect = export(SimpleConv(), example_args, strict=True)
 
 edge_program: EdgeProgramManager = to_edge(aten_dialect)
 print("Edge Dialect Graph")
@@ -272,9 +266,7 @@ def forward(self, x):
 decode_args = (torch.randn(1, 5),)
 aten_decode: ExportedProgram = export(Decode(), decode_args, strict=True)
 
-edge_program: EdgeProgramManager = to_edge(
-    {"encode": aten_encode, "decode": aten_decode}
-)
+edge_program = to_edge({"encode": aten_encode, "decode": aten_decode})
 for method in edge_program.methods:
     print(f"Edge Dialect graph of {method}")
     print(edge_program.exported_program(method))
@@ -291,8 +283,8 @@ def forward(self, x):
 # rather than the ``torch.ops.aten`` namespace.
 
 example_args = (torch.randn(1, 3, 256, 256),)
-aten_dialect: ExportedProgram = export(SimpleConv(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+aten_dialect = export(SimpleConv(), example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 print("Edge Dialect Graph")
 print(edge_program.exported_program())
 
@@ -357,8 +349,8 @@ def forward(self, x):
 
 # Export and lower the module to Edge Dialect
 example_args = (torch.ones(1),)
-aten_dialect: ExportedProgram = export(LowerableModule(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+aten_dialect = export(LowerableModule(), example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 to_be_lowered_module = edge_program.exported_program()
 
 from executorch.exir.backend.backend_api import LoweredBackendModule, to_backend
@@ -369,7 +361,7 @@ def forward(self, x):
 )
 
 # Lower the module
-lowered_module: LoweredBackendModule = to_backend(
+lowered_module: LoweredBackendModule = to_backend(  # type: ignore[call-arg]
     "BackendWithCompilerDemo", to_be_lowered_module, []
 )
 print(lowered_module)
@@ -423,8 +415,8 @@ def forward(self, x):
 
 
 example_args = (torch.ones(1),)
-aten_dialect: ExportedProgram = export(ComposedModule(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+aten_dialect = export(ComposedModule(), example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 exported_program = edge_program.exported_program()
 print("Edge Dialect graph")
 print(exported_program)
@@ -460,16 +452,16 @@ def forward(self, a, x, b):
     return z
 
 
-example_args = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
-aten_dialect: ExportedProgram = export(Foo(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+example_args_3 = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
+aten_dialect = export(Foo(), example_args_3, strict=True)
+edge_program = to_edge(aten_dialect)
 exported_program = edge_program.exported_program()
 print("Edge Dialect graph")
 print(exported_program)
 
 from executorch.exir.backend.test.op_partitioner_demo import AddMulPartitionerDemo
 
-delegated_program = to_backend(exported_program, AddMulPartitionerDemo())
+delegated_program = to_backend(exported_program, AddMulPartitionerDemo())  # type: ignore[call-arg]
 print("Delegated program")
 print(delegated_program)
 print(delegated_program.graph_module.lowered_module_0.original_module)
@@ -484,19 +476,9 @@ def forward(self, a, x, b):
 # call ``to_backend`` on it:
 
 
-class Foo(torch.nn.Module):
-    def forward(self, a, x, b):
-        y = torch.mm(a, x)
-        z = y + b
-        a = z - a
-        y = torch.mm(a, x)
-        z = y + b
-        return z
-
-
-example_args = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
-aten_dialect: ExportedProgram = export(Foo(), example_args, strict=True)
-edge_program: EdgeProgramManager = to_edge(aten_dialect)
+example_args_3 = (torch.randn(2, 2), torch.randn(2, 2), torch.randn(2, 2))
+aten_dialect = export(Foo(), example_args_3, strict=True)
+edge_program = to_edge(aten_dialect)
 exported_program = edge_program.exported_program()
 delegated_program = edge_program.to_backend(AddMulPartitionerDemo())
 
@@ -530,7 +512,6 @@ def forward(self, a, x, b):
 print("ExecuTorch Dialect")
 print(executorch_program.exported_program())
 
-import executorch.exir as exir
 
 ######################################################################
 # Notice that in the graph we now see operators like ``torch.ops.aten.sub.out``
@@ -577,13 +558,11 @@ def forward(self, x):
 pre_autograd_aten_dialect = export_for_training(M(), example_args).module()
 # Optionally do quantization:
 # pre_autograd_aten_dialect = convert_pt2e(prepare_pt2e(pre_autograd_aten_dialect, CustomBackendQuantizer))
-aten_dialect: ExportedProgram = export(
-    pre_autograd_aten_dialect, example_args, strict=True
-)
-edge_program: exir.EdgeProgramManager = exir.to_edge(aten_dialect)
+aten_dialect = export(pre_autograd_aten_dialect, example_args, strict=True)
+edge_program = to_edge(aten_dialect)
 # Optionally do delegation:
 # edge_program = edge_program.to_backend(CustomBackendPartitioner)
-executorch_program: exir.ExecutorchProgramManager = edge_program.to_executorch(
+executorch_program = edge_program.to_executorch(
     ExecutorchBackendConfig(
         passes=[],  # User-defined passes
     )
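Most of the churn in this file removes repeated annotations (aten_dialect: ExportedProgram = ...) on reassignment: annotating the same name twice in one scope triggers mypy's no-redef error, so only the first binding keeps the annotation, and where the tuple type actually changes a new name (example_args_2, example_args_3) is introduced instead. A minimal sketch of the pattern (AddOne is a placeholder module):

    import torch
    from torch.export import ExportedProgram, export


    class AddOne(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x + 1


    args: tuple[torch.Tensor] = (torch.randn(2),)
    aten_dialect: ExportedProgram = export(AddOne(), args, strict=True)
    # Re-annotating here would raise: Name "aten_dialect" already defined [no-redef]
    aten_dialect = export(AddOne(), args, strict=True)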

setup.py (+1 −0)

@@ -710,6 +710,7 @@ def get_ext_modules() -> List[Extension]:
     # include. See also setuptools/discovery.py for custom finders.
     package_dir={
         "executorch/backends": "backends",
+        "executorch/codegen": "codegen",
         # TODO(mnachin T180504136): Do not put examples/models
         # into core pip packages. Refactor out the necessary utils
         # or core models files into a separate package.
