
Commit 50dc817

mcremon-meta authored and facebook-github-bot committed
Remove outdated comments and names in OSS (#13419)
Summary: As titled. Differential Revision: D80181207
1 parent 9cfb684 commit 50dc817

5 files changed, +18 -39 lines changed


backends/cadence/aot/decompose_ops.py

Lines changed: 1 addition & 3 deletions
@@ -7,9 +7,7 @@
 
 
 # This file contains all the functions that decompose one op into simpler ops in the
-# graph. The functions decomposing ops for models deployed with Jarvis are grouped
-# together in class 'DecomposeOpsInGraph'. Some examples of functions in the class are
-# 1. functions that decompose an ATen gelu op into an equivalent series of simpler ops
+# graph.
 
 # pyre-strict
 

backends/cadence/aot/ops_registrations.py

Lines changed: 1 addition & 1 deletion
@@ -928,7 +928,7 @@ def transposed_convolution_meta(
 ) -> torch.Tensor:
     # The native definition of torch transposed conv will have weight shape as
     # (in_channels, out_channels/groups, *kernel_size).
-    # However, the two channel position is flipped in the Jarvis pass of replacing it
+    # However, the two channel position is flipped in the Cadence pass of replacing it
     # with cadence::transposed_convolution here: https://fburl.com/code/d2s7pkyy
     out_channels, _input_channels, *kernel_size = weight.shape
     out_channels *= groups
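For reference, the shape arithmetic this meta function relies on can be sketched in a few lines. This is an illustration only, assuming the flipped (out_channels/groups, in_channels, *kernel_size) weight layout described in the comment; the function and tensor below are hypothetical, not part of the backend:

import torch

def transposed_conv_out_channels(weight: torch.Tensor, groups: int) -> int:
    # Assumed layout after the replacement pass: the leading dim holds the
    # per-group output channels, so multiplying by `groups` recovers the full
    # count, mirroring the meta function shown above.
    out_channels, _in_channels, *_kernel_size = weight.shape
    return out_channels * groups

# Hypothetical grouped transposed-conv weight of shape (8, 4, 3, 3), groups=2.
print(transposed_conv_out_channels(torch.empty(8, 4, 3, 3), groups=2))  # 16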

backends/cadence/aot/remove_ops.py

Lines changed: 2 additions & 16 deletions
@@ -7,16 +7,6 @@
 # pyre-strict
 
 
-# This file contains functions to remove operators from the graph. The removed
-# ops should belong to either of the following categories:
-# 1. The op should be redundant for inference (e.g., dropout). Such ops are grouped
-# together in 'RemoveRedundantOps'. Anyone running inference can add this class
-# in their pass list, and it should semantic-preserving transformation.
-# 2. The op should be redundant for Jarvis (e.g., contiguous). Such ops are grouped
-# together in 'CadenceRemoveNops'. The ops removed in this class might not be nop
-# in a context outside of Jarvis', so exercise caution while invoking this in a
-# pass list outside of Jarvis.
-
 import logging
 from dataclasses import dataclass, field
 from typing import cast, List, Optional, Sequence, Set
@@ -152,7 +142,7 @@ def call_operator(
 
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
 class RemoveToOpsPass(ExportPass):
-    # aten.to.* as of now are all nops for Jarvis
+    # aten.to.* as of now are all nops
     def call_operator(
         self,
         op,  # pyre-ignore
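For context on what such a removal pass does, here is a minimal sketch assuming the ExportPass.call_operator interface used by these passes; it matches aten.to overloads by name for brevity, whereas the real pass checks exact op targets:

from executorch.exir.pass_base import ExportPass

class RemoveToOpsSketch(ExportPass):
    """Illustrative only: drop aten.to.* calls by returning their input value."""

    def call_operator(self, op, args, kwargs, meta):
        # Treat aten.to.* as a nop: forward the input instead of emitting the op.
        if "aten.to" in str(op):
            return args[0]
        return super().call_operator(op, args, kwargs, meta)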
@@ -413,7 +403,7 @@ def call_operator(
 class RemoveAliasCopyOpPass(ExportPass):
     """
 
-    alias_copy is a no-op for Jarvis and can be removed.
+    alias_copy is a no-op and can be removed.
     """
 
     def call_operator(
@@ -936,10 +926,6 @@ def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
         return super().call(graph_module)
 
 
-# The following class consolidates functions to remove ops that are redundant
-# in Jarvis. Currently, each function in this class iterates over each node of
-# the graph module once. In future, we could consolidate them into a monolithic
-# function.
 class CadenceRemoveNops:
     passes = [
         SimplifySliceOpPass,
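Since CadenceRemoveNops is just a container of pass classes, applying it amounts to running each entry in order. A small usage sketch, assuming the module path follows the file path shown above and that each pass returns a PassResult as in the tests further down:

import torch
from executorch.backends.cadence.aot.remove_ops import CadenceRemoveNops

def run_remove_nops(graph_module: torch.fx.GraphModule) -> torch.fx.GraphModule:
    for pass_cls in CadenceRemoveNops.passes:
        result = pass_cls()(graph_module)   # each entry is an ExportPass subclass
        assert result is not None
        graph_module = result.graph_module  # carry the rewritten module forward
    return graph_module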

backends/cadence/aot/replace_ops.py

Lines changed: 10 additions & 15 deletions
@@ -7,12 +7,7 @@
 
 
 # This file contains all the functions that replace one op with another in the
-# graph. The functions replacing ops for models deployed with Jarvis are grouped
-# together in class 'ReplaceOpsInGraph'. Some examples of functions in the class are
-# 1. functions that replace an ATen op with a custom op that accepts extra arguments
-# 2. functions that replace in-place variants of ATen ops with out-of-place version.
-# 3. functions that replace an ATen op with another semantically equivalent ATen op.
-# 4. functions that concretize optional args.
+# graph.
 
 # pyre-unsafe
 
@@ -54,7 +49,7 @@
 from torch.fx.node import Argument
 
 # A map to represent ops that:
-# (a) are functionally equivalent wrt. Jarvis; and
+# (a) are functionally equivalent; and
 # (b) have identical arguments
 # An op whose target is 'key' in this dict can be replaced by the functionally euivalent
 # op whose target is 'value'. The replacement would just involve changing the op target.
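Because the keyed ops take identical arguments, the replacement this map drives is a pure target swap. A minimal sketch with a hypothetical single-entry map (the actual table and its entries live in replace_ops.py):

import torch

# Hypothetical entry: in-place relu and out-of-place relu take identical args.
EQUIVALENT_TARGETS = {
    torch.ops.aten.relu_.default: torch.ops.aten.relu.default,
}

def swap_equivalent_targets(graph_module: torch.fx.GraphModule) -> None:
    for node in graph_module.graph.nodes:
        if node.op == "call_function" and node.target in EQUIVALENT_TARGETS:
            # Arguments stay untouched; only the call target changes.
            node.target = EQUIVALENT_TARGETS[node.target]
    graph_module.recompile()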
@@ -650,7 +645,7 @@ def call_operator(self, op, args, kwargs, meta):
 
 # Make that pass runnable standalone at opt level 0.
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
-class ReplaceAtenConvolutionWithJarvisConvolutionPass(ExportPass):
+class ReplaceAtenConvolutionWithCadenceConvolutionPass(ExportPass):
     """
     Replace aten convolution op with jarvis-specific convolution op, since the
     aten version is not supported by jarvis.
@@ -784,7 +779,7 @@ class ReplaceConvWithChannelLastConv:
     tensors. However, if the input and output to the convolution op are originally
     in NWHC layout, and are then permuted to conform to NCHW layout, we can fuse
     the two permute ops with the convolution op, and call the NHWC layout
-    convolution op in Jarvis.
+    convolution op.
     """
 
     def __init__(self):
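The docstring above describes a permute -> conv -> permute subgraph that gets rewritten into a single NHWC convolution. A hypothetical pattern check for that subgraph using plain torch.fx node inspection, not the backend's actual matching code:

import torch

NHWC_TO_NCHW = [0, 3, 1, 2]
NCHW_TO_NHWC = [0, 2, 3, 1]

def is_fusable_nhwc_conv(node: torch.fx.Node) -> bool:
    # Matches a conv whose input comes from an NHWC->NCHW permute and whose
    # single user permutes the result back to NHWC, i.e. the subgraph the
    # fusion described above targets. Exported graphs may use *_copy or
    # edge-dialect permute variants instead of torch.ops.aten.permute.default.
    if node.op != "call_function" or node.target != torch.ops.aten.convolution.default:
        return False
    inp = node.args[0]
    users = list(node.users)
    return (
        isinstance(inp, torch.fx.Node)
        and inp.target == torch.ops.aten.permute.default
        and list(inp.args[1]) == NHWC_TO_NCHW
        and len(users) == 1
        and users[0].target == torch.ops.aten.permute.default
        and list(users[0].args[1]) == NCHW_TO_NHWC
    )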
@@ -821,7 +816,7 @@ def conv_layout_is_nhwc(self, node: torch.fx.Node) -> bool:
         out_shape = get_shape(self.graph_module, node)
         assert out_shape is not None
         out_dims = len(out_shape)
-        assert out_dims in {3, 4}, "Jarvis only supports conv1d and conv2d"
+        assert out_dims in {3, 4}, "Only supports conv1d and conv2d"
         conv1d = out_dims == 3
 
         # Get the possible targets for the nodes in pt_nodes. Since conv1d has
@@ -951,7 +946,7 @@ class ReplaceConvWithChannelLastConvPass(ExportPass):
     """
 
     def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
-        result = ReplaceAtenConvolutionWithJarvisConvolutionPass()(graph_module)
+        result = ReplaceAtenConvolutionWithCadenceConvolutionPass()(graph_module)
         assert result is not None
         ReplaceConvWithChannelLastConv()(result.graph_module)
         return result
@@ -1871,9 +1866,9 @@ def call_operator(self, op, args, kwargs, meta):
 
 
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
-class ReplaceAtenAvgPoolWithJarvisAvgPoolPass(ExportPass):
+class ReplaceAtenAvgPoolWithCadenceAvgPoolPass(ExportPass):
     """
-    Replace the aten avg_pool op with the jarvis custom avg_pool2d op.
+    Replace the aten avg_pool op with the cadence custom avg_pool2d op.
     """
 
     def call_operator(self, op, args, kwargs, meta):
@@ -2429,7 +2424,7 @@ class CadenceReplaceOpsInGraph:
         ReplacePadWithCatPass,
         ReplaceConstantPadNdWithSlicePass,
         ReplaceConvWithChannelLastConvPass,
-        ReplaceAtenConvolutionWithJarvisConvolutionPass,
+        ReplaceAtenConvolutionWithCadenceConvolutionPass,
         ForceChannelLastForConvPass,
         ReplaceTrivialConvWithLinear,
         ReplaceConvWithIm2RowAndLinear,
@@ -2448,7 +2443,7 @@ class CadenceReplaceOpsInGraph:
         ReplacePT2DequantWithCadenceDequantPass,
         ReplaceSingleElementTensorArgumentsFromFullOpWithScalarPass,
         ReplaceAdaptiveAvgPoolWithAtenAvgPoolPass,
-        ReplaceAtenAvgPoolWithJarvisAvgPoolPass,
+        ReplaceAtenAvgPoolWithCadenceAvgPoolPass,
         ReplaceWhereWithFullArgsWithWhereScalar,
         ReplaceAtenApproxGeluWithApproxGeluPass,
         ReplaceSplitWithSlicePass,

backends/cadence/aot/tests/test_replace_ops_passes.py

Lines changed: 4 additions & 4 deletions
@@ -22,7 +22,7 @@
     ReplaceAdaptiveAvgPoolWithAtenAvgPoolPass,
     ReplaceAddMMWithLinearPass,
     ReplaceAtenApproxGeluWithApproxGeluPass,
-    ReplaceAtenConvolutionWithJarvisConvolutionPass,
+    ReplaceAtenConvolutionWithCadenceConvolutionPass,
     ReplaceConstantPadNdWithSlicePass,
     ReplaceConvolutionOptionalArgsWithConcreteArgsPass,
     ReplaceConvWithIm2RowAndLinear,
@@ -411,7 +411,7 @@ def test_replace_transposed_conv_with_linear(
         builder.output([convolution])
         original_gm = builder.get_graph_module()
 
-        p1 = ReplaceAtenConvolutionWithJarvisConvolutionPass()
+        p1 = ReplaceAtenConvolutionWithCadenceConvolutionPass()
         p2 = ReplaceTransposedConvWithLinearPass()
         graph_after_passes = cast(
             PassResult, p2(cast(PassResult, p1(original_gm)).graph_module)
@@ -969,7 +969,7 @@ def test_replace_conv1d_with_linear(self) -> None:
             args=(x, weights, bias, [1], [0], [1], 1, False),
         )
         # First, replace the aten convolution with a cadence.convolution op
-        p1 = ReplaceAtenConvolutionWithJarvisConvolutionPass()
+        p1 = ReplaceAtenConvolutionWithCadenceConvolutionPass()
         temp_graph = cast(PassResult, p1(original_gm)).graph_module
         # temp_graph = p1(original_gm).graph_module
         self.assertIsNotNone(temp_graph)
@@ -1003,7 +1003,7 @@ def test_replace_conv2d_with_linear(self) -> None:
             args=(x, weights, bias, [1, 1], [0, 0], [1, 1], 1, False),
         )
         # First, replace the aten convolution with a cadence.convolution op
-        p1 = ReplaceAtenConvolutionWithJarvisConvolutionPass()
+        p1 = ReplaceAtenConvolutionWithCadenceConvolutionPass()
         temp_graph = cast(PassResult, p1(original_gm)).graph_module
         self.assertIsNotNone(temp_graph)
 