[TORCH] Add support for aten.heaviside Op #4220
@@ -11068,6 +11068,76 @@ class DecomposeAtenSgnOp : public OpRewritePattern<AtenSgnOp> {
};
} // namespace

namespace {
// Decompose aten.heaviside into aten.eq, aten.lt, aten.logical_or,
// and aten.where ops.
// Heaviside(x, y) returns:
//   0 if x < 0 or x is nan
//   y if x == 0
//   1 if x > 0
class DecomposeAtenHeaviside : public OpRewritePattern<AtenHeavisideOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(AtenHeavisideOp op,
                                PatternRewriter &rewriter) const override {
    auto input = op.getSelf();
    auto value = op.getValues();
    auto loc = op.getLoc();
    auto inputTy = dyn_cast<BaseTensorType>(input.getType());
    if (!inputTy || !inputTy.hasDtype() || !inputTy.hasSizes())
      return rewriter.notifyMatchFailure(op, "input must have dtype and size.");
    auto valueTy = dyn_cast<BaseTensorType>(value.getType());
    if (!valueTy || !valueTy.hasDtype() || !valueTy.hasSizes())
      return rewriter.notifyMatchFailure(op, "value must have dtype and size.");
    auto resultTy = dyn_cast<BaseTensorType>(op.getType());
    SmallVector<int64_t> broadcastShape;
    SmallVector<Value> broadcastShapeValue;
    computeBroadcastShape(rewriter, loc, input, value, broadcastShape,
                          broadcastShapeValue);

    auto broadcastType = ValueTensorType::get(
        op.getContext(), llvm::ArrayRef(broadcastShape), resultTy.getDtype());
    auto boolBroadcastType = ValueTensorType::get(
        op.getContext(), llvm::ArrayRef(broadcastShape), rewriter.getI1Type());
    Value indexBroadcastShapeTorchList = rewriter.create<PrimListConstructOp>(
        loc, Torch::ListType::get(Torch::IntType::get(op.getContext())),
        broadcastShapeValue);
    auto inputBroadcasted = rewriter.create<AtenBroadcastToOp>(
        loc, broadcastType, input, indexBroadcastShapeTorchList);
    auto valueBroadcasted = rewriter.create<AtenBroadcastToOp>(
        loc, broadcastType, value, indexBroadcastShapeTorchList);

    Value zero = getConstantWithGivenDtypeAndValue(rewriter, loc, 0,
                                                   resultTy.getDtype());
    Value one = getConstantWithGivenDtypeAndValue(rewriter, loc, 1,
                                                  resultTy.getDtype());
    // Compute mask: input == 0
    auto inputEqZero = rewriter
                           .create<AtenEqScalarOp>(loc, boolBroadcastType,
                                                   inputBroadcasted, zero)
                           ->getResult(0);
    // Compute mask: input < 0
    auto inputLtZero = rewriter.create<AtenLtScalarOp>(loc, boolBroadcastType,
                                                       inputBroadcasted, zero);
    // Compute mask: isnan(input)
    auto isNan =
        rewriter.create<AtenIsnanOp>(loc, boolBroadcastType, inputBroadcasted);
Review comment on lines +11123 to +11125 (vivekkhandelwal1): I did not see this case mentioned here: https://docs.pytorch.org/docs/stable/generated/torch.heaviside.html. Can you please share any reference?

Author reply: Thanks for the review, @vivekkhandelwal1. The NaN handling follows the PyTorch reference decomposition. Ref: https://github.com/pytorch/pytorch/blob/main/torch/_refs/__init__.py#L1448
    // Combine: input < 0 || isnan(input)
    auto inputNegativeOrNan = rewriter.create<AtenLogicalOrOp>(
        loc, boolBroadcastType, inputLtZero, isNan);
    // Select 0 if input < 0 or input is nan, else 1.
    auto zerosOrOnes = rewriter.create<AtenWhereScalarOp>(
        loc, resultTy, inputNegativeOrNan, zero, one);
    // Final result: if input == 0, take from valueBroadcasted, else take from
    // zerosOrOnes.
    rewriter.replaceOpWithNewOp<AtenWhereSelfOp>(op, resultTy, inputEqZero,
                                                 valueBroadcasted, zerosOrOnes);
    return success();
  }
};
} // namespace
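To make the NaN discussion above concrete, here is a minimal scalar sketch in plain C++ of the select logic the pattern emits. It is illustrative only and not part of the patch; `heavisideRef` is a hypothetical name.

```cpp
#include <cmath>
#include <cstdio>

// Scalar sketch of the emitted select logic:
//   result = (x == 0) ? y : ((x < 0 || isnan(x)) ? 0 : 1)
double heavisideRef(double x, double y) {
  if (x == 0.0)
    return y; // the aten.where.self branch: input == 0 takes `values`
  // the aten.where.Scalar branch: 0 for negative or nan input, else 1
  return (x < 0.0 || std::isnan(x)) ? 0.0 : 1.0;
}

int main() {
  const double inputs[] = {-1.5, 0.0, 2.0, std::nan("")};
  for (double x : inputs)
    std::printf("heaviside(%g, 0.5) = %g\n", x, heavisideRef(x, 0.5));
  // -1.5 -> 0, 0 -> 0.5, 2 -> 1, nan -> 0
  return 0;
}
```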
namespace {
// Unconditionally decompose `torch.type_as` into `prim.dtype` +
// `torch.to.dtype`.
@@ -12291,6 +12361,7 @@ class DecomposeComplexOpsPass
        DecomposeConstantTensorNewLikeOp<AtenNewOnesOp, AtenOnesOp>>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenHardtanhOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenFullOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenHeaviside>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenLinearOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenMishOp>(patterns);
    addPatternIfTargetOpIsIllegal<DecomposeAtenFullLikeOp>(patterns);
Review comment (vivekkhandelwal1): I think this is not needed. Since you are decomposing this op into elementwise ops, the broadcasting part will be handled during the Torch->Linalg lowering.
Author reply: @vivekkhandelwal1 You're right in general, but I ran into an issue in one specific case: when the input shape is [1, 2, 3] and the values shape is [1, 1, 1, 1], the broadcasted result shape becomes [1, 1, 2, 3]. Without explicitly broadcasting the inputs, some intermediate ops (like aten.eq.Scalar or aten.isnan) end up producing tensors of shape [1, 2, 3], which causes this error:

'tensor.cast' op operand type 'tensor<1x2x3xi1>' and result type 'tensor<1x1x2x3xi1>' are cast incompatible

So to avoid this mismatch, I added explicit broadcasting to ensure all intermediate results match the final shape.
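To illustrate the mismatch: broadcasting right-aligns the two shapes, so [1, 2, 3] against [1, 1, 1, 1] yields the rank-4 result [1, 1, 2, 3], while masks computed from the un-broadcast input stay rank-3. Below is a small standalone sketch of that rule; `broadcastShape` is a hypothetical helper, not the actual computeBroadcastShape implementation, and it assumes static, broadcast-compatible shapes.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Right-aligned broadcast of two static shapes; assumes each aligned
// dimension pair is broadcast-compatible (equal, or one of them is 1).
std::vector<int64_t> broadcastShape(std::vector<int64_t> a,
                                    std::vector<int64_t> b) {
  if (a.size() < b.size())
    std::swap(a, b);              // make `a` the higher-rank shape
  std::vector<int64_t> out = a;   // leading dims come from `a` unchanged
  size_t offset = a.size() - b.size();
  for (size_t i = 0; i < b.size(); ++i)
    out[offset + i] = std::max(a[offset + i], b[i]);
  return out;
}

int main() {
  auto s = broadcastShape({1, 2, 3}, {1, 1, 1, 1});
  for (int64_t d : s)
    std::printf("%lld ", static_cast<long long>(d)); // prints: 1 1 2 3
  std::printf("\n");
  return 0;
}
```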