
Commit 287e451

Fixed or silenced all warnings
1 parent 3912a2c commit 287e451

31 files changed (+294 / -260 lines)

frontends/comet_dsl/comet.cpp

Lines changed: 5 additions & 2 deletions
@@ -95,10 +95,13 @@
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Support/TargetSelect.h"
 #include "llvm/Support/raw_ostream.h"
+#if defined(ENABLE_GPU_TARGET) | defined(ENABLE_FPGA_TARGET)
 #include "comet/Conversion/ParallelLoopsToGpu/ParallelLoopsToGpu.h"
+#endif
+#ifdef ENABLE_GPU_TARGET
+#include "triton/Dialect/TritonGPU/IR/Dialect.h"
 #include "mlir/Conversion/SCFToGPU/SCFToGPUPass.h"
 #include "mlir/Dialect/GPU/Transforms/Passes.h"
-#ifdef ENABLE_GPU_TARGET
 #include "comet/Conversion/GpuToTriton/GpuToTritonPass.h"
 #include "comet/Conversion/TritonToCuda/TritonToCudaPass.h"
 #include "triton/Dialect/Triton/IR/Dialect.h"
@@ -578,7 +581,7 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
 pm.addPass(mlir::memref::createFoldMemRefAliasOpsPass());
 pm.addPass(mlir::createCanonicalizerPass());
 #ifndef ENABLE_GPU_TARGET
-bool IsLoweringToTriton = false;
+[[maybe_unused]] bool IsLoweringToTriton = false;
 #endif
 #if defined(ENABLE_GPU_TARGET) | defined(ENABLE_FPGA_TARGET)
 if ((CodegenTarget == TargetDevice::GPU || CodegenTarget == TargetDevice::FPGA) && (emitTriton_ || emitLLVM || isLoweringToLLVM || IsLoweringToTriton))
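
Note: the two changes above target different diagnostics. The GPU/Triton includes are now guarded so they are only pulled in when the corresponding target is enabled, and IsLoweringToTriton gets [[maybe_unused]] because its only read sits behind a different preprocessor condition, so some configurations compile the declaration but never the use. A minimal standalone sketch of the attribute (plain C++17, illustrative rather than COMET code):

// Build with or without -DENABLE_GPU_TARGET; neither configuration warns.
#include <cstdio>

int main()
{
    // Without [[maybe_unused]], a build with ENABLE_GPU_TARGET undefined
    // reports the variable as unused.
    [[maybe_unused]] bool IsLoweringToTriton = false;

#ifdef ENABLE_GPU_TARGET
    if (IsLoweringToTriton)
        std::puts("lowering to Triton");
#endif
    return 0;
}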

frontends/comet_dsl/mlir/MLIRGen.cpp

Lines changed: 14 additions & 14 deletions
@@ -36,6 +36,7 @@
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/IR/BuiltinAttributes.h"
 #include "mlir/IR/BuiltinTypeInterfaces.h"
 #include "mlir/IR/Verifier.h"
 #include "mlir/IR/Attributes.h"
@@ -476,9 +477,9 @@ namespace

 mlir::StringAttr opAttr = builder.getStringAttr(op);
 mlir::RankedTensorType returnDataType;
-if(lhs.getType().cast<mlir::RankedTensorType>().getShape() != rhs.getType().cast<mlir::RankedTensorType>().getShape())
+if(mlir::cast<mlir::RankedTensorType>(lhs.getType()).getShape() != mlir::cast<mlir::RankedTensorType>(rhs.getType()).getShape())
 {
-returnDataType = lhs.getType().cast<mlir::RankedTensorType>();
+returnDataType = mlir::cast<mlir::RankedTensorType>(lhs.getType());
 auto bcastRhs = builder.create<DenseConstantOp>(location, returnDataType, mlir::cast<DenseConstantOp>(rhs.getDefiningOp()).getValueAttr());
 comet_vdump(bcastRhs);
 rhs.replaceAllUsesWith(bcastRhs);
@@ -815,7 +816,7 @@ namespace
 // std::vector<int64_t> result_dims = getDimSizes(ret_lbls_value);
 auto affineMapArrayAttr = builder.getAffineMapArrayAttr(affine_maps);

-auto res_map = affineMapArrayAttr[affineMapArrayAttr.size() - 1].cast<mlir::AffineMapAttr>().getValue();
+auto res_map = mlir::cast<mlir::AffineMapAttr>(affineMapArrayAttr[affineMapArrayAttr.size() - 1]).getValue();

 /// get return-type based on affine-maps
 std::vector<int64_t> result_dims;
@@ -824,7 +825,7 @@ namespace
 {
 for (size_t i = 0; i < affineMapArrayAttr.size() - 1; i++)
 {
-auto map = affineMapArrayAttr[i].cast<mlir::AffineMapAttr>().getValue();
+auto map = mlir::cast<mlir::AffineMapAttr>(affineMapArrayAttr[i]).getValue();
 if (auto pos = map.getResultPosition(v))
 {
 mlir::Value operand = i == 0 ? lhs_labeledtensor : rhs_labeledtensor;
@@ -876,7 +877,7 @@ namespace
 /// infer the format
 mlir::ArrayAttr opFormatsArrayAttr = dyn_cast<TensorMultOp>(e.getDefiningOp()).getFormats();
 unsigned int i = opFormatsArrayAttr.size() - 1;
-mlir::StringRef lhs_format = opFormatsArrayAttr[i].cast<mlir::StringAttr>().getValue();
+mlir::StringRef lhs_format = mlir::cast<mlir::StringAttr>(opFormatsArrayAttr[i]).getValue();
 comet_debug() << __LINE__ << " lhs_format: " << lhs_format << "\n";
 comet_debug() << " lhs_format: " << lhs_format << "\n";
 formats.push_back(lhs_format);
@@ -889,7 +890,7 @@ namespace
 /// infer the format
 mlir::ArrayAttr opFormatsArrayAttr = dyn_cast<TensorElewsMultOp>(e.getDefiningOp()).getFormats();
 unsigned int i = opFormatsArrayAttr.size() - 1;
-mlir::StringRef lhs_format = opFormatsArrayAttr[i].cast<mlir::StringAttr>().getValue();
+mlir::StringRef lhs_format = mlir::cast<mlir::StringAttr>(opFormatsArrayAttr[i]).getValue();
 comet_debug() << __LINE__ << " lhs_format: " << lhs_format << "\n";

 comet_debug() << " lhs_format: " << lhs_format << "\n";
@@ -903,7 +904,7 @@ namespace
 /// infer the format
 mlir::ArrayAttr opFormatsArrayAttr = dyn_cast<TensorAddOp>(e.getDefiningOp()).getFormats();
 unsigned int i = opFormatsArrayAttr.size() - 1;
-mlir::StringRef lhs_format = opFormatsArrayAttr[i].cast<mlir::StringAttr>().getValue();
+mlir::StringRef lhs_format = mlir::cast<mlir::StringAttr>(opFormatsArrayAttr[i]).getValue();
 comet_debug() << __LINE__ << " lhs_format: " << lhs_format << "\n";

 comet_debug() << " lhs_format: " << lhs_format << "\n";
@@ -917,7 +918,7 @@ namespace
 /// infer the format
 mlir::ArrayAttr opFormatsArrayAttr = dyn_cast<TensorSubtractOp>(e.getDefiningOp()).getFormats();
 unsigned int i = opFormatsArrayAttr.size() - 1;
-mlir::StringRef lhs_format = opFormatsArrayAttr[i].cast<mlir::StringAttr>().getValue();
+mlir::StringRef lhs_format = mlir::cast<mlir::StringAttr>(opFormatsArrayAttr[i]).getValue();
 comet_debug() << __LINE__ << " lhs_format: " << lhs_format << "\n";

 comet_debug() << " lhs_format: " << lhs_format << "\n";
@@ -1798,17 +1799,17 @@ namespace
 }

 /// get return-type based on affine-maps
-auto res_map = affineMapArrayAttr[1].cast<mlir::AffineMapAttr>().getValue();
+auto res_map = cast<mlir::AffineMapAttr>(affineMapArrayAttr[1]).getValue();
 std::vector<mlir::Value> indices;
 std::vector<int64_t> shape;
 for (auto v : res_map.getResults())
 {
-auto map = affineMapArrayAttr[0].cast<mlir::AffineMapAttr>().getValue();
+auto map = cast<mlir::AffineMapAttr>(affineMapArrayAttr[0]).getValue();
 if (auto pos = map.getResultPosition(v))
 {
-if (isa<mlir::TensorType>(rhs_tensor.getType()) && !rhs_tensor.getType().cast<mlir::TensorType>().isDynamicDim(*pos))
+if (auto tensorT = dyn_cast<mlir::TensorType>(rhs_tensor.getType()); tensorT && !tensorT.isDynamicDim(*pos))
 {
-shape.push_back(rhs_tensor.getType().cast<mlir::TensorType>().getDimSize(*pos));
+shape.push_back(tensorT.getDimSize(*pos));
 }
 else
 {
@@ -2647,8 +2648,7 @@ namespace
 StringRef tensor_name, double value)
 {
 mlir::Value tensorValue = symbolTable.lookup(tensor_name);
-auto tensorType = tensorValue.getDefiningOp()->getOpResult(0).getType();
-auto tensorElType = tensorType.cast<mlir::TensorType>().getElementType();
+auto tensorElType = cast<mlir::TensorType>(tensorValue.getDefiningOp()->getOpResult(0).getType()).getElementType();

 mlir::FloatAttr valueAttr;
 if (tensorElType.isF64())
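
Note: nearly all of the churn in this file is the same mechanical rewrite. Recent MLIR releases deprecate the member-style casts on Type and Attribute (x.cast<T>(), x.isa<T>(), x.dyn_cast<T>()) in favor of the free functions mlir::cast<T>(x), mlir::isa<T>(x), and mlir::dyn_cast<T>(x), and the old spellings now emit deprecation warnings. A minimal sketch of the rename (illustrative, not code from this repository):

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"

// Element type of a tensor type, written in the new style.
static mlir::Type tensorElementType(mlir::Type ty)
{
  // Deprecated:  ty.cast<mlir::TensorType>().getElementType()
  // Preferred:   mlir::cast<mlir::TensorType>(ty).getElementType()
  return mlir::cast<mlir::TensorType>(ty).getElementType();
}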

include/comet/Conversion/GpuToTriton/GpuToTritonConversion.h

Lines changed: 1 addition & 1 deletion
@@ -46,7 +46,7 @@ class GpuTypeConverter2 : public TypeConverter {
 GpuTypeConverter2(MLIRContext *context);
 int blockX, blockY, blockR;
 private:
-MLIRContext *context;
+[[maybe_unused]]MLIRContext *context;
 };

 class GpuConversionTarget2 : public ConversionTarget {
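
Note: this change most likely silences Clang's -Wunused-private-field, which flags private members that are written but never read. A short sketch of the idea (plain C++17, hypothetical class name, not COMET code):

class WidgetConverter
{
public:
  explicit WidgetConverter(int *ctx) : context(ctx) {}

private:
  // Intentionally retained even though nothing reads it yet;
  // [[maybe_unused]] documents that and suppresses the diagnostic.
  [[maybe_unused]] int *context;
};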

include/comet/Dialect/TensorAlgebra/IR/TAOps.td

Lines changed: 9 additions & 9 deletions
@@ -154,23 +154,23 @@ def SparseTensorDeclOp : TA_Op<"spTensor_decl", [Pure]> {

 let extraClassDeclaration = [{
 unsigned int getParameterCount() {
-mlir::tensorAlgebra::SparseTensorType type = getResult().getType().cast<mlir::tensorAlgebra::SparseTensorType>();
+mlir::tensorAlgebra::SparseTensorType type = cast<mlir::tensorAlgebra::SparseTensorType>(getResult().getType());
 return (type.getRank() * 6) + 1;
 }

 unsigned int getDimArrayCount() {
-mlir::tensorAlgebra::SparseTensorType type = getResult().getType().cast<mlir::tensorAlgebra::SparseTensorType>();
+mlir::tensorAlgebra::SparseTensorType type = cast<mlir::tensorAlgebra::SparseTensorType>(getResult().getType());
 return type.getRank() * 4;
 }

 unsigned int getValueArrayPos() {
-mlir::tensorAlgebra::SparseTensorType type = getResult().getType().cast<mlir::tensorAlgebra::SparseTensorType>();
+mlir::tensorAlgebra::SparseTensorType type = cast<mlir::tensorAlgebra::SparseTensorType>(getResult().getType());

 return (type.getRank() * 4) + 1;
 }

 unsigned int getTotalArrayCount() {
-mlir::tensorAlgebra::SparseTensorType type = getResult().getType().cast<mlir::tensorAlgebra::SparseTensorType>();
+mlir::tensorAlgebra::SparseTensorType type = cast<mlir::tensorAlgebra::SparseTensorType>(getResult().getType());
 return (type.getRank() * 4) + 1;
 }
 }];
@@ -1083,7 +1083,7 @@ def SpTensorGetDimPos : TA_Op<"SpTensorGetDimPos", [Pure]>{

 let builders =
 [OpBuilder<(ins "Value":$tensor, "IntegerAttr":$dim), [{
-auto indices_type = tensor.getType().cast<SparseTensorType>().getIndicesType();
+auto indices_type = mlir::cast<SparseTensorType>(tensor.getType()).getIndicesType();
 auto outType = RankedTensorType::get({ShapedType::kDynamic}, indices_type);
 build($_builder, $_state, outType, tensor, dim);
 }]>,
@@ -1097,7 +1097,7 @@ def SpTensorGetDimCrd : TA_Op<"SpTensorGetDimCrd", [Pure]>{

 let builders =
 [OpBuilder<(ins "Value":$tensor, "IntegerAttr":$dim), [{
-auto indices_type = tensor.getType().cast<SparseTensorType>().getIndicesType();
+auto indices_type = mlir::cast<SparseTensorType>(tensor.getType()).getIndicesType();
 auto outType = RankedTensorType::get({ShapedType::kDynamic}, indices_type);
 build($_builder, $_state, outType, tensor, dim);
 }]>];
@@ -1109,7 +1109,7 @@ def SpTensorGetDimBlockPos : TA_Op<"SpTensorGetDimBlockPos", [Pure]>{

 let builders =
 [OpBuilder<(ins "Value":$tensor, "IntegerAttr":$dim), [{
-auto indices_type = tensor.getType().cast<SparseTensorType>().getIndicesType();
+auto indices_type = mlir::cast<SparseTensorType>(tensor.getType()).getIndicesType();
 auto outType = RankedTensorType::get({ShapedType::kDynamic}, indices_type);
 build($_builder, $_state, outType, tensor, dim);
 }]>];
@@ -1121,7 +1121,7 @@ def SpTensorGetDimBlockCrd : TA_Op<"SpTensorGetDimBlockCrd", [Pure]>{

 let builders =
 [OpBuilder<(ins "Value":$tensor, "IntegerAttr":$dim), [{
-auto indices_type = tensor.getType().cast<SparseTensorType>().getIndicesType();
+auto indices_type = mlir::cast<SparseTensorType>(tensor.getType()).getIndicesType();
 auto outType = RankedTensorType::get({ShapedType::kDynamic}, indices_type);
 build($_builder, $_state, outType, tensor, dim);
 }]>];
@@ -1134,7 +1134,7 @@ def SpTensorGetVals : TA_Op<"SpTensorGetVals", [Pure]>{

 let builders =
 [OpBuilder<(ins "Value":$tensor), [{
-auto val_type = tensor.getType().cast<SparseTensorType>().getElementType();
+auto val_type = mlir::cast<SparseTensorType>(tensor.getType()).getElementType();
 auto outType = RankedTensorType::get({ShapedType::kDynamic}, val_type);
 build($_builder, $_state, outType, tensor);
 }]>];

lib/Conversion/GpuToTriton/GpuToTritonConversion.cpp

Lines changed: 18 additions & 12 deletions
@@ -179,13 +179,15 @@ mlir::comet::GpuConversionTarget2::GpuConversionTarget2(
 }
 if(prev != op.getType())
 {
-if(!prev.isa<RankedTensorType>() && !op.getType().isa<RankedTensorType>())
+auto prevRankedT = dyn_cast<RankedTensorType>(prev);
+auto opRankedT = dyn_cast<RankedTensorType>(op.getType());
+if(!prevRankedT && !opRankedT)
 {
 return true;
 }
-else if(prev.isa<RankedTensorType>() && op.getType().isa<RankedTensorType>())
+else if(prevRankedT && opRankedT)
 {
-if(prev.cast<RankedTensorType>().getShape() == op.getType().cast<RankedTensorType>().getShape())
+if(prevRankedT.getShape() == opRankedT.getShape())
 {
 return true;
 }
@@ -217,13 +219,15 @@ mlir::comet::GpuConversionTarget2::GpuConversionTarget2(
 }
 if(prev != op.getType())
 {
-if(!prev.isa<RankedTensorType>() && !op.getType().isa<RankedTensorType>())
+auto prevRankedT = dyn_cast<RankedTensorType>(prev);
+auto opRankedT = dyn_cast<RankedTensorType>(op.getType());
+if(!prevRankedT && !opRankedT)
 {
 return true;
 }
-else if(prev.isa<RankedTensorType>() && op.getType().isa<RankedTensorType>())
+else if(prevRankedT && opRankedT)
 {
-if(prev.cast<RankedTensorType>().getShape() == op.getType().cast<RankedTensorType>().getShape())
+if(prevRankedT.getShape() == opRankedT.getShape())
 {
 return true;
 }
@@ -243,17 +247,19 @@ mlir::comet::GpuConversionTarget2::GpuConversionTarget2(
 {
 if(op.getTrueValue().getType() == op.getResult().getType() && op.getFalseValue().getType() == op.getTrueValue().getType())
 {
-if(op.getCondition().getType().isa<RankedTensorType>() && !op.getFalseValue().getType().isa<RankedTensorType>())
+auto falseT = dyn_cast<RankedTensorType>(op.getFalseValue().getType());
+auto condT = dyn_cast<RankedTensorType>(op.getCondition().getType());
+if(condT && !falseT)
 {
 return false;
 }
-else if(!op.getCondition().getType().isa<RankedTensorType>() && op.getFalseValue().getType().isa<RankedTensorType>())
+else if(!condT && falseT)
 {
 return false;
 }
-else if(op.getCondition().getType().isa<RankedTensorType>() && op.getFalseValue().getType().isa<RankedTensorType>())
+else if(condT && falseT)
 {
-if(op.getCondition().getType().cast<RankedTensorType>().getShape() == op.getFalseValue().getType().cast<RankedTensorType>().getShape())
+if(condT.getShape() == falseT.getShape())
 {
 return true;
 }
@@ -339,7 +345,7 @@ mlir::comet::GpuConversionTarget2::GpuConversionTarget2(
 {
 for(auto opr: op.getArgumentTypes())
 {
-if(opr.isa<IndexType>())
+if(isa<IndexType>(opr))
 {
 return false;
 }
@@ -363,7 +369,7 @@ mlir::comet::GpuTypeConverter::GpuTypeConverter(MLIRContext *context)

 addConversion([this](MemRefType memrefType, SmallVectorImpl<Type> &results) -> LogicalResult {

-if(memrefType.getElementType().isa<IndexType>())
+if(isa<IndexType>(memrefType.getElementType()))
 {
 results.push_back(mlir::triton::PointerType::get( mlir::IntegerType::get(this->context, 32), 1));
 }
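
Note: besides the move to free-function isa/cast, these hunks fold each repeated isa<T>() check plus later cast<T>() into a single dyn_cast<T>() whose result is tested and then reused, which avoids both the deprecation warnings and the duplicated type checks. A compact sketch of that refactor (illustrative, not COMET code):

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"

// True when neither type is a ranked tensor, or both are and shapes match.
static bool compatibleTypes(mlir::Type prev, mlir::Type next)
{
  auto prevRanked = mlir::dyn_cast<mlir::RankedTensorType>(prev);
  auto nextRanked = mlir::dyn_cast<mlir::RankedTensorType>(next);
  if (!prevRanked && !nextRanked)
    return true;                                  // neither is ranked
  if (prevRanked && nextRanked)
    return prevRanked.getShape() == nextRanked.getShape();
  return false;                                   // exactly one is ranked
}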
