9 changes: 9 additions & 0 deletions mlir/include/mlir/IR/Builders.h
@@ -112,6 +112,15 @@ class Builder {
StringAttr getStringAttr(const Twine &bytes);
ArrayAttr getArrayAttr(ArrayRef<Attribute> value);

+  // Convenience method for containers of specific attribute types. E.g., this
+  // overload will match SmallVector<IntegerAttr>.
+  template <typename ContainerTy>
+  ArrayAttr getArrayAttr(const ContainerTy &value) {
+    auto ref = ArrayRef(value);
+    return getArrayAttr(ArrayRef<Attribute>(
+        static_cast<const Attribute *>(ref.data()), ref.size()));
Collaborator:

In general, it isn't safe in C++ to cast an array of Derived objects to an array of Base objects. I'm not sure whether our attribute hierarchy puts us in a special case, but that seems scary, actually.

@matthias-springer (Member, Author), Dec 5, 2025:

I thought it would be safe for Attribute because derived attributes don't carry any extra state (everything is in the impl?). Something seems to be wrong, though, because the Windows build is failing...

@joker-eph (Collaborator), Dec 5, 2025:

The math works if you think of it in terms of assembly, but the C++ rules are more abstract. For example, you can't take unrelated structures and do the same kind of thing (or access similar fields) even if they have the same memory layout in practice.

It's also why memcpy is the safest "type punning" idiom, I believe.
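Editor's note: to make the concern above concrete, here is a small standalone C++ sketch (illustrative only, not MLIR code; Base and Derived are hypothetical types). Array indexing through a base-class pointer strides by sizeof(Base), so treating an array of derived objects as an array of base objects only appears to work when the derived type adds no members, and even then the C++ object model does not sanction the reinterpretation. The memcpy idiom mentioned above is sketched at the end.

#include <cstdint>
#include <cstdio>
#include <cstring>

struct Base {
  int id;
};

struct Derived : Base {
  int extra; // extra state: sizeof(Derived) > sizeof(Base)
};

int main() {
  Derived arr[2];
  arr[0].id = 1;
  arr[0].extra = 100;
  arr[1].id = 2;
  arr[1].extra = 200;

  // Upcasting a single element is fine, but using the result as an array of
  // Base is not: basePtr[1] advances by sizeof(Base) and lands inside arr[0]
  // (on `extra`), not on arr[1]'s Base subobject.
  const Base *basePtr = static_cast<const Base *>(&arr[0]);
  (void)basePtr; // reading basePtr[1].id here would be undefined behavior

  // The sanctioned "type punning" idiom: copy the bytes with memcpy
  // (or std::bit_cast in C++20) instead of reinterpreting pointers.
  float f = 1.0f;
  std::uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  std::printf("0x%08x\n", static_cast<unsigned>(bits)); // prints 0x3f800000
  return 0;
}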

+  }

// Returns a 0-valued attribute of the given `type`. This function only
// supports boolean, integer, and 16-/32-/64-bit float types, and vector or
// ranked tensor of them. Returns null attribute otherwise.
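Before the updated call sites below, a minimal usage sketch of the new overload, assuming it lands as written above. makeI64ArrayAttr is a hypothetical helper for illustration, and the static_assert merely encodes the layout assumption debated in the thread; neither is part of the patch.

#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"

using namespace mlir;

// Layout assumption from the review thread: derived attributes are plain
// handles that add no members on top of Attribute.
static_assert(sizeof(IntegerAttr) == sizeof(Attribute),
              "derived attribute handles are expected to be pointer-sized");

static ArrayAttr makeI64ArrayAttr(Builder &b, ArrayRef<int64_t> values) {
  SmallVector<IntegerAttr> attrs;
  for (int64_t v : values)
    attrs.push_back(b.getI64IntegerAttr(v));
  // The templated overload accepts SmallVector<IntegerAttr> directly; no
  // manual conversion to SmallVector<Attribute> is needed.
  return b.getArrayAttr(attrs);
}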
34 changes: 14 additions & 20 deletions mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -193,12 +193,10 @@ static void buildMatmulOp(OpBuilder &b, OperationState &state,
RegionBuilderFn regionBuilder,
ArrayRef<AffineMap> indexingMaps) {
// Initialize indexingMaps attribute, for MatmulOp.
-  SmallVector<Attribute, 3> indexingMapsAttrVal;
-  indexingMapsAttrVal =
-      llvm::map_to_vector(indexingMaps, [](AffineMap map) -> Attribute {
-        return AffineMapAttr::get(map);
-      });
-  state.addAttribute("indexing_maps", b.getArrayAttr(indexingMapsAttrVal));
+  state.addAttribute("indexing_maps", b.getArrayAttr(llvm::map_to_vector(
+                                          indexingMaps, [](AffineMap map) {
+                                            return AffineMapAttr::get(map);
+                                          })));
return buildStructuredOp(b, state, resultTensorTypes, inputs, outputs,
attributes, regionBuilder);
}
@@ -210,12 +208,10 @@ static void buildBatchMatmulOp(OpBuilder &b, OperationState &state,
RegionBuilderFn regionBuilder,
ArrayRef<AffineMap> indexingMaps) {
// Initialize indexingMaps attribute, for BatchMatmulOp.
-  SmallVector<Attribute, 4> indexingMapsAttrVal;
-  indexingMapsAttrVal =
-      llvm::map_to_vector(indexingMaps, [](AffineMap map) -> Attribute {
-        return AffineMapAttr::get(map);
-      });
-  state.addAttribute("indexing_maps", b.getArrayAttr(indexingMapsAttrVal));
+  state.addAttribute("indexing_maps", b.getArrayAttr(llvm::map_to_vector(
+                                          indexingMaps, [](AffineMap map) {
+                                            return AffineMapAttr::get(map);
+                                          })));
return buildStructuredOp(b, state, resultTensorTypes, inputs, outputs,
attributes, regionBuilder);
}
@@ -227,12 +223,10 @@ static void buildBatchReduceMatmulOp(OpBuilder &b, OperationState &state,
RegionBuilderFn regionBuilder,
ArrayRef<AffineMap> indexingMaps) {
// Initialize indexingMaps attribute, for BatchReduceMatmulOp.
-  SmallVector<Attribute, 4> indexingMapsAttrVal;
-  indexingMapsAttrVal =
-      llvm::map_to_vector(indexingMaps, [](AffineMap map) -> Attribute {
-        return AffineMapAttr::get(map);
-      });
-  state.addAttribute("indexing_maps", b.getArrayAttr(indexingMapsAttrVal));
+  state.addAttribute("indexing_maps", b.getArrayAttr(llvm::map_to_vector(
+                                          indexingMaps, [](AffineMap map) {
+                                            return AffineMapAttr::get(map);
+                                          })));
return buildStructuredOp(b, state, resultTensorTypes, inputs, outputs,
attributes, regionBuilder);
}
@@ -1121,7 +1115,7 @@ void GenericOp::build(
builder.getAffineMapArrayAttr(indexingMaps),
builder.getArrayAttr(llvm::to_vector(llvm::map_range(
iteratorTypes,
-            [&](utils::IteratorType iter) -> mlir::Attribute {
+            [&](utils::IteratorType iter) {
return IteratorTypeAttr::get(builder.getContext(), iter);
}))),
doc.empty() ? StringAttr() : builder.getStringAttr(doc),
@@ -3914,7 +3908,7 @@ ParseResult MatmulOp::parse(OpAsmParser &parser, OperationState &result) {
if (*indexingMapsAttr == nullptr) {
auto indexingMapAttrs = llvm::map_to_vector(
MatmulOp::getDefaultIndexingMaps(parser.getContext()),
-        [](AffineMap map) -> Attribute { return AffineMapAttr::get(map); });
+        [](AffineMap map) { return AffineMapAttr::get(map); });
indexingMapsAttr = parser.getBuilder().getArrayAttr(indexingMapAttrs);
}

@@ -325,10 +325,9 @@ translateMap(linalg::GenericOp op, PatternRewriter &rewriter) {
}
};

-  SmallVector<Attribute> iterAttr =
-      llvm::map_to_vector(itTps, [ctx](auto itTp) -> Attribute {
-        return linalg::IteratorTypeAttr::get(ctx, itTp);
-      });
+  auto iterAttr = llvm::map_to_vector(itTps, [ctx](auto itTp) {
+    return linalg::IteratorTypeAttr::get(ctx, itTp);
+  });

return std::make_pair(rewriter.getAffineMapArrayAttr(idxMapArray),
rewriter.getArrayAttr(iterAttr));
9 changes: 4 additions & 5 deletions mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
@@ -422,11 +422,10 @@ static unsigned getMaxPosOfType(ArrayRef<ReassociationExprs> exprArrays) {

ArrayAttr mlir::getReassociationIndicesAttribute(
Builder &b, ArrayRef<ReassociationIndices> reassociation) {
-  SmallVector<Attribute, 4> reassociationAttr =
-      llvm::to_vector<4>(llvm::map_range(
-          reassociation, [&](const ReassociationIndices &indices) -> Attribute {
-            return cast<Attribute>(b.getI64ArrayAttr(indices));
-          }));
+  auto reassociationAttr = llvm::map_to_vector(
+      reassociation, [&](const ReassociationIndices &indices) {
+        return b.getI64ArrayAttr(indices);
+      });
return b.getArrayAttr(reassociationAttr);
}

6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -798,10 +798,10 @@ void vector::ContractionOp::build(OpBuilder &builder, OperationState &result,
AffineMap::inferFromExprList(indexingExprs, builder.getContext())));
result.addAttribute(
getIteratorTypesAttrName(result.name),
-      builder.getArrayAttr(llvm::to_vector(llvm::map_range(
-          iteratorTypes, [&](IteratorType t) -> mlir::Attribute {
+      builder.getArrayAttr(
+          llvm::map_to_vector(iteratorTypes, [&](IteratorType t) {
return IteratorTypeAttr::get(builder.getContext(), t);
-          }))));
+          })));
}

void vector::ContractionOp::build(OpBuilder &builder, OperationState &result,
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -105,10 +105,10 @@ struct MultiReduceToContract
rewriter.replaceOpWithNewOp<mlir::vector::ContractionOp>(
reduceOp, mulOp->getOperand(0), mulOp->getOperand(1), reduceOp.getAcc(),
rewriter.getAffineMapArrayAttr({srcMap, srcMap, dstMap}),
-        rewriter.getArrayAttr(llvm::to_vector(llvm::map_range(
-            iteratorTypes, [&](IteratorType t) -> mlir::Attribute {
+        rewriter.getArrayAttr(
+            llvm::map_to_vector(iteratorTypes, [&](IteratorType t) {
return IteratorTypeAttr::get(rewriter.getContext(), t);
-            }))));
+            })));
return success();
}
};