From 9f2dcd9dfc30bd2e9ba7ae3998fb8eeaee28f5bd Mon Sep 17 00:00:00 2001 From: Petr Kurapov Date: Mon, 24 Nov 2025 17:19:10 +0100 Subject: [PATCH 01/10] [examples] Add a llama3 example. --- .github/workflows/examples.yml | 4 + pyproject.toml | 1 + python/examples/lit.local.cfg | 1 + python/examples/llama/LICENSE.txt | 84 + python/examples/llama/test_llama3.py | 1889 +++++++++++++++++++++++ python/lighthouse/utils/__init__.py | 1 + python/lighthouse/utils/runtime_args.py | 35 + 7 files changed, 2015 insertions(+) create mode 100644 python/examples/llama/LICENSE.txt create mode 100644 python/examples/llama/test_llama3.py diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 3396fb1..0bc26b7 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -29,3 +29,7 @@ jobs: run: | export FILECHECK=FileCheck-18 # Ubuntu's llvm-dev appends a version number. uv run lit python/examples # Makes sure to substitute FileCheck for $FILECHECK + + - name: Run pytest-enabled examples as tests + run: | + uv run pytest python/examples diff --git a/pyproject.toml b/pyproject.toml index e738e28..98722cd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,6 +11,7 @@ dev = [ "lit==18.1.8", # Tool to configure, discover and run tests "ruff==0.14.5", # Python linter and formatter "pre-commit", # Tool to manage and apply pre-commit hooks + "pytest>=8.0.0", ] [project.optional-dependencies] diff --git a/python/examples/lit.local.cfg b/python/examples/lit.local.cfg index 73171b0..d214968 100644 --- a/python/examples/lit.local.cfg +++ b/python/examples/lit.local.cfg @@ -1 +1,2 @@ config.suffixes = {'.py'} +config.excludes = ['llama'] diff --git a/python/examples/llama/LICENSE.txt b/python/examples/llama/LICENSE.txt new file mode 100644 index 0000000..1fec50c --- /dev/null +++ b/python/examples/llama/LICENSE.txt @@ -0,0 +1,84 @@ +META LLAMA 3 COMMUNITY LICENSE AGREEMENT + +Meta Llama 3 Version Release Date: April 18, 2024 +“Agreement” means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. + +“Documentation” means the specifications, manuals and documentation accompanying Meta Llama 3 distributed by Meta at https://llama.meta.com/get-started/. + +“Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. + +“Meta Llama 3” means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at https://llama.meta.com/llama-downloads. + +“Llama Materials” means, collectively, Meta’s proprietary Meta Llama 3 and Documentation (and any portion thereof) made available under this Agreement. + +“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). + +By clicking “I Accept” below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. + +1. 
License Rights and Redistribution. + + a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. + b. Redistribution and Use. + i. If you distribute or make available the Llama Materials (or any derivative works thereof), or a product or service that uses any of them, including another AI model, you shall (A) provide a copy of this Agreement with any such Llama Materials; and (B) prominently display “Built with Meta Llama 3” on a related website, user interface, blogpost, about page, or product documentation. If you use the Llama Materials to create, train, fine tune, or otherwise improve an AI model, which is distributed or made available, you shall also include “Llama 3” at the beginning of any such AI model name. + ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. + iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a “Notice” text file distributed as a part of such copies: “Meta Llama 3 is licensed under the Meta Llama 3 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.” + iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://llama.meta.com/llama3/use-policy), which is hereby incorporated by reference into this Agreement. + v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Meta Llama 3 or derivative works thereof). + +2. Additional Commercial Terms. If, on the Meta Llama 3 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee’s affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. + +3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. + +4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. + +5. Intellectual Property. + a. 
No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you a license to use “Llama 3” (the “Mark”) solely as required to comply with the last sentence of Section 1.b.i. You will comply with Meta’s brand guidelines (currently accessible at https://about.meta.com/brand/resources/meta/company-brand/ ). All goodwill arising out of your use of the Mark will inure to the benefit of Meta. + b. Subject to Meta’s ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. + c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. + +6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. + +7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. + + +Meta Llama 3 Acceptable Use Policy +Meta is committed to promoting safe and fair use of its tools and features, including Meta Llama 3. If you access or use Meta Llama 3, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at https://llama.meta.com/llama3/use-policy +Prohibited Uses +We want everyone to use Meta Llama 3 safely and responsibly. You agree you will not use, or allow others to use, Meta Llama 3 to: +1. Violate the law or others’ rights, including to: + a. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: + i. Violence or terrorism + ii. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material + iii. Human trafficking, exploitation, and sexual violence + iv. 
The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. + v. Sexual solicitation + vi. Any other criminal activity + b. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals + c. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services + d. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices + e. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws + f. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama Materials + g. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system + +2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Meta Llama 3 related to the following: + a. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State + b. Guns and illegal weapons (including weapon development) + c. Illegal drugs and regulated/controlled substances + d. Operation of critical infrastructure, transportation technologies, or heavy machinery + e. Self-harm or harm to others, including suicide, cutting, and eating disorders + f. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual + +3. Intentionally deceive or mislead others, including use of Meta Llama 3 related to the following: + a. Generating, promoting, or furthering fraud or the creation or promotion of disinformation + b. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content + c. Generating, promoting, or further distributing spam + d. Impersonating another individual without consent, authorization, or legal right + e. Representing that the use of Meta Llama 3 or outputs are human-generated + f. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement + g. 
Fail to appropriately disclose to end users any known dangers of your AI system + +Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: + * Reporting issues with the model: https://github.com/meta-llama/llama3 + * Reporting risky content generated by the model: developers.facebook.com/llama_output_feedback + * Reporting bugs and security concerns: facebook.com/whitehat/info + * Reporting violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: LlamaUseReport@meta.com diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py new file mode 100644 index 0000000..981c238 --- /dev/null +++ b/python/examples/llama/test_llama3.py @@ -0,0 +1,1889 @@ +from dataclasses import dataclass +import math as pymath +import pytest +import torch +from typing import Optional, Tuple + +from mlir import ir +from mlir.dialects import transform, func, linalg, tensor, arith, complex, math +from mlir.dialects.transform import structured +from mlir.dialects.transform import interpreter +from mlir.passmanager import PassManager +from mlir.runtime.np_to_memref import ( + get_ranked_memref_descriptor, +) +from mlir.execution_engine import ExecutionEngine + + +from lighthouse import utils as lh_utils + + +@dataclass +class ModelArgs: + dim: int = 4096 + n_layers: int = 32 + n_heads: int = 32 + n_kv_heads: Optional[int] = None + vocab_size: int = -1 + multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2 + ffn_dim_multiplier: Optional[float] = None + norm_eps: float = 1e-5 + rope_theta: float = 500000 + + max_batch_size: int = 32 + max_seq_len: int = 2048 + + +def affine_map(dim_count, exprs, *, symb_count=0): + return ir.AffineMap.get(dim_count, symb_count, exprs) + + +parallel = linalg.IteratorType.parallel +reduction = linalg.IteratorType.reduction + + +def create_pass_pipeline(ctx: ir.Context) -> PassManager: + with ctx: + pm = PassManager("builtin.module") + pm.add("convert-scf-to-cf") + pm.add("expand-strided-metadata") + pm.add("lower-affine") + pm.add("finalize-memref-to-llvm") + pm.add("convert-func-to-llvm") + pm.add("convert-to-llvm") + pm.add("reconcile-unrealized-casts") + pm.add("cse") + pm.add("canonicalize") + return pm + + +def create_schedule(ctx: ir.Context) -> ir.Module: + """ + Create an MLIR module containing transformation schedule. + The schedule provides partial lowering to scalar operations. + + Args: + ctx: MLIR context. + """ + with ctx, ir.Location.unknown(context=ctx): + # Create transform module. + schedule = ir.Module.create() + schedule.operation.attributes["transform.with_named_sequence"] = ( + ir.UnitAttr.get() + ) + + # Create entry point transformation sequence. + with ir.InsertionPoint(schedule.body): + named_seq = transform.NamedSequenceOp( + "__transform_main", + [transform.AnyOpType.get()], + [], + arg_attrs=[{"transform.readonly": ir.UnitAttr.get()}], + ) + + # Create the schedule. + with ir.InsertionPoint(named_seq.body): + # For simplicity, use generic transform matchers. + anytype = transform.AnyOpType.get() + + # Find the kernel's function op. + func = structured.MatchOp.match_op_names( + named_seq.bodyTarget, ["func.func"] + ) + + # Use C interface wrappers - required to make function executable after jitting. + func = transform.apply_registered_pass( + anytype, func, "llvm-request-c-wrappers" + ) + + # Find the kernel's module op. 
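+            # (Clarifying note.) The pass applications below anchor on the
+            # enclosing builtin.module rather than on the function itself,
+            # hence the walk up to the parent op first.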
mod = transform.get_parent_op(
+                anytype, func, op_name="builtin.module", deduplicate=True
+            )
+
+            # Naive lowering to loops.
+            mod = transform.apply_registered_pass(
+                anytype, mod, "convert-linalg-to-loops"
+            )
+            # Cleanup.
+            transform.ApplyCommonSubexpressionEliminationOp(mod)
+            with ir.InsertionPoint(transform.ApplyPatternsOp(mod).patterns):
+                transform.ApplyCanonicalizationPatternsOp()
+
+            # Terminate the schedule.
+            transform.YieldOp()
+    return schedule
+
+
+def apply_schedule(kernel: ir.Module, schedule: ir.Module) -> None:
+    interpreter.apply_named_sequence(
+        payload_root=kernel,
+        transform_root=schedule.body.operations[0],
+        transform_module=schedule,
+    )
+
+
+def bufferize_module(ctx: ir.Context, kernel: ir.Module) -> None:
+    with ctx:
+        pm = PassManager("builtin.module")
+        pm.add("one-shot-bufferize{bufferize-function-boundaries}")
+        pm.run(kernel.operation)
+
+
+#### IR builders #####
+# TODO: Move to mlir_gen module
+
+
+def get_add(a: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value:
+    return linalg.add(a, b, outs=(out,))
+
+
+def get_rsqrt(a: ir.Value, out: ir.Value) -> ir.Value:
+    return linalg.rsqrt(a, outs=(out,))
+
+
+def get_powf(a: ir.Value, out: ir.Value) -> ir.Value:
+    return linalg.powf(a, outs=(out,))
+
+
+def get_sqr(a: ir.Value, out: ir.Value) -> ir.Value:
+    return linalg.square(a, outs=(out,))
+
+
+def get_mul(a: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value:
+    return linalg.mul(a, b, outs=(out,))
+
+
+# equivalent to torch.mean(-1, keepdim=True)
+def get_mean(a: ir.Value, out: ir.Value) -> ir.Value:
+    # Need to initialize the output with zeros for accumulation
+    zero = arith.ConstantOp(ir.F32Type.get(), 0.0)
+    out_filled = linalg.fill(zero, outs=[out])
+
+    # Input map: (d0, d1) -> (d0, d1)
+    input_map = affine_map(
+        a.type.rank,
+        [ir.AffineDimExpr.get(i) for i in range(a.type.rank)],
+    )
+    # Output map: (d0, d1) -> (d0, 0)
+    output_map = affine_map(
+        a.type.rank,
+        [ir.AffineDimExpr.get(i) for i in range(a.type.rank - 1)]
+        + [ir.AffineConstantExpr.get(0)],
+    )
+    iterator_types = [parallel] * (a.type.rank - 1) + [reduction]
+
+    scale = arith.ConstantOp(ir.F32Type.get(), 1.0 / a.type.shape[-1])
+
+    @linalg.generic(
+        [a],
+        [out_filled],
+        [input_map, output_map],
+        iterator_types,
+    )
+    def mean_op(a_val, acc):
+        # Multiply input by scale factor and add to accumulator
+        scaled = arith.mulf(a_val, scale)
+        return arith.addf(scaled, acc)
+
+    return mean_op
+
+
+# repeat_kv
+def get_repeat_kv(x: ir.Value, n_rep: int, out: ir.Value) -> ir.Value:
+    bs, slen, n_kv_heads, head_dim = x.type.shape
+    if n_rep == 1:
+        return x
+
+    b, s, h_out, d = [ir.AffineDimExpr.get(i) for i in range(4)]
+
+    # For output head h_out, we read from input head h_out // n_rep
+    # This is equivalent to: x[:, :, :, None, :].expand(...).reshape(...)
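+    # For example, with n_kv_heads = 2 and n_rep = 4, output heads 0-3 all
+    # read input head 0 and output heads 4-7 read input head 1.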
+ h_in = ir.AffineExpr.get_floor_div(h_out, ir.AffineConstantExpr.get(n_rep)) + + # Affine maps + x_map = affine_map(4, [b, s, h_in, d]) + out_map = affine_map(4, [b, s, h_out, d]) + + @linalg.generic( + [x], + [out], + [x_map, out_map], + [parallel] * 4, + ) + def repeat_kv_op(a, _out): + return a + + return repeat_kv_op + + +# equivalent to torch.nn.functional.silu +def get_silu(inputs: ir.Value, out: ir.Value) -> ir.Value: + elty = inputs.type.element_type + one = arith.constant(elty, 1.0) + + dims = [ir.AffineDimExpr.get(i) for i in range(inputs.type.rank)] + par_affine_map = affine_map(inputs.type.rank, dims) + par_iterator_types = [parallel] * inputs.type.rank + + @linalg.generic( + [inputs], + [out], + [par_affine_map, par_affine_map], + par_iterator_types, + ) + def silu_op(a, _out): + sigmoid = arith.DivFOp( + one, + arith.AddFOp( + one, + math.exp(arith.NegFOp(a).result), + ).result, + ).result + return arith.MulFOp(a, sigmoid).result + + return silu_op + + +# equivalent to torch.softmax(a, dim=-1) +# this should be just linalg.softmax, but there's no decomposition +def get_softmax(a: ir.Value, out: ir.Value) -> ir.Value: + elty = a.type.element_type + + reduced_shape = list(a.type.shape) + reduced_shape[-1] = 1 + max_uninit = tensor.EmptyOp(reduced_shape, elty) + + neg_inf = arith.ConstantOp(elty, float("-inf")) + max_init = linalg.fill(neg_inf, outs=[max_uninit.result]) + + reduce_map = affine_map( + a.type.rank, + [ir.AffineDimExpr.get(i) for i in range(a.type.rank - 1)] + + [ir.AffineConstantExpr.get(0)], + ) + identity_map = affine_map( + a.type.rank, + [ir.AffineDimExpr.get(i) for i in range(a.type.rank)], + ) + + iterator_types = [parallel] * (a.type.rank - 1) + [reduction] + + @linalg.generic( + [a], + [max_init], + [identity_map, reduce_map], + iterator_types, + ) + def compute_max(val, acc): + return arith.MaximumFOp(val, acc).result + + shifted_uninit = tensor.EmptyOp(a.type.shape, elty) + + @linalg.generic( + [a, compute_max], + [shifted_uninit.result], + [identity_map, reduce_map, identity_map], + [parallel] * a.type.rank, + ) + def subtract_max(val, max_val, _out): + return arith.SubFOp(val, max_val).result + + exp_uninit = tensor.EmptyOp(a.type.shape, elty) + + @linalg.generic( + [subtract_max], + [exp_uninit.result], + [identity_map, identity_map], + [parallel] * a.type.rank, + ) + def compute_exp(val, _out): + return math.exp(val) + + sum_uninit = tensor.EmptyOp(reduced_shape, elty) + zero = arith.ConstantOp(elty, 0.0) + sum_init = linalg.fill(zero, outs=[sum_uninit.result]) + + @linalg.generic( + [compute_exp], + [sum_init], + [identity_map, reduce_map], + iterator_types, + ) + def compute_sum(val, acc): + return arith.AddFOp(val, acc).result + + @linalg.generic( + [compute_exp, compute_sum], + [out], + [identity_map, reduce_map, identity_map], + [parallel] * a.type.rank, + ) + def divide_by_sum(exp_val, sum_val, _out): + return arith.DivFOp(exp_val, sum_val).result + + return divide_by_sum + + +# torch.triu +def get_triu(a: ir.Value, out: ir.Value) -> ir.Value: + elty = a.type.element_type + zero = arith.constant(elty, 0.0) + + rank = a.type.rank + dims = [ir.AffineDimExpr.get(i) for i in range(rank)] + par_affine_map = affine_map(rank, dims) + par_iterator_types = [parallel] * rank + + @linalg.generic( + [a], + [out], + [par_affine_map, par_affine_map], + par_iterator_types, + ) + def triu_op(a_elem, _out): + i = linalg.IndexOp(rank - 2).result + j = linalg.IndexOp(rank - 1).result + cond = arith.cmpi(arith.CmpIPredicate.sle, i, j) + result = 
arith.select(cond, a_elem, zero) + return result + + return triu_op + + +# torch.matmul +def get_matmul(a: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value: + return linalg.matmul(a, b, outs=[out]) + + +# torch.nn.functional.linear +def get_linear(a: ir.Value, w: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value: + elty = out.type.element_type + zero = arith.constant(elty, 0.0) + out_zeroed = linalg.fill(zero, outs=[out]) + + a_rank = a.type.rank + out_rank = out.type.rank + + # For matmul: a[...batch..., k] * w[j, k] -> out[...batch..., j] + num_dims = a_rank + 1 + + dims = [ir.AffineDimExpr.get(d) for d in range(num_dims)] + + # a maps to [...batch..., k] where k is the last (reduction) dimension + batch_dims = dims[: a_rank - 1] + k = dims[-1] # reduction dimension + a_map = affine_map(num_dims, batch_dims + [k]) + + # w maps to [j, k] where j is the output feature dimension + j = dims[a_rank - 1] # after batch dims, before k + w_map = affine_map(num_dims, [j, k]) + + # out maps to [...batch..., j] + out_map = affine_map(num_dims, batch_dims + [j]) + + iterator_types = [parallel] * (a_rank - 1) + [parallel, reduction] + + @linalg.generic( + [a, w], + [out_zeroed], + [a_map, w_map, out_map], + iterator_types, + ) + def matmul_op(a_elem, w_elem, out_elem): + prod = arith.MulFOp(a_elem, w_elem).result + return arith.AddFOp(out_elem, prod).result + + out_dims = [ir.AffineDimExpr.get(d) for d in range(out_rank)] + b_map = affine_map(out_rank, [out_dims[-1]]) + out_map2 = affine_map(out_rank, out_dims) + + bias_iterator_types = [parallel] * out_rank + + @linalg.generic( + [matmul_op, b], + [out_zeroed], + [out_map2, b_map, out_map2], + bias_iterator_types, + ) + def add_bias_op(matmul_elem, b_elem, _out): + return arith.AddFOp(matmul_elem, b_elem).result + + return add_bias_op + + +# x * rsqrt(mean(x^2, dim=-1, keepdim=True) + eps) +def get_l2_norm(a: ir.Value, out: ir.Value, eps: float = 1e-5) -> ir.Value: + elty = a.type.element_type + # Broadcast epsilon scalar to tensor with reduced shape + reduced_shape = list(a.type.shape) + reduced_shape[-1] = 1 + eps_const = arith.ConstantOp(elty, eps) + eps_tensor_uninit = tensor.EmptyOp(reduced_shape, elty) + eps_tensor = linalg.fill(eps_const, outs=[eps_tensor_uninit]) + # Square the input + squared_input = tensor.EmptyOp(a.type.shape, elty) + sqr = get_sqr(a, squared_input) + + # Compute mean along last dimension + reduced_shape = list(a.type.shape) + reduced_shape[-1] = 1 + mean_uninit = tensor.EmptyOp(reduced_shape, elty) + + mean = get_mean(sqr, mean_uninit) + mean_plus_eps = get_add(mean, eps_tensor, mean_uninit) + rsqrt_reduced = get_rsqrt(mean_plus_eps, mean_uninit) + + # (d0, d1) -> (d0, 0) for input, (d0, d1) -> (d0, d1) for output + input_map = affine_map( + a.type.rank, + [ir.AffineDimExpr.get(i) for i in range(a.type.rank - 1)] + + [ir.AffineConstantExpr.get(0)], + ) + output_map = affine_map( + a.type.rank, + [ir.AffineDimExpr.get(i) for i in range(a.type.rank)], + ) + iterator_types = [parallel] * a.type.rank + + @linalg.generic( + [rsqrt_reduced], + [out], + [input_map, output_map], + iterator_types, + ) + def broadcast_rsqrt(val, _out): + return val + + return get_mul(a, broadcast_rsqrt, out) + + +# equivalent to torch.polar +def get_polar(abs: ir.Value, angle: ir.Value, out: ir.Value) -> ir.Value: + elty = abs.type.element_type + shape = abs.type.shape + rank = len(shape) + + # Identity map for element-wise operations + id_map = affine_map(rank, [ir.AffineDimExpr.get(i) for i in range(rank)]) + + # Compute cos(angle) and 
sin(angle), then multiply by abs to get real and imag parts + @linalg.generic( + [abs, angle], + [out], + [id_map, id_map, id_map], + [parallel] * rank, + ) + def polar_convert(abs_val, angle_val, _out): + cos_val = math.CosOp(angle_val).result + sin_val = math.SinOp(angle_val).result + real_part = arith.MulFOp(abs_val, cos_val).result + imag_part = arith.MulFOp(abs_val, sin_val).result + return complex.CreateOp(ir.ComplexType.get(elty), real_part, imag_part).result + + return polar_convert + + +# equivalent to torch.outer (out[i,j] = a[i] * b[j]) +def get_outer(a: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value: + # Affine maps for outer product: a[i] broadcasts to (i,j), b[j] broadcasts to (i,j) + a_map = affine_map(2, [ir.AffineDimExpr.get(0)]) + b_map = affine_map(2, [ir.AffineDimExpr.get(1)]) + out_map = affine_map(2, [ir.AffineDimExpr.get(0), ir.AffineDimExpr.get(1)]) + + @linalg.generic( + [a, b], + [out], + [a_map, b_map, out_map], + [parallel, parallel], + ) + def outer_product(a_val, b_val, _out): + return arith.MulFOp(a_val, b_val).result + + return outer_product + + +# with b broadcasting, assuming it has smaller rank +def get_complex_mul(a: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value: + rank_b = b.type.rank + rank_out = out.type.rank + + dim_exprs_a = [ir.AffineDimExpr.get(i) for i in range(rank_out)] + + if rank_b < rank_out: + offset = rank_out - rank_b + dim_exprs_b = [ir.AffineConstantExpr.get(0)] * offset + [ + ir.AffineDimExpr.get(i) for i in range(offset, rank_out) + ] + else: + b_shape = list(b.type.shape) + dim_exprs_b = [] + for i in range(rank_out): + if i < len(b_shape) and b_shape[i] == 1: + dim_exprs_b.append(ir.AffineConstantExpr.get(0)) + else: + dim_exprs_b.append(ir.AffineDimExpr.get(i)) + + dim_exprs_out = [ir.AffineDimExpr.get(i) for i in range(rank_out)] + + map_a = affine_map(rank_out, dim_exprs_a) + map_b = affine_map(rank_out, dim_exprs_b) + map_out = affine_map(rank_out, dim_exprs_out) + + @linalg.generic( + [a, b], + [out], + [map_a, map_b, map_out], + [parallel] * rank_out, + ) + def complex_mul_op(a_val, b_val, _out): + result = complex.MulOp(a_val, b_val).result + return result + + return complex_mul_op + + +def get_rotary_emb( + xq: ir.Value, xk: ir.Value, freqs_cis: ir.Value, xq_out: ir.Value, xk_out: ir.Value +): + elty = xq.type.element_type + + xq_shape = list(xq.type.shape) + xk_shape = list(xk.type.shape) + batch, seq_len, n_heads, head_dim = xq_shape + n_kv_heads = xk_shape[2] + + # Reshape xq to (batch, seq_len, n_heads, head_dim//2, 2) + xq_reshaped_shape = [batch, seq_len, n_heads, head_dim // 2, 2] + xq_reshaped_type = ir.RankedTensorType.get(xq_reshaped_shape, elty) + xq_reshaped = tensor.expand_shape( + xq_reshaped_type, + xq, + reassociation=[[0], [1], [2], [3, 4]], + output_shape=[], + static_output_shape=xq_reshaped_shape, + ) + + # View xq as complex: (batch, seq_len, n_heads, head_dim//2, 2) -> (batch, seq_len, n_heads, head_dim//2) complex + xq_complex_shape = [batch, seq_len, n_heads, head_dim // 2] + xq_complex_uninit = tensor.EmptyOp( + xq_complex_shape, ir.ComplexType.get(elty) + ).result + xq_complex = get_view_as_complex(xq_reshaped, xq_complex_uninit) + + # same for xk + xk_reshaped_shape = [batch, seq_len, n_kv_heads, head_dim // 2, 2] + xk_reshaped_type = ir.RankedTensorType.get(xk_reshaped_shape, elty) + xk_reshaped = tensor.expand_shape( + xk_reshaped_type, + xk, + reassociation=[[0], [1], [2], [3, 4]], + output_shape=[], + static_output_shape=xk_reshaped_shape, + ) + + xk_complex_shape = [batch, seq_len, 
n_kv_heads, head_dim // 2] + xk_complex_uninit = tensor.EmptyOp( + xk_complex_shape, ir.ComplexType.get(elty) + ).result + xk_complex = get_view_as_complex(xk_reshaped, xk_complex_uninit) + + # Reshape freqs_cis for broadcasting: (seq_len, head_dim//2) -> (1, seq_len, 1, head_dim//2) + freqs_broadcast_shape = [1, seq_len, 1, head_dim // 2] + freqs_broadcast_uninit = tensor.EmptyOp(freqs_broadcast_shape, elty).result + freqs_broadcast = get_reshape_for_broadcast( + freqs_cis, xq_complex, freqs_broadcast_uninit + ) + + # cast freqs_broadcast to complex + freqs_broadcast_complex_uninit = tensor.EmptyOp( + freqs_broadcast_shape, ir.ComplexType.get(elty) + ).result + + d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] + indexing_maps = [ + ir.AffineMap.get(4, 0, [d0, d1, d2, d3]), + ir.AffineMap.get(4, 0, [d0, d1, d2, d3]), + ] + + @linalg.generic( + inputs=[freqs_broadcast], + outputs=[freqs_broadcast_complex_uninit], + indexing_maps=indexing_maps, + iterator_types=["parallel", "parallel", "parallel", "parallel"], + ) + def real_to_complex(r, out): + zero = arith.constant(elty, 0.0) + return complex.CreateOp(ir.ComplexType.get(elty), r, zero).result + + freqs_broadcast_complex = real_to_complex + + # Multiply xq_complex with freqs_broadcast_complex + xq_rotated_uninit = tensor.EmptyOp( + xq_complex_shape, ir.ComplexType.get(elty) + ).result + xq_rotated = get_complex_mul(xq_complex, freqs_broadcast_complex, xq_rotated_uninit) + + xk_rotated_uninit = tensor.EmptyOp( + xk_complex_shape, ir.ComplexType.get(elty) + ).result + xk_rotated = get_complex_mul(xk_complex, freqs_broadcast_complex, xk_rotated_uninit) + + # view as real + xq_real_shape = [batch, seq_len, n_heads, head_dim // 2, 2] + xq_real_uninit = tensor.EmptyOp(xq_real_shape, elty).result + xq_real = get_view_as_real(xq_rotated, xq_real_uninit) + + xk_real_shape = [batch, seq_len, n_kv_heads, head_dim // 2, 2] + xk_real_uninit = tensor.EmptyOp(xk_real_shape, elty).result + xk_real = get_view_as_real(xk_rotated, xk_real_uninit) + + # flatten back to original shape + xq_final = tensor.collapse_shape( + xq.type, + xq_real, + reassociation=[[0], [1], [2], [3, 4]], + ) + + xk_final = tensor.collapse_shape( + xk.type, + xk_real, + reassociation=[[0], [1], [2], [3, 4]], + ) + + linalg.copy(xq_final, outs=[xq_out]) + linalg.copy(xk_final, outs=[xk_out]) + + +def get_reshape_for_broadcast(freqs_cis: ir.Value, x: ir.Value, out: ir.Value): + # broadcast freqs_cis[seq, head] -> out[0, seq, 0, head] + d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] + + in_map = affine_map(4, [d1, d3]) + out_map = affine_map(4, [d0, d1, d2, d3]) + + @linalg.generic( + [freqs_cis], + [out], + [in_map, out_map], + [parallel, parallel, parallel, parallel], + ) + def reshape_op(val, _out): + return val + + return reshape_op + + +# torch.view_as_complex +def get_view_as_complex(x: ir.Value, out: ir.Value) -> ir.Value: + elty = x.type.element_type + rank = x.type.rank + shape = list(x.type.shape) + assert shape[-1] == 2, "Last dimension must be of size 2 to form complex numbers" + + rank_out = rank - 1 + dim_exprs_out = [ir.AffineDimExpr.get(i) for i in range(rank_out)] + + # real part: access input[d0, d1, ..., d_{rank-2}, 0] + dim_exprs_real = dim_exprs_out + [ir.AffineConstantExpr.get(0)] + # imag part: access input[d0, d1, ..., d_{rank-2}, 1] + dim_exprs_imag = dim_exprs_out + [ir.AffineConstantExpr.get(1)] + + input_map_real = affine_map(rank_out, dim_exprs_real) + input_map_imag = affine_map(rank_out, dim_exprs_imag) + output_map = 
affine_map(rank_out, dim_exprs_out) + + @linalg.generic( + [x, x], # Same input tensor accessed twice with different maps + [out], + [input_map_real, input_map_imag, output_map], + [parallel] * rank_out, + ) + def view_as_complex_op(r, i, _out): + cplx = complex.CreateOp(ir.ComplexType.get(elty), r, i).result + return cplx + + return view_as_complex_op + + +# torch.view_as_real +def get_view_as_real(x: ir.Value, out: ir.Value) -> ir.Value: + rank = x.type.rank + + # Output has shape [..., 2] + # extract real part to [..., 0] and imag part to [..., 1] + + dim_exprs_in = [ir.AffineDimExpr.get(i) for i in range(rank)] + + # For real part: write to output[..., 0] + dim_exprs_real = dim_exprs_in + [ir.AffineConstantExpr.get(0)] + # For imag part: write to output[..., 1] + dim_exprs_imag = dim_exprs_in + [ir.AffineConstantExpr.get(1)] + + input_map = affine_map(rank, dim_exprs_in) + output_map_real = affine_map(rank, dim_exprs_real) + output_map_imag = affine_map(rank, dim_exprs_imag) + + @linalg.generic( + [x], + [out], + [input_map, output_map_real], + [parallel] * rank, + ) + def write_real(cplx, _out): + return complex.ReOp(cplx).result + + @linalg.generic( + [x], + [write_real], + [input_map, output_map_imag], + [parallel] * rank, + ) + def write_imag(cplx, _out): + return complex.ImOp(cplx).result + + return write_imag + + +def get_attention( + args: ModelArgs, + x: ir.Value, + wq: ir.Value, + wk: ir.Value, + wv: ir.Value, + wo: ir.Value, + freqs_cis: ir.Value, + mask: ir.Value, + out: ir.Value, +) -> ir.Value: + elty = x.type.element_type + batch, seq_len, dim = x.type.shape + n_heads = args.n_heads + n_kv_heads = args.n_kv_heads + head_dim = args.dim // args.n_heads + n_rep = n_heads // n_kv_heads + + # Q, K, V projections + # xq = linear(x, wq) -> (batch, seq_len, n_heads * head_dim) + xq_shape = [batch, seq_len, n_heads * head_dim] + xq_uninit = tensor.EmptyOp(xq_shape, elty).result + bq_zeros = tensor.EmptyOp([n_heads * head_dim], elty).result + zero = arith.constant(elty, 0.0) + bq = linalg.fill(zero, outs=[bq_zeros]) + xq_flat = get_linear(x, wq, bq, xq_uninit) + + # Reshape to (batch, seq_len, n_heads, head_dim) + xq_reshaped_shape = [batch, seq_len, n_heads, head_dim] + xq_reshaped_type = ir.RankedTensorType.get(xq_reshaped_shape, elty) + xq = tensor.expand_shape( + xq_reshaped_type, + xq_flat, + reassociation=[[0], [1], [2, 3]], + output_shape=[], + static_output_shape=xq_reshaped_shape, + ) + + # xk = linear(x, wk) -> (batch, seq_len, n_kv_heads * head_dim) + xk_shape = [batch, seq_len, n_kv_heads * head_dim] + xk_uninit = tensor.EmptyOp(xk_shape, elty).result + bk_zeros = tensor.EmptyOp([n_kv_heads * head_dim], elty).result + bk = linalg.fill(zero, outs=[bk_zeros]) + xk_flat = get_linear(x, wk, bk, xk_uninit) + + # Reshape to (batch, seq_len, n_kv_heads, head_dim) + xk_reshaped_shape = [batch, seq_len, n_kv_heads, head_dim] + xk_reshaped_type = ir.RankedTensorType.get(xk_reshaped_shape, elty) + xk = tensor.expand_shape( + xk_reshaped_type, + xk_flat, + reassociation=[[0], [1], [2, 3]], + output_shape=[], + static_output_shape=xk_reshaped_shape, + ) + + # xv = linear(x, wv) -> (batch, seq_len, n_kv_heads * head_dim) + xv_shape = [batch, seq_len, n_kv_heads * head_dim] + xv_uninit = tensor.EmptyOp(xv_shape, elty).result + bv_zeros = tensor.EmptyOp([n_kv_heads * head_dim], elty).result + bv = linalg.fill(zero, outs=[bv_zeros]) + xv_flat = get_linear(x, wv, bv, xv_uninit) + + # Reshape to (batch, seq_len, n_kv_heads, head_dim) + xv_reshaped_shape = [batch, seq_len, n_kv_heads, 
head_dim] + xv_reshaped_type = ir.RankedTensorType.get(xv_reshaped_shape, elty) + xv = tensor.expand_shape( + xv_reshaped_type, + xv_flat, + reassociation=[[0], [1], [2, 3]], + output_shape=[], + static_output_shape=xv_reshaped_shape, + ) + + # Apply rotary embeddings + xq_rot_uninit = tensor.EmptyOp([batch, seq_len, n_heads, head_dim], elty).result + xk_rot_uninit = tensor.EmptyOp([batch, seq_len, n_kv_heads, head_dim], elty).result + get_rotary_emb(xq, xk, freqs_cis, xq_rot_uninit, xk_rot_uninit) + xq_rot = xq_rot_uninit + xk_rot = xk_rot_uninit + + # Repeat K/V if using GQA (n_kv_heads < n_heads) + if n_rep > 1: + keys_repeated_uninit = tensor.EmptyOp( + [batch, seq_len, n_heads, head_dim], elty + ).result + keys = get_repeat_kv(xk_rot, n_rep, keys_repeated_uninit) + values_repeated_uninit = tensor.EmptyOp( + [batch, seq_len, n_heads, head_dim], elty + ).result + values = get_repeat_kv(xv, n_rep, values_repeated_uninit) + else: + keys = xk_rot + values = xv + + # Transpose for attention: (batch, n_heads, seq_len, head_dim) + xq_t_shape = [batch, n_heads, seq_len, head_dim] + xq_t = tensor.EmptyOp(xq_t_shape, elty).result + + # Permute [0, 2, 1, 3] + d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] + xq_perm_map = affine_map(4, [d0, d2, d1, d3]) + xq_t_map = affine_map(4, [d0, d1, d2, d3]) + + @linalg.generic( + [xq_rot], + [xq_t], + [xq_perm_map, xq_t_map], + [parallel] * 4, + ) + def transpose_xq(val, _out): + return val + + xq_transposed = transpose_xq + + # Transpose keys and values similarly + keys_t = tensor.EmptyOp(xq_t_shape, elty).result + + @linalg.generic( + [keys], + [keys_t], + [xq_perm_map, xq_t_map], + [parallel] * 4, + ) + def transpose_k(val, _out): + return val + + keys_transposed = transpose_k + + values_t = tensor.EmptyOp(xq_t_shape, elty).result + + @linalg.generic( + [values], + [values_t], + [xq_perm_map, xq_t_map], + [parallel] * 4, + ) + def transpose_v(val, _out): + return val + + values_transposed = transpose_v + + # Compute attention scores: matmul(xq, keys.transpose(-2, -1)) + # xq_transposed: (batch, n_heads, seq_len, head_dim) + # keys_transposed: (batch, n_heads, seq_len, head_dim) -> transpose to (batch, n_heads, head_dim, seq_len) + # scores: (batch, n_heads, seq_len, seq_len) + scores_shape = [batch, n_heads, seq_len, seq_len] + scores_uninit = tensor.EmptyOp(scores_shape, elty).result + scores_zeroed = linalg.fill(zero, outs=[scores_uninit]) + + # Batched matmul with transpose + b, h, s1, s2, d = [ir.AffineDimExpr.get(i) for i in range(5)] + xq_scores_map = affine_map(5, [b, h, s1, d]) + keys_scores_map = affine_map(5, [b, h, s2, d]) # Will read from transposed position + scores_map = affine_map(5, [b, h, s1, s2]) + + @linalg.generic( + [xq_transposed, keys_transposed], + [scores_zeroed], + [xq_scores_map, keys_scores_map, scores_map], + [parallel, parallel, parallel, parallel, reduction], + ) + def compute_scores(q_val, k_val, score_val): + prod = arith.MulFOp(q_val, k_val).result + return arith.AddFOp(score_val, prod).result + + scores_raw = compute_scores + + # Scale by 1/sqrt(head_dim) + scale_val = 1.0 / pymath.sqrt(head_dim) + scale_const = arith.constant(elty, scale_val) + scores_scaled_uninit = tensor.EmptyOp(scores_shape, elty).result + + d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] + identity_map = affine_map(4, [d0, d1, d2, d3]) + + @linalg.generic( + [scores_raw], + [scores_scaled_uninit], + [identity_map, identity_map], + [parallel] * 4, + ) + def scale_scores(score, _out): + return arith.MulFOp(score, 
scale_const).result + + scores_scaled = scale_scores + + # Apply mask if provided (add mask to scores) + if mask is not None: + scores_masked_uninit = tensor.EmptyOp(scores_shape, elty).result + scores_final = get_add(scores_scaled, mask, scores_masked_uninit) + else: + scores_final = scores_scaled + + # Apply softmax + scores_softmax_uninit = tensor.EmptyOp(scores_shape, elty).result + attn_weights = get_softmax(scores_final, scores_softmax_uninit) + + # Compute output: matmul(attn_weights, values) + # attn_weights: (batch, n_heads, seq_len, seq_len) + # values_transposed: (batch, n_heads, seq_len, head_dim) + # output: (batch, n_heads, seq_len, head_dim) + attn_out_shape = [batch, n_heads, seq_len, head_dim] + attn_out_uninit = tensor.EmptyOp(attn_out_shape, elty).result + attn_out_zeroed = linalg.fill(zero, outs=[attn_out_uninit]) + + b, h, s1, s2, d = [ir.AffineDimExpr.get(i) for i in range(5)] + attn_map = affine_map(5, [b, h, s1, s2]) + values_map = affine_map(5, [b, h, s2, d]) + out_map = affine_map(5, [b, h, s1, d]) + + @linalg.generic( + [attn_weights, values_transposed], + [attn_out_zeroed], + [attn_map, values_map, out_map], + [parallel, parallel, parallel, parallel, reduction], + ) + def compute_attn_out(attn_val, v_val, out_val): + prod = arith.MulFOp(attn_val, v_val).result + return arith.AddFOp(out_val, prod).result + + attn_out = compute_attn_out + + # Transpose back: (batch, n_heads, seq_len, head_dim) -> (batch, seq_len, n_heads, head_dim) + attn_out_perm_shape = [batch, seq_len, n_heads, head_dim] + attn_out_perm_type = ir.RankedTensorType.get(attn_out_perm_shape, elty) + attn_out_perm = tensor.EmptyOp(attn_out_perm_shape, elty).result + + d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] + from_map = affine_map(4, [d0, d1, d2, d3]) + to_map = affine_map(4, [d0, d2, d1, d3]) + + @linalg.generic( + [attn_out], + [attn_out_perm], + [from_map, to_map], + [parallel] * 4, + ) + def transpose_out(val, _out): + return val + + attn_out_transposed = transpose_out + + # Reshape to (batch, seq_len, n_heads * head_dim) + attn_out_flat_shape = [batch, seq_len, n_heads * head_dim] + attn_out_flat_type = ir.RankedTensorType.get(attn_out_flat_shape, elty) + attn_out_flat = tensor.collapse_shape( + attn_out_flat_type, + attn_out_transposed, + reassociation=[[0], [1], [2, 3]], + ) + + # Output projection + bo_zeros = tensor.EmptyOp([dim], elty).result + bo = linalg.fill(zero, outs=[bo_zeros]) + output_final = get_linear(attn_out_flat, wo, bo, out) + + return output_final + + +#### Test cases ##### + + +def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): + ndim = x.ndim + assert 0 <= 1 < ndim + assert freqs_cis.shape == (x.shape[1], x.shape[-1]) + shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def rotary_emb_ref( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + freqs_cis = reshape_for_broadcast(freqs_cis, xq_) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq), xk_out.type_as(xk) + + +def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: + """torch.repeat_interleave(x, dim=2, repeats=n_rep)""" + bs, slen, n_kv_heads, head_dim = x.shape + if n_rep == 1: + return x + return ( + x[:, :, :, None, :] 
.expand(bs, slen, n_kv_heads, n_rep, head_dim)
+        .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
+    )
+
+
+# Attention implementation without fairscale parallel linear layers
+class StandaloneAttention(torch.nn.Module):
+    def __init__(self, args: ModelArgs):
+        super().__init__()
+        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
+        self.dim = args.dim
+        self.n_heads = args.n_heads
+        self.n_rep = self.n_heads // self.n_kv_heads
+        self.head_dim = args.dim // args.n_heads
+
+        self.wq = torch.nn.Linear(
+            args.dim,
+            args.n_heads * self.head_dim,
+            bias=False,
+        )
+        self.wk = torch.nn.Linear(
+            args.dim,
+            self.n_kv_heads * self.head_dim,
+            bias=False,
+        )
+        self.wv = torch.nn.Linear(
+            args.dim,
+            self.n_kv_heads * self.head_dim,
+            bias=False,
+        )
+        self.wo = torch.nn.Linear(
+            args.n_heads * self.head_dim,
+            args.dim,
+            bias=False,
+        )
+
+        self.cache_k = torch.zeros(
+            (
+                args.max_batch_size,
+                args.max_seq_len,
+                self.n_kv_heads,
+                self.head_dim,
+            )
+        )
+        self.cache_v = torch.zeros(
+            (
+                args.max_batch_size,
+                args.max_seq_len,
+                self.n_kv_heads,
+                self.head_dim,
+            )
+        )
+
+    def forward(
+        self,
+        x: torch.Tensor,
+        start_pos: int,
+        freqs_cis: torch.Tensor,
+        mask: Optional[torch.Tensor],
+    ):
+        bsz, seqlen, _ = x.shape
+        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
+
+        xq = xq.view(bsz, seqlen, self.n_heads, self.head_dim)
+        xk = xk.view(bsz, seqlen, self.n_kv_heads, self.head_dim)
+        xv = xv.view(bsz, seqlen, self.n_kv_heads, self.head_dim)
+
+        xq, xk = rotary_emb_ref(xq, xk, freqs_cis=freqs_cis)
+
+        self.cache_k = self.cache_k.to(xq)
+        self.cache_v = self.cache_v.to(xq)
+
+        self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
+        self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
+
+        keys = self.cache_k[:bsz, : start_pos + seqlen]
+        values = self.cache_v[:bsz, : start_pos + seqlen]
+
+        # repeat k/v heads if n_kv_heads < n_heads
+        keys = repeat_kv(
+            keys, self.n_rep
+        )  # (bs, cache_len + seqlen, n_heads, head_dim)
+        values = repeat_kv(
+            values, self.n_rep
+        )  # (bs, cache_len + seqlen, n_heads, head_dim)
+
+        xq = xq.transpose(1, 2)  # (bs, n_heads, seqlen, head_dim)
+        keys = keys.transpose(1, 2)  # (bs, n_heads, cache_len + seqlen, head_dim)
+        values = values.transpose(1, 2)  # (bs, n_heads, cache_len + seqlen, head_dim)
+        scores = torch.matmul(xq, keys.transpose(2, 3)) / pymath.sqrt(self.head_dim)
+        if mask is not None:
+            scores = scores + mask  # (bs, n_heads, seqlen, cache_len + seqlen)
+        scores = torch.nn.functional.softmax(scores.float(), dim=-1).type_as(xq)
+        output = torch.matmul(scores, values)  # (bs, n_heads, seqlen, head_dim)
+        output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
+        return self.wo(output)
+
+
+references = {
+    get_add: torch.add,
+    get_mul: torch.mul,
+    get_matmul: torch.matmul,
+    get_rsqrt: torch.rsqrt,
+    get_sqr: torch.square,
+    get_mean: lambda x: torch.mean(x, dim=-1, keepdim=True),
+    get_silu: lambda x: torch.nn.functional.silu(x),
+    get_softmax: lambda x: torch.softmax(x, dim=-1),
+    get_polar: torch.polar,
+    get_triu: torch.triu,
+    get_outer: torch.outer,
+    get_linear: torch.nn.functional.linear,
+    get_repeat_kv: repeat_kv,
+    get_l2_norm: lambda x, eps: x
+    * torch.rsqrt(torch.mean(x.pow(2), dim=-1, keepdim=True) + eps),
+    get_rotary_emb: rotary_emb_ref,
+}
+
+
+# TODO: torch_dtype_to_mlir_type
+def to_ir_type(type_str, ctx):
+    if type_str == "f32":
+        return ir.F32Type.get(context=ctx)
+    elif type_str == "f64":
+        return ir.F64Type.get(context=ctx)
+    else:
+        raise ValueError(f"Unsupported type: 
{type_str}") + + +@pytest.mark.parametrize( + "op,shape,elem_type", + [ + (get_add, (4, 16), "f32"), + (get_mul, (4, 16), "f32"), + (get_matmul, (16, 16), "f32"), + (get_outer, (16,), "f32"), + ], +) +def test_bin_op(op, shape, elem_type): + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + tensor_type = ir.RankedTensorType.get(shape, elty) + + # Outer product produces [M, M] output for 1-D input of size M + if op == get_outer: + out_shape = (shape[0], shape[0]) + out_tensor_type = ir.RankedTensorType.get(out_shape, elty) + else: + out_tensor_type = tensor_type + + @func.FuncOp.from_py_func( + tensor_type, tensor_type, out_tensor_type, name="bin_op" + ) + def bin_op(a, b, out): + op(a, b, out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type(elem_type, ctx) + module = generate_module(ctx, ir_type) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("bin_op") + + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + a = torch.randn(*shape, dtype=torch_dtype) + b = torch.randn(*shape, dtype=torch_dtype) + out_ref = references[op](a, b) + out = torch.empty_like(out_ref) + out.zero_() + + a_mem = get_ranked_memref_descriptor(a.numpy()) + b_mem = get_ranked_memref_descriptor(b.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([a_mem, b_mem, out_mem]) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +@pytest.mark.parametrize( + "op,shape,elem_type", + [ + (get_rsqrt, (4, 16), "f32"), + (get_mean, (4, 16), "f32"), + (get_sqr, (4, 16), "f32"), + (get_silu, (4, 16), "f32"), + (get_softmax, (4, 16), "f32"), + (get_triu, (4, 4), "f32"), + ], +) +def test_unary_op(op, shape, elem_type): + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + tensor_type = ir.RankedTensorType.get(shape, elty) + + # For mean operation, output has different shape (reduction on last dim) + if op == get_mean: + out_shape = list(shape) + out_shape[-1] = 1 + out_tensor_type = ir.RankedTensorType.get(out_shape, elty) + else: + out_tensor_type = tensor_type + + @func.FuncOp.from_py_func(tensor_type, out_tensor_type, name="unary_op") + def unary_op(a, out): + op(a, out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type(elem_type, ctx) + module = generate_module(ctx, ir_type) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("unary_op") + + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + a = torch.randn(*shape, dtype=torch_dtype) + out_ref = references[op](a) + out = torch.empty_like(out_ref) + + a_mem = get_ranked_memref_descriptor(a.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([a_mem, out_mem]) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +@pytest.mark.parametrize("shape,elem_type", [((4, 16), "f32")]) +def test_rms_norm(shape, elem_type): + eps = 1e-5 + + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = 
ir.Module.create() + with ir.InsertionPoint(module.body): + input_type = ir.RankedTensorType.get(shape, elty) + + @func.FuncOp.from_py_func(input_type, input_type, name="rms_norm") + def rms_norm(a, out): + get_l2_norm(a, out, eps) + + return module + + ctx = ir.Context() + ir_type = to_ir_type(elem_type, ctx) + module = generate_module(ctx, ir_type) + print(module) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("rms_norm") + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + a = torch.randn(*shape, dtype=torch_dtype) + out_ref = references[get_l2_norm](a, eps) + out = torch.empty_like(out_ref) + a_mem = get_ranked_memref_descriptor(a.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([a_mem, out_mem]) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +@pytest.mark.parametrize( + "shape,in_features,out_features", + [ + ((4,), 16, 32), + ((1,), 8, 16), + ((8,), 32, 64), + ((2,), 64, 32), + ((2, 4), 32, 32), + ((3, 5, 7), 16, 24), + ], +) +def test_linear(shape, in_features, out_features): + def generate_module(ctx, elty, input_shape, in_feat, out_feat): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + input_type = ir.RankedTensorType.get( + list(input_shape) + [in_feat], elty + ) + weight_type = ir.RankedTensorType.get((out_feat, in_feat), elty) + bias_type = ir.RankedTensorType.get((out_feat,), elty) + output_type = ir.RankedTensorType.get( + list(input_shape) + [out_feat], elty + ) + + @func.FuncOp.from_py_func( + input_type, weight_type, bias_type, output_type, name="linear_op" + ) + def linear_op(x, w, b, out): + get_linear(x, w, b, out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type, shape, in_features, out_features) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("linear_op") + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + x = torch.randn(*shape, in_features, dtype=torch_dtype) + w = torch.randn(out_features, in_features, dtype=torch_dtype) + b = torch.randn(out_features, dtype=torch_dtype) + out_ref = references[get_linear](x, w, b) + out = torch.empty_like(out_ref) + out.zero_() + x_mem = get_ranked_memref_descriptor(x.numpy()) + w_mem = get_ranked_memref_descriptor(w.numpy()) + b_mem = get_ranked_memref_descriptor(b.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([x_mem, w_mem, b_mem, out_mem]) + func_ptr(args) + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +def test_polar(): + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + tensor_type = ir.RankedTensorType.get((4, 16), elty) + complex_tensor_type = ir.RankedTensorType.get( + (4, 16), ir.ComplexType.get(elty) + ) + + @func.FuncOp.from_py_func( + tensor_type, tensor_type, complex_tensor_type, name="polar_op" + ) + def polar_op(magnitude, angle, out): + get_polar(magnitude, angle, out) + + return module + + ctx = ir.Context() + ir_type = 
to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("polar_op") + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + magnitude = torch.randn(4, 16, dtype=torch_dtype) + angle = torch.randn(4, 16, dtype=torch_dtype) + out_ref = references[get_polar](magnitude, angle) + out = torch.empty_like(out_ref) + magnitude_mem = get_ranked_memref_descriptor(magnitude.numpy()) + angle_mem = get_ranked_memref_descriptor(angle.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([magnitude_mem, angle_mem, out_mem]) + func_ptr(args) + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +def test_repeat_kv(): + def generate_module(ctx, elty, n_rep): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get((2, 512, 8, 64), elty) + out_type = ir.RankedTensorType.get((2, 512, 8 * n_rep, 64), elty) + + @func.FuncOp.from_py_func(x_type, out_type, name="repeat_kv_op") + def repeat_kv_op(x, out): + get_repeat_kv(x, n_rep, out) + + return module + + n_rep = 4 + ctx = ir.Context() + ir_type = to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type, n_rep) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("repeat_kv_op") + + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + x = torch.randn(2, 512, 8, 64, dtype=torch_dtype) + out_ref = references[get_repeat_kv](x, n_rep) + out = torch.empty_like(out_ref) + + x_mem = get_ranked_memref_descriptor(x.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([x_mem, out_mem]) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +def test_reshape_for_broadcast(): + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + freqs_cis_type = ir.RankedTensorType.get((512, 64), elty) + x_type = ir.RankedTensorType.get((2, 512, 32, 128), elty) + out_type = ir.RankedTensorType.get((1, 512, 1, 64), elty) + + @func.FuncOp.from_py_func( + freqs_cis_type, x_type, out_type, name="reshape_for_broadcast" + ) + def reshape_for_broadcast_op(freqs_cis, x, out): + get_reshape_for_broadcast(freqs_cis, x, out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("reshape_for_broadcast") + + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + freqs_cis = torch.randn(512, 64, dtype=torch_dtype) + x = torch.randn(2, 512, 32, 128, dtype=torch_dtype) + # Convert x to complex view as expected by reshape_for_broadcast + x_complex = torch.view_as_complex(x.reshape(*x.shape[:-1], -1, 2)) + out_ref = reshape_for_broadcast(freqs_cis, x_complex) + out = torch.empty_like(out_ref) + + freqs_cis_mem = 
get_ranked_memref_descriptor(freqs_cis.numpy()) + x_mem = get_ranked_memref_descriptor(x.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([freqs_cis_mem, x_mem, out_mem]) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +def test_view_as_complex(): + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + # Input should be reshaped to have last dim = 2 + x_type = ir.RankedTensorType.get((2, 512, 32, 64, 2), elty) + out_type = ir.RankedTensorType.get( + (2, 512, 32, 64), ir.ComplexType.get(elty) + ) + + @func.FuncOp.from_py_func(x_type, out_type, name="view_as_complex_op") + def view_as_complex_op(x, out): + get_view_as_complex(x, out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("view_as_complex_op") + + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + x = torch.randn(2, 512, 32, 128, dtype=torch_dtype) + x_reshaped = x.reshape(2, 512, 32, 64, 2) + out_ref = torch.view_as_complex(x_reshaped) + out = torch.empty_like(out_ref) + + x_mem = get_ranked_memref_descriptor(x_reshaped.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([x_mem, out_mem]) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +def test_view_as_real(): + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get( + (2, 512, 32, 64), ir.ComplexType.get(elty) + ) + out_type = ir.RankedTensorType.get((2, 512, 32, 64, 2), elty) + + @func.FuncOp.from_py_func(x_type, out_type, name="as_real_op") + def as_real_op(x, out): + get_view_as_real(x, out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("as_real_op") + + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + x = torch.randn(2, 512, 32, 64, 2, dtype=torch_dtype) + x_complex = torch.view_as_complex(x) + out_ref = torch.view_as_real(x_complex) + out = torch.empty_like(out_ref) + + x_mem = get_ranked_memref_descriptor(x_complex.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args([x_mem, out_mem]) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +@pytest.mark.parametrize( + "batch_size,seq_len,n_heads,head_dim,n_kv_heads,elem_type", + [(2, 512, 32, 128, 8, "f32")], +) +def test_rotary_emb(batch_size, seq_len, n_heads, head_dim, n_kv_heads, elem_type): + def generate_module(ctx, elty, xq_shape, xk_shape, freqs_cis_shape): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + xq_type = ir.RankedTensorType.get(xq_shape, elty) + xk_type = ir.RankedTensorType.get(xk_shape, elty) + freqs_cis_type = 
ir.RankedTensorType.get(freqs_cis_shape, elty) + + @func.FuncOp.from_py_func( + xq_type, + xk_type, + freqs_cis_type, + xq_type, + xk_type, + name="rotary_emb", + ) + def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): + get_rotary_emb(xq, xk, freqs_cis, xq_out, xk_out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type(elem_type, ctx) + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + xq_shape = (batch_size, seq_len, n_heads, head_dim) + xk_shape = (batch_size, seq_len, n_kv_heads, head_dim) + freqs_cis_shape = (seq_len, head_dim // 2) + xq = torch.randn(*xq_shape, dtype=torch_dtype) + xk = torch.randn(*xk_shape, dtype=torch_dtype) + freqs_cis = torch.randn(*freqs_cis_shape, dtype=torch_dtype) + xq_out, xk_out = references[get_rotary_emb](xq, xk, freqs_cis) + + module = generate_module( + ctx, + xq_shape=xq_shape, + xk_shape=xk_shape, + freqs_cis_shape=freqs_cis_shape, + elty=ir_type, + ) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("rotary_emb") + + out1 = torch.empty_like(xq_out) + out2 = torch.empty_like(xk_out) + + a_mem = get_ranked_memref_descriptor(xq.numpy()) + b_mem = get_ranked_memref_descriptor(xk.numpy()) + freqs_cis_mem = get_ranked_memref_descriptor(freqs_cis.numpy()) + out1_mem = get_ranked_memref_descriptor(out1.numpy()) + out2_mem = get_ranked_memref_descriptor(out2.numpy()) + args = lh_utils.memrefs_to_packed_args( + [a_mem, b_mem, freqs_cis_mem, out1_mem, out2_mem] + ) + func_ptr(args) + + assert torch.allclose(out1, xq_out, rtol=0.01, atol=0.01, equal_nan=True) + assert torch.allclose(out2, xk_out, rtol=0.01, atol=0.01, equal_nan=True) + + +def test_feed_forward(): + def generate_module(ctx, elty): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + input_type = ir.RankedTensorType.get((4, 16), elty) + hidden_type = ir.RankedTensorType.get((4, 64), elty) + output_type = ir.RankedTensorType.get((4, 16), elty) + weight1_type = ir.RankedTensorType.get((64, 16), elty) + bias1_type = ir.RankedTensorType.get((64,), elty) + weight2_type = ir.RankedTensorType.get((16, 64), elty) + bias2_type = ir.RankedTensorType.get((16,), elty) + weight3_type = ir.RankedTensorType.get((64, 16), elty) + bias3_type = ir.RankedTensorType.get((64,), elty) + + @func.FuncOp.from_py_func( + input_type, + weight1_type, + bias1_type, + weight2_type, + bias2_type, + weight3_type, + bias3_type, + output_type, + name="feed_forward", + ) + def feed_forward(x, w1, b1, w2, b2, w3, b3, out): + # Compute hidden = linear(x, w1, b1) + hidden_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + hidden = get_linear(x, w1, b1, hidden_uninit) + + # Compute hidden_silu = silu(hidden) + hidden_silu_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + hidden_silu = get_silu(hidden, hidden_silu_uninit) + + # Compute gate = linear(x, w3, b3) + gate_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + gate = get_linear(x, w3, b3, gate_uninit) + + # Compute activated = hidden_silu * gate + activated_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + activated = get_mul(hidden_silu, gate, activated_uninit) + + # Compute out = linear(activated, w2, b2) + get_linear(activated, w2, b2, out) + + return module + + ctx = ir.Context() + ir_type = to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type) + bufferize_module(ctx, module) + 
schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("feed_forward") + + torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) + x = torch.randn(4, 16, dtype=torch_dtype) + w1 = torch.randn(64, 16, dtype=torch_dtype) + b1 = torch.randn(64, dtype=torch_dtype) + w2 = torch.randn(16, 64, dtype=torch_dtype) + b2 = torch.randn(16, dtype=torch_dtype) + w3 = torch.randn(64, 16, dtype=torch_dtype) + b3 = torch.randn(64, dtype=torch_dtype) + + hidden_ref = torch.nn.functional.linear(x, w1, b1) + activated_ref = torch.nn.functional.silu(hidden_ref) + activated_ref *= torch.nn.functional.linear(x, w3, b3) + out_ref = torch.nn.functional.linear(activated_ref, w2, b2) + out = torch.empty_like(out_ref) + out.zero_() + x_mem = get_ranked_memref_descriptor(x.numpy()) + w1_mem = get_ranked_memref_descriptor(w1.numpy()) + b1_mem = get_ranked_memref_descriptor(b1.numpy()) + w2_mem = get_ranked_memref_descriptor(w2.numpy()) + b2_mem = get_ranked_memref_descriptor(b2.numpy()) + w3_mem = get_ranked_memref_descriptor(w3.numpy()) + b3_mem = get_ranked_memref_descriptor(b3.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args( + [x_mem, w1_mem, b1_mem, w2_mem, b2_mem, w3_mem, b3_mem, out_mem] + ) + func_ptr(args) + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +def test_smoke_standalone_attention(): + args = ModelArgs( + dim=32, + n_layers=1, + n_heads=4, + n_kv_heads=2, + vocab_size=1000, + multiple_of=8, + norm_eps=1e-5, + max_batch_size=2, + max_seq_len=16, + ) + + attention = StandaloneAttention(args) + + batch_size = 2 + seq_len = 4 + x = torch.randn(batch_size, seq_len, args.dim) + start_pos = 0 + + freqs_cis = torch.randn( + seq_len, args.dim // args.n_heads // 2, dtype=torch.complex64 + ) + + mask = torch.full((batch_size, args.n_heads, seq_len, seq_len), float("-inf")) + mask = torch.triu(mask, diagonal=1) + + output = attention(x, start_pos, freqs_cis, mask) + + assert output.shape == ( + batch_size, + seq_len, + args.dim, + ), f"Expected shape {(batch_size, seq_len, args.dim)}, got {output.shape}" + + assert not torch.isnan(output).any(), "Output contains NaN" + assert not torch.isinf(output).any(), "Output contains inf" + + +def test_attention_fwd(): + model_args = ModelArgs( + dim=32, # Small for testing + n_layers=1, + n_heads=4, + n_kv_heads=2, # Test GQA + vocab_size=1000, + multiple_of=8, + norm_eps=1e-5, + max_batch_size=2, + max_seq_len=8, + ) + + batch = 2 + seq_len = 4 + dim = model_args.dim + n_heads = model_args.n_heads + n_kv_heads = model_args.n_kv_heads + head_dim = dim // n_heads + + def generate_module(ctx, elty, args): + with ctx, ir.Location.unknown(): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + wq_type = ir.RankedTensorType.get([n_heads * head_dim, dim], elty) + wk_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wv_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wo_type = ir.RankedTensorType.get([dim, n_heads * head_dim], elty) + freqs_cis_type = ir.RankedTensorType.get([seq_len, head_dim // 2], elty) + mask_type = ir.RankedTensorType.get( + [batch, n_heads, seq_len, seq_len], elty + ) + out_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + + @func.FuncOp.from_py_func( + x_type, + wq_type, + wk_type, + wv_type, + 
wo_type, + freqs_cis_type, + mask_type, + out_type, + name="attention_op", + ) + def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out): + get_attention(args, x, wq, wk, wv, wo, freqs_cis, mask, out) + + return module + + reference = StandaloneAttention(model_args) + + torch_dtype = torch.float32 + x = torch.randn(batch, seq_len, dim, dtype=torch_dtype) + freqs_cis_real = torch.randn(seq_len, head_dim // 2, dtype=torch_dtype) + freqs_cis_complex = torch.complex(freqs_cis_real, torch.zeros_like(freqs_cis_real)) + mask = torch.full( + (batch, n_heads, seq_len, seq_len), float("-inf"), dtype=torch_dtype + ) + mask = torch.triu(mask, diagonal=1) + with torch.no_grad(): + wq = reference.wq.weight.data.clone() + wk = reference.wk.weight.data.clone() + wv = reference.wv.weight.data.clone() + wo = reference.wo.weight.data.clone() + + # Run reference forward + out_ref = reference(x, start_pos=0, freqs_cis=freqs_cis_complex, mask=mask) + + ctx = ir.Context() + ir_type = to_ir_type("f32", ctx) + module = generate_module(ctx, ir_type, model_args) + bufferize_module(ctx, module) + schedule = create_schedule(ctx) + apply_schedule(module, schedule) + pm = create_pass_pipeline(ctx) + pm.run(module.operation) + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("attention_op") + + out = torch.empty_like(out_ref) + x_mem = get_ranked_memref_descriptor(x.numpy()) + wq_mem = get_ranked_memref_descriptor(wq.numpy()) + wk_mem = get_ranked_memref_descriptor(wk.numpy()) + wv_mem = get_ranked_memref_descriptor(wv.numpy()) + wo_mem = get_ranked_memref_descriptor(wo.numpy()) + freqs_cis_mem = get_ranked_memref_descriptor(freqs_cis_real.numpy()) + mask_mem = get_ranked_memref_descriptor(mask.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + args = lh_utils.memrefs_to_packed_args( + [x_mem, wq_mem, wk_mem, wv_mem, wo_mem, freqs_cis_mem, mask_mem, out_mem] + ) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) diff --git a/python/lighthouse/utils/__init__.py b/python/lighthouse/utils/__init__.py index 4cff9a5..46a2971 100644 --- a/python/lighthouse/utils/__init__.py +++ b/python/lighthouse/utils/__init__.py @@ -6,6 +6,7 @@ memrefs_to_packed_args, torch_to_memref, torch_to_packed_args, + mlir_type_to_torch_dtype, ) __all__ = [ diff --git a/python/lighthouse/utils/runtime_args.py b/python/lighthouse/utils/runtime_args.py index 6719896..eb6b22a 100644 --- a/python/lighthouse/utils/runtime_args.py +++ b/python/lighthouse/utils/runtime_args.py @@ -4,6 +4,7 @@ from mlir.runtime.np_to_memref import ( get_ranked_memref_descriptor, ) +from mlir import ir def get_packed_arg(ctypes_args) -> list[ctypes.c_void_p]: @@ -60,3 +61,37 @@ def torch_to_packed_args(inputs: list[torch.Tensor]) -> list[ctypes.c_void_p]: """ memrefs = [torch_to_memref(input) for input in inputs] return memrefs_to_packed_args(memrefs) + + +def mlir_type_to_torch_dtype(mlir_type: ir.Type): + """ + Convert an MLIR type to a PyTorch dtype. 
+ Args: + mlir_type: An MLIR type (e.g., ir.F32Type, ir.F64Type) + Returns: + Corresponding PyTorch dtype + """ + import torch + + if isinstance(mlir_type, ir.F32Type): + return torch.float32 + elif isinstance(mlir_type, ir.F64Type): + return torch.float64 + elif isinstance(mlir_type, ir.F16Type): + return torch.float16 + elif isinstance(mlir_type, ir.BF16Type): + return torch.bfloat16 + elif isinstance(mlir_type, ir.IntegerType): + width = mlir_type.width + if width == 64: + return torch.int64 + elif width == 32: + return torch.int32 + elif width == 16: + return torch.int16 + elif width == 8: + return torch.int8 + elif width == 1: + return torch.bool + + raise ValueError(f"Unsupported MLIR type: {mlir_type}") From c23a5cd2eed53637e2a48162004e40d825cb42b7 Mon Sep 17 00:00:00 2001 From: Petr Kurapov Date: Wed, 26 Nov 2025 13:15:31 +0100 Subject: [PATCH 02/10] Add a decorator for mlir context. --- python/examples/llama/test_llama3.py | 492 ++++++++++++++------------- 1 file changed, 254 insertions(+), 238 deletions(-) diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py index 981c238..94be923 100644 --- a/python/examples/llama/test_llama3.py +++ b/python/examples/llama/test_llama3.py @@ -1,4 +1,5 @@ from dataclasses import dataclass +import functools import math as pymath import pytest import torch @@ -18,6 +19,18 @@ from lighthouse import utils as lh_utils +def with_mlir_ctx(ctx: ir.Context): + def with_mlir_ctx_decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + with ctx, ir.Location.unknown(context=ctx): + return func(*args, **kwargs) + + return wrapper + + return with_mlir_ctx_decorator + + @dataclass class ModelArgs: dim: int = 4096 @@ -191,7 +204,6 @@ def mean_op(a_val, acc): # repeat_kv def get_repeat_kv(x: ir.Value, n_rep: int, out: ir.Value) -> ir.Value: - bs, slen, n_kv_heads, head_dim = x.type.shape if n_rep == 1: return x @@ -970,7 +982,6 @@ def compute_attn_out(attn_val, v_val, out_val): # Transpose back: (batch, n_heads, seq_len, head_dim) -> (batch, seq_len, n_heads, head_dim) attn_out_perm_shape = [batch, seq_len, n_heads, head_dim] - attn_out_perm_type = ir.RankedTensorType.get(attn_out_perm_shape, elty) attn_out_perm = tensor.EmptyOp(attn_out_perm_shape, elty).result d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] @@ -1174,30 +1185,31 @@ def to_ir_type(type_str, ctx): ], ) def test_bin_op(op, shape, elem_type): - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - tensor_type = ir.RankedTensorType.get(shape, elty) - - # Outer product produces [M, M] output for 1-D input of size M - if op == get_outer: - out_shape = (shape[0], shape[0]) - out_tensor_type = ir.RankedTensorType.get(out_shape, elty) - else: - out_tensor_type = tensor_type - - @func.FuncOp.from_py_func( - tensor_type, tensor_type, out_tensor_type, name="bin_op" - ) - def bin_op(a, b, out): - op(a, b, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + tensor_type = ir.RankedTensorType.get(shape, elty) + + # Outer product produces [M, M] output for 1-D input of size M + if op == get_outer: + out_shape = (shape[0], shape[0]) + out_tensor_type = ir.RankedTensorType.get(out_shape, elty) + else: + out_tensor_type = tensor_type + + @func.FuncOp.from_py_func( + tensor_type, tensor_type, out_tensor_type, name="bin_op" + ) + def bin_op(a, b, out): + op(a, b, out) 
return module - ctx = ir.Context() ir_type = to_ir_type(elem_type, ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1235,29 +1247,30 @@ def bin_op(a, b, out): ], ) def test_unary_op(op, shape, elem_type): - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - tensor_type = ir.RankedTensorType.get(shape, elty) - - # For mean operation, output has different shape (reduction on last dim) - if op == get_mean: - out_shape = list(shape) - out_shape[-1] = 1 - out_tensor_type = ir.RankedTensorType.get(out_shape, elty) - else: - out_tensor_type = tensor_type - - @func.FuncOp.from_py_func(tensor_type, out_tensor_type, name="unary_op") - def unary_op(a, out): - op(a, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + tensor_type = ir.RankedTensorType.get(shape, elty) + + # For mean operation, output has different shape (reduction on last dim) + if op == get_mean: + out_shape = list(shape) + out_shape[-1] = 1 + out_tensor_type = ir.RankedTensorType.get(out_shape, elty) + else: + out_tensor_type = tensor_type + + @func.FuncOp.from_py_func(tensor_type, out_tensor_type, name="unary_op") + def unary_op(a, out): + op(a, out) return module - ctx = ir.Context() ir_type = to_ir_type(elem_type, ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1284,21 +1297,22 @@ def unary_op(a, out): def test_rms_norm(shape, elem_type): eps = 1e-5 - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - input_type = ir.RankedTensorType.get(shape, elty) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + input_type = ir.RankedTensorType.get(shape, elty) - @func.FuncOp.from_py_func(input_type, input_type, name="rms_norm") - def rms_norm(a, out): - get_l2_norm(a, out, eps) + @func.FuncOp.from_py_func(input_type, input_type, name="rms_norm") + def rms_norm(a, out): + get_l2_norm(a, out, eps) return module - ctx = ir.Context() ir_type = to_ir_type(elem_type, ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) print(module) bufferize_module(ctx, module) schedule = create_schedule(ctx) @@ -1331,30 +1345,27 @@ def rms_norm(a, out): ], ) def test_linear(shape, in_features, out_features): - def generate_module(ctx, elty, input_shape, in_feat, out_feat): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - input_type = ir.RankedTensorType.get( - list(input_shape) + [in_feat], elty - ) - weight_type = ir.RankedTensorType.get((out_feat, in_feat), elty) - bias_type = ir.RankedTensorType.get((out_feat,), elty) - output_type = ir.RankedTensorType.get( - list(input_shape) + [out_feat], elty - ) - - @func.FuncOp.from_py_func( - input_type, weight_type, bias_type, output_type, name="linear_op" - ) - def linear_op(x, w, b, out): - get_linear(x, w, b, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty, input_shape, in_feat, out_feat): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + input_type = 
ir.RankedTensorType.get(list(input_shape) + [in_feat], elty) + weight_type = ir.RankedTensorType.get((out_feat, in_feat), elty) + bias_type = ir.RankedTensorType.get((out_feat,), elty) + output_type = ir.RankedTensorType.get(list(input_shape) + [out_feat], elty) + + @func.FuncOp.from_py_func( + input_type, weight_type, bias_type, output_type, name="linear_op" + ) + def linear_op(x, w, b, out): + get_linear(x, w, b, out) return module - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type, shape, in_features, out_features) + module = generate_module(ir_type, shape, in_features, out_features) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1380,26 +1391,27 @@ def linear_op(x, w, b, out): def test_polar(): - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - tensor_type = ir.RankedTensorType.get((4, 16), elty) - complex_tensor_type = ir.RankedTensorType.get( - (4, 16), ir.ComplexType.get(elty) - ) - - @func.FuncOp.from_py_func( - tensor_type, tensor_type, complex_tensor_type, name="polar_op" - ) - def polar_op(magnitude, angle, out): - get_polar(magnitude, angle, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + tensor_type = ir.RankedTensorType.get((4, 16), elty) + complex_tensor_type = ir.RankedTensorType.get( + (4, 16), ir.ComplexType.get(elty) + ) + + @func.FuncOp.from_py_func( + tensor_type, tensor_type, complex_tensor_type, name="polar_op" + ) + def polar_op(magnitude, angle, out): + get_polar(magnitude, angle, out) return module - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1422,23 +1434,24 @@ def polar_op(magnitude, angle, out): def test_repeat_kv(): - def generate_module(ctx, elty, n_rep): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - x_type = ir.RankedTensorType.get((2, 512, 8, 64), elty) - out_type = ir.RankedTensorType.get((2, 512, 8 * n_rep, 64), elty) + ctx = ir.Context() - @func.FuncOp.from_py_func(x_type, out_type, name="repeat_kv_op") - def repeat_kv_op(x, out): - get_repeat_kv(x, n_rep, out) + @with_mlir_ctx(ctx) + def generate_module(elty, n_rep): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get((2, 512, 8, 64), elty) + out_type = ir.RankedTensorType.get((2, 512, 8 * n_rep, 64), elty) + + @func.FuncOp.from_py_func(x_type, out_type, name="repeat_kv_op") + def repeat_kv_op(x, out): + get_repeat_kv(x, n_rep, out) return module n_rep = 4 - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type, n_rep) + module = generate_module(ir_type, n_rep) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1462,25 +1475,26 @@ def repeat_kv_op(x, out): def test_reshape_for_broadcast(): - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - freqs_cis_type = ir.RankedTensorType.get((512, 64), elty) - x_type = ir.RankedTensorType.get((2, 512, 32, 128), elty) - out_type = ir.RankedTensorType.get((1, 512, 1, 64), elty) - - @func.FuncOp.from_py_func( - 
freqs_cis_type, x_type, out_type, name="reshape_for_broadcast" - ) - def reshape_for_broadcast_op(freqs_cis, x, out): - get_reshape_for_broadcast(freqs_cis, x, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + freqs_cis_type = ir.RankedTensorType.get((512, 64), elty) + x_type = ir.RankedTensorType.get((2, 512, 32, 128), elty) + out_type = ir.RankedTensorType.get((1, 512, 1, 64), elty) + + @func.FuncOp.from_py_func( + freqs_cis_type, x_type, out_type, name="reshape_for_broadcast" + ) + def reshape_for_broadcast_op(freqs_cis, x, out): + get_reshape_for_broadcast(freqs_cis, x, out) return module - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1508,25 +1522,26 @@ def reshape_for_broadcast_op(freqs_cis, x, out): def test_view_as_complex(): - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - # Input should be reshaped to have last dim = 2 - x_type = ir.RankedTensorType.get((2, 512, 32, 64, 2), elty) - out_type = ir.RankedTensorType.get( - (2, 512, 32, 64), ir.ComplexType.get(elty) - ) - - @func.FuncOp.from_py_func(x_type, out_type, name="view_as_complex_op") - def view_as_complex_op(x, out): - get_view_as_complex(x, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + # Input should be reshaped to have last dim = 2 + x_type = ir.RankedTensorType.get((2, 512, 32, 64, 2), elty) + out_type = ir.RankedTensorType.get( + (2, 512, 32, 64), ir.ComplexType.get(elty) + ) + + @func.FuncOp.from_py_func(x_type, out_type, name="view_as_complex_op") + def view_as_complex_op(x, out): + get_view_as_complex(x, out) return module - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1551,24 +1566,23 @@ def view_as_complex_op(x, out): def test_view_as_real(): - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - x_type = ir.RankedTensorType.get( - (2, 512, 32, 64), ir.ComplexType.get(elty) - ) - out_type = ir.RankedTensorType.get((2, 512, 32, 64, 2), elty) - - @func.FuncOp.from_py_func(x_type, out_type, name="as_real_op") - def as_real_op(x, out): - get_view_as_real(x, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get((2, 512, 32, 64), ir.ComplexType.get(elty)) + out_type = ir.RankedTensorType.get((2, 512, 32, 64, 2), elty) + + @func.FuncOp.from_py_func(x_type, out_type, name="as_real_op") + def as_real_op(x, out): + get_view_as_real(x, out) return module - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1597,28 +1611,29 @@ def as_real_op(x, out): [(2, 512, 32, 128, 8, "f32")], ) def test_rotary_emb(batch_size, seq_len, n_heads, head_dim, n_kv_heads, elem_type): - def generate_module(ctx, 
elty, xq_shape, xk_shape, freqs_cis_shape): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - xq_type = ir.RankedTensorType.get(xq_shape, elty) - xk_type = ir.RankedTensorType.get(xk_shape, elty) - freqs_cis_type = ir.RankedTensorType.get(freqs_cis_shape, elty) - - @func.FuncOp.from_py_func( - xq_type, - xk_type, - freqs_cis_type, - xq_type, - xk_type, - name="rotary_emb", - ) - def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): - get_rotary_emb(xq, xk, freqs_cis, xq_out, xk_out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty, xq_shape, xk_shape, freqs_cis_shape): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + xq_type = ir.RankedTensorType.get(xq_shape, elty) + xk_type = ir.RankedTensorType.get(xk_shape, elty) + freqs_cis_type = ir.RankedTensorType.get(freqs_cis_shape, elty) + + @func.FuncOp.from_py_func( + xq_type, + xk_type, + freqs_cis_type, + xq_type, + xk_type, + name="rotary_emb", + ) + def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): + get_rotary_emb(xq, xk, freqs_cis, xq_out, xk_out) return module - ctx = ir.Context() ir_type = to_ir_type(elem_type, ctx) torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) xq_shape = (batch_size, seq_len, n_heads, head_dim) @@ -1630,7 +1645,6 @@ def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): xq_out, xk_out = references[get_rotary_emb](xq, xk, freqs_cis) module = generate_module( - ctx, xq_shape=xq_shape, xk_shape=xk_shape, freqs_cis_shape=freqs_cis_shape, @@ -1663,56 +1677,57 @@ def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): def test_feed_forward(): - def generate_module(ctx, elty): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - input_type = ir.RankedTensorType.get((4, 16), elty) - hidden_type = ir.RankedTensorType.get((4, 64), elty) - output_type = ir.RankedTensorType.get((4, 16), elty) - weight1_type = ir.RankedTensorType.get((64, 16), elty) - bias1_type = ir.RankedTensorType.get((64,), elty) - weight2_type = ir.RankedTensorType.get((16, 64), elty) - bias2_type = ir.RankedTensorType.get((16,), elty) - weight3_type = ir.RankedTensorType.get((64, 16), elty) - bias3_type = ir.RankedTensorType.get((64,), elty) - - @func.FuncOp.from_py_func( - input_type, - weight1_type, - bias1_type, - weight2_type, - bias2_type, - weight3_type, - bias3_type, - output_type, - name="feed_forward", - ) - def feed_forward(x, w1, b1, w2, b2, w3, b3, out): - # Compute hidden = linear(x, w1, b1) - hidden_uninit = tensor.EmptyOp(hidden_type.shape, elty).result - hidden = get_linear(x, w1, b1, hidden_uninit) - - # Compute hidden_silu = silu(hidden) - hidden_silu_uninit = tensor.EmptyOp(hidden_type.shape, elty).result - hidden_silu = get_silu(hidden, hidden_silu_uninit) - - # Compute gate = linear(x, w3, b3) - gate_uninit = tensor.EmptyOp(hidden_type.shape, elty).result - gate = get_linear(x, w3, b3, gate_uninit) - - # Compute activated = hidden_silu * gate - activated_uninit = tensor.EmptyOp(hidden_type.shape, elty).result - activated = get_mul(hidden_silu, gate, activated_uninit) - - # Compute out = linear(activated, w2, b2) - get_linear(activated, w2, b2, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + input_type = ir.RankedTensorType.get((4, 16), elty) + hidden_type = ir.RankedTensorType.get((4, 64), elty) + output_type = ir.RankedTensorType.get((4, 16), elty) + weight1_type = 
ir.RankedTensorType.get((64, 16), elty) + bias1_type = ir.RankedTensorType.get((64,), elty) + weight2_type = ir.RankedTensorType.get((16, 64), elty) + bias2_type = ir.RankedTensorType.get((16,), elty) + weight3_type = ir.RankedTensorType.get((64, 16), elty) + bias3_type = ir.RankedTensorType.get((64,), elty) + + @func.FuncOp.from_py_func( + input_type, + weight1_type, + bias1_type, + weight2_type, + bias2_type, + weight3_type, + bias3_type, + output_type, + name="feed_forward", + ) + def feed_forward(x, w1, b1, w2, b2, w3, b3, out): + # Compute hidden = linear(x, w1, b1) + hidden_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + hidden = get_linear(x, w1, b1, hidden_uninit) + + # Compute hidden_silu = silu(hidden) + hidden_silu_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + hidden_silu = get_silu(hidden, hidden_silu_uninit) + + # Compute gate = linear(x, w3, b3) + gate_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + gate = get_linear(x, w3, b3, gate_uninit) + + # Compute activated = hidden_silu * gate + activated_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + activated = get_mul(hidden_silu, gate, activated_uninit) + + # Compute out = linear(activated, w2, b2) + get_linear(activated, w2, b2, out) return module - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type) + module = generate_module(ir_type) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) @@ -1811,34 +1826,36 @@ def test_attention_fwd(): n_kv_heads = model_args.n_kv_heads head_dim = dim // n_heads - def generate_module(ctx, elty, args): - with ctx, ir.Location.unknown(): - module = ir.Module.create() - with ir.InsertionPoint(module.body): - x_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) - wq_type = ir.RankedTensorType.get([n_heads * head_dim, dim], elty) - wk_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) - wv_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) - wo_type = ir.RankedTensorType.get([dim, n_heads * head_dim], elty) - freqs_cis_type = ir.RankedTensorType.get([seq_len, head_dim // 2], elty) - mask_type = ir.RankedTensorType.get( - [batch, n_heads, seq_len, seq_len], elty - ) - out_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) - - @func.FuncOp.from_py_func( - x_type, - wq_type, - wk_type, - wv_type, - wo_type, - freqs_cis_type, - mask_type, - out_type, - name="attention_op", - ) - def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out): - get_attention(args, x, wq, wk, wv, wo, freqs_cis, mask, out) + ctx = ir.Context() + + @with_mlir_ctx(ctx) + def generate_module(elty, args): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + wq_type = ir.RankedTensorType.get([n_heads * head_dim, dim], elty) + wk_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wv_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wo_type = ir.RankedTensorType.get([dim, n_heads * head_dim], elty) + freqs_cis_type = ir.RankedTensorType.get([seq_len, head_dim // 2], elty) + mask_type = ir.RankedTensorType.get( + [batch, n_heads, seq_len, seq_len], elty + ) + out_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + + @func.FuncOp.from_py_func( + x_type, + wq_type, + wk_type, + wv_type, + wo_type, + freqs_cis_type, + mask_type, + out_type, + name="attention_op", + ) + def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out): + 
get_attention(args, x, wq, wk, wv, wo, freqs_cis, mask, out) return module @@ -1861,9 +1878,8 @@ def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out): # Run reference forward out_ref = reference(x, start_pos=0, freqs_cis=freqs_cis_complex, mask=mask) - ctx = ir.Context() ir_type = to_ir_type("f32", ctx) - module = generate_module(ctx, ir_type, model_args) + module = generate_module(ir_type, model_args) bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) From d93d68cd52b8d4afb8f57359abf9da0a5921ed4a Mon Sep 17 00:00:00 2001 From: Petr Kurapov Date: Wed, 26 Nov 2025 13:43:34 +0100 Subject: [PATCH 03/10] Elide most of .result --- python/examples/llama/test_llama3.py | 170 ++++++++++++--------------- 1 file changed, 78 insertions(+), 92 deletions(-) diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py index 94be923..905b318 100644 --- a/python/examples/llama/test_llama3.py +++ b/python/examples/llama/test_llama3.py @@ -87,7 +87,7 @@ def create_schedule(ctx: ir.Context) -> ir.Module: # Create entry point transformation sequence. with ir.InsertionPoint(schedule.body): - named_seq = transform.NamedSequenceOp( + named_seq = transform.named_sequence( "__transform_main", [transform.AnyOpType.get()], [], @@ -119,7 +119,7 @@ def create_schedule(ctx: ir.Context) -> ir.Module: anytype, mod, "convert-linalg-to-loops" ) # Cleanup. - transform.ApplyCommonSubexpressionEliminationOp(mod) + transform.apply_cse(mod) with ir.InsertionPoint(transform.ApplyPatternsOp(mod).patterns): transform.ApplyCanonicalizationPatternsOp() @@ -245,14 +245,14 @@ def get_silu(inputs: ir.Value, out: ir.Value) -> ir.Value: par_iterator_types, ) def silu_op(a, _out): - sigmoid = arith.DivFOp( + sigmoid = arith.divf( one, - arith.AddFOp( + arith.addf( one, - math.exp(arith.NegFOp(a).result), - ).result, - ).result - return arith.MulFOp(a, sigmoid).result + math.exp(arith.negf(a)), + ), + ) + return arith.mulf(a, sigmoid) return silu_op @@ -264,10 +264,10 @@ def get_softmax(a: ir.Value, out: ir.Value) -> ir.Value: reduced_shape = list(a.type.shape) reduced_shape[-1] = 1 - max_uninit = tensor.EmptyOp(reduced_shape, elty) + max_uninit = tensor.empty(reduced_shape, elty) - neg_inf = arith.ConstantOp(elty, float("-inf")) - max_init = linalg.fill(neg_inf, outs=[max_uninit.result]) + neg_inf = arith.constant(elty, float("-inf")) + max_init = linalg.fill(neg_inf, outs=[max_uninit]) reduce_map = affine_map( a.type.rank, @@ -288,33 +288,33 @@ def get_softmax(a: ir.Value, out: ir.Value) -> ir.Value: iterator_types, ) def compute_max(val, acc): - return arith.MaximumFOp(val, acc).result + return arith.maximumf(val, acc) - shifted_uninit = tensor.EmptyOp(a.type.shape, elty) + shifted_uninit = tensor.empty(a.type.shape, elty) @linalg.generic( [a, compute_max], - [shifted_uninit.result], + [shifted_uninit], [identity_map, reduce_map, identity_map], [parallel] * a.type.rank, ) def subtract_max(val, max_val, _out): - return arith.SubFOp(val, max_val).result + return arith.subf(val, max_val) - exp_uninit = tensor.EmptyOp(a.type.shape, elty) + exp_uninit = tensor.empty(a.type.shape, elty) @linalg.generic( [subtract_max], - [exp_uninit.result], + [exp_uninit], [identity_map, identity_map], [parallel] * a.type.rank, ) def compute_exp(val, _out): return math.exp(val) - sum_uninit = tensor.EmptyOp(reduced_shape, elty) - zero = arith.ConstantOp(elty, 0.0) - sum_init = linalg.fill(zero, outs=[sum_uninit.result]) + sum_uninit = tensor.empty(reduced_shape, elty) + zero = 
arith.constant(elty, 0.0) + sum_init = linalg.fill(zero, outs=[sum_uninit]) @linalg.generic( [compute_exp], @@ -323,7 +323,7 @@ def compute_exp(val, _out): iterator_types, ) def compute_sum(val, acc): - return arith.AddFOp(val, acc).result + return arith.addf(val, acc) @linalg.generic( [compute_exp, compute_sum], @@ -332,7 +332,7 @@ def compute_sum(val, acc): [parallel] * a.type.rank, ) def divide_by_sum(exp_val, sum_val, _out): - return arith.DivFOp(exp_val, sum_val).result + return arith.divf(exp_val, sum_val) return divide_by_sum @@ -403,8 +403,8 @@ def get_linear(a: ir.Value, w: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value iterator_types, ) def matmul_op(a_elem, w_elem, out_elem): - prod = arith.MulFOp(a_elem, w_elem).result - return arith.AddFOp(out_elem, prod).result + prod = arith.mulf(a_elem, w_elem) + return arith.addf(out_elem, prod) out_dims = [ir.AffineDimExpr.get(d) for d in range(out_rank)] b_map = affine_map(out_rank, [out_dims[-1]]) @@ -419,7 +419,7 @@ def matmul_op(a_elem, w_elem, out_elem): bias_iterator_types, ) def add_bias_op(matmul_elem, b_elem, _out): - return arith.AddFOp(matmul_elem, b_elem).result + return arith.addf(matmul_elem, b_elem) return add_bias_op @@ -430,17 +430,17 @@ def get_l2_norm(a: ir.Value, out: ir.Value, eps: float = 1e-5) -> ir.Value: # Broadcast epsilon scalar to tensor with reduced shape reduced_shape = list(a.type.shape) reduced_shape[-1] = 1 - eps_const = arith.ConstantOp(elty, eps) - eps_tensor_uninit = tensor.EmptyOp(reduced_shape, elty) + eps_const = arith.constant(elty, eps) + eps_tensor_uninit = tensor.empty(reduced_shape, elty) eps_tensor = linalg.fill(eps_const, outs=[eps_tensor_uninit]) # Square the input - squared_input = tensor.EmptyOp(a.type.shape, elty) + squared_input = tensor.empty(a.type.shape, elty) sqr = get_sqr(a, squared_input) # Compute mean along last dimension reduced_shape = list(a.type.shape) reduced_shape[-1] = 1 - mean_uninit = tensor.EmptyOp(reduced_shape, elty) + mean_uninit = tensor.empty(reduced_shape, elty) mean = get_mean(sqr, mean_uninit) mean_plus_eps = get_add(mean, eps_tensor, mean_uninit) @@ -487,11 +487,11 @@ def get_polar(abs: ir.Value, angle: ir.Value, out: ir.Value) -> ir.Value: [parallel] * rank, ) def polar_convert(abs_val, angle_val, _out): - cos_val = math.CosOp(angle_val).result - sin_val = math.SinOp(angle_val).result - real_part = arith.MulFOp(abs_val, cos_val).result - imag_part = arith.MulFOp(abs_val, sin_val).result - return complex.CreateOp(ir.ComplexType.get(elty), real_part, imag_part).result + cos_val = math.cos(angle_val) + sin_val = math.sin(angle_val) + real_part = arith.mulf(abs_val, cos_val) + imag_part = arith.mulf(abs_val, sin_val) + return complex.create_(ir.ComplexType.get(elty), real_part, imag_part) return polar_convert @@ -510,7 +510,7 @@ def get_outer(a: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value: [parallel, parallel], ) def outer_product(a_val, b_val, _out): - return arith.MulFOp(a_val, b_val).result + return arith.mulf(a_val, b_val) return outer_product @@ -549,8 +549,7 @@ def get_complex_mul(a: ir.Value, b: ir.Value, out: ir.Value) -> ir.Value: [parallel] * rank_out, ) def complex_mul_op(a_val, b_val, _out): - result = complex.MulOp(a_val, b_val).result - return result + return complex.mul(a_val, b_val) return complex_mul_op @@ -578,9 +577,7 @@ def get_rotary_emb( # View xq as complex: (batch, seq_len, n_heads, head_dim//2, 2) -> (batch, seq_len, n_heads, head_dim//2) complex xq_complex_shape = [batch, seq_len, n_heads, head_dim // 2] - xq_complex_uninit = 
tensor.EmptyOp( - xq_complex_shape, ir.ComplexType.get(elty) - ).result + xq_complex_uninit = tensor.empty(xq_complex_shape, ir.ComplexType.get(elty)) xq_complex = get_view_as_complex(xq_reshaped, xq_complex_uninit) # same for xk @@ -595,22 +592,20 @@ def get_rotary_emb( ) xk_complex_shape = [batch, seq_len, n_kv_heads, head_dim // 2] - xk_complex_uninit = tensor.EmptyOp( - xk_complex_shape, ir.ComplexType.get(elty) - ).result + xk_complex_uninit = tensor.empty(xk_complex_shape, ir.ComplexType.get(elty)) xk_complex = get_view_as_complex(xk_reshaped, xk_complex_uninit) # Reshape freqs_cis for broadcasting: (seq_len, head_dim//2) -> (1, seq_len, 1, head_dim//2) freqs_broadcast_shape = [1, seq_len, 1, head_dim // 2] - freqs_broadcast_uninit = tensor.EmptyOp(freqs_broadcast_shape, elty).result + freqs_broadcast_uninit = tensor.empty(freqs_broadcast_shape, elty) freqs_broadcast = get_reshape_for_broadcast( freqs_cis, xq_complex, freqs_broadcast_uninit ) # cast freqs_broadcast to complex - freqs_broadcast_complex_uninit = tensor.EmptyOp( + freqs_broadcast_complex_uninit = tensor.empty( freqs_broadcast_shape, ir.ComplexType.get(elty) - ).result + ) d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] indexing_maps = [ @@ -626,28 +621,24 @@ def get_rotary_emb( ) def real_to_complex(r, out): zero = arith.constant(elty, 0.0) - return complex.CreateOp(ir.ComplexType.get(elty), r, zero).result + return complex.create_(ir.ComplexType.get(elty), r, zero) freqs_broadcast_complex = real_to_complex # Multiply xq_complex with freqs_broadcast_complex - xq_rotated_uninit = tensor.EmptyOp( - xq_complex_shape, ir.ComplexType.get(elty) - ).result + xq_rotated_uninit = tensor.empty(xq_complex_shape, ir.ComplexType.get(elty)) xq_rotated = get_complex_mul(xq_complex, freqs_broadcast_complex, xq_rotated_uninit) - xk_rotated_uninit = tensor.EmptyOp( - xk_complex_shape, ir.ComplexType.get(elty) - ).result + xk_rotated_uninit = tensor.empty(xk_complex_shape, ir.ComplexType.get(elty)) xk_rotated = get_complex_mul(xk_complex, freqs_broadcast_complex, xk_rotated_uninit) # view as real xq_real_shape = [batch, seq_len, n_heads, head_dim // 2, 2] - xq_real_uninit = tensor.EmptyOp(xq_real_shape, elty).result + xq_real_uninit = tensor.empty(xq_real_shape, elty) xq_real = get_view_as_real(xq_rotated, xq_real_uninit) xk_real_shape = [batch, seq_len, n_kv_heads, head_dim // 2, 2] - xk_real_uninit = tensor.EmptyOp(xk_real_shape, elty).result + xk_real_uninit = tensor.empty(xk_real_shape, elty) xk_real = get_view_as_real(xk_rotated, xk_real_uninit) # flatten back to original shape @@ -712,8 +703,7 @@ def get_view_as_complex(x: ir.Value, out: ir.Value) -> ir.Value: [parallel] * rank_out, ) def view_as_complex_op(r, i, _out): - cplx = complex.CreateOp(ir.ComplexType.get(elty), r, i).result - return cplx + return complex.create_(ir.ComplexType.get(elty), r, i) return view_as_complex_op @@ -743,7 +733,7 @@ def get_view_as_real(x: ir.Value, out: ir.Value) -> ir.Value: [parallel] * rank, ) def write_real(cplx, _out): - return complex.ReOp(cplx).result + return complex.re(cplx) @linalg.generic( [x], @@ -752,7 +742,7 @@ def write_real(cplx, _out): [parallel] * rank, ) def write_imag(cplx, _out): - return complex.ImOp(cplx).result + return complex.im(cplx) return write_imag @@ -778,8 +768,8 @@ def get_attention( # Q, K, V projections # xq = linear(x, wq) -> (batch, seq_len, n_heads * head_dim) xq_shape = [batch, seq_len, n_heads * head_dim] - xq_uninit = tensor.EmptyOp(xq_shape, elty).result - bq_zeros = tensor.EmptyOp([n_heads * 
head_dim], elty).result + xq_uninit = tensor.empty(xq_shape, elty) + bq_zeros = tensor.empty([n_heads * head_dim], elty) zero = arith.constant(elty, 0.0) bq = linalg.fill(zero, outs=[bq_zeros]) xq_flat = get_linear(x, wq, bq, xq_uninit) @@ -797,8 +787,8 @@ def get_attention( # xk = linear(x, wk) -> (batch, seq_len, n_kv_heads * head_dim) xk_shape = [batch, seq_len, n_kv_heads * head_dim] - xk_uninit = tensor.EmptyOp(xk_shape, elty).result - bk_zeros = tensor.EmptyOp([n_kv_heads * head_dim], elty).result + xk_uninit = tensor.empty(xk_shape, elty) + bk_zeros = tensor.empty([n_kv_heads * head_dim], elty) bk = linalg.fill(zero, outs=[bk_zeros]) xk_flat = get_linear(x, wk, bk, xk_uninit) @@ -815,8 +805,8 @@ def get_attention( # xv = linear(x, wv) -> (batch, seq_len, n_kv_heads * head_dim) xv_shape = [batch, seq_len, n_kv_heads * head_dim] - xv_uninit = tensor.EmptyOp(xv_shape, elty).result - bv_zeros = tensor.EmptyOp([n_kv_heads * head_dim], elty).result + xv_uninit = tensor.empty(xv_shape, elty) + bv_zeros = tensor.empty([n_kv_heads * head_dim], elty) bv = linalg.fill(zero, outs=[bv_zeros]) xv_flat = get_linear(x, wv, bv, xv_uninit) @@ -832,21 +822,17 @@ def get_attention( ) # Apply rotary embeddings - xq_rot_uninit = tensor.EmptyOp([batch, seq_len, n_heads, head_dim], elty).result - xk_rot_uninit = tensor.EmptyOp([batch, seq_len, n_kv_heads, head_dim], elty).result + xq_rot_uninit = tensor.empty([batch, seq_len, n_heads, head_dim], elty) + xk_rot_uninit = tensor.empty([batch, seq_len, n_kv_heads, head_dim], elty) get_rotary_emb(xq, xk, freqs_cis, xq_rot_uninit, xk_rot_uninit) xq_rot = xq_rot_uninit xk_rot = xk_rot_uninit # Repeat K/V if using GQA (n_kv_heads < n_heads) if n_rep > 1: - keys_repeated_uninit = tensor.EmptyOp( - [batch, seq_len, n_heads, head_dim], elty - ).result + keys_repeated_uninit = tensor.empty([batch, seq_len, n_heads, head_dim], elty) keys = get_repeat_kv(xk_rot, n_rep, keys_repeated_uninit) - values_repeated_uninit = tensor.EmptyOp( - [batch, seq_len, n_heads, head_dim], elty - ).result + values_repeated_uninit = tensor.empty([batch, seq_len, n_heads, head_dim], elty) values = get_repeat_kv(xv, n_rep, values_repeated_uninit) else: keys = xk_rot @@ -854,7 +840,7 @@ def get_attention( # Transpose for attention: (batch, n_heads, seq_len, head_dim) xq_t_shape = [batch, n_heads, seq_len, head_dim] - xq_t = tensor.EmptyOp(xq_t_shape, elty).result + xq_t = tensor.empty(xq_t_shape, elty) # Permute [0, 2, 1, 3] d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] @@ -873,7 +859,7 @@ def transpose_xq(val, _out): xq_transposed = transpose_xq # Transpose keys and values similarly - keys_t = tensor.EmptyOp(xq_t_shape, elty).result + keys_t = tensor.empty(xq_t_shape, elty) @linalg.generic( [keys], @@ -886,7 +872,7 @@ def transpose_k(val, _out): keys_transposed = transpose_k - values_t = tensor.EmptyOp(xq_t_shape, elty).result + values_t = tensor.empty(xq_t_shape, elty) @linalg.generic( [values], @@ -904,7 +890,7 @@ def transpose_v(val, _out): # keys_transposed: (batch, n_heads, seq_len, head_dim) -> transpose to (batch, n_heads, head_dim, seq_len) # scores: (batch, n_heads, seq_len, seq_len) scores_shape = [batch, n_heads, seq_len, seq_len] - scores_uninit = tensor.EmptyOp(scores_shape, elty).result + scores_uninit = tensor.empty(scores_shape, elty) scores_zeroed = linalg.fill(zero, outs=[scores_uninit]) # Batched matmul with transpose @@ -920,15 +906,15 @@ def transpose_v(val, _out): [parallel, parallel, parallel, parallel, reduction], ) def compute_scores(q_val, k_val, 
score_val): - prod = arith.MulFOp(q_val, k_val).result - return arith.AddFOp(score_val, prod).result + prod = arith.mulf(q_val, k_val) + return arith.addf(score_val, prod) scores_raw = compute_scores # Scale by 1/sqrt(head_dim) scale_val = 1.0 / pymath.sqrt(head_dim) scale_const = arith.constant(elty, scale_val) - scores_scaled_uninit = tensor.EmptyOp(scores_shape, elty).result + scores_scaled_uninit = tensor.empty(scores_shape, elty) d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] identity_map = affine_map(4, [d0, d1, d2, d3]) @@ -940,19 +926,19 @@ def compute_scores(q_val, k_val, score_val): [parallel] * 4, ) def scale_scores(score, _out): - return arith.MulFOp(score, scale_const).result + return arith.mulf(score, scale_const) scores_scaled = scale_scores # Apply mask if provided (add mask to scores) if mask is not None: - scores_masked_uninit = tensor.EmptyOp(scores_shape, elty).result + scores_masked_uninit = tensor.empty(scores_shape, elty) scores_final = get_add(scores_scaled, mask, scores_masked_uninit) else: scores_final = scores_scaled # Apply softmax - scores_softmax_uninit = tensor.EmptyOp(scores_shape, elty).result + scores_softmax_uninit = tensor.empty(scores_shape, elty) attn_weights = get_softmax(scores_final, scores_softmax_uninit) # Compute output: matmul(attn_weights, values) @@ -960,7 +946,7 @@ def scale_scores(score, _out): # values_transposed: (batch, n_heads, seq_len, head_dim) # output: (batch, n_heads, seq_len, head_dim) attn_out_shape = [batch, n_heads, seq_len, head_dim] - attn_out_uninit = tensor.EmptyOp(attn_out_shape, elty).result + attn_out_uninit = tensor.empty(attn_out_shape, elty) attn_out_zeroed = linalg.fill(zero, outs=[attn_out_uninit]) b, h, s1, s2, d = [ir.AffineDimExpr.get(i) for i in range(5)] @@ -975,14 +961,14 @@ def scale_scores(score, _out): [parallel, parallel, parallel, parallel, reduction], ) def compute_attn_out(attn_val, v_val, out_val): - prod = arith.MulFOp(attn_val, v_val).result - return arith.AddFOp(out_val, prod).result + prod = arith.mulf(attn_val, v_val) + return arith.addf(out_val, prod) attn_out = compute_attn_out # Transpose back: (batch, n_heads, seq_len, head_dim) -> (batch, seq_len, n_heads, head_dim) attn_out_perm_shape = [batch, seq_len, n_heads, head_dim] - attn_out_perm = tensor.EmptyOp(attn_out_perm_shape, elty).result + attn_out_perm = tensor.empty(attn_out_perm_shape, elty) d0, d1, d2, d3 = [ir.AffineDimExpr.get(i) for i in range(4)] from_map = affine_map(4, [d0, d1, d2, d3]) @@ -1009,7 +995,7 @@ def transpose_out(val, _out): ) # Output projection - bo_zeros = tensor.EmptyOp([dim], elty).result + bo_zeros = tensor.empty([dim], elty) bo = linalg.fill(zero, outs=[bo_zeros]) output_final = get_linear(attn_out_flat, wo, bo, out) @@ -1706,19 +1692,19 @@ def generate_module(elty): ) def feed_forward(x, w1, b1, w2, b2, w3, b3, out): # Compute hidden = linear(x, w1, b1) - hidden_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + hidden_uninit = tensor.empty(hidden_type.shape, elty) hidden = get_linear(x, w1, b1, hidden_uninit) # Compute hidden_silu = silu(hidden) - hidden_silu_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + hidden_silu_uninit = tensor.empty(hidden_type.shape, elty) hidden_silu = get_silu(hidden, hidden_silu_uninit) # Compute gate = linear(x, w3, b3) - gate_uninit = tensor.EmptyOp(hidden_type.shape, elty).result + gate_uninit = tensor.empty(hidden_type.shape, elty) gate = get_linear(x, w3, b3, gate_uninit) # Compute activated = hidden_silu * gate - activated_uninit = 
tensor.EmptyOp(hidden_type.shape, elty).result + activated_uninit = tensor.empty(hidden_type.shape, elty) activated = get_mul(hidden_silu, gate, activated_uninit) # Compute out = linear(activated, w2, b2) From 2db24bc48f64919695e262e33089e7bf3cc86643 Mon Sep 17 00:00:00 2001 From: Petr Kurapov Date: Wed, 26 Nov 2025 13:48:37 +0100 Subject: [PATCH 04/10] Move bufferization and pm run into schedule application. --- python/examples/llama/test_llama3.py | 56 ++++++---------------------- 1 file changed, 12 insertions(+), 44 deletions(-) diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py index 905b318..415ea98 100644 --- a/python/examples/llama/test_llama3.py +++ b/python/examples/llama/test_llama3.py @@ -128,19 +128,22 @@ def create_schedule(ctx: ir.Context) -> ir.Module: return schedule +def bufferize_module(ctx: ir.Context, kernel: ir.Module) -> None: + with ctx: + pm = PassManager("builtin.module") + pm.add("one-shot-bufferize{bufferize-function-boundaries}") + pm.run(kernel.operation) + + def apply_schedule(kernel: ir.Module, schedule: ir.Module) -> None: + bufferize_module(kernel.context, kernel) interpreter.apply_named_sequence( payload_root=kernel, transform_root=schedule.body.operations[0], transform_module=schedule, ) - - -def bufferize_module(ctx: ir.Context, kernel: ir.Module) -> None: - with ctx: - pm = PassManager("builtin.module") - pm.add("one-shot-bufferize{bufferize-function-boundaries}") - pm.run(kernel.operation) + pm = create_pass_pipeline(kernel.context) + pm.run(kernel.operation) #### IR builders ##### @@ -1196,11 +1199,8 @@ def bin_op(a, b, out): ir_type = to_ir_type(elem_type, ctx) module = generate_module(ir_type) - bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) - pm = create_pass_pipeline(ctx) - pm.run(module.operation) eng = ExecutionEngine(module, opt_level=2) func_ptr = eng.lookup("bin_op") @@ -1257,11 +1257,8 @@ def unary_op(a, out): ir_type = to_ir_type(elem_type, ctx) module = generate_module(ir_type) - bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) - pm = create_pass_pipeline(ctx) - pm.run(module.operation) eng = ExecutionEngine(module, opt_level=2) func_ptr = eng.lookup("unary_op") @@ -1299,12 +1296,9 @@ def rms_norm(a, out): ir_type = to_ir_type(elem_type, ctx) module = generate_module(ir_type) - print(module) - bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) - pm = create_pass_pipeline(ctx) - pm.run(module.operation) + eng = ExecutionEngine(module, opt_level=2) func_ptr = eng.lookup("rms_norm") torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) @@ -1352,11 +1346,8 @@ def linear_op(x, w, b, out): ir_type = to_ir_type("f32", ctx) module = generate_module(ir_type, shape, in_features, out_features) - bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) - pm = create_pass_pipeline(ctx) - pm.run(module.operation) eng = ExecutionEngine(module, opt_level=2) func_ptr = eng.lookup("linear_op") @@ -1398,11 +1389,8 @@ def polar_op(magnitude, angle, out): ir_type = to_ir_type("f32", ctx) module = generate_module(ir_type) - bufferize_module(ctx, module) schedule = create_schedule(ctx) apply_schedule(module, schedule) - pm = create_pass_pipeline(ctx) - pm.run(module.operation) eng = ExecutionEngine(module, opt_level=2) func_ptr = eng.lookup("polar_op") @@ -1438,11 +1426,8 @@ def repeat_kv_op(x, out): n_rep = 4 ir_type = to_ir_type("f32", ctx) module 
= generate_module(ir_type, n_rep)
-    bufferize_module(ctx, module)
     schedule = create_schedule(ctx)
     apply_schedule(module, schedule)
-    pm = create_pass_pipeline(ctx)
-    pm.run(module.operation)
 
     eng = ExecutionEngine(module, opt_level=2)
     func_ptr = eng.lookup("repeat_kv_op")
@@ -1481,11 +1466,8 @@ def reshape_for_broadcast_op(freqs_cis, x, out):
 
     ir_type = to_ir_type("f32", ctx)
     module = generate_module(ir_type)
-    bufferize_module(ctx, module)
     schedule = create_schedule(ctx)
     apply_schedule(module, schedule)
-    pm = create_pass_pipeline(ctx)
-    pm.run(module.operation)
 
     eng = ExecutionEngine(module, opt_level=2)
     func_ptr = eng.lookup("reshape_for_broadcast")
@@ -1528,11 +1510,8 @@ def view_as_complex_op(x, out):
 
     ir_type = to_ir_type("f32", ctx)
     module = generate_module(ir_type)
-    bufferize_module(ctx, module)
     schedule = create_schedule(ctx)
     apply_schedule(module, schedule)
-    pm = create_pass_pipeline(ctx)
-    pm.run(module.operation)
 
     eng = ExecutionEngine(module, opt_level=2)
     func_ptr = eng.lookup("view_as_complex_op")
@@ -1569,11 +1548,8 @@ def as_real_op(x, out):
 
     ir_type = to_ir_type("f32", ctx)
     module = generate_module(ir_type)
-    bufferize_module(ctx, module)
     schedule = create_schedule(ctx)
     apply_schedule(module, schedule)
-    pm = create_pass_pipeline(ctx)
-    pm.run(module.operation)
 
     eng = ExecutionEngine(module, opt_level=2)
     func_ptr = eng.lookup("as_real_op")
@@ -1636,11 +1612,8 @@ def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out):
         freqs_cis_shape=freqs_cis_shape,
         elty=ir_type,
     )
-    bufferize_module(ctx, module)
     schedule = create_schedule(ctx)
     apply_schedule(module, schedule)
-    pm = create_pass_pipeline(ctx)
-    pm.run(module.operation)
 
     eng = ExecutionEngine(module, opt_level=2)
     func_ptr = eng.lookup("rotary_emb")
@@ -1714,11 +1687,8 @@ def feed_forward(x, w1, b1, w2, b2, w3, b3, out):
 
     ir_type = to_ir_type("f32", ctx)
     module = generate_module(ir_type)
-    bufferize_module(ctx, module)
     schedule = create_schedule(ctx)
     apply_schedule(module, schedule)
-    pm = create_pass_pipeline(ctx)
-    pm.run(module.operation)
 
     eng = ExecutionEngine(module, opt_level=2)
     func_ptr = eng.lookup("feed_forward")
@@ -1866,11 +1836,9 @@ def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out):
 
     ir_type = to_ir_type("f32", ctx)
     module = generate_module(ir_type, model_args)
-    bufferize_module(ctx, module)
     schedule = create_schedule(ctx)
     apply_schedule(module, schedule)
-    pm = create_pass_pipeline(ctx)
-    pm.run(module.operation)
+
     eng = ExecutionEngine(module, opt_level=2)
     func_ptr = eng.lookup("attention_op")
 

From c0d69370100ae47d2106c5a338c775efd9805366 Mon Sep 17 00:00:00 2001
From: Petr Kurapov
Date: Wed, 26 Nov 2025 15:35:54 +0100
Subject: [PATCH 05/10] Make linter happy.

---
 python/lighthouse/utils/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/lighthouse/utils/__init__.py b/python/lighthouse/utils/__init__.py
index 46a2971..474b748 100644
--- a/python/lighthouse/utils/__init__.py
+++ b/python/lighthouse/utils/__init__.py
@@ -13,6 +13,7 @@
     "get_packed_arg",
     "memref_to_ctype",
     "memrefs_to_packed_args",
+    "mlir_type_to_torch_dtype",
     "torch_to_memref",
     "torch_to_packed_args",
 ]

From fcac388bc99403715a2723532f4bb484f3ddabdd Mon Sep 17 00:00:00 2001
From: Petr Kurapov
Date: Wed, 26 Nov 2025 16:07:01 +0100
Subject: [PATCH 06/10] Run test_llama3 through lit.
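
Drop the dedicated pytest CI step and let lit discover the example
through a RUN line at the top of the test file instead. A minimal
sketch of the mechanism, assuming lit's standard textual substitution
(the expanded command shown is an illustration, not a verified
invocation):

    # First line of test_llama3.py:
    # RUN: %pytest %s
    #
    # lit.cfg.py registers the substitution ("%pytest", "uv run"),
    # so lit rewrites the RUN line to roughly:
    #   uv run python/examples/llama/test_llama3.py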
---
 .github/workflows/examples.yml       | 4 ----
 lit.cfg.py                           | 1 +
 python/examples/lit.local.cfg        | 1 -
 python/examples/llama/test_llama3.py | 2 ++
 4 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml
index 0bc26b7..3396fb1 100644
--- a/.github/workflows/examples.yml
+++ b/.github/workflows/examples.yml
@@ -29,7 +29,3 @@ jobs:
       run: |
         export FILECHECK=FileCheck-18 # Ubuntu's llvm-dev appends a version number.
         uv run lit python/examples # Makes sure to substitute FileCheck for $FILECHECK
-
-    - name: Run pytest-enabled examples as tests
-      run: |
-        uv run pytest python/examples

diff --git a/lit.cfg.py b/lit.cfg.py
index e8b7547..7ac027a 100644
--- a/lit.cfg.py
+++ b/lit.cfg.py
@@ -11,6 +11,7 @@
 config.test_source_root = os.path.dirname(__file__)
 config.test_exec_root = os.path.dirname(__file__) + "/lit.out"
 
+config.substitutions.append(("%pytest", "uv run pytest"))
 config.substitutions.append(("%PYTHON", "uv run"))
 if filecheck_path := os.environ.get("FILECHECK"):
     config.substitutions.append(("FileCheck", filecheck_path))

diff --git a/python/examples/lit.local.cfg b/python/examples/lit.local.cfg
index d214968..73171b0 100644
--- a/python/examples/lit.local.cfg
+++ b/python/examples/lit.local.cfg
@@ -1,2 +1 @@
 config.suffixes = {'.py'}
-config.excludes = ['llama']

diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py
index 415ea98..b79c402 100644
--- a/python/examples/llama/test_llama3.py
+++ b/python/examples/llama/test_llama3.py
@@ -1,3 +1,5 @@
+# RUN: %pytest %s
+
 from dataclasses import dataclass
 import functools
 import math as pymath

From db0237b1d6292277ed0df1645473476eec61d96d Mon Sep 17 00:00:00 2001
From: Petr Kurapov
Date: Wed, 26 Nov 2025 18:56:47 +0100
Subject: [PATCH 07/10] Make everything dependent on the current context.
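
Instead of threading an explicit ir.Context through every helper,
rely on the context and location that MLIR's Python bindings keep on
a thread-local stack: each test runs under the new
with_mlir_ctx_and_location decorator, which opens a fresh context,
and builders such as create_schedule() or to_ir_type() simply pick
it up. A rough sketch of the pattern (the test body is illustrative,
not part of this change):

    import functools
    from mlir import ir

    def with_mlir_ctx_and_location(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Types, attributes, and ops created inside resolve
            # against this context without passing it explicitly.
            with ir.Context(), ir.Location.unknown():
                return func(*args, **kwargs)
        return wrapper

    @with_mlir_ctx_and_location
    def test_f32_type():
        assert str(ir.F32Type.get()) == "f32"

A fresh context per test also keeps parametrized cases isolated from
one another rather than sharing a single long-lived context.
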
--- python/examples/llama/test_llama3.py | 219 +++++++++++---------------- 1 file changed, 91 insertions(+), 128 deletions(-) diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py index b79c402..72dcb1d 100644 --- a/python/examples/llama/test_llama3.py +++ b/python/examples/llama/test_llama3.py @@ -7,6 +7,7 @@ import torch from typing import Optional, Tuple + from mlir import ir from mlir.dialects import transform, func, linalg, tensor, arith, complex, math from mlir.dialects.transform import structured @@ -21,16 +22,13 @@ from lighthouse import utils as lh_utils -def with_mlir_ctx(ctx: ir.Context): - def with_mlir_ctx_decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - with ctx, ir.Location.unknown(context=ctx): - return func(*args, **kwargs) - - return wrapper +def with_mlir_ctx_and_location(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + with ir.Context(), ir.Location.unknown(): + return func(*args, **kwargs) - return with_mlir_ctx_decorator + return wrapper @dataclass @@ -58,21 +56,20 @@ def affine_map(dim_count, exprs, *, symb_count=0): def create_pass_pipeline(ctx: ir.Context) -> PassManager: - with ctx: - pm = PassManager("builtin.module") - pm.add("convert-scf-to-cf") - pm.add("expand-strided-metadata") - pm.add("lower-affine") - pm.add("finalize-memref-to-llvm") - pm.add("convert-func-to-llvm") - pm.add("convert-to-llvm") - pm.add("reconcile-unrealized-casts") - pm.add("cse") - pm.add("canonicalize") + pm = PassManager("builtin.module") + pm.add("convert-scf-to-cf") + pm.add("expand-strided-metadata") + pm.add("lower-affine") + pm.add("finalize-memref-to-llvm") + pm.add("convert-func-to-llvm") + pm.add("convert-to-llvm") + pm.add("reconcile-unrealized-casts") + pm.add("cse") + pm.add("canonicalize") return pm -def create_schedule(ctx: ir.Context) -> ir.Module: +def create_schedule() -> ir.Module: """ Create an MLIR module containing transformation schedule. The schedule provides partial lowering to scalar operations. @@ -80,61 +77,51 @@ def create_schedule(ctx: ir.Context) -> ir.Module: Args: ctx: MLIR context. """ - with ctx, ir.Location.unknown(context=ctx): - # Create transform module. - schedule = ir.Module.create() - schedule.operation.attributes["transform.with_named_sequence"] = ( - ir.UnitAttr.get() + # Create transform module. + schedule = ir.Module.create() + schedule.operation.attributes["transform.with_named_sequence"] = ir.UnitAttr.get() + + # Create entry point transformation sequence. + with ir.InsertionPoint(schedule.body): + named_seq = transform.named_sequence( + "__transform_main", + [transform.AnyOpType.get()], + [], + arg_attrs=[{"transform.readonly": ir.UnitAttr.get()}], ) - # Create entry point transformation sequence. - with ir.InsertionPoint(schedule.body): - named_seq = transform.named_sequence( - "__transform_main", - [transform.AnyOpType.get()], - [], - arg_attrs=[{"transform.readonly": ir.UnitAttr.get()}], - ) - - # Create the schedule. - with ir.InsertionPoint(named_seq.body): - # For simplicity, use generic transform matchers. - anytype = transform.AnyOpType.get() + # Create the schedule. + with ir.InsertionPoint(named_seq.body): + # For simplicity, use generic transform matchers. + anytype = transform.AnyOpType.get() - # Find the kernel's function op. - func = structured.MatchOp.match_op_names( - named_seq.bodyTarget, ["func.func"] - ) + # Find the kernel's function op. 
+ func = structured.MatchOp.match_op_names(named_seq.bodyTarget, ["func.func"]) - # Use C interface wrappers - required to make function executable after jitting. - func = transform.apply_registered_pass( - anytype, func, "llvm-request-c-wrappers" - ) + # Use C interface wrappers - required to make function executable after jitting. + func = transform.apply_registered_pass(anytype, func, "llvm-request-c-wrappers") - # Find the kernel's module op. - mod = transform.get_parent_op( - anytype, func, op_name="builtin.module", deduplicate=True - ) + # Find the kernel's module op. + mod = transform.get_parent_op( + anytype, func, op_name="builtin.module", deduplicate=True + ) - # Naive lowering to loops. - mod = transform.apply_registered_pass( - anytype, mod, "convert-linalg-to-loops" - ) - # Cleanup. - transform.apply_cse(mod) - with ir.InsertionPoint(transform.ApplyPatternsOp(mod).patterns): - transform.ApplyCanonicalizationPatternsOp() + # Naive lowering to loops. + mod = transform.apply_registered_pass(anytype, mod, "convert-linalg-to-loops") + # Cleanup. + transform.apply_cse(mod) + with ir.InsertionPoint(transform.ApplyPatternsOp(mod).patterns): + transform.ApplyCanonicalizationPatternsOp() - # Terminate the schedule. - transform.YieldOp() + # Terminate the schedule. + transform.YieldOp() return schedule def bufferize_module(ctx: ir.Context, kernel: ir.Module) -> None: - with ctx: - pm = PassManager("builtin.module") - pm.add("one-shot-bufferize{bufferize-function-boundaries}") - pm.run(kernel.operation) + pm = PassManager("builtin.module") + pm.add("one-shot-bufferize{bufferize-function-boundaries}") + pm.run(kernel.operation) def apply_schedule(kernel: ir.Module, schedule: ir.Module) -> None: @@ -1157,15 +1144,16 @@ def forward( # TODO: torch_dtype_to_mlir_type -def to_ir_type(type_str, ctx): +def to_ir_type(type_str): if type_str == "f32": - return ir.F32Type.get(context=ctx) + return ir.F32Type.get() elif type_str == "f64": - return ir.F64Type.get(context=ctx) + return ir.F64Type.get() else: raise ValueError(f"Unsupported type: {type_str}") +@with_mlir_ctx_and_location @pytest.mark.parametrize( "op,shape,elem_type", [ @@ -1176,9 +1164,6 @@ def to_ir_type(type_str, ctx): ], ) def test_bin_op(op, shape, elem_type): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1199,9 +1184,9 @@ def bin_op(a, b, out): return module - ir_type = to_ir_type(elem_type, ctx) + ir_type = to_ir_type(elem_type) module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1234,10 +1219,8 @@ def bin_op(a, b, out): (get_triu, (4, 4), "f32"), ], ) +@with_mlir_ctx_and_location def test_unary_op(op, shape, elem_type): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1257,9 +1240,9 @@ def unary_op(a, out): return module - ir_type = to_ir_type(elem_type, ctx) + ir_type = to_ir_type(elem_type) module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1279,12 +1262,10 @@ def unary_op(a, out): @pytest.mark.parametrize("shape,elem_type", [((4, 16), "f32")]) +@with_mlir_ctx_and_location def test_rms_norm(shape, elem_type): eps = 1e-5 - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): 
module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1296,9 +1277,9 @@ def rms_norm(a, out): return module - ir_type = to_ir_type(elem_type, ctx) + ir_type = to_ir_type(elem_type) module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1326,10 +1307,8 @@ def rms_norm(a, out): ((3, 5, 7), 16, 24), ], ) +@with_mlir_ctx_and_location def test_linear(shape, in_features, out_features): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty, input_shape, in_feat, out_feat): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1346,9 +1325,9 @@ def linear_op(x, w, b, out): return module - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type, shape, in_features, out_features) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1369,10 +1348,8 @@ def linear_op(x, w, b, out): assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) +@with_mlir_ctx_and_location def test_polar(): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1389,9 +1366,9 @@ def polar_op(magnitude, angle, out): return module - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1409,10 +1386,8 @@ def polar_op(magnitude, angle, out): assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) +@with_mlir_ctx_and_location def test_repeat_kv(): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty, n_rep): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1426,9 +1401,9 @@ def repeat_kv_op(x, out): return module n_rep = 4 - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type, n_rep) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1447,10 +1422,8 @@ def repeat_kv_op(x, out): assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) +@with_mlir_ctx_and_location def test_reshape_for_broadcast(): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1466,9 +1439,9 @@ def reshape_for_broadcast_op(freqs_cis, x, out): return module - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1491,10 +1464,8 @@ def reshape_for_broadcast_op(freqs_cis, x, out): assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) +@with_mlir_ctx_and_location def test_view_as_complex(): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1510,9 +1481,9 @@ def view_as_complex_op(x, out): return module - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) 
eng = ExecutionEngine(module, opt_level=2) @@ -1532,10 +1503,8 @@ def view_as_complex_op(x, out): assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) +@with_mlir_ctx_and_location def test_view_as_real(): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1548,9 +1517,9 @@ def as_real_op(x, out): return module - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1574,10 +1543,8 @@ def as_real_op(x, out): "batch_size,seq_len,n_heads,head_dim,n_kv_heads,elem_type", [(2, 512, 32, 128, 8, "f32")], ) +@with_mlir_ctx_and_location def test_rotary_emb(batch_size, seq_len, n_heads, head_dim, n_kv_heads, elem_type): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty, xq_shape, xk_shape, freqs_cis_shape): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1598,7 +1565,7 @@ def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): return module - ir_type = to_ir_type(elem_type, ctx) + ir_type = to_ir_type(elem_type) torch_dtype = lh_utils.mlir_type_to_torch_dtype(ir_type) xq_shape = (batch_size, seq_len, n_heads, head_dim) xk_shape = (batch_size, seq_len, n_kv_heads, head_dim) @@ -1614,7 +1581,7 @@ def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): freqs_cis_shape=freqs_cis_shape, elty=ir_type, ) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1637,10 +1604,8 @@ def rotary_emb(xq, xk, freqs_cis, xq_out, xk_out): assert torch.allclose(out2, xk_out, rtol=0.01, atol=0.01, equal_nan=True) +@with_mlir_ctx_and_location def test_feed_forward(): - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1687,9 +1652,9 @@ def feed_forward(x, w1, b1, w2, b2, w3, b3, out): return module - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) @@ -1764,6 +1729,7 @@ def test_smoke_standalone_attention(): assert not torch.isinf(output).any(), "Output contains inf" +@with_mlir_ctx_and_location def test_attention_fwd(): model_args = ModelArgs( dim=32, # Small for testing @@ -1784,9 +1750,6 @@ def test_attention_fwd(): n_kv_heads = model_args.n_kv_heads head_dim = dim // n_heads - ctx = ir.Context() - - @with_mlir_ctx(ctx) def generate_module(elty, args): module = ir.Module.create() with ir.InsertionPoint(module.body): @@ -1836,9 +1799,9 @@ def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out): # Run reference forward out_ref = reference(x, start_pos=0, freqs_cis=freqs_cis_complex, mask=mask) - ir_type = to_ir_type("f32", ctx) + ir_type = to_ir_type("f32") module = generate_module(ir_type, model_args) - schedule = create_schedule(ctx) + schedule = create_schedule() apply_schedule(module, schedule) eng = ExecutionEngine(module, opt_level=2) From 49d6f938dba285e25cbb76c675c5ef9fa674e227 Mon Sep 17 00:00:00 2001 From: Petr Kurapov Date: Wed, 26 Nov 2025 18:31:56 +0100 Subject: [PATCH 08/10] [examples] Add the complete forward pass for llama. 
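
ref_model.py is a FairScale-free copy of Meta's reference modules
(Attention, FeedForward, TransformerBlock, Transformer) so that the
MLIR build-up can be checked layer by layer against plain PyTorch.
On the MLIR side, get_transformer_block chains the existing builders
(RMS norm, attention, residual add, SwiGLU feed forward, residual
add), and get_transformer stacks n_layers such blocks followed by a
final norm. Each lowered layer consumes ten weight tensors, which
the tests pull straight out of the reference model, roughly like
this (a sketch; only wq and w1 are spelled out, the other entries
follow the same pattern):

    import torch
    from ref_model import ModelArgs, TransformerBlock

    args = ModelArgs(dim=32, n_layers=1, n_heads=4, n_kv_heads=2,
                     vocab_size=1000, multiple_of=8,
                     max_batch_size=2, max_seq_len=8)
    block = TransformerBlock(layer_id=0, args=args)
    with torch.no_grad():
        weights = {
            "wq": block.attention.wq.weight.data.clone(),
            "w1": block.feed_forward.w1.weight.data.clone(),
            # wk/wv/wo and w2/w3 are extracted the same way; b1/b2/b3
            # stay zero since the reference Linear layers carry no bias.
        }
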
---
 python/examples/llama/ref_model.py   | 280 +++++++++++++
 python/examples/llama/test_llama3.py | 598 ++++++++++++++++++++-------
 2 files changed, 740 insertions(+), 138 deletions(-)
 create mode 100644 python/examples/llama/ref_model.py

diff --git a/python/examples/llama/ref_model.py b/python/examples/llama/ref_model.py
new file mode 100644
index 0000000..b0a48ca
--- /dev/null
+++ b/python/examples/llama/ref_model.py
@@ -0,0 +1,280 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# This software may be used and distributed in accordance with the terms of the Llama 3 Community License Agreement.
+
+
+## This is a modified version of the LLaMA 3 model implementation.
+## It doesn't use any FairScale components.
+
+import math as pymath
+from dataclasses import dataclass
+from typing import Optional, Tuple
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+
+@dataclass
+class ModelArgs:
+    dim: int = 4096
+    n_layers: int = 32
+    n_heads: int = 32
+    n_kv_heads: Optional[int] = None
+    vocab_size: int = -1
+    multiple_of: int = 256  # make SwiGLU hidden layer size multiple of large power of 2
+    ffn_dim_multiplier: Optional[float] = None
+    norm_eps: float = 1e-5
+    rope_theta: float = 500000
+
+    max_batch_size: int = 32
+    max_seq_len: int = 2048
+
+
+class RMSNorm(torch.nn.Module):
+    def __init__(self, dim: int, eps: float = 1e-6):
+        super().__init__()
+        self.eps = eps
+        self.weight = nn.Parameter(torch.ones(dim))
+
+    def _norm(self, x):
+        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+    def forward(self, x):
+        output = self._norm(x.float()).type_as(x)
+        return output * self.weight
+
+
+def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
+    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
+    t = torch.arange(end, device=freqs.device, dtype=torch.float32)
+    freqs = torch.outer(t, freqs)
+    freqs_cis = torch.polar(torch.ones_like(freqs), freqs)  # complex64
+    return freqs_cis
+
+
+def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
+    ndim = x.ndim
+    assert 0 <= 1 < ndim
+    assert freqs_cis.shape == (x.shape[1], x.shape[-1])
+    shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
+    return freqs_cis.view(*shape)
+
+
+def apply_rotary_emb(
+    xq: torch.Tensor,
+    xk: torch.Tensor,
+    freqs_cis: torch.Tensor,
+) -> Tuple[torch.Tensor, torch.Tensor]:
+    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
+    xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
+    freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
+    xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
+    xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
+    return xq_out.type_as(xq), xk_out.type_as(xk)
+
+
+def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
+    """torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
+    bs, slen, n_kv_heads, head_dim = x.shape
+    if n_rep == 1:
+        return x
+    return (
+        x[:, :, :, None, :]
+        .expand(bs, slen, n_kv_heads, n_rep, head_dim)
+        .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
+    )
+
+
+# Attention implementation without fairscale parallel linear layers
+class Attention(torch.nn.Module):
+    def __init__(self, args: ModelArgs):
+        super().__init__()
+        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
+        self.dim = args.dim
+        self.n_heads = args.n_heads
+        self.n_rep = self.n_heads // self.n_kv_heads
+        self.head_dim = args.dim // args.n_heads
+
+        self.wq = torch.nn.Linear(
+            args.dim,
+            args.n_heads * 
self.head_dim, + bias=False, + ) + self.wk = torch.nn.Linear( + args.dim, + self.n_kv_heads * self.head_dim, + bias=False, + ) + self.wv = torch.nn.Linear( + args.dim, + self.n_kv_heads * self.head_dim, + bias=False, + ) + self.wo = torch.nn.Linear( + args.n_heads * self.head_dim, + args.dim, + bias=False, + ) + + self.cache_k = torch.zeros( + ( + args.max_batch_size, + args.max_seq_len, + self.n_kv_heads, + self.head_dim, + ) + ) + self.cache_v = torch.zeros( + ( + args.max_batch_size, + args.max_seq_len, + self.n_kv_heads, + self.head_dim, + ) + ) + + def forward( + self, + x: torch.Tensor, + start_pos: int, + freqs_cis: torch.Tensor, + mask: Optional[torch.Tensor], + ): + bsz, seqlen, _ = x.shape + xq, xk, xv = self.wq(x), self.wk(x), self.wv(x) + + xq = xq.view(bsz, seqlen, self.n_heads, self.head_dim) + xk = xk.view(bsz, seqlen, self.n_kv_heads, self.head_dim) + xv = xv.view(bsz, seqlen, self.n_kv_heads, self.head_dim) + + xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis) + + self.cache_k = self.cache_k.to(xq) + self.cache_v = self.cache_v.to(xq) + + self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk + self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv + + keys = self.cache_k[:bsz, : start_pos + seqlen] + values = self.cache_v[:bsz, : start_pos + seqlen] + + # repeat k/v heads if n_kv_heads < n_heads + keys = repeat_kv( + keys, self.n_rep + ) # (bs, cache_len + seqlen, n_heads, head_dim) + values = repeat_kv( + values, self.n_rep + ) # (bs, cache_len + seqlen, n_heads, head_dim) + + xq = xq.transpose(1, 2) # (bs, n_heads, seqlen, head_dim) + keys = keys.transpose(1, 2) # (bs, n_heads, cache_len + seqlen, head_dim) + values = values.transpose(1, 2) # (bs, n_heads, cache_len + seqlen, head_dim) + scores = torch.matmul(xq, keys.transpose(2, 3)) / pymath.sqrt(self.head_dim) + if mask is not None: + scores = scores + mask # (bs, n_heads, seqlen, cache_len + seqlen) + scores = torch.nn.functional.softmax(scores.float(), dim=-1).type_as(xq) + output = torch.matmul(scores, values) # (bs, n_heads, seqlen, head_dim) + output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1) + return self.wo(output) + + +class FeedForward(nn.Module): + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int, + ffn_dim_multiplier: Optional[float], + ): + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + # custom dim factor multiplier + if ffn_dim_multiplier is not None: + hidden_dim = int(ffn_dim_multiplier * hidden_dim) + hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + self.w1 = nn.Linear(dim, hidden_dim, bias=False) + self.w2 = nn.Linear(hidden_dim, dim, bias=False) + self.w3 = nn.Linear(dim, hidden_dim, bias=False) + + def forward(self, x): + return self.w2(F.silu(self.w1(x)) * self.w3(x)) + + +class TransformerBlock(nn.Module): + def __init__(self, layer_id: int, args: ModelArgs): + super().__init__() + self.n_heads = args.n_heads + self.dim = args.dim + self.head_dim = args.dim // args.n_heads + self.attention = Attention(args) + self.feed_forward = FeedForward( + dim=args.dim, + hidden_dim=4 * args.dim, + multiple_of=args.multiple_of, + ffn_dim_multiplier=args.ffn_dim_multiplier, + ) + self.layer_id = layer_id + self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps) + self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps) + + def forward( + self, + x: torch.Tensor, + start_pos: int, + freqs_cis: torch.Tensor, + mask: Optional[torch.Tensor], + ): + h = x + self.attention(self.attention_norm(x), start_pos, freqs_cis, mask) + 
out = h + self.feed_forward(self.ffn_norm(h)) + return out + + +class Transformer(nn.Module): + def __init__(self, params: ModelArgs): + super().__init__() + self.params = params + self.vocab_size = params.vocab_size + self.n_layers = params.n_layers + + self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim) + + self.layers = torch.nn.ModuleList() + for layer_id in range(params.n_layers): + self.layers.append(TransformerBlock(layer_id, params)) + + self.norm = RMSNorm(params.dim, eps=params.norm_eps) + self.output = nn.Linear(params.dim, params.vocab_size, bias=False) + + self.freqs_cis = precompute_freqs_cis( + params.dim // params.n_heads, + params.max_seq_len * 2, + params.rope_theta, + ) + + @torch.inference_mode() + def forward(self, tokens: torch.Tensor, start_pos: int): + _bsz, seqlen = tokens.shape + h = self.tok_embeddings(tokens) + self.freqs_cis = self.freqs_cis.to(h.device) + freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen] + + mask = None + if seqlen > 1: + mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device) + + mask = torch.triu(mask, diagonal=1) + + # When performing key-value caching, we compute the attention scores + # only for the new sequence. Thus, the matrix of scores is of size + # (seqlen, cache_len + seqlen), and the only masked entries are (i, j) for + # j > cache_len + i, since row i corresponds to token cache_len + i. + mask = torch.hstack( + [torch.zeros((seqlen, start_pos), device=tokens.device), mask] + ).type_as(h) + + for layer in self.layers: + h = layer(h, start_pos, freqs_cis, mask) + h = self.norm(h) + output = self.output(h).float() + return output diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py index 72dcb1d..ffef803 100644 --- a/python/examples/llama/test_llama3.py +++ b/python/examples/llama/test_llama3.py @@ -18,7 +18,15 @@ ) from mlir.execution_engine import ExecutionEngine - +from ref_model import ( + Attention, + ModelArgs, + reshape_for_broadcast, + apply_rotary_emb, + repeat_kv, + TransformerBlock, + Transformer, +) from lighthouse import utils as lh_utils @@ -31,22 +39,6 @@ def wrapper(*args, **kwargs): return wrapper -@dataclass -class ModelArgs: - dim: int = 4096 - n_layers: int = 32 - n_heads: int = 32 - n_kv_heads: Optional[int] = None - vocab_size: int = -1 - multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2 - ffn_dim_multiplier: Optional[float] = None - norm_eps: float = 1e-5 - rope_theta: float = 500000 - - max_batch_size: int = 32 - max_seq_len: int = 2048 - - def affine_map(dim_count, exprs, *, symb_count=0): return ir.AffineMap.get(dim_count, symb_count, exprs) @@ -118,14 +110,14 @@ def create_schedule() -> ir.Module: return schedule -def bufferize_module(ctx: ir.Context, kernel: ir.Module) -> None: +def bufferize_module(kernel: ir.Module) -> None: pm = PassManager("builtin.module") pm.add("one-shot-bufferize{bufferize-function-boundaries}") pm.run(kernel.operation) def apply_schedule(kernel: ir.Module, schedule: ir.Module) -> None: - bufferize_module(kernel.context, kernel) + bufferize_module(kernel) interpreter.apply_named_sequence( payload_root=kernel, transform_root=schedule.body.operations[0], @@ -994,133 +986,129 @@ def transpose_out(val, _out): return output_final -#### Test cases ##### +def get_transformer_block( + args: ModelArgs, + x: ir.Value, + wq: ir.Value, + wk: ir.Value, + wv: ir.Value, + wo: ir.Value, + freqs_cis: ir.Value, + attn_mask: ir.Value, + w1: ir.Value, + b1: ir.Value, + w2: ir.Value, + b2: 
ir.Value, + w3: ir.Value, + b3: ir.Value, + out: ir.Value, +) -> ir.Value: + elty = x.type.element_type + x_norm_uninit = tensor.empty(x.type.shape, elty) + x_norm = get_l2_norm(x, x_norm_uninit, eps=args.norm_eps) + + attn_out_uninit = tensor.empty(x.type.shape, elty) + attn_out = get_attention( + args, + x_norm, + wq, + wk, + wv, + wo, + freqs_cis, + attn_mask, + attn_out_uninit, + ) -def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): - ndim = x.ndim - assert 0 <= 1 < ndim - assert freqs_cis.shape == (x.shape[1], x.shape[-1]) - shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] - return freqs_cis.view(*shape) + h = get_add(x, attn_out, attn_out_uninit) + h_norm_uninit = tensor.empty(h.type.shape, elty) + ffn_norm = get_l2_norm(h, h_norm_uninit, eps=args.norm_eps) -def rotary_emb_ref( - xq: torch.Tensor, - xk: torch.Tensor, - freqs_cis: torch.Tensor, -) -> Tuple[torch.Tensor, torch.Tensor]: - xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) - xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) - freqs_cis = reshape_for_broadcast(freqs_cis, xq_) - xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) - xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) - return xq_out.type_as(xq), xk_out.type_as(xk) + hidden_dim = int(2 * (4 * x.type.shape[-1]) / 3) + if args.ffn_dim_multiplier is not None: + hidden_dim = int(args.ffn_dim_multiplier * hidden_dim) + hidden_dim = args.multiple_of * ( + (hidden_dim + args.multiple_of - 1) // args.multiple_of + ) + ffn_intermediate_shape = list(x.type.shape) + ffn_intermediate_shape[-1] = hidden_dim -def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: - """torch.repeat_interleave(x, dim=2, repeats=n_rep)""" - bs, slen, n_kv_heads, head_dim = x.shape - if n_rep == 1: - return x - return ( - x[:, :, :, None, :] - .expand(bs, slen, n_kv_heads, n_rep, head_dim) - .reshape(bs, slen, n_kv_heads * n_rep, head_dim) - ) + ffn_w1_uninit = tensor.empty(ffn_intermediate_shape, elty) + ffn_w1_out = get_linear(ffn_norm, w1, b1, ffn_w1_uninit) + silu_out = get_silu(ffn_w1_out, ffn_w1_uninit) + ffn_w3_uninit = tensor.empty(ffn_intermediate_shape, elty) + ffn_w3_out = get_linear(ffn_norm, w3, b3, ffn_w3_uninit) + gated = get_mul(silu_out, ffn_w3_out, ffn_w3_uninit) -# Attention implementation without fairscale parrallel linear layers -class StandaloneAttention(torch.nn.Module): - def __init__(self, args: ModelArgs): - super().__init__() - self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads - self.dim = args.dim - self.n_heads = args.n_heads - self.n_rep = self.n_heads // self.n_kv_heads - self.head_dim = args.dim // args.n_heads - - self.wq = torch.nn.Linear( - args.dim, - args.n_heads * self.head_dim, - bias=False, - ) - self.wk = torch.nn.Linear( - args.dim, - self.n_kv_heads * self.head_dim, - bias=False, - ) - self.wv = torch.nn.Linear( - args.dim, - self.n_kv_heads * self.head_dim, - bias=False, - ) - self.wo = torch.nn.Linear( - args.n_heads * self.head_dim, - args.dim, - bias=False, - ) + ffn_output = get_linear(gated, w2, b2, out) + final_out = get_add(h, ffn_output, out) - self.cache_k = torch.zeros( - ( - args.max_batch_size, - args.max_seq_len, - self.n_kv_heads, - self.head_dim, - ) - ) - self.cache_v = torch.zeros( - ( - args.max_batch_size, - args.max_seq_len, - self.n_kv_heads, - self.head_dim, - ) + return final_out + + +def get_transformer( + args: ModelArgs, + x: ir.Value, + freqs_cis: ir.Value, + mask: ir.Value, + layer_weights: 
list, + out: ir.Value, +) -> ir.Value: + elty = x.type.element_type + h = x + + # Apply each transformer block sequentially + for layer_id in range(args.n_layers): + weights = layer_weights[layer_id] + + # Create output tensor for this layer + layer_out_uninit = tensor.empty(h.type.shape, elty) + + # Apply transformer block + h = get_transformer_block( + args, + h, + weights["wq"], + weights["wk"], + weights["wv"], + weights["wo"], + freqs_cis, + mask, + weights["w1"], + weights["b1"], + weights["w2"], + weights["b2"], + weights["w3"], + weights["b3"], + layer_out_uninit, ) - def forward( - self, - x: torch.Tensor, - start_pos: int, - freqs_cis: torch.Tensor, - mask: Optional[torch.Tensor], - ): - bsz, seqlen, _ = x.shape - xq, xk, xv = self.wq(x), self.wk(x), self.wv(x) - - xq = xq.view(bsz, seqlen, self.n_heads, self.head_dim) - xk = xk.view(bsz, seqlen, self.n_kv_heads, self.head_dim) - xv = xv.view(bsz, seqlen, self.n_kv_heads, self.head_dim) - - xq, xk = rotary_emb_ref(xq, xk, freqs_cis=freqs_cis) - - self.cache_k = self.cache_k.to(xq) - self.cache_v = self.cache_v.to(xq) - - self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk - self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv - - keys = self.cache_k[:bsz, : start_pos + seqlen] - values = self.cache_v[:bsz, : start_pos + seqlen] - - # repeat k/v heads if n_kv_heads < n_heads - keys = repeat_kv( - keys, self.n_rep - ) # (bs, cache_len + seqlen, n_heads, head_dim) - values = repeat_kv( - values, self.n_rep - ) # (bs, cache_len + seqlen, n_heads, head_dim) - - xq = xq.transpose(1, 2) # (bs, n_heads, seqlen, head_dim) - keys = keys.transpose(1, 2) # (bs, n_heads, cache_len + seqlen, head_dim) - values = values.transpose(1, 2) # (bs, n_heads, cache_len + seqlen, head_dim) - scores = torch.matmul(xq, keys.transpose(2, 3)) / pymath.sqrt(self.head_dim) - if mask is not None: - scores = scores + mask # (bs, n_heads, seqlen, cache_len + seqlen) - scores = torch.nn.functional.softmax(scores.float(), dim=-1).type_as(xq) - output = torch.matmul(scores, values) # (bs, n_heads, seqlen, head_dim) - output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1) - return self.wo(output) + # Apply final norm + final_norm_uninit = tensor.empty(h.type.shape, elty) + final_norm = get_l2_norm(h, final_norm_uninit, eps=args.norm_eps) + + # Copy to output + rank = len(h.type.shape) + dims = [ir.AffineDimExpr.get(i) for i in range(rank)] + id_map = affine_map(rank, dims) + + @linalg.generic( + [final_norm], + [out], + [id_map, id_map], + [parallel] * rank, + ) + def copy_op(val, _out): + return val + + return copy_op + + +#### Test cases ##### references = { @@ -1139,7 +1127,7 @@ def forward( get_repeat_kv: repeat_kv, get_l2_norm: lambda x, eps: x * torch.rsqrt(torch.mean(x.pow(2), dim=-1, keepdim=True) + eps), - get_rotary_emb: rotary_emb_ref, + get_rotary_emb: apply_rotary_emb, } @@ -1703,7 +1691,7 @@ def test_smoke_standalone_attention(): max_seq_len=16, ) - attention = StandaloneAttention(args) + attention = Attention(args) batch_size = 2 seq_len = 4 @@ -1780,7 +1768,7 @@ def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out): return module - reference = StandaloneAttention(model_args) + reference = Attention(model_args) torch_dtype = torch.float32 x = torch.randn(batch, seq_len, dim, dtype=torch_dtype) @@ -1822,3 +1810,337 @@ def attention_op(x, wq, wk, wv, wo, freqs_cis, mask, out): func_ptr(args) assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +@with_mlir_ctx_and_location +def test_transformer_block_fwd(): 
+ model_args = ModelArgs( + dim=32, + n_layers=1, + n_heads=4, + n_kv_heads=2, # Test GQA + vocab_size=1000, + multiple_of=8, + norm_eps=1e-5, + max_batch_size=2, + max_seq_len=8, + ) + + batch = 2 + seq_len = 4 + dim = model_args.dim + n_heads = model_args.n_heads + n_kv_heads = model_args.n_kv_heads + head_dim = dim // n_heads + + hidden_dim = int(2 * (4 * dim) / 3) + if model_args.ffn_dim_multiplier is not None: + hidden_dim = int(model_args.ffn_dim_multiplier * hidden_dim) + hidden_dim = model_args.multiple_of * ( + (hidden_dim + model_args.multiple_of - 1) // model_args.multiple_of + ) + + def generate_module(elty, args): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + wq_type = ir.RankedTensorType.get([n_heads * head_dim, dim], elty) + wk_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wv_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wo_type = ir.RankedTensorType.get([dim, n_heads * head_dim], elty) + freqs_cis_type = ir.RankedTensorType.get([seq_len, head_dim // 2], elty) + mask_type = ir.RankedTensorType.get( + [batch, n_heads, seq_len, seq_len], elty + ) + w1_type = ir.RankedTensorType.get([hidden_dim, dim], elty) + b1_type = ir.RankedTensorType.get([hidden_dim], elty) + w2_type = ir.RankedTensorType.get([dim, hidden_dim], elty) + b2_type = ir.RankedTensorType.get([dim], elty) + w3_type = ir.RankedTensorType.get([hidden_dim, dim], elty) + b3_type = ir.RankedTensorType.get([hidden_dim], elty) + out_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + + @func.FuncOp.from_py_func( + x_type, + wq_type, + wk_type, + wv_type, + wo_type, + freqs_cis_type, + mask_type, + w1_type, + b1_type, + w2_type, + b2_type, + w3_type, + b3_type, + out_type, + name="transformer_block_op", + ) + def transformer_block_op( + x, wq, wk, wv, wo, freqs_cis, mask, w1, b1, w2, b2, w3, b3, out + ): + get_transformer_block( + args, + x, + wq, + wk, + wv, + wo, + freqs_cis, + mask, + w1, + b1, + w2, + b2, + w3, + b3, + out, + ) + + return module + + reference = TransformerBlock(layer_id=0, args=model_args) + + torch_dtype = torch.float32 + x = torch.randn(batch, seq_len, dim, dtype=torch_dtype) + freqs_cis_real = torch.randn(seq_len, head_dim // 2, dtype=torch_dtype) + freqs_cis_complex = torch.complex(freqs_cis_real, torch.zeros_like(freqs_cis_real)) + mask = torch.full( + (batch, n_heads, seq_len, seq_len), float("-inf"), dtype=torch_dtype + ) + mask = torch.triu(mask, diagonal=1) + + with torch.no_grad(): + # Extract weights from reference model + wq = reference.attention.wq.weight.data.clone() + wk = reference.attention.wk.weight.data.clone() + wv = reference.attention.wv.weight.data.clone() + wo = reference.attention.wo.weight.data.clone() + w1 = reference.feed_forward.w1.weight.data.clone() + w2 = reference.feed_forward.w2.weight.data.clone() + w3 = reference.feed_forward.w3.weight.data.clone() + # No bias + b1 = torch.zeros(hidden_dim, dtype=torch_dtype) + b2 = torch.zeros(dim, dtype=torch_dtype) + b3 = torch.zeros(hidden_dim, dtype=torch_dtype) + + out_ref = reference(x, start_pos=0, freqs_cis=freqs_cis_complex, mask=mask) + + ir_type = to_ir_type("f32") + module = generate_module(ir_type, model_args) + schedule = create_schedule() + apply_schedule(module, schedule) + + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("transformer_block_op") + + out = torch.empty_like(out_ref) + x_mem = get_ranked_memref_descriptor(x.numpy()) + wq_mem = 
get_ranked_memref_descriptor(wq.numpy()) + wk_mem = get_ranked_memref_descriptor(wk.numpy()) + wv_mem = get_ranked_memref_descriptor(wv.numpy()) + wo_mem = get_ranked_memref_descriptor(wo.numpy()) + freqs_cis_mem = get_ranked_memref_descriptor(freqs_cis_real.numpy()) + mask_mem = get_ranked_memref_descriptor(mask.numpy()) + w1_mem = get_ranked_memref_descriptor(w1.numpy()) + b1_mem = get_ranked_memref_descriptor(b1.numpy()) + w2_mem = get_ranked_memref_descriptor(w2.numpy()) + b2_mem = get_ranked_memref_descriptor(b2.numpy()) + w3_mem = get_ranked_memref_descriptor(w3.numpy()) + b3_mem = get_ranked_memref_descriptor(b3.numpy()) + out_mem = get_ranked_memref_descriptor(out.numpy()) + + args = lh_utils.memrefs_to_packed_args( + [ + x_mem, + wq_mem, + wk_mem, + wv_mem, + wo_mem, + freqs_cis_mem, + mask_mem, + w1_mem, + b1_mem, + w2_mem, + b2_mem, + w3_mem, + b3_mem, + out_mem, + ] + ) + func_ptr(args) + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) + + +@with_mlir_ctx_and_location +def test_transformer_fwd(): + model_args = ModelArgs( + dim=32, + n_layers=2, + n_heads=4, + n_kv_heads=2, # Test GQA + vocab_size=1000, + multiple_of=8, + norm_eps=1e-5, + max_batch_size=2, + max_seq_len=8, + ) + + batch = 2 + seq_len = 4 + dim = model_args.dim + n_heads = model_args.n_heads + n_kv_heads = model_args.n_kv_heads + head_dim = dim // n_heads + + hidden_dim = int(2 * (4 * dim) / 3) + if model_args.ffn_dim_multiplier is not None: + hidden_dim = int(model_args.ffn_dim_multiplier * hidden_dim) + hidden_dim = model_args.multiple_of * ( + (hidden_dim + model_args.multiple_of - 1) // model_args.multiple_of + ) + + def generate_module(elty, args): + module = ir.Module.create() + with ir.InsertionPoint(module.body): + x_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + freqs_cis_type = ir.RankedTensorType.get([seq_len, head_dim // 2], elty) + mask_type = ir.RankedTensorType.get( + [batch, n_heads, seq_len, seq_len], elty + ) + out_type = ir.RankedTensorType.get([batch, seq_len, dim], elty) + wq_type = ir.RankedTensorType.get([n_heads * head_dim, dim], elty) + wk_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wv_type = ir.RankedTensorType.get([n_kv_heads * head_dim, dim], elty) + wo_type = ir.RankedTensorType.get([dim, n_heads * head_dim], elty) + w1_type = ir.RankedTensorType.get([hidden_dim, dim], elty) + b1_type = ir.RankedTensorType.get([hidden_dim], elty) + w2_type = ir.RankedTensorType.get([dim, hidden_dim], elty) + b2_type = ir.RankedTensorType.get([dim], elty) + w3_type = ir.RankedTensorType.get([hidden_dim, dim], elty) + b3_type = ir.RankedTensorType.get([hidden_dim], elty) + + param_types = [x_type, freqs_cis_type, mask_type] + for _ in range(args.n_layers): + param_types.extend( + [ + wq_type, + wk_type, + wv_type, + wo_type, + w1_type, + b1_type, + w2_type, + b2_type, + w3_type, + b3_type, + ] + ) + param_types.append(out_type) + + @func.FuncOp.from_py_func(*param_types, name="transformer_op") + def transformer_op(*params): + x = params[0] + freqs_cis = params[1] + mask = params[2] + + # Extract weights for each layer + layer_weights = [] + idx = 3 + for _ in range(args.n_layers): + layer_weights.append( + { + "wq": params[idx], + "wk": params[idx + 1], + "wv": params[idx + 2], + "wo": params[idx + 3], + "w1": params[idx + 4], + "b1": params[idx + 5], + "w2": params[idx + 6], + "b2": params[idx + 7], + "w3": params[idx + 8], + "b3": params[idx + 9], + } + ) + idx += 10 + out = params[idx] + + get_transformer(args, x, freqs_cis, mask, 
layer_weights, out) + + return module + + ir_type = to_ir_type("f32") + module = generate_module(ir_type, model_args) + schedule = create_schedule() + apply_schedule(module, schedule) + eng = ExecutionEngine(module, opt_level=2) + func_ptr = eng.lookup("transformer_op") + + reference = Transformer(model_args) + + torch_dtype = torch.float32 + x = torch.randn(batch, seq_len, dim, dtype=torch_dtype) + freqs_cis_real = torch.randn(seq_len, head_dim // 2, dtype=torch_dtype) + freqs_cis_complex = torch.complex(freqs_cis_real, torch.zeros_like(freqs_cis_real)) + mask = torch.full( + (batch, n_heads, seq_len, seq_len), float("-inf"), dtype=torch_dtype + ) + mask = torch.triu(mask, diagonal=1) + + with torch.no_grad(): + # Extract weights from all layers + layer_weights_torch = [] + for layer_id in range(model_args.n_layers): + layer = reference.layers[layer_id] + layer_weights_torch.append( + { + "wq": layer.attention.wq.weight.data.clone(), + "wk": layer.attention.wk.weight.data.clone(), + "wv": layer.attention.wv.weight.data.clone(), + "wo": layer.attention.wo.weight.data.clone(), + "w1": layer.feed_forward.w1.weight.data.clone(), + "b1": torch.zeros(hidden_dim, dtype=torch_dtype), + "w2": layer.feed_forward.w2.weight.data.clone(), + "b2": torch.zeros(dim, dtype=torch_dtype), + "w3": layer.feed_forward.w3.weight.data.clone(), + "b3": torch.zeros(hidden_dim, dtype=torch_dtype), + } + ) + + # Supply embeddings instead of tokens + h = x + for layer in reference.layers: + h = layer(h, start_pos=0, freqs_cis=freqs_cis_complex, mask=mask) + out_ref = reference.norm(h) + + out = torch.empty_like(out_ref) + x_mem = get_ranked_memref_descriptor(x.numpy()) + freqs_cis_mem = get_ranked_memref_descriptor(freqs_cis_real.numpy()) + mask_mem = get_ranked_memref_descriptor(mask.numpy()) + + # Add memrefs for all layer weights + memrefs = [x_mem, freqs_cis_mem, mask_mem] + for layer_weights in layer_weights_torch: + memrefs.extend( + [ + get_ranked_memref_descriptor(layer_weights["wq"].numpy()), + get_ranked_memref_descriptor(layer_weights["wk"].numpy()), + get_ranked_memref_descriptor(layer_weights["wv"].numpy()), + get_ranked_memref_descriptor(layer_weights["wo"].numpy()), + get_ranked_memref_descriptor(layer_weights["w1"].numpy()), + get_ranked_memref_descriptor(layer_weights["b1"].numpy()), + get_ranked_memref_descriptor(layer_weights["w2"].numpy()), + get_ranked_memref_descriptor(layer_weights["b2"].numpy()), + get_ranked_memref_descriptor(layer_weights["w3"].numpy()), + get_ranked_memref_descriptor(layer_weights["b3"].numpy()), + ] + ) + + out_mem = get_ranked_memref_descriptor(out.numpy()) + memrefs.append(out_mem) + + args = lh_utils.memrefs_to_packed_args(memrefs) + func_ptr(args) + + assert torch.allclose(out, out_ref, rtol=0.01, atol=0.01, equal_nan=True) From 208ccbd4357c360328f88d704c6ee59ede59246a Mon Sep 17 00:00:00 2001 From: Petr Kurapov Date: Wed, 26 Nov 2025 19:15:28 +0100 Subject: [PATCH 09/10] Exclude ref model from lit tests. --- python/examples/llama/lit.local.cfg | 1 + 1 file changed, 1 insertion(+) create mode 100644 python/examples/llama/lit.local.cfg diff --git a/python/examples/llama/lit.local.cfg b/python/examples/llama/lit.local.cfg new file mode 100644 index 0000000..c37d087 --- /dev/null +++ b/python/examples/llama/lit.local.cfg @@ -0,0 +1 @@ +config.excludes = ["ref_model.py"] \ No newline at end of file From 8493c5b175efd13eb9d10dfbd8962fb865707bcc Mon Sep 17 00:00:00 2001 From: Petr Kurapov Date: Wed, 26 Nov 2025 19:17:46 +0100 Subject: [PATCH 10/10] Make ruff happy. 
--- python/examples/llama/test_llama3.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/examples/llama/test_llama3.py b/python/examples/llama/test_llama3.py index ffef803..9684ad9 100644 --- a/python/examples/llama/test_llama3.py +++ b/python/examples/llama/test_llama3.py @@ -1,11 +1,9 @@ # RUN: %pytest %s -from dataclasses import dataclass import functools import math as pymath import pytest import torch -from typing import Optional, Tuple from mlir import ir