// rustc_codegen_llvm/builder/gpu_offload.rs
1use std::ffi::CString;
2
3use bitflags::Flags;
4use llvm::Linkage::*;
5use rustc_abi::Align;
6use rustc_codegen_ssa::MemFlags;
7use rustc_codegen_ssa::common::TypeKind;
8use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
9use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
10use rustc_middle::bug;
11use rustc_middle::ty::offload_meta::{MappingFlags, OffloadMetadata, OffloadSize};
12
13use crate::builder::Builder;
14use crate::common::CodegenCx;
15use crate::llvm::AttributePlace::Function;
16use crate::llvm::{self, Linkage, Type, Value};
17use crate::{SimpleCx, attributes};
18
19// LLVM kernel-independent globals required for offloading
pub(crate) struct OffloadGlobals<'ll> {
    // `__tgt_target_kernel`: the runtime entry point used to launch a kernel
    // (declared in `generate_launcher`), plus its LLVM function type.
    pub launcher_fn: &'ll llvm::Value,
    pub launcher_ty: &'ll llvm::Type,

    // `struct.__tgt_kernel_arguments`, declared by `KernelArgsTy::new_decl`.
    pub kernel_args_ty: &'ll llvm::Type,

    // `struct.__tgt_offload_entry`, declared by `TgtOffloadEntry::new_decl`.
    pub offload_entry_ty: &'ll llvm::Type,

    // `__tgt_target_data_{begin,end}_mapper` declarations (see `gen_tgt_data_mappers`)
    // and their shared function type, used to move data to/from the device.
    pub begin_mapper: &'ll llvm::Value,
    pub end_mapper: &'ll llvm::Value,
    pub mapper_fn_ty: &'ll llvm::Type,

    // The private `%struct.ident_t` global (`@1`) built by `generate_at_one`,
    // passed to the mapper calls.
    pub ident_t_global: &'ll llvm::Value,
}
34
35impl<'ll> OffloadGlobals<'ll> {
36    pub(crate) fn declare(cx: &CodegenCx<'ll, '_>) -> Self {
37        let (launcher_fn, launcher_ty) = generate_launcher(cx);
38        let kernel_args_ty = KernelArgsTy::new_decl(cx);
39        let offload_entry_ty = TgtOffloadEntry::new_decl(cx);
40        let (begin_mapper, _, end_mapper, mapper_fn_ty) = gen_tgt_data_mappers(cx);
41        let ident_t_global = generate_at_one(cx);
42
43        // We want LLVM's openmp-opt pass to pick up and optimize this module, since it covers both
44        // openmp and offload optimizations.
45        llvm::add_module_flag_u32(cx.llmod(), llvm::ModuleFlagMergeBehavior::Max, "openmp", 51);
46
47        OffloadGlobals {
48            launcher_fn,
49            launcher_ty,
50            kernel_args_ty,
51            offload_entry_ty,
52            begin_mapper,
53            end_mapper,
54            mapper_fn_ty,
55            ident_t_global,
56        }
57    }
58}
59
60// We need to register offload before using it. We also should unregister it once we are done, for
61// good measures. Previously we have done so before and after each individual offload intrinsic
62// call, but that comes at a performance cost. The repeated (un)register calls might also confuse
63// the LLVM ompOpt pass, which tries to move operations to a better location. The easiest solution,
64// which we copy from clang, is to just have those two calls once, in the global ctor/dtor section
65// of the final binary.
66pub(crate) fn register_offload<'ll>(cx: &CodegenCx<'ll, '_>) {
67    // First we check quickly whether we already have done our setup, in which case we return early.
68    // Shouldn't be needed for correctness.
69    let register_lib_name = "__tgt_register_lib";
70    if cx.get_function(register_lib_name).is_some() {
71        return;
72    }
73
74    let reg_lib_decl = cx.type_func(&[cx.type_ptr()], cx.type_void());
75    let register_lib = declare_offload_fn(&cx, register_lib_name, reg_lib_decl);
76    let unregister_lib = declare_offload_fn(&cx, "__tgt_unregister_lib", reg_lib_decl);
77
78    let ptr_null = cx.const_null(cx.type_ptr());
79    let const_struct = cx.const_struct(&[cx.get_const_i32(0), ptr_null, ptr_null, ptr_null], false);
80    let omp_descriptor =
81        add_global(cx, ".omp_offloading.descriptor", const_struct, InternalLinkage);
82    // @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, ptr @.omp_offloading.device_images, ptr @__start_llvm_offload_entries, ptr @__stop_llvm_offload_entries }
83    // @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 0, ptr null, ptr null, ptr null }
84
85    let atexit = cx.type_func(&[cx.type_ptr()], cx.type_i32());
86    let atexit_fn = declare_offload_fn(cx, "atexit", atexit);
87
88    // FIXME(offload): Drop this, once we fully automated our offload compilation pipeline, since
89    // LLVM will initialize them for us if it sees gpu kernels being registered.
90    let init_ty = cx.type_func(&[], cx.type_void());
91    let init_rtls = declare_offload_fn(cx, "__tgt_init_all_rtls", init_ty);
92
93    let desc_ty = cx.type_func(&[], cx.type_void());
94    let reg_name = ".omp_offloading.descriptor_reg";
95    let unreg_name = ".omp_offloading.descriptor_unreg";
96    let desc_reg_fn = declare_offload_fn(cx, reg_name, desc_ty);
97    let desc_unreg_fn = declare_offload_fn(cx, unreg_name, desc_ty);
98    llvm::set_linkage(desc_reg_fn, InternalLinkage);
99    llvm::set_linkage(desc_unreg_fn, InternalLinkage);
100    llvm::set_section(desc_reg_fn, c".text.startup");
101    llvm::set_section(desc_unreg_fn, c".text.startup");
102
103    // define internal void @.omp_offloading.descriptor_reg() section ".text.startup" {
104    // entry:
105    //   call void @__tgt_register_lib(ptr @.omp_offloading.descriptor)
106    //   call void @__tgt_init_all_rtls()
107    //   %0 = call i32 @atexit(ptr @.omp_offloading.descriptor_unreg)
108    //   ret void
109    // }
110    let bb = Builder::append_block(cx, desc_reg_fn, "entry");
111    let mut a = Builder::build(cx, bb);
112    a.call(reg_lib_decl, None, None, register_lib, &[omp_descriptor], None, None);
113    a.call(init_ty, None, None, init_rtls, &[], None, None);
114    a.call(atexit, None, None, atexit_fn, &[desc_unreg_fn], None, None);
115    a.ret_void();
116
117    // define internal void @.omp_offloading.descriptor_unreg() section ".text.startup" {
118    // entry:
119    //   call void @__tgt_unregister_lib(ptr @.omp_offloading.descriptor)
120    //   ret void
121    // }
122    let bb = Builder::append_block(cx, desc_unreg_fn, "entry");
123    let mut a = Builder::build(cx, bb);
124    a.call(reg_lib_decl, None, None, unregister_lib, &[omp_descriptor], None, None);
125    a.ret_void();
126
127    // @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 101, ptr @.omp_offloading.descriptor_reg, ptr null }]
128    let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [cx.get_const_i32(101), desc_reg_fn, ptr_null]))vec![cx.get_const_i32(101), desc_reg_fn, ptr_null];
129    let const_struct = cx.const_struct(&args, false);
130    let arr = cx.const_array(cx.val_ty(const_struct), &[const_struct]);
131    add_global(cx, "llvm.global_ctors", arr, AppendingLinkage);
132}
133
// Launch dimensions for a single kernel call, kept both as the raw `[i32; 3]`
// array values and as their pre-multiplied x*y*z totals (see `from_operands`).
pub(crate) struct OffloadKernelDims<'ll> {
    // Product of the three `workgroup_dims` elements.
    num_workgroups: &'ll Value,
    // Product of the three `thread_dims` elements.
    threads_per_block: &'ll Value,
    // `[i32; 3]` value holding the workgroup dimensions.
    workgroup_dims: &'ll Value,
    // `[i32; 3]` value holding the per-workgroup thread dimensions.
    thread_dims: &'ll Value,
}
140
141impl<'ll> OffloadKernelDims<'ll> {
142    pub(crate) fn from_operands<'tcx>(
143        builder: &mut Builder<'_, 'll, 'tcx>,
144        workgroup_op: &OperandRef<'tcx, &'ll llvm::Value>,
145        thread_op: &OperandRef<'tcx, &'ll llvm::Value>,
146    ) -> Self {
147        let cx = builder.cx;
148        let arr_ty = cx.type_array(cx.type_i32(), 3);
149        let four = Align::from_bytes(4).unwrap();
150
151        let OperandValue::Ref(place) = workgroup_op.val else {
152            ::rustc_middle::util::bug::bug_fmt(format_args!("expected array operand by reference"));bug!("expected array operand by reference");
153        };
154        let workgroup_val = builder.load(arr_ty, place.llval, four);
155
156        let OperandValue::Ref(place) = thread_op.val else {
157            ::rustc_middle::util::bug::bug_fmt(format_args!("expected array operand by reference"));bug!("expected array operand by reference");
158        };
159        let thread_val = builder.load(arr_ty, place.llval, four);
160
161        fn mul_dim3<'ll, 'tcx>(
162            builder: &mut Builder<'_, 'll, 'tcx>,
163            arr: &'ll Value,
164        ) -> &'ll Value {
165            let x = builder.extract_value(arr, 0);
166            let y = builder.extract_value(arr, 1);
167            let z = builder.extract_value(arr, 2);
168
169            let xy = builder.mul(x, y);
170            builder.mul(xy, z)
171        }
172
173        let num_workgroups = mul_dim3(builder, workgroup_val);
174        let threads_per_block = mul_dim3(builder, thread_val);
175
176        OffloadKernelDims {
177            workgroup_dims: workgroup_val,
178            thread_dims: thread_val,
179            num_workgroups,
180            threads_per_block,
181        }
182    }
183}
184
185// ; Function Attrs: nounwind
186// declare i32 @__tgt_target_kernel(ptr, i64, i32, i32, ptr, ptr) #2
187fn generate_launcher<'ll>(cx: &CodegenCx<'ll, '_>) -> (&'ll llvm::Value, &'ll llvm::Type) {
188    let tptr = cx.type_ptr();
189    let ti64 = cx.type_i64();
190    let ti32 = cx.type_i32();
191    let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [tptr, ti64, ti32, ti32, tptr, tptr]))vec![tptr, ti64, ti32, ti32, tptr, tptr];
192    let tgt_fn_ty = cx.type_func(&args, ti32);
193    let name = "__tgt_target_kernel";
194    let tgt_decl = declare_offload_fn(&cx, name, tgt_fn_ty);
195    let nounwind = llvm::AttributeKind::NoUnwind.create_attr(cx.llcx);
196    attributes::apply_to_llfn(tgt_decl, Function, &[nounwind]);
197    (tgt_decl, tgt_fn_ty)
198}
199
200// What is our @1 here? A magic global, used in our data_{begin/update/end}_mapper:
201// @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
202// @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8
203// FIXME(offload): @0 should include the file name (e.g. lib.rs) in which the function to be
204// offloaded was defined.
205pub(crate) fn generate_at_one<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll llvm::Value {
206    let unknown_txt = ";unknown;unknown;0;0;;";
207    let c_entry_name = CString::new(unknown_txt).unwrap();
208    let c_val = c_entry_name.as_bytes_with_nul();
209    let initializer = crate::common::bytes_in_context(cx.llcx, c_val);
210    let at_zero = add_unnamed_global(&cx, &"", initializer, PrivateLinkage);
211    llvm::set_alignment(at_zero, Align::ONE);
212
213    // @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8
214    let struct_ident_ty = cx.type_named_struct("struct.ident_t");
215    let struct_elems = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [cx.get_const_i32(0), cx.get_const_i32(2), cx.get_const_i32(0),
                cx.get_const_i32(22), at_zero]))vec![
216        cx.get_const_i32(0),
217        cx.get_const_i32(2),
218        cx.get_const_i32(0),
219        cx.get_const_i32(22),
220        at_zero,
221    ];
222    let struct_elems_ty: Vec<_> = struct_elems.iter().map(|&x| cx.val_ty(x)).collect();
223    let initializer = crate::common::named_struct(struct_ident_ty, &struct_elems);
224    cx.set_struct_body(struct_ident_ty, &struct_elems_ty, false);
225    let at_one = add_unnamed_global(&cx, &"", initializer, PrivateLinkage);
226    llvm::set_alignment(at_one, Align::EIGHT);
227    at_one
228}
229
// Mirror of the offload runtime's `__tgt_offload_entry`. The field list below is
// documentation only: the Rust struct is zero-sized, the matching LLVM struct type
// is declared in `new_decl` and concrete field values are built in `new`.
pub(crate) struct TgtOffloadEntry {
    //   uint64_t Reserved;
    //   uint16_t Version;
    //   uint16_t Kind;
    //   uint32_t Flags; Flags associated with the entry (see Target Region Entry Flags)
    //   void *Address; Address of global symbol within device image (function or global)
    //   char *SymbolName;
    //   uint64_t Size; Size of the entry info (0 if it is a function)
    //   uint64_t Data;
    //   void *AuxAddr;
}
241
242impl TgtOffloadEntry {
243    pub(crate) fn new_decl<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll llvm::Type {
244        let offload_entry_ty = cx.type_named_struct("struct.__tgt_offload_entry");
245        let tptr = cx.type_ptr();
246        let ti64 = cx.type_i64();
247        let ti32 = cx.type_i32();
248        let ti16 = cx.type_i16();
249        // For each kernel to run on the gpu, we will later generate one entry of this type.
250        // copied from LLVM
251        let entry_elements = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [ti64, ti16, ti16, ti32, tptr, tptr, ti64, ti64, tptr]))vec![ti64, ti16, ti16, ti32, tptr, tptr, ti64, ti64, tptr];
252        cx.set_struct_body(offload_entry_ty, &entry_elements, false);
253        offload_entry_ty
254    }
255
256    fn new<'ll>(
257        cx: &CodegenCx<'ll, '_>,
258        region_id: &'ll Value,
259        llglobal: &'ll Value,
260    ) -> [&'ll Value; 9] {
261        let reserved = cx.get_const_i64(0);
262        let version = cx.get_const_i16(1);
263        let kind = cx.get_const_i16(1);
264        let flags = cx.get_const_i32(0);
265        let size = cx.get_const_i64(0);
266        let data = cx.get_const_i64(0);
267        let aux_addr = cx.const_null(cx.type_ptr());
268        [reserved, version, kind, flags, region_id, llglobal, size, data, aux_addr]
269    }
270}
271
272// Taken from the LLVM APITypes.h declaration:
// Mirror of the runtime's `KernelArgsTy`; like `TgtOffloadEntry` this is a zero-sized
// marker — the LLVM struct type is declared in `new_decl`, values in `new`.
struct KernelArgsTy {
    //  uint32_t Version = 0; // Version of this struct for ABI compatibility.
    //  uint32_t NumArgs = 0; // Number of arguments in each input pointer.
    //  void **ArgBasePtrs =
    //      nullptr;                 // Base pointer of each argument (e.g. a struct).
    //  void **ArgPtrs = nullptr;    // Pointer to the argument data.
    //  int64_t *ArgSizes = nullptr; // Size of the argument data in bytes.
    //  int64_t *ArgTypes = nullptr; // Type of the data (e.g. to / from).
    //  void **ArgNames = nullptr;   // Name of the data for debugging, possibly null.
    //  void **ArgMappers = nullptr; // User-defined mappers, possibly null.
    //  uint64_t Tripcount =
    // 0; // Tripcount for the teams / distribute loop, 0 otherwise.
    // struct {
    //    uint64_t NoWait : 1; // Was this kernel spawned with a `nowait` clause.
    //    uint64_t IsCUDA : 1; // Was this kernel spawned via CUDA.
    //    uint64_t Unused : 62;
    //  } Flags = {0, 0, 0}; // totals to 64 Bit, 8 Byte
    //  // The number of teams (for x,y,z dimension).
    //  uint32_t NumTeams[3] = {0, 0, 0};
    //  // The number of threads (for x,y,z dimension).
    //  uint32_t ThreadLimit[3] = {0, 0, 0};
    //  uint32_t DynCGroupMem = 0; // Amount of dynamic cgroup memory requested.
}
296
297impl KernelArgsTy {
298    const OFFLOAD_VERSION: u64 = 3;
299    const FLAGS: u64 = 0;
300    const TRIPCOUNT: u64 = 0;
301    fn new_decl<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Type {
302        let kernel_arguments_ty = cx.type_named_struct("struct.__tgt_kernel_arguments");
303        let tptr = cx.type_ptr();
304        let ti64 = cx.type_i64();
305        let ti32 = cx.type_i32();
306        let tarr = cx.type_array(ti32, 3);
307
308        let kernel_elements =
309            ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [ti32, ti32, tptr, tptr, tptr, tptr, tptr, tptr, ti64, ti64, tarr,
                tarr, ti32]))vec![ti32, ti32, tptr, tptr, tptr, tptr, tptr, tptr, ti64, ti64, tarr, tarr, ti32];
310
311        cx.set_struct_body(kernel_arguments_ty, &kernel_elements, false);
312        kernel_arguments_ty
313    }
314
315    fn new<'ll, 'tcx>(
316        cx: &CodegenCx<'ll, 'tcx>,
317        num_args: u64,
318        memtransfer_types: &'ll Value,
319        geps: [&'ll Value; 3],
320        workgroup_dims: &'ll Value,
321        thread_dims: &'ll Value,
322    ) -> [(Align, &'ll Value); 13] {
323        let four = Align::from_bytes(4).expect("4 Byte alignment should work");
324        let eight = Align::EIGHT;
325
326        [
327            (four, cx.get_const_i32(KernelArgsTy::OFFLOAD_VERSION)),
328            (four, cx.get_const_i32(num_args)),
329            (eight, geps[0]),
330            (eight, geps[1]),
331            (eight, geps[2]),
332            (eight, memtransfer_types),
333            // The next two are debug infos. FIXME(offload): set them
334            (eight, cx.const_null(cx.type_ptr())), // dbg
335            (eight, cx.const_null(cx.type_ptr())), // dbg
336            (eight, cx.get_const_i64(KernelArgsTy::TRIPCOUNT)),
337            (eight, cx.get_const_i64(KernelArgsTy::FLAGS)),
338            (four, workgroup_dims),
339            (four, thread_dims),
340            (four, cx.get_const_i32(0)),
341        ]
342    }
343}
344
345// Contains LLVM values needed to manage offloading for a single kernel.
346#[derive(#[automatically_derived]
impl<'ll> ::core::marker::Copy for OffloadKernelGlobals<'ll> { }Copy, #[automatically_derived]
impl<'ll> ::core::clone::Clone for OffloadKernelGlobals<'ll> {
    #[inline]
    fn clone(&self) -> OffloadKernelGlobals<'ll> {
        let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
        let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
        let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
        let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
        let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
        *self
    }
}Clone)]
347pub(crate) struct OffloadKernelGlobals<'ll> {
348    pub offload_sizes: &'ll llvm::Value,
349    pub memtransfer_begin: &'ll llvm::Value,
350    pub memtransfer_kernel: &'ll llvm::Value,
351    pub memtransfer_end: &'ll llvm::Value,
352    pub region_id: &'ll llvm::Value,
353}
354
355fn gen_tgt_data_mappers<'ll>(
356    cx: &CodegenCx<'ll, '_>,
357) -> (&'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Type) {
358    let tptr = cx.type_ptr();
359    let ti64 = cx.type_i64();
360    let ti32 = cx.type_i32();
361
362    let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [tptr, ti64, ti32, tptr, tptr, tptr, tptr, tptr, tptr]))vec![tptr, ti64, ti32, tptr, tptr, tptr, tptr, tptr, tptr];
363    let mapper_fn_ty = cx.type_func(&args, cx.type_void());
364    let mapper_begin = "__tgt_target_data_begin_mapper";
365    let mapper_update = "__tgt_target_data_update_mapper";
366    let mapper_end = "__tgt_target_data_end_mapper";
367    let begin_mapper_decl = declare_offload_fn(&cx, mapper_begin, mapper_fn_ty);
368    let update_mapper_decl = declare_offload_fn(&cx, mapper_update, mapper_fn_ty);
369    let end_mapper_decl = declare_offload_fn(&cx, mapper_end, mapper_fn_ty);
370
371    let nounwind = llvm::AttributeKind::NoUnwind.create_attr(cx.llcx);
372    attributes::apply_to_llfn(begin_mapper_decl, Function, &[nounwind]);
373    attributes::apply_to_llfn(update_mapper_decl, Function, &[nounwind]);
374    attributes::apply_to_llfn(end_mapper_decl, Function, &[nounwind]);
375
376    (begin_mapper_decl, update_mapper_decl, end_mapper_decl, mapper_fn_ty)
377}
378
379fn add_priv_unnamed_arr<'ll>(cx: &SimpleCx<'ll>, name: &str, vals: &[u64]) -> &'ll llvm::Value {
380    let ti64 = cx.type_i64();
381    let mut size_val = Vec::with_capacity(vals.len());
382    for &val in vals {
383        size_val.push(cx.get_const_i64(val));
384    }
385    let initializer = cx.const_array(ti64, &size_val);
386    add_unnamed_global(cx, name, initializer, PrivateLinkage)
387}
388
/// Like [`add_global`], but additionally marks the global as `unnamed_addr`
/// (its address is not significant, so LLVM may merge identical constants).
pub(crate) fn add_unnamed_global<'ll>(
    cx: &SimpleCx<'ll>,
    name: &str,
    initializer: &'ll llvm::Value,
    l: Linkage,
) -> &'ll llvm::Value {
    let llglobal = add_global(cx, name, initializer, l);
    llvm::LLVMSetUnnamedAddress(llglobal, llvm::UnnamedAddr::Global);
    llglobal
}
399
/// Adds a constant global named `name` with linkage `l` to the module, initialized
/// with `initializer`; the global's type is derived from the initializer.
pub(crate) fn add_global<'ll>(
    cx: &SimpleCx<'ll>,
    name: &str,
    initializer: &'ll llvm::Value,
    l: Linkage,
) -> &'ll llvm::Value {
    let c_name = CString::new(name).unwrap();
    let llglobal: &'ll llvm::Value = llvm::add_global(cx.llmod, cx.val_ty(initializer), &c_name);
    llvm::set_global_constant(llglobal, true);
    llvm::set_linkage(llglobal, l);
    llvm::set_initializer(llglobal, initializer);
    llglobal
}
413
414// This function returns a memtransfer value which encodes how arguments to this kernel shall be
415// mapped to/from the gpu. It also returns a region_id with the name of this kernel, to be
416// concatenated into the list of region_ids.
417pub(crate) fn gen_define_handling<'ll>(
418    cx: &CodegenCx<'ll, '_>,
419    metadata: &[OffloadMetadata],
420    symbol: String,
421    offload_globals: &OffloadGlobals<'ll>,
422) -> OffloadKernelGlobals<'ll> {
423    if let Some(entry) = cx.offload_kernel_cache.borrow().get(&symbol) {
424        return *entry;
425    }
426
427    let offload_entry_ty = offload_globals.offload_entry_ty;
428
429    let (sizes, transfer): (Vec<_>, Vec<_>) =
430        metadata.iter().map(|m| (m.payload_size, m.mode)).unzip();
431    // Our begin mapper should only see simplified information about which args have to be
432    // transferred to the device, the end mapper only about which args should be transferred back.
433    // Any information beyond that makes it harder for LLVM's opt pass to evaluate whether it can
434    // safely move (=optimize) the LLVM-IR location of this data transfer. Only the mapping types
435    // mentioned below are handled, so make sure that we don't generate any other ones.
436    let handled_mappings = MappingFlags::TO
437        | MappingFlags::FROM
438        | MappingFlags::TARGET_PARAM
439        | MappingFlags::LITERAL
440        | MappingFlags::IMPLICIT;
441    for arg in &transfer {
442        if true {
    if !!arg.contains_unknown_bits() {
        ::core::panicking::panic("assertion failed: !arg.contains_unknown_bits()")
    };
};debug_assert!(!arg.contains_unknown_bits());
443        if true {
    if !handled_mappings.contains(*arg) {
        ::core::panicking::panic("assertion failed: handled_mappings.contains(*arg)")
    };
};debug_assert!(handled_mappings.contains(*arg));
444    }
445
446    let valid_begin_mappings = MappingFlags::TO | MappingFlags::LITERAL | MappingFlags::IMPLICIT;
447    let transfer_to: Vec<u64> =
448        transfer.iter().map(|m| m.intersection(valid_begin_mappings).bits()).collect();
449    let transfer_from: Vec<u64> =
450        transfer.iter().map(|m| m.intersection(MappingFlags::FROM).bits()).collect();
451    let valid_kernel_mappings = MappingFlags::LITERAL | MappingFlags::IMPLICIT;
452    // FIXME(offload): add `OMP_MAP_TARGET_PARAM = 0x20` only if necessary
453    let transfer_kernel: Vec<u64> = transfer
454        .iter()
455        .map(|m| (m.intersection(valid_kernel_mappings) | MappingFlags::TARGET_PARAM).bits())
456        .collect();
457
458    let actual_sizes = sizes
459        .iter()
460        .map(|s| match s {
461            OffloadSize::Static(sz) => *sz,
462            // NOTE(Sa4dUs): set `.offload_sizes` entry to 0 for sizes that we determine at runtime, just like clang
463            _ => 0,
464        })
465        .collect::<Vec<_>>();
466    let offload_sizes =
467        add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!(".offload_sizes.{0}", symbol))
    })format!(".offload_sizes.{symbol}"), &actual_sizes);
468    let memtransfer_begin =
469        add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!(".offload_maptypes.{0}.begin",
                symbol))
    })format!(".offload_maptypes.{symbol}.begin"), &transfer_to);
470    let memtransfer_kernel =
471        add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!(".offload_maptypes.{0}.kernel",
                symbol))
    })format!(".offload_maptypes.{symbol}.kernel"), &transfer_kernel);
472    let memtransfer_end =
473        add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!(".offload_maptypes.{0}.end",
                symbol))
    })format!(".offload_maptypes.{symbol}.end"), &transfer_from);
474
475    // Next: For each function, generate these three entries. A weak constant,
476    // the llvm.rodata entry name, and  the llvm_offload_entries value
477
478    let name = ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!(".{0}.region_id", symbol))
    })format!(".{symbol}.region_id");
479    let initializer = cx.get_const_i8(0);
480    let region_id = add_global(&cx, &name, initializer, WeakAnyLinkage);
481
482    let c_entry_name = CString::new(symbol.clone()).unwrap();
483    let c_val = c_entry_name.as_bytes_with_nul();
484    let offload_entry_name = ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!(".offloading.entry_name.{0}",
                symbol))
    })format!(".offloading.entry_name.{symbol}");
485
486    let initializer = crate::common::bytes_in_context(cx.llcx, c_val);
487    let llglobal = add_unnamed_global(&cx, &offload_entry_name, initializer, InternalLinkage);
488    llvm::set_alignment(llglobal, Align::ONE);
489    llvm::set_section(llglobal, c".llvm.rodata.offloading");
490
491    let name = ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!(".offloading.entry.{0}", symbol))
    })format!(".offloading.entry.{symbol}");
492
493    // See the __tgt_offload_entry documentation above.
494    let elems = TgtOffloadEntry::new(&cx, region_id, llglobal);
495
496    let initializer = crate::common::named_struct(offload_entry_ty, &elems);
497    let c_name = CString::new(name).unwrap();
498    let offload_entry = llvm::add_global(cx.llmod, offload_entry_ty, &c_name);
499    llvm::set_global_constant(offload_entry, true);
500    llvm::set_linkage(offload_entry, WeakAnyLinkage);
501    llvm::set_initializer(offload_entry, initializer);
502    llvm::set_alignment(offload_entry, Align::EIGHT);
503    let c_section_name = CString::new("llvm_offload_entries").unwrap();
504    llvm::set_section(offload_entry, &c_section_name);
505
506    cx.add_compiler_used_global(offload_entry);
507
508    let result = OffloadKernelGlobals {
509        offload_sizes,
510        memtransfer_begin,
511        memtransfer_kernel,
512        memtransfer_end,
513        region_id,
514    };
515
516    cx.offload_kernel_cache.borrow_mut().insert(symbol, result);
517
518    result
519}
520
/// Declares a function named `name` with type `ty`, C calling convention and
/// default visibility — the shape shared by all offload runtime symbols here.
fn declare_offload_fn<'ll>(
    cx: &CodegenCx<'ll, '_>,
    name: &str,
    ty: &'ll llvm::Type,
) -> &'ll llvm::Value {
    crate::declare::declare_simple_fn(
        cx,
        name,
        llvm::CallConv::CCallConv,
        llvm::UnnamedAddr::No,
        llvm::Visibility::Default,
        ty,
    )
}
535
536pub(crate) fn scalar_width<'ll>(cx: &'ll SimpleCx<'_>, ty: &'ll Type) -> u64 {
537    match cx.type_kind(ty) {
538        TypeKind::Half
539        | TypeKind::Float
540        | TypeKind::Double
541        | TypeKind::X86_FP80
542        | TypeKind::FP128
543        | TypeKind::PPC_FP128 => cx.float_width(ty) as u64,
544        TypeKind::Integer => cx.int_width(ty),
545        other => ::rustc_middle::util::bug::bug_fmt(format_args!("scalar_width was called on a non scalar type {0:?}",
        other))bug!("scalar_width was called on a non scalar type {other:?}"),
546    }
547}
548
549fn get_runtime_size<'ll, 'tcx>(
550    builder: &mut Builder<'_, 'll, 'tcx>,
551    args: &[&'ll Value],
552    index: usize,
553    meta: &OffloadMetadata,
554) -> &'ll Value {
555    match meta.payload_size {
556        OffloadSize::Slice { element_size } => {
557            let length_idx = index + 1;
558            let length = args[length_idx];
559            let length_i64 = builder.intcast(length, builder.cx.type_i64(), false);
560            builder.mul(length_i64, builder.cx.get_const_i64(element_size))
561        }
562        _ => ::rustc_middle::util::bug::bug_fmt(format_args!("unexpected offload size {0:?}",
        meta.payload_size))bug!("unexpected offload size {:?}", meta.payload_size),
563    }
564}
565
566// For each kernel *call*, we now use some of our previous declared globals to move data to and from
567// the gpu. For now, we only handle the data transfer part of it.
568// If two consecutive kernels use the same memory, we still move it to the host and back to the gpu.
569// Since in our frontend users (by default) don't have to specify data transfer, this is something
570// we should optimize in the future! In some cases we can directly zero-allocate on the device and
571// only move data back, or if something is immutable, we might only copy it to the device.
572//
573// Current steps:
574// 0. Alloca some variables for the following steps
575// 1. set insert point before kernel call.
576// 2. generate all the GEPS and stores, to be used in 3)
577// 3. generate __tgt_target_data_begin calls to move data to the GPU
578//
579// unchanged: keep kernel call. Later move the kernel to the GPU
580//
581// 4. set insert point after kernel call.
582// 5. generate all the GEPS and stores, to be used in 6)
583// 6. generate __tgt_target_data_end calls to move data from the GPU
584pub(crate) fn gen_call_handling<'ll, 'tcx>(
585    builder: &mut Builder<'_, 'll, 'tcx>,
586    offload_data: &OffloadKernelGlobals<'ll>,
587    args: &[&'ll Value],
588    types: &[&Type],
589    metadata: &[OffloadMetadata],
590    offload_globals: &OffloadGlobals<'ll>,
591    offload_dims: &OffloadKernelDims<'ll>,
592) {
593    let cx = builder.cx;
594    let OffloadKernelGlobals {
595        offload_sizes,
596        memtransfer_begin,
597        memtransfer_kernel,
598        memtransfer_end,
599        region_id,
600    } = offload_data;
601    let OffloadKernelDims { num_workgroups, threads_per_block, workgroup_dims, thread_dims } =
602        offload_dims;
603
604    let has_dynamic = metadata.iter().any(|m| !#[allow(non_exhaustive_omitted_patterns)] match m.payload_size {
    OffloadSize::Static(_) => true,
    _ => false,
}matches!(m.payload_size, OffloadSize::Static(_)));
605
606    let tgt_decl = offload_globals.launcher_fn;
607    let tgt_target_kernel_ty = offload_globals.launcher_ty;
608
609    let tgt_kernel_decl = offload_globals.kernel_args_ty;
610    let begin_mapper_decl = offload_globals.begin_mapper;
611    let end_mapper_decl = offload_globals.end_mapper;
612    let fn_ty = offload_globals.mapper_fn_ty;
613
614    let num_args = types.len() as u64;
615    let bb = builder.llbb();
616
617    // Step 0)
618    unsafe {
619        llvm::LLVMRustPositionBuilderPastAllocas(&builder.llbuilder, builder.llfn());
620    }
621
622    let ty = cx.type_array(cx.type_ptr(), num_args);
623    // Baseptr are just the input pointer to the kernel, stored in a local alloca
624    let a1 = builder.direct_alloca(ty, Align::EIGHT, ".offload_baseptrs");
625    // Ptrs are the result of a gep into the baseptr, at least for our trivial types.
626    let a2 = builder.direct_alloca(ty, Align::EIGHT, ".offload_ptrs");
627    // These represent the sizes in bytes, e.g. the entry for `&[f64; 16]` will be 8*16.
628    let ty2 = cx.type_array(cx.type_i64(), num_args);
629
630    let a4 = if has_dynamic {
631        let alloc = builder.direct_alloca(ty2, Align::EIGHT, ".offload_sizes");
632
633        builder.memcpy(
634            alloc,
635            Align::EIGHT,
636            offload_sizes,
637            Align::EIGHT,
638            cx.get_const_i64(8 * args.len() as u64),
639            MemFlags::empty(),
640            None,
641        );
642
643        alloc
644    } else {
645        offload_sizes
646    };
647
648    //%kernel_args = alloca %struct.__tgt_kernel_arguments, align 8
649    let a5 = builder.direct_alloca(tgt_kernel_decl, Align::EIGHT, "kernel_args");
650
651    // Step 1)
652    unsafe {
653        llvm::LLVMPositionBuilderAtEnd(&builder.llbuilder, bb);
654    }
655
656    // Now we allocate once per function param, a copy to be passed to one of our maps.
657    let mut vals = ::alloc::vec::Vec::new()vec![];
658    let mut geps = ::alloc::vec::Vec::new()vec![];
659    let i32_0 = cx.get_const_i32(0);
660    for &v in args {
661        let ty = cx.val_ty(v);
662        let ty_kind = cx.type_kind(ty);
663        let (base_val, gep_base) = match ty_kind {
664            TypeKind::Pointer => (v, v),
665            TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
666                // FIXME(Sa4dUs): check for `f128` support, latest NVIDIA cards support it
667                let num_bits = scalar_width(cx, ty);
668
669                let bb = builder.llbb();
670                unsafe {
671                    llvm::LLVMRustPositionBuilderPastAllocas(builder.llbuilder, builder.llfn());
672                }
673                let addr = builder.direct_alloca(cx.type_i64(), Align::EIGHT, "addr");
674                unsafe {
675                    llvm::LLVMPositionBuilderAtEnd(builder.llbuilder, bb);
676                }
677
678                let cast = builder.bitcast(v, cx.type_ix(num_bits));
679                let value = builder.zext(cast, cx.type_i64());
680                builder.store(value, addr, Align::EIGHT);
681                (value, addr)
682            }
683            other => ::rustc_middle::util::bug::bug_fmt(format_args!("offload does not support {0:?}",
        other))bug!("offload does not support {other:?}"),
684        };
685
686        let gep = builder.inbounds_gep(cx.type_f32(), gep_base, &[i32_0]);
687
688        vals.push(base_val);
689        geps.push(gep);
690    }
691
692    for i in 0..num_args {
693        let idx = cx.get_const_i32(i);
694        let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, idx]);
695        builder.store(vals[i as usize], gep1, Align::EIGHT);
696        let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, idx]);
697        builder.store(geps[i as usize], gep2, Align::EIGHT);
698
699        if !#[allow(non_exhaustive_omitted_patterns)] match metadata[i as
                usize].payload_size {
    OffloadSize::Static(_) => true,
    _ => false,
}matches!(metadata[i as usize].payload_size, OffloadSize::Static(_)) {
700            let gep3 = builder.inbounds_gep(ty2, a4, &[i32_0, idx]);
701            let size_val = get_runtime_size(builder, args, i as usize, &metadata[i as usize]);
702            builder.store(size_val, gep3, Align::EIGHT);
703        }
704    }
705
706    // For now we have a very simplistic indexing scheme into our
707    // offload_{baseptrs,ptrs,sizes}. We will probably improve this along with our gpu frontend pr.
708    fn get_geps<'ll, 'tcx>(
709        builder: &mut Builder<'_, 'll, 'tcx>,
710        ty: &'ll Type,
711        ty2: &'ll Type,
712        a1: &'ll Value,
713        a2: &'ll Value,
714        a4: &'ll Value,
715        is_dynamic: bool,
716    ) -> [&'ll Value; 3] {
717        let cx = builder.cx;
718        let i32_0 = cx.get_const_i32(0);
719
720        let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, i32_0]);
721        let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, i32_0]);
722        let gep3 = if is_dynamic { builder.inbounds_gep(ty2, a4, &[i32_0, i32_0]) } else { a4 };
723        [gep1, gep2, gep3]
724    }
725
726    fn generate_mapper_call<'ll, 'tcx>(
727        builder: &mut Builder<'_, 'll, 'tcx>,
728        geps: [&'ll Value; 3],
729        o_type: &'ll Value,
730        fn_to_call: &'ll Value,
731        fn_ty: &'ll Type,
732        num_args: u64,
733        s_ident_t: &'ll Value,
734    ) {
735        let cx = builder.cx;
736        let nullptr = cx.const_null(cx.type_ptr());
737        let i64_max = cx.get_const_i64(u64::MAX);
738        let num_args = cx.get_const_i32(num_args);
739        let args =
740            ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [s_ident_t, i64_max, num_args, geps[0], geps[1], geps[2], o_type,
                nullptr, nullptr]))vec![s_ident_t, i64_max, num_args, geps[0], geps[1], geps[2], o_type, nullptr, nullptr];
741        builder.call(fn_ty, None, None, fn_to_call, &args, None, None);
742    }
743
744    // Step 2)
745    let s_ident_t = offload_globals.ident_t_global;
746    let geps = get_geps(builder, ty, ty2, a1, a2, a4, has_dynamic);
747    generate_mapper_call(
748        builder,
749        geps,
750        memtransfer_begin,
751        begin_mapper_decl,
752        fn_ty,
753        num_args,
754        s_ident_t,
755    );
756    let values =
757        KernelArgsTy::new(&cx, num_args, memtransfer_kernel, geps, workgroup_dims, thread_dims);
758
759    // Step 3)
760    // Here we fill the KernelArgsTy, see the documentation above
761    for (i, value) in values.iter().enumerate() {
762        let ptr = builder.inbounds_gep(tgt_kernel_decl, a5, &[i32_0, cx.get_const_i32(i as u64)]);
763        builder.store(value.1, ptr, value.0);
764    }
765
766    let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
        [s_ident_t, cx.get_const_i64(u64::MAX), num_workgroups,
                threads_per_block, region_id, a5]))vec![
767        s_ident_t,
768        // FIXME(offload) give users a way to select which GPU to use.
769        cx.get_const_i64(u64::MAX), // MAX == -1.
770        num_workgroups,
771        threads_per_block,
772        region_id,
773        a5,
774    ];
775    builder.call(tgt_target_kernel_ty, None, None, tgt_decl, &args, None, None);
776    // %41 = call i32 @__tgt_target_kernel(ptr @1, i64 -1, i32 2097152, i32 256, ptr @.kernel_1.region_id, ptr %kernel_args)
777
778    // Step 4)
779    let geps = get_geps(builder, ty, ty2, a1, a2, a4, has_dynamic);
780    generate_mapper_call(
781        builder,
782        geps,
783        memtransfer_end,
784        end_mapper_decl,
785        fn_ty,
786        num_args,
787        s_ident_t,
788    );
789}