1use std::ffi::CString;
2
3use bitflags::Flags;
4use llvm::Linkage::*;
5use rustc_abi::Align;
6use rustc_codegen_ssa::MemFlags;
7use rustc_codegen_ssa::common::TypeKind;
8use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
9use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods};
10use rustc_middle::bug;
11use rustc_middle::ty::offload_meta::{MappingFlags, OffloadMetadata, OffloadSize};
12
13use crate::builder::Builder;
14use crate::common::CodegenCx;
15use crate::llvm::AttributePlace::Function;
16use crate::llvm::{self, Linkage, Type, Value};
17use crate::{SimpleCx, attributes};
18
/// Module-level LLVM declarations shared by every offload kernel: the
/// `__tgt_target_kernel` launcher, the kernel-argument and offload-entry
/// struct types, the data-mapper runtime functions, and the `ident_t` global.
/// Created once per module by [`OffloadGlobals::declare`].
pub(crate) struct OffloadGlobals<'ll> {
    // `__tgt_target_kernel` declaration and its LLVM function type.
    pub launcher_fn: &'ll llvm::Value,
    pub launcher_ty: &'ll llvm::Type,

    // `struct.__tgt_kernel_arguments` type passed to the launcher.
    pub kernel_args_ty: &'ll llvm::Type,

    // `struct.__tgt_offload_entry` type used for kernel entry globals.
    pub offload_entry_ty: &'ll llvm::Type,

    // `__tgt_target_data_begin_mapper` / `__tgt_target_data_end_mapper`
    // declarations plus their shared function type.
    pub begin_mapper: &'ll llvm::Value,
    pub end_mapper: &'ll llvm::Value,
    pub mapper_fn_ty: &'ll llvm::Type,

    // Private `struct.ident_t` global built by `generate_at_one`.
    pub ident_t_global: &'ll llvm::Value,
}
34
35impl<'ll> OffloadGlobals<'ll> {
36 pub(crate) fn declare(cx: &CodegenCx<'ll, '_>) -> Self {
37 let (launcher_fn, launcher_ty) = generate_launcher(cx);
38 let kernel_args_ty = KernelArgsTy::new_decl(cx);
39 let offload_entry_ty = TgtOffloadEntry::new_decl(cx);
40 let (begin_mapper, _, end_mapper, mapper_fn_ty) = gen_tgt_data_mappers(cx);
41 let ident_t_global = generate_at_one(cx);
42
43 llvm::add_module_flag_u32(cx.llmod(), llvm::ModuleFlagMergeBehavior::Max, "openmp", 51);
46
47 OffloadGlobals {
48 launcher_fn,
49 launcher_ty,
50 kernel_args_ty,
51 offload_entry_ty,
52 begin_mapper,
53 end_mapper,
54 mapper_fn_ty,
55 ident_t_global,
56 }
57 }
58}
59
60pub(crate) fn register_offload<'ll>(cx: &CodegenCx<'ll, '_>) {
67 let register_lib_name = "__tgt_register_lib";
70 if cx.get_function(register_lib_name).is_some() {
71 return;
72 }
73
74 let reg_lib_decl = cx.type_func(&[cx.type_ptr()], cx.type_void());
75 let register_lib = declare_offload_fn(&cx, register_lib_name, reg_lib_decl);
76 let unregister_lib = declare_offload_fn(&cx, "__tgt_unregister_lib", reg_lib_decl);
77
78 let ptr_null = cx.const_null(cx.type_ptr());
79 let const_struct = cx.const_struct(&[cx.get_const_i32(0), ptr_null, ptr_null, ptr_null], false);
80 let omp_descriptor =
81 add_global(cx, ".omp_offloading.descriptor", const_struct, InternalLinkage);
82 let atexit = cx.type_func(&[cx.type_ptr()], cx.type_i32());
86 let atexit_fn = declare_offload_fn(cx, "atexit", atexit);
87
88 let init_ty = cx.type_func(&[], cx.type_void());
91 let init_rtls = declare_offload_fn(cx, "__tgt_init_all_rtls", init_ty);
92
93 let desc_ty = cx.type_func(&[], cx.type_void());
94 let reg_name = ".omp_offloading.descriptor_reg";
95 let unreg_name = ".omp_offloading.descriptor_unreg";
96 let desc_reg_fn = declare_offload_fn(cx, reg_name, desc_ty);
97 let desc_unreg_fn = declare_offload_fn(cx, unreg_name, desc_ty);
98 llvm::set_linkage(desc_reg_fn, InternalLinkage);
99 llvm::set_linkage(desc_unreg_fn, InternalLinkage);
100 llvm::set_section(desc_reg_fn, c".text.startup");
101 llvm::set_section(desc_unreg_fn, c".text.startup");
102
103 let bb = Builder::append_block(cx, desc_reg_fn, "entry");
111 let mut a = Builder::build(cx, bb);
112 a.call(reg_lib_decl, None, None, register_lib, &[omp_descriptor], None, None);
113 a.call(init_ty, None, None, init_rtls, &[], None, None);
114 a.call(atexit, None, None, atexit_fn, &[desc_unreg_fn], None, None);
115 a.ret_void();
116
117 let bb = Builder::append_block(cx, desc_unreg_fn, "entry");
123 let mut a = Builder::build(cx, bb);
124 a.call(reg_lib_decl, None, None, unregister_lib, &[omp_descriptor], None, None);
125 a.ret_void();
126
127 let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[cx.get_const_i32(101), desc_reg_fn, ptr_null]))vec![cx.get_const_i32(101), desc_reg_fn, ptr_null];
129 let const_struct = cx.const_struct(&args, false);
130 let arr = cx.const_array(cx.val_ty(const_struct), &[const_struct]);
131 add_global(cx, "llvm.global_ctors", arr, AppendingLinkage);
132}
133
/// Launch dimensions for one offload kernel call: the loaded 3-element `i32`
/// dimension arrays plus their precomputed x*y*z products.
pub(crate) struct OffloadKernelDims<'ll> {
    // Product of the three workgroup-dimension components.
    num_workgroups: &'ll Value,
    // Product of the three thread-dimension components.
    threads_per_block: &'ll Value,
    // The loaded `[i32; 3]` array values themselves.
    workgroup_dims: &'ll Value,
    thread_dims: &'ll Value,
}
140
141impl<'ll> OffloadKernelDims<'ll> {
142 pub(crate) fn from_operands<'tcx>(
143 builder: &mut Builder<'_, 'll, 'tcx>,
144 workgroup_op: &OperandRef<'tcx, &'ll llvm::Value>,
145 thread_op: &OperandRef<'tcx, &'ll llvm::Value>,
146 ) -> Self {
147 let cx = builder.cx;
148 let arr_ty = cx.type_array(cx.type_i32(), 3);
149 let four = Align::from_bytes(4).unwrap();
150
151 let OperandValue::Ref(place) = workgroup_op.val else {
152 ::rustc_middle::util::bug::bug_fmt(format_args!("expected array operand by reference"));bug!("expected array operand by reference");
153 };
154 let workgroup_val = builder.load(arr_ty, place.llval, four);
155
156 let OperandValue::Ref(place) = thread_op.val else {
157 ::rustc_middle::util::bug::bug_fmt(format_args!("expected array operand by reference"));bug!("expected array operand by reference");
158 };
159 let thread_val = builder.load(arr_ty, place.llval, four);
160
161 fn mul_dim3<'ll, 'tcx>(
162 builder: &mut Builder<'_, 'll, 'tcx>,
163 arr: &'ll Value,
164 ) -> &'ll Value {
165 let x = builder.extract_value(arr, 0);
166 let y = builder.extract_value(arr, 1);
167 let z = builder.extract_value(arr, 2);
168
169 let xy = builder.mul(x, y);
170 builder.mul(xy, z)
171 }
172
173 let num_workgroups = mul_dim3(builder, workgroup_val);
174 let threads_per_block = mul_dim3(builder, thread_val);
175
176 OffloadKernelDims {
177 workgroup_dims: workgroup_val,
178 thread_dims: thread_val,
179 num_workgroups,
180 threads_per_block,
181 }
182 }
183}
184
185fn generate_launcher<'ll>(cx: &CodegenCx<'ll, '_>) -> (&'ll llvm::Value, &'ll llvm::Type) {
188 let tptr = cx.type_ptr();
189 let ti64 = cx.type_i64();
190 let ti32 = cx.type_i32();
191 let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[tptr, ti64, ti32, ti32, tptr, tptr]))vec![tptr, ti64, ti32, ti32, tptr, tptr];
192 let tgt_fn_ty = cx.type_func(&args, ti32);
193 let name = "__tgt_target_kernel";
194 let tgt_decl = declare_offload_fn(&cx, name, tgt_fn_ty);
195 let nounwind = llvm::AttributeKind::NoUnwind.create_attr(cx.llcx);
196 attributes::apply_to_llfn(tgt_decl, Function, &[nounwind]);
197 (tgt_decl, tgt_fn_ty)
198}
199
200pub(crate) fn generate_at_one<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll llvm::Value {
206 let unknown_txt = ";unknown;unknown;0;0;;";
207 let c_entry_name = CString::new(unknown_txt).unwrap();
208 let c_val = c_entry_name.as_bytes_with_nul();
209 let initializer = crate::common::bytes_in_context(cx.llcx, c_val);
210 let at_zero = add_unnamed_global(&cx, &"", initializer, PrivateLinkage);
211 llvm::set_alignment(at_zero, Align::ONE);
212
213 let struct_ident_ty = cx.type_named_struct("struct.ident_t");
215 let struct_elems = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[cx.get_const_i32(0), cx.get_const_i32(2), cx.get_const_i32(0),
cx.get_const_i32(22), at_zero]))vec![
216 cx.get_const_i32(0),
217 cx.get_const_i32(2),
218 cx.get_const_i32(0),
219 cx.get_const_i32(22),
220 at_zero,
221 ];
222 let struct_elems_ty: Vec<_> = struct_elems.iter().map(|&x| cx.val_ty(x)).collect();
223 let initializer = crate::common::named_struct(struct_ident_ty, &struct_elems);
224 cx.set_struct_body(struct_ident_ty, &struct_elems_ty, false);
225 let at_one = add_unnamed_global(&cx, &"", initializer, PrivateLinkage);
226 llvm::set_alignment(at_one, Align::EIGHT);
227 at_one
228}
229
/// Field-less marker type namespacing helpers for the offload runtime's
/// `__tgt_offload_entry` record: `new_decl` declares the LLVM struct type,
/// `new` produces the field constants for one entry.
pub(crate) struct TgtOffloadEntry {
    }
241
242impl TgtOffloadEntry {
243 pub(crate) fn new_decl<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll llvm::Type {
244 let offload_entry_ty = cx.type_named_struct("struct.__tgt_offload_entry");
245 let tptr = cx.type_ptr();
246 let ti64 = cx.type_i64();
247 let ti32 = cx.type_i32();
248 let ti16 = cx.type_i16();
249 let entry_elements = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[ti64, ti16, ti16, ti32, tptr, tptr, ti64, ti64, tptr]))vec![ti64, ti16, ti16, ti32, tptr, tptr, ti64, ti64, tptr];
252 cx.set_struct_body(offload_entry_ty, &entry_elements, false);
253 offload_entry_ty
254 }
255
256 fn new<'ll>(
257 cx: &CodegenCx<'ll, '_>,
258 region_id: &'ll Value,
259 llglobal: &'ll Value,
260 ) -> [&'ll Value; 9] {
261 let reserved = cx.get_const_i64(0);
262 let version = cx.get_const_i16(1);
263 let kind = cx.get_const_i16(1);
264 let flags = cx.get_const_i32(0);
265 let size = cx.get_const_i64(0);
266 let data = cx.get_const_i64(0);
267 let aux_addr = cx.const_null(cx.type_ptr());
268 [reserved, version, kind, flags, region_id, llglobal, size, data, aux_addr]
269 }
270}
271
/// Field-less marker type namespacing helpers for the offload runtime's
/// `__tgt_kernel_arguments` record: `new_decl` declares the LLVM struct type,
/// `new` produces the (alignment, value) pairs for one kernel launch.
struct KernelArgsTy {
    }
296
297impl KernelArgsTy {
298 const OFFLOAD_VERSION: u64 = 3;
299 const FLAGS: u64 = 0;
300 const TRIPCOUNT: u64 = 0;
301 fn new_decl<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Type {
302 let kernel_arguments_ty = cx.type_named_struct("struct.__tgt_kernel_arguments");
303 let tptr = cx.type_ptr();
304 let ti64 = cx.type_i64();
305 let ti32 = cx.type_i32();
306 let tarr = cx.type_array(ti32, 3);
307
308 let kernel_elements =
309 ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[ti32, ti32, tptr, tptr, tptr, tptr, tptr, tptr, ti64, ti64, tarr,
tarr, ti32]))vec![ti32, ti32, tptr, tptr, tptr, tptr, tptr, tptr, ti64, ti64, tarr, tarr, ti32];
310
311 cx.set_struct_body(kernel_arguments_ty, &kernel_elements, false);
312 kernel_arguments_ty
313 }
314
315 fn new<'ll, 'tcx>(
316 cx: &CodegenCx<'ll, 'tcx>,
317 num_args: u64,
318 memtransfer_types: &'ll Value,
319 geps: [&'ll Value; 3],
320 workgroup_dims: &'ll Value,
321 thread_dims: &'ll Value,
322 ) -> [(Align, &'ll Value); 13] {
323 let four = Align::from_bytes(4).expect("4 Byte alignment should work");
324 let eight = Align::EIGHT;
325
326 [
327 (four, cx.get_const_i32(KernelArgsTy::OFFLOAD_VERSION)),
328 (four, cx.get_const_i32(num_args)),
329 (eight, geps[0]),
330 (eight, geps[1]),
331 (eight, geps[2]),
332 (eight, memtransfer_types),
333 (eight, cx.const_null(cx.type_ptr())), (eight, cx.const_null(cx.type_ptr())), (eight, cx.get_const_i64(KernelArgsTy::TRIPCOUNT)),
337 (eight, cx.get_const_i64(KernelArgsTy::FLAGS)),
338 (four, workgroup_dims),
339 (four, thread_dims),
340 (four, cx.get_const_i32(0)),
341 ]
342 }
343}
344
345#[derive(#[automatically_derived]
impl<'ll> ::core::marker::Copy for OffloadKernelGlobals<'ll> { }Copy, #[automatically_derived]
impl<'ll> ::core::clone::Clone for OffloadKernelGlobals<'ll> {
#[inline]
fn clone(&self) -> OffloadKernelGlobals<'ll> {
let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
let _: ::core::clone::AssertParamIsClone<&'ll llvm::Value>;
*self
}
}Clone)]
347pub(crate) struct OffloadKernelGlobals<'ll> {
348 pub offload_sizes: &'ll llvm::Value,
349 pub memtransfer_begin: &'ll llvm::Value,
350 pub memtransfer_kernel: &'ll llvm::Value,
351 pub memtransfer_end: &'ll llvm::Value,
352 pub region_id: &'ll llvm::Value,
353}
354
355fn gen_tgt_data_mappers<'ll>(
356 cx: &CodegenCx<'ll, '_>,
357) -> (&'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Type) {
358 let tptr = cx.type_ptr();
359 let ti64 = cx.type_i64();
360 let ti32 = cx.type_i32();
361
362 let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[tptr, ti64, ti32, tptr, tptr, tptr, tptr, tptr, tptr]))vec![tptr, ti64, ti32, tptr, tptr, tptr, tptr, tptr, tptr];
363 let mapper_fn_ty = cx.type_func(&args, cx.type_void());
364 let mapper_begin = "__tgt_target_data_begin_mapper";
365 let mapper_update = "__tgt_target_data_update_mapper";
366 let mapper_end = "__tgt_target_data_end_mapper";
367 let begin_mapper_decl = declare_offload_fn(&cx, mapper_begin, mapper_fn_ty);
368 let update_mapper_decl = declare_offload_fn(&cx, mapper_update, mapper_fn_ty);
369 let end_mapper_decl = declare_offload_fn(&cx, mapper_end, mapper_fn_ty);
370
371 let nounwind = llvm::AttributeKind::NoUnwind.create_attr(cx.llcx);
372 attributes::apply_to_llfn(begin_mapper_decl, Function, &[nounwind]);
373 attributes::apply_to_llfn(update_mapper_decl, Function, &[nounwind]);
374 attributes::apply_to_llfn(end_mapper_decl, Function, &[nounwind]);
375
376 (begin_mapper_decl, update_mapper_decl, end_mapper_decl, mapper_fn_ty)
377}
378
379fn add_priv_unnamed_arr<'ll>(cx: &SimpleCx<'ll>, name: &str, vals: &[u64]) -> &'ll llvm::Value {
380 let ti64 = cx.type_i64();
381 let mut size_val = Vec::with_capacity(vals.len());
382 for &val in vals {
383 size_val.push(cx.get_const_i64(val));
384 }
385 let initializer = cx.const_array(ti64, &size_val);
386 add_unnamed_global(cx, name, initializer, PrivateLinkage)
387}
388
389pub(crate) fn add_unnamed_global<'ll>(
390 cx: &SimpleCx<'ll>,
391 name: &str,
392 initializer: &'ll llvm::Value,
393 l: Linkage,
394) -> &'ll llvm::Value {
395 let llglobal = add_global(cx, name, initializer, l);
396 llvm::LLVMSetUnnamedAddress(llglobal, llvm::UnnamedAddr::Global);
397 llglobal
398}
399
400pub(crate) fn add_global<'ll>(
401 cx: &SimpleCx<'ll>,
402 name: &str,
403 initializer: &'ll llvm::Value,
404 l: Linkage,
405) -> &'ll llvm::Value {
406 let c_name = CString::new(name).unwrap();
407 let llglobal: &'ll llvm::Value = llvm::add_global(cx.llmod, cx.val_ty(initializer), &c_name);
408 llvm::set_global_constant(llglobal, true);
409 llvm::set_linkage(llglobal, l);
410 llvm::set_initializer(llglobal, initializer);
411 llglobal
412}
413
414pub(crate) fn gen_define_handling<'ll>(
418 cx: &CodegenCx<'ll, '_>,
419 metadata: &[OffloadMetadata],
420 symbol: String,
421 offload_globals: &OffloadGlobals<'ll>,
422) -> OffloadKernelGlobals<'ll> {
423 if let Some(entry) = cx.offload_kernel_cache.borrow().get(&symbol) {
424 return *entry;
425 }
426
427 let offload_entry_ty = offload_globals.offload_entry_ty;
428
429 let (sizes, transfer): (Vec<_>, Vec<_>) =
430 metadata.iter().map(|m| (m.payload_size, m.mode)).unzip();
431 let handled_mappings = MappingFlags::TO
437 | MappingFlags::FROM
438 | MappingFlags::TARGET_PARAM
439 | MappingFlags::LITERAL
440 | MappingFlags::IMPLICIT;
441 for arg in &transfer {
442 if true {
if !!arg.contains_unknown_bits() {
::core::panicking::panic("assertion failed: !arg.contains_unknown_bits()")
};
};debug_assert!(!arg.contains_unknown_bits());
443 if true {
if !handled_mappings.contains(*arg) {
::core::panicking::panic("assertion failed: handled_mappings.contains(*arg)")
};
};debug_assert!(handled_mappings.contains(*arg));
444 }
445
446 let valid_begin_mappings = MappingFlags::TO | MappingFlags::LITERAL | MappingFlags::IMPLICIT;
447 let transfer_to: Vec<u64> =
448 transfer.iter().map(|m| m.intersection(valid_begin_mappings).bits()).collect();
449 let transfer_from: Vec<u64> =
450 transfer.iter().map(|m| m.intersection(MappingFlags::FROM).bits()).collect();
451 let transfer_kernel = ::alloc::vec::from_elem(MappingFlags::TARGET_PARAM.bits(), transfer_to.len())vec![MappingFlags::TARGET_PARAM.bits(); transfer_to.len()];
453
454 let actual_sizes = sizes
455 .iter()
456 .map(|s| match s {
457 OffloadSize::Static(sz) => *sz,
458 OffloadSize::Dynamic => 0,
459 })
460 .collect::<Vec<_>>();
461 let offload_sizes =
462 add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
::alloc::fmt::format(format_args!(".offload_sizes.{0}", symbol))
})format!(".offload_sizes.{symbol}"), &actual_sizes);
463 let memtransfer_begin =
464 add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
::alloc::fmt::format(format_args!(".offload_maptypes.{0}.begin",
symbol))
})format!(".offload_maptypes.{symbol}.begin"), &transfer_to);
465 let memtransfer_kernel =
466 add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
::alloc::fmt::format(format_args!(".offload_maptypes.{0}.kernel",
symbol))
})format!(".offload_maptypes.{symbol}.kernel"), &transfer_kernel);
467 let memtransfer_end =
468 add_priv_unnamed_arr(&cx, &::alloc::__export::must_use({
::alloc::fmt::format(format_args!(".offload_maptypes.{0}.end",
symbol))
})format!(".offload_maptypes.{symbol}.end"), &transfer_from);
469
470 let name = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!(".{0}.region_id", symbol))
})format!(".{symbol}.region_id");
474 let initializer = cx.get_const_i8(0);
475 let region_id = add_global(&cx, &name, initializer, WeakAnyLinkage);
476
477 let c_entry_name = CString::new(symbol.clone()).unwrap();
478 let c_val = c_entry_name.as_bytes_with_nul();
479 let offload_entry_name = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!(".offloading.entry_name.{0}",
symbol))
})format!(".offloading.entry_name.{symbol}");
480
481 let initializer = crate::common::bytes_in_context(cx.llcx, c_val);
482 let llglobal = add_unnamed_global(&cx, &offload_entry_name, initializer, InternalLinkage);
483 llvm::set_alignment(llglobal, Align::ONE);
484 llvm::set_section(llglobal, c".llvm.rodata.offloading");
485
486 let name = ::alloc::__export::must_use({
::alloc::fmt::format(format_args!(".offloading.entry.{0}", symbol))
})format!(".offloading.entry.{symbol}");
487
488 let elems = TgtOffloadEntry::new(&cx, region_id, llglobal);
490
491 let initializer = crate::common::named_struct(offload_entry_ty, &elems);
492 let c_name = CString::new(name).unwrap();
493 let offload_entry = llvm::add_global(cx.llmod, offload_entry_ty, &c_name);
494 llvm::set_global_constant(offload_entry, true);
495 llvm::set_linkage(offload_entry, WeakAnyLinkage);
496 llvm::set_initializer(offload_entry, initializer);
497 llvm::set_alignment(offload_entry, Align::EIGHT);
498 let c_section_name = CString::new("llvm_offload_entries").unwrap();
499 llvm::set_section(offload_entry, &c_section_name);
500
501 cx.add_compiler_used_global(offload_entry);
502
503 let result = OffloadKernelGlobals {
504 offload_sizes,
505 memtransfer_begin,
506 memtransfer_kernel,
507 memtransfer_end,
508 region_id,
509 };
510
511 cx.offload_kernel_cache.borrow_mut().insert(symbol, result);
512
513 result
514}
515
/// Declares an offload-runtime function with the C calling convention,
/// default visibility, and no unnamed-addr assumption, forwarding to
/// `declare_simple_fn`.
fn declare_offload_fn<'ll>(
    cx: &CodegenCx<'ll, '_>,
    name: &str,
    ty: &'ll llvm::Type,
) -> &'ll llvm::Value {
    crate::declare::declare_simple_fn(
        cx,
        name,
        llvm::CallConv::CCallConv,
        llvm::UnnamedAddr::No,
        llvm::Visibility::Default,
        ty,
    )
}
530
531pub(crate) fn scalar_width<'ll>(cx: &'ll SimpleCx<'_>, ty: &'ll Type) -> u64 {
532 match cx.type_kind(ty) {
533 TypeKind::Half
534 | TypeKind::Float
535 | TypeKind::Double
536 | TypeKind::X86_FP80
537 | TypeKind::FP128
538 | TypeKind::PPC_FP128 => cx.float_width(ty) as u64,
539 TypeKind::Integer => cx.int_width(ty),
540 other => ::rustc_middle::util::bug::bug_fmt(format_args!("scalar_width was called on a non scalar type {0:?}",
other))bug!("scalar_width was called on a non scalar type {other:?}"),
541 }
542}
543
544fn get_runtime_size<'ll, 'tcx>(
545 _cx: &CodegenCx<'ll, 'tcx>,
546 _val: &'ll Value,
547 _meta: &OffloadMetadata,
548) -> &'ll Value {
549 ::rustc_middle::util::bug::bug_fmt(format_args!("offload does not support dynamic sizes yet"));bug!("offload does not support dynamic sizes yet");
551}
552
553pub(crate) fn gen_call_handling<'ll, 'tcx>(
572 builder: &mut Builder<'_, 'll, 'tcx>,
573 offload_data: &OffloadKernelGlobals<'ll>,
574 args: &[&'ll Value],
575 types: &[&Type],
576 metadata: &[OffloadMetadata],
577 offload_globals: &OffloadGlobals<'ll>,
578 offload_dims: &OffloadKernelDims<'ll>,
579) {
580 let cx = builder.cx;
581 let OffloadKernelGlobals {
582 offload_sizes,
583 memtransfer_begin,
584 memtransfer_kernel,
585 memtransfer_end,
586 region_id,
587 } = offload_data;
588 let OffloadKernelDims { num_workgroups, threads_per_block, workgroup_dims, thread_dims } =
589 offload_dims;
590
591 let has_dynamic = metadata.iter().any(|m| #[allow(non_exhaustive_omitted_patterns)] match m.payload_size {
OffloadSize::Dynamic => true,
_ => false,
}matches!(m.payload_size, OffloadSize::Dynamic));
592
593 let tgt_decl = offload_globals.launcher_fn;
594 let tgt_target_kernel_ty = offload_globals.launcher_ty;
595
596 let tgt_kernel_decl = offload_globals.kernel_args_ty;
597 let begin_mapper_decl = offload_globals.begin_mapper;
598 let end_mapper_decl = offload_globals.end_mapper;
599 let fn_ty = offload_globals.mapper_fn_ty;
600
601 let num_args = types.len() as u64;
602 let bb = builder.llbb();
603
604 unsafe {
606 llvm::LLVMRustPositionBuilderPastAllocas(&builder.llbuilder, builder.llfn());
607 }
608
609 let ty = cx.type_array(cx.type_ptr(), num_args);
610 let a1 = builder.direct_alloca(ty, Align::EIGHT, ".offload_baseptrs");
612 let a2 = builder.direct_alloca(ty, Align::EIGHT, ".offload_ptrs");
614 let ty2 = cx.type_array(cx.type_i64(), num_args);
616
617 let a4 = if has_dynamic {
618 let alloc = builder.direct_alloca(ty2, Align::EIGHT, ".offload_sizes");
619
620 builder.memcpy(
621 alloc,
622 Align::EIGHT,
623 offload_sizes,
624 Align::EIGHT,
625 cx.get_const_i64(8 * args.len() as u64),
626 MemFlags::empty(),
627 None,
628 );
629
630 alloc
631 } else {
632 offload_sizes
633 };
634
635 let a5 = builder.direct_alloca(tgt_kernel_decl, Align::EIGHT, "kernel_args");
637
638 unsafe {
640 llvm::LLVMPositionBuilderAtEnd(&builder.llbuilder, bb);
641 }
642
643 let mut vals = ::alloc::vec::Vec::new()vec![];
645 let mut geps = ::alloc::vec::Vec::new()vec![];
646 let i32_0 = cx.get_const_i32(0);
647 for &v in args {
648 let ty = cx.val_ty(v);
649 let ty_kind = cx.type_kind(ty);
650 let (base_val, gep_base) = match ty_kind {
651 TypeKind::Pointer => (v, v),
652 TypeKind::Half | TypeKind::Float | TypeKind::Double | TypeKind::Integer => {
653 let num_bits = scalar_width(cx, ty);
655
656 let bb = builder.llbb();
657 unsafe {
658 llvm::LLVMRustPositionBuilderPastAllocas(builder.llbuilder, builder.llfn());
659 }
660 let addr = builder.direct_alloca(cx.type_i64(), Align::EIGHT, "addr");
661 unsafe {
662 llvm::LLVMPositionBuilderAtEnd(builder.llbuilder, bb);
663 }
664
665 let cast = builder.bitcast(v, cx.type_ix(num_bits));
666 let value = builder.zext(cast, cx.type_i64());
667 builder.store(value, addr, Align::EIGHT);
668 (value, addr)
669 }
670 other => ::rustc_middle::util::bug::bug_fmt(format_args!("offload does not support {0:?}",
other))bug!("offload does not support {other:?}"),
671 };
672
673 let gep = builder.inbounds_gep(cx.type_f32(), gep_base, &[i32_0]);
674
675 vals.push(base_val);
676 geps.push(gep);
677 }
678
679 for i in 0..num_args {
680 let idx = cx.get_const_i32(i);
681 let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, idx]);
682 builder.store(vals[i as usize], gep1, Align::EIGHT);
683 let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, idx]);
684 builder.store(geps[i as usize], gep2, Align::EIGHT);
685
686 if #[allow(non_exhaustive_omitted_patterns)] match metadata[i as
usize].payload_size {
OffloadSize::Dynamic => true,
_ => false,
}matches!(metadata[i as usize].payload_size, OffloadSize::Dynamic) {
687 let gep3 = builder.inbounds_gep(ty2, a4, &[i32_0, idx]);
688 let size_val = get_runtime_size(cx, args[i as usize], &metadata[i as usize]);
689 builder.store(size_val, gep3, Align::EIGHT);
690 }
691 }
692
693 fn get_geps<'ll, 'tcx>(
696 builder: &mut Builder<'_, 'll, 'tcx>,
697 ty: &'ll Type,
698 ty2: &'ll Type,
699 a1: &'ll Value,
700 a2: &'ll Value,
701 a4: &'ll Value,
702 is_dynamic: bool,
703 ) -> [&'ll Value; 3] {
704 let cx = builder.cx;
705 let i32_0 = cx.get_const_i32(0);
706
707 let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, i32_0]);
708 let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, i32_0]);
709 let gep3 = if is_dynamic { builder.inbounds_gep(ty2, a4, &[i32_0, i32_0]) } else { a4 };
710 [gep1, gep2, gep3]
711 }
712
713 fn generate_mapper_call<'ll, 'tcx>(
714 builder: &mut Builder<'_, 'll, 'tcx>,
715 geps: [&'ll Value; 3],
716 o_type: &'ll Value,
717 fn_to_call: &'ll Value,
718 fn_ty: &'ll Type,
719 num_args: u64,
720 s_ident_t: &'ll Value,
721 ) {
722 let cx = builder.cx;
723 let nullptr = cx.const_null(cx.type_ptr());
724 let i64_max = cx.get_const_i64(u64::MAX);
725 let num_args = cx.get_const_i32(num_args);
726 let args =
727 ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[s_ident_t, i64_max, num_args, geps[0], geps[1], geps[2], o_type,
nullptr, nullptr]))vec![s_ident_t, i64_max, num_args, geps[0], geps[1], geps[2], o_type, nullptr, nullptr];
728 builder.call(fn_ty, None, None, fn_to_call, &args, None, None);
729 }
730
731 let s_ident_t = offload_globals.ident_t_global;
733 let geps = get_geps(builder, ty, ty2, a1, a2, a4, has_dynamic);
734 generate_mapper_call(
735 builder,
736 geps,
737 memtransfer_begin,
738 begin_mapper_decl,
739 fn_ty,
740 num_args,
741 s_ident_t,
742 );
743 let values =
744 KernelArgsTy::new(&cx, num_args, memtransfer_kernel, geps, workgroup_dims, thread_dims);
745
746 for (i, value) in values.iter().enumerate() {
749 let ptr = builder.inbounds_gep(tgt_kernel_decl, a5, &[i32_0, cx.get_const_i32(i as u64)]);
750 builder.store(value.1, ptr, value.0);
751 }
752
753 let args = ::alloc::boxed::box_assume_init_into_vec_unsafe(::alloc::intrinsics::write_box_via_move(::alloc::boxed::Box::new_uninit(),
[s_ident_t, cx.get_const_i64(u64::MAX), num_workgroups,
threads_per_block, region_id, a5]))vec![
754 s_ident_t,
755 cx.get_const_i64(u64::MAX), num_workgroups,
758 threads_per_block,
759 region_id,
760 a5,
761 ];
762 builder.call(tgt_target_kernel_ty, None, None, tgt_decl, &args, None, None);
763 let geps = get_geps(builder, ty, ty2, a1, a2, a4, has_dynamic);
767 generate_mapper_call(
768 builder,
769 geps,
770 memtransfer_end,
771 end_mapper_decl,
772 fn_ty,
773 num_args,
774 s_ident_t,
775 );
776}