// core/stdarch/crates/core_arch/src/hexagon/v128.rs
//! Hexagon HVX 128-byte vector mode intrinsics
//!
//! This module provides intrinsics for the Hexagon Vector Extensions (HVX)
//! in 128-byte vector mode (1024-bit vectors).
//!
//! HVX is a wide vector extension designed for high-performance signal processing.
//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61)
//!
//! ## Vector Types
//!
//! In 128-byte mode:
//! - `HvxVector` is 1024 bits (128 bytes) containing 32 x 32-bit values
//! - `HvxVectorPair` is 2048 bits (256 bytes)
//! - `HvxVectorPred` is 1024 bits (128 bytes) for predicate operations
//!
//! To use this module, compile with `-C target-feature=+hvx-length128b`.
//!
//! ## Architecture Versions
//!
//! Different intrinsics require different HVX architecture versions. Use the
//! appropriate target feature to enable the required version:
//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations)
//! - HVX v62: `-C target-feature=+hvxv62`
//! - HVX v65: `-C target-feature=+hvxv65` (includes floating-point support)
//! - HVX v66: `-C target-feature=+hvxv66`
//! - HVX v68: `-C target-feature=+hvxv68`
//! - HVX v69: `-C target-feature=+hvxv69`
//! - HVX v73: `-C target-feature=+hvxv73`
//! - HVX v79: `-C target-feature=+hvxv79`
//!
//! Each version includes all features from previous versions.

33#![allow(non_camel_case_types)]
34
35#[cfg(test)]
36use stdarch_test::assert_instr;
37
38use crate::intrinsics::simd::{simd_add, simd_and, simd_or, simd_sub, simd_xor};
39
40// HVX type definitions for 128-byte vector mode
41types! {
42    #![unstable(feature = "stdarch_hexagon", issue = "151523")]
43
44    /// HVX vector type (1024 bits / 128 bytes)
45    ///
46    /// This type represents a single HVX vector register containing 32 x 32-bit values.
47    pub struct HvxVector(32 x i32);
48
49    /// HVX vector pair type (2048 bits / 256 bytes)
50    ///
51    /// This type represents a pair of HVX vector registers, often used for
52    /// operations that produce double-width results.
53    pub struct HvxVectorPair(64 x i32);
54
55    /// HVX vector predicate type (1024 bits / 128 bytes)
56    ///
57    /// This type represents a predicate vector used for conditional operations.
58    /// Each bit corresponds to a lane in the vector.
59    pub struct HvxVectorPred(32 x i32);
60}
61
// LLVM intrinsic declarations for 128-byte vector mode
63#[allow(improper_ctypes)]
64unsafe extern "unadjusted" {
65    #[link_name = "llvm.hexagon.V6.extractw.128B"]
66    fn extractw(_: HvxVector, _: i32) -> i32;
67    #[link_name = "llvm.hexagon.V6.get.qfext.128B"]
68    fn get_qfext(_: HvxVector, _: i32) -> HvxVector;
69    #[link_name = "llvm.hexagon.V6.hi.128B"]
70    fn hi(_: HvxVectorPair) -> HvxVector;
71    #[link_name = "llvm.hexagon.V6.lo.128B"]
72    fn lo(_: HvxVectorPair) -> HvxVector;
73    #[link_name = "llvm.hexagon.V6.lvsplatb.128B"]
74    fn lvsplatb(_: i32) -> HvxVector;
75    #[link_name = "llvm.hexagon.V6.lvsplath.128B"]
76    fn lvsplath(_: i32) -> HvxVector;
77    #[link_name = "llvm.hexagon.V6.lvsplatw.128B"]
78    fn lvsplatw(_: i32) -> HvxVector;
79    #[link_name = "llvm.hexagon.V6.pred.and.128B"]
80    fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector;
81    #[link_name = "llvm.hexagon.V6.pred.and.n.128B"]
82    fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector;
83    #[link_name = "llvm.hexagon.V6.pred.not.128B"]
84    fn pred_not(_: HvxVector) -> HvxVector;
85    #[link_name = "llvm.hexagon.V6.pred.or.128B"]
86    fn pred_or(_: HvxVector, _: HvxVector) -> HvxVector;
87    #[link_name = "llvm.hexagon.V6.pred.or.n.128B"]
88    fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector;
89    #[link_name = "llvm.hexagon.V6.pred.scalar2.128B"]
90    fn pred_scalar2(_: i32) -> HvxVector;
91    #[link_name = "llvm.hexagon.V6.pred.scalar2v2.128B"]
92    fn pred_scalar2v2(_: i32) -> HvxVector;
93    #[link_name = "llvm.hexagon.V6.pred.xor.128B"]
94    fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector;
95    #[link_name = "llvm.hexagon.V6.set.qfext.128B"]
96    fn set_qfext(_: HvxVector, _: i32) -> HvxVector;
97    #[link_name = "llvm.hexagon.V6.shuffeqh.128B"]
98    fn shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector;
99    #[link_name = "llvm.hexagon.V6.shuffeqw.128B"]
100    fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector;
101    #[link_name = "llvm.hexagon.V6.v6mpyhubs10.128B"]
102    fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
103    #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx.128B"]
104    fn v6mpyhubs10_vxx(
105        _: HvxVectorPair,
106        _: HvxVectorPair,
107        _: HvxVectorPair,
108        _: i32,
109    ) -> HvxVectorPair;
110    #[link_name = "llvm.hexagon.V6.v6mpyvubs10.128B"]
111    fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
112    #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx.128B"]
113    fn v6mpyvubs10_vxx(
114        _: HvxVectorPair,
115        _: HvxVectorPair,
116        _: HvxVectorPair,
117        _: i32,
118    ) -> HvxVectorPair;
119    #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai.128B"]
120    fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
121    #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B"]
122    fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
123    #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B"]
124    fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
125    #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai.128B"]
126    fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
127    #[link_name = "llvm.hexagon.V6.vabs.f8.128B"]
128    fn vabs_f8(_: HvxVector) -> HvxVector;
129    #[link_name = "llvm.hexagon.V6.vabs.hf.128B"]
130    fn vabs_hf(_: HvxVector) -> HvxVector;
131    #[link_name = "llvm.hexagon.V6.vabs.sf.128B"]
132    fn vabs_sf(_: HvxVector) -> HvxVector;
133    #[link_name = "llvm.hexagon.V6.vabsb.128B"]
134    fn vabsb(_: HvxVector) -> HvxVector;
135    #[link_name = "llvm.hexagon.V6.vabsb.sat.128B"]
136    fn vabsb_sat(_: HvxVector) -> HvxVector;
137    #[link_name = "llvm.hexagon.V6.vabsdiffh.128B"]
138    fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector;
139    #[link_name = "llvm.hexagon.V6.vabsdiffub.128B"]
140    fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector;
141    #[link_name = "llvm.hexagon.V6.vabsdiffuh.128B"]
142    fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector;
143    #[link_name = "llvm.hexagon.V6.vabsdiffw.128B"]
144    fn vabsdiffw(_: HvxVector, _: HvxVector) -> HvxVector;
145    #[link_name = "llvm.hexagon.V6.vabsh.128B"]
146    fn vabsh(_: HvxVector) -> HvxVector;
147    #[link_name = "llvm.hexagon.V6.vabsh.sat.128B"]
148    fn vabsh_sat(_: HvxVector) -> HvxVector;
149    #[link_name = "llvm.hexagon.V6.vabsw.128B"]
150    fn vabsw(_: HvxVector) -> HvxVector;
151    #[link_name = "llvm.hexagon.V6.vabsw.sat.128B"]
152    fn vabsw_sat(_: HvxVector) -> HvxVector;
153    #[link_name = "llvm.hexagon.V6.vadd.hf.128B"]
154    fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector;
155    #[link_name = "llvm.hexagon.V6.vadd.hf.hf.128B"]
156    fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
157    #[link_name = "llvm.hexagon.V6.vadd.qf16.128B"]
158    fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector;
159    #[link_name = "llvm.hexagon.V6.vadd.qf16.mix.128B"]
160    fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector;
161    #[link_name = "llvm.hexagon.V6.vadd.qf32.128B"]
162    fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector;
163    #[link_name = "llvm.hexagon.V6.vadd.qf32.mix.128B"]
164    fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector;
165    #[link_name = "llvm.hexagon.V6.vadd.sf.128B"]
166    fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector;
167    #[link_name = "llvm.hexagon.V6.vadd.sf.hf.128B"]
168    fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
169    #[link_name = "llvm.hexagon.V6.vadd.sf.sf.128B"]
170    fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
171    #[link_name = "llvm.hexagon.V6.vaddb.128B"]
172    fn vaddb(_: HvxVector, _: HvxVector) -> HvxVector;
173    #[link_name = "llvm.hexagon.V6.vaddb.dv.128B"]
174    fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
175    #[link_name = "llvm.hexagon.V6.vaddbnq.128B"]
176    fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
177    #[link_name = "llvm.hexagon.V6.vaddbq.128B"]
178    fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
179    #[link_name = "llvm.hexagon.V6.vaddbsat.128B"]
180    fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector;
181    #[link_name = "llvm.hexagon.V6.vaddbsat.dv.128B"]
182    fn vaddbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
183    #[link_name = "llvm.hexagon.V6.vaddcarrysat.128B"]
184    fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
185    #[link_name = "llvm.hexagon.V6.vaddclbh.128B"]
186    fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector;
187    #[link_name = "llvm.hexagon.V6.vaddclbw.128B"]
188    fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector;
189    #[link_name = "llvm.hexagon.V6.vaddh.128B"]
190    fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector;
191    #[link_name = "llvm.hexagon.V6.vaddh.dv.128B"]
192    fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
193    #[link_name = "llvm.hexagon.V6.vaddhnq.128B"]
194    fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
195    #[link_name = "llvm.hexagon.V6.vaddhq.128B"]
196    fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
197    #[link_name = "llvm.hexagon.V6.vaddhsat.128B"]
198    fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector;
199    #[link_name = "llvm.hexagon.V6.vaddhsat.dv.128B"]
200    fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
201    #[link_name = "llvm.hexagon.V6.vaddhw.128B"]
202    fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
203    #[link_name = "llvm.hexagon.V6.vaddhw.acc.128B"]
204    fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
205    #[link_name = "llvm.hexagon.V6.vaddubh.128B"]
206    fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair;
207    #[link_name = "llvm.hexagon.V6.vaddubh.acc.128B"]
208    fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
209    #[link_name = "llvm.hexagon.V6.vaddubsat.128B"]
210    fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector;
211    #[link_name = "llvm.hexagon.V6.vaddubsat.dv.128B"]
212    fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
213    #[link_name = "llvm.hexagon.V6.vaddububb.sat.128B"]
214    fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector;
215    #[link_name = "llvm.hexagon.V6.vadduhsat.128B"]
216    fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector;
217    #[link_name = "llvm.hexagon.V6.vadduhsat.dv.128B"]
218    fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
219    #[link_name = "llvm.hexagon.V6.vadduhw.128B"]
220    fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
221    #[link_name = "llvm.hexagon.V6.vadduhw.acc.128B"]
222    fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
223    #[link_name = "llvm.hexagon.V6.vadduwsat.128B"]
224    fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector;
225    #[link_name = "llvm.hexagon.V6.vadduwsat.dv.128B"]
226    fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
227    #[link_name = "llvm.hexagon.V6.vaddw.128B"]
228    fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector;
229    #[link_name = "llvm.hexagon.V6.vaddw.dv.128B"]
230    fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
231    #[link_name = "llvm.hexagon.V6.vaddwnq.128B"]
232    fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
233    #[link_name = "llvm.hexagon.V6.vaddwq.128B"]
234    fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
235    #[link_name = "llvm.hexagon.V6.vaddwsat.128B"]
236    fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector;
237    #[link_name = "llvm.hexagon.V6.vaddwsat.dv.128B"]
238    fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
239    #[link_name = "llvm.hexagon.V6.valignb.128B"]
240    fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
241    #[link_name = "llvm.hexagon.V6.valignbi.128B"]
242    fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
243    #[link_name = "llvm.hexagon.V6.vand.128B"]
244    fn vand(_: HvxVector, _: HvxVector) -> HvxVector;
245    #[link_name = "llvm.hexagon.V6.vandnqrt.128B"]
246    fn vandnqrt(_: HvxVector, _: i32) -> HvxVector;
247    #[link_name = "llvm.hexagon.V6.vandnqrt.acc.128B"]
248    fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
249    #[link_name = "llvm.hexagon.V6.vandqrt.128B"]
250    fn vandqrt(_: HvxVector, _: i32) -> HvxVector;
251    #[link_name = "llvm.hexagon.V6.vandqrt.acc.128B"]
252    fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
253    #[link_name = "llvm.hexagon.V6.vandvnqv.128B"]
254    fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector;
255    #[link_name = "llvm.hexagon.V6.vandvqv.128B"]
256    fn vandvqv(_: HvxVector, _: HvxVector) -> HvxVector;
257    #[link_name = "llvm.hexagon.V6.vandvrt.128B"]
258    fn vandvrt(_: HvxVector, _: i32) -> HvxVector;
259    #[link_name = "llvm.hexagon.V6.vandvrt.acc.128B"]
260    fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
261    #[link_name = "llvm.hexagon.V6.vaslh.128B"]
262    fn vaslh(_: HvxVector, _: i32) -> HvxVector;
263    #[link_name = "llvm.hexagon.V6.vaslh.acc.128B"]
264    fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
265    #[link_name = "llvm.hexagon.V6.vaslhv.128B"]
266    fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector;
267    #[link_name = "llvm.hexagon.V6.vaslw.128B"]
268    fn vaslw(_: HvxVector, _: i32) -> HvxVector;
269    #[link_name = "llvm.hexagon.V6.vaslw.acc.128B"]
270    fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
271    #[link_name = "llvm.hexagon.V6.vaslwv.128B"]
272    fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector;
273    #[link_name = "llvm.hexagon.V6.vasr.into.128B"]
274    fn vasr_into(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
275    #[link_name = "llvm.hexagon.V6.vasrh.128B"]
276    fn vasrh(_: HvxVector, _: i32) -> HvxVector;
277    #[link_name = "llvm.hexagon.V6.vasrh.acc.128B"]
278    fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
279    #[link_name = "llvm.hexagon.V6.vasrhbrndsat.128B"]
280    fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
281    #[link_name = "llvm.hexagon.V6.vasrhbsat.128B"]
282    fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
283    #[link_name = "llvm.hexagon.V6.vasrhubrndsat.128B"]
284    fn vasrhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
285    #[link_name = "llvm.hexagon.V6.vasrhubsat.128B"]
286    fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
287    #[link_name = "llvm.hexagon.V6.vasrhv.128B"]
288    fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector;
289    #[link_name = "llvm.hexagon.V6.vasruhubrndsat.128B"]
290    fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
291    #[link_name = "llvm.hexagon.V6.vasruhubsat.128B"]
292    fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
293    #[link_name = "llvm.hexagon.V6.vasruwuhrndsat.128B"]
294    fn vasruwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
295    #[link_name = "llvm.hexagon.V6.vasruwuhsat.128B"]
296    fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
297    #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat.128B"]
298    fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
299    #[link_name = "llvm.hexagon.V6.vasrvuhubsat.128B"]
300    fn vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
301    #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat.128B"]
302    fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
303    #[link_name = "llvm.hexagon.V6.vasrvwuhsat.128B"]
304    fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
305    #[link_name = "llvm.hexagon.V6.vasrw.128B"]
306    fn vasrw(_: HvxVector, _: i32) -> HvxVector;
307    #[link_name = "llvm.hexagon.V6.vasrw.acc.128B"]
308    fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
309    #[link_name = "llvm.hexagon.V6.vasrwh.128B"]
310    fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
311    #[link_name = "llvm.hexagon.V6.vasrwhrndsat.128B"]
312    fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
313    #[link_name = "llvm.hexagon.V6.vasrwhsat.128B"]
314    fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
315    #[link_name = "llvm.hexagon.V6.vasrwuhrndsat.128B"]
316    fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
317    #[link_name = "llvm.hexagon.V6.vasrwuhsat.128B"]
318    fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
319    #[link_name = "llvm.hexagon.V6.vasrwv.128B"]
320    fn vasrwv(_: HvxVector, _: HvxVector) -> HvxVector;
321    #[link_name = "llvm.hexagon.V6.vassign.128B"]
322    fn vassign(_: HvxVector) -> HvxVector;
323    #[link_name = "llvm.hexagon.V6.vassign.fp.128B"]
324    fn vassign_fp(_: HvxVector) -> HvxVector;
325    #[link_name = "llvm.hexagon.V6.vassignp.128B"]
326    fn vassignp(_: HvxVectorPair) -> HvxVectorPair;
327    #[link_name = "llvm.hexagon.V6.vavgb.128B"]
328    fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector;
329    #[link_name = "llvm.hexagon.V6.vavgbrnd.128B"]
330    fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector;
331    #[link_name = "llvm.hexagon.V6.vavgh.128B"]
332    fn vavgh(_: HvxVector, _: HvxVector) -> HvxVector;
333    #[link_name = "llvm.hexagon.V6.vavghrnd.128B"]
334    fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector;
335    #[link_name = "llvm.hexagon.V6.vavgub.128B"]
336    fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector;
337    #[link_name = "llvm.hexagon.V6.vavgubrnd.128B"]
338    fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector;
339    #[link_name = "llvm.hexagon.V6.vavguh.128B"]
340    fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector;
341    #[link_name = "llvm.hexagon.V6.vavguhrnd.128B"]
342    fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector;
343    #[link_name = "llvm.hexagon.V6.vavguw.128B"]
344    fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector;
345    #[link_name = "llvm.hexagon.V6.vavguwrnd.128B"]
346    fn vavguwrnd(_: HvxVector, _: HvxVector) -> HvxVector;
347    #[link_name = "llvm.hexagon.V6.vavgw.128B"]
348    fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector;
349    #[link_name = "llvm.hexagon.V6.vavgwrnd.128B"]
350    fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector;
351    #[link_name = "llvm.hexagon.V6.vcl0h.128B"]
352    fn vcl0h(_: HvxVector) -> HvxVector;
353    #[link_name = "llvm.hexagon.V6.vcl0w.128B"]
354    fn vcl0w(_: HvxVector) -> HvxVector;
355    #[link_name = "llvm.hexagon.V6.vcombine.128B"]
356    fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair;
357    #[link_name = "llvm.hexagon.V6.vconv.h.hf.128B"]
358    fn vconv_h_hf(_: HvxVector) -> HvxVector;
359    #[link_name = "llvm.hexagon.V6.vconv.hf.h.128B"]
360    fn vconv_hf_h(_: HvxVector) -> HvxVector;
361    #[link_name = "llvm.hexagon.V6.vconv.hf.qf16.128B"]
362    fn vconv_hf_qf16(_: HvxVector) -> HvxVector;
363    #[link_name = "llvm.hexagon.V6.vconv.hf.qf32.128B"]
364    fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector;
365    #[link_name = "llvm.hexagon.V6.vconv.sf.qf32.128B"]
366    fn vconv_sf_qf32(_: HvxVector) -> HvxVector;
367    #[link_name = "llvm.hexagon.V6.vconv.sf.w.128B"]
368    fn vconv_sf_w(_: HvxVector) -> HvxVector;
369    #[link_name = "llvm.hexagon.V6.vconv.w.sf.128B"]
370    fn vconv_w_sf(_: HvxVector) -> HvxVector;
371    #[link_name = "llvm.hexagon.V6.vcvt2.hf.b.128B"]
372    fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair;
373    #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub.128B"]
374    fn vcvt2_hf_ub(_: HvxVector) -> HvxVectorPair;
375    #[link_name = "llvm.hexagon.V6.vcvt.b.hf.128B"]
376    fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector;
377    #[link_name = "llvm.hexagon.V6.vcvt.h.hf.128B"]
378    fn vcvt_h_hf(_: HvxVector) -> HvxVector;
379    #[link_name = "llvm.hexagon.V6.vcvt.hf.b.128B"]
380    fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair;
381    #[link_name = "llvm.hexagon.V6.vcvt.hf.f8.128B"]
382    fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair;
383    #[link_name = "llvm.hexagon.V6.vcvt.hf.h.128B"]
384    fn vcvt_hf_h(_: HvxVector) -> HvxVector;
385    #[link_name = "llvm.hexagon.V6.vcvt.hf.sf.128B"]
386    fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
387    #[link_name = "llvm.hexagon.V6.vcvt.hf.ub.128B"]
388    fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair;
389    #[link_name = "llvm.hexagon.V6.vcvt.hf.uh.128B"]
390    fn vcvt_hf_uh(_: HvxVector) -> HvxVector;
391    #[link_name = "llvm.hexagon.V6.vcvt.sf.hf.128B"]
392    fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair;
393    #[link_name = "llvm.hexagon.V6.vcvt.ub.hf.128B"]
394    fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector;
395    #[link_name = "llvm.hexagon.V6.vcvt.uh.hf.128B"]
396    fn vcvt_uh_hf(_: HvxVector) -> HvxVector;
397    #[link_name = "llvm.hexagon.V6.vd0.128B"]
398    fn vd0() -> HvxVector;
399    #[link_name = "llvm.hexagon.V6.vdd0.128B"]
400    fn vdd0() -> HvxVectorPair;
401    #[link_name = "llvm.hexagon.V6.vdealb.128B"]
402    fn vdealb(_: HvxVector) -> HvxVector;
403    #[link_name = "llvm.hexagon.V6.vdealb4w.128B"]
404    fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector;
405    #[link_name = "llvm.hexagon.V6.vdealh.128B"]
406    fn vdealh(_: HvxVector) -> HvxVector;
407    #[link_name = "llvm.hexagon.V6.vdealvdd.128B"]
408    fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
409    #[link_name = "llvm.hexagon.V6.vdelta.128B"]
410    fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector;
411    #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.128B"]
412    fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
413    #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B"]
414    fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
415    #[link_name = "llvm.hexagon.V6.vdmpybus.128B"]
416    fn vdmpybus(_: HvxVector, _: i32) -> HvxVector;
417    #[link_name = "llvm.hexagon.V6.vdmpybus.acc.128B"]
418    fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
419    #[link_name = "llvm.hexagon.V6.vdmpybus.dv.128B"]
420    fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair;
421    #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc.128B"]
422    fn vdmpybus_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
423    #[link_name = "llvm.hexagon.V6.vdmpyhb.128B"]
424    fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector;
425    #[link_name = "llvm.hexagon.V6.vdmpyhb.acc.128B"]
426    fn vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
427    #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.128B"]
428    fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair;
429    #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc.128B"]
430    fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
431    #[link_name = "llvm.hexagon.V6.vdmpyhisat.128B"]
432    fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector;
433    #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc.128B"]
434    fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector;
435    #[link_name = "llvm.hexagon.V6.vdmpyhsat.128B"]
436    fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector;
437    #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc.128B"]
438    fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
439    #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.128B"]
440    fn vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector;
441    #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc.128B"]
442    fn vdmpyhsuisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector;
443    #[link_name = "llvm.hexagon.V6.vdmpyhsusat.128B"]
444    fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector;
445    #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc.128B"]
446    fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
447    #[link_name = "llvm.hexagon.V6.vdmpyhvsat.128B"]
448    fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector;
449    #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc.128B"]
450    fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
451    #[link_name = "llvm.hexagon.V6.vdsaduh.128B"]
452    fn vdsaduh(_: HvxVectorPair, _: i32) -> HvxVectorPair;
453    #[link_name = "llvm.hexagon.V6.vdsaduh.acc.128B"]
454    fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
455    #[link_name = "llvm.hexagon.V6.veqb.128B"]
456    fn veqb(_: HvxVector, _: HvxVector) -> HvxVector;
457    #[link_name = "llvm.hexagon.V6.veqb.and.128B"]
458    fn veqb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
459    #[link_name = "llvm.hexagon.V6.veqb.or.128B"]
460    fn veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
461    #[link_name = "llvm.hexagon.V6.veqb.xor.128B"]
462    fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
463    #[link_name = "llvm.hexagon.V6.veqh.128B"]
464    fn veqh(_: HvxVector, _: HvxVector) -> HvxVector;
465    #[link_name = "llvm.hexagon.V6.veqh.and.128B"]
466    fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
467    #[link_name = "llvm.hexagon.V6.veqh.or.128B"]
468    fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
469    #[link_name = "llvm.hexagon.V6.veqh.xor.128B"]
470    fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
471    #[link_name = "llvm.hexagon.V6.veqw.128B"]
472    fn veqw(_: HvxVector, _: HvxVector) -> HvxVector;
473    #[link_name = "llvm.hexagon.V6.veqw.and.128B"]
474    fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
475    #[link_name = "llvm.hexagon.V6.veqw.or.128B"]
476    fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
477    #[link_name = "llvm.hexagon.V6.veqw.xor.128B"]
478    fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
479    #[link_name = "llvm.hexagon.V6.vfmax.f8.128B"]
480    fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector;
481    #[link_name = "llvm.hexagon.V6.vfmax.hf.128B"]
482    fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector;
483    #[link_name = "llvm.hexagon.V6.vfmax.sf.128B"]
484    fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector;
485    #[link_name = "llvm.hexagon.V6.vfmin.f8.128B"]
486    fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector;
487    #[link_name = "llvm.hexagon.V6.vfmin.hf.128B"]
488    fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector;
489    #[link_name = "llvm.hexagon.V6.vfmin.sf.128B"]
490    fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector;
491    #[link_name = "llvm.hexagon.V6.vfneg.f8.128B"]
492    fn vfneg_f8(_: HvxVector) -> HvxVector;
493    #[link_name = "llvm.hexagon.V6.vfneg.hf.128B"]
494    fn vfneg_hf(_: HvxVector) -> HvxVector;
495    #[link_name = "llvm.hexagon.V6.vfneg.sf.128B"]
496    fn vfneg_sf(_: HvxVector) -> HvxVector;
497    #[link_name = "llvm.hexagon.V6.vgathermh.128B"]
498    fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> ();
499    #[link_name = "llvm.hexagon.V6.vgathermhq.128B"]
500    fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> ();
501    #[link_name = "llvm.hexagon.V6.vgathermhw.128B"]
502    fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> ();
503    #[link_name = "llvm.hexagon.V6.vgathermhwq.128B"]
504    fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> ();
505    #[link_name = "llvm.hexagon.V6.vgathermw.128B"]
506    fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> ();
507    #[link_name = "llvm.hexagon.V6.vgathermwq.128B"]
508    fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> ();
509    #[link_name = "llvm.hexagon.V6.vgtb.128B"]
510    fn vgtb(_: HvxVector, _: HvxVector) -> HvxVector;
511    #[link_name = "llvm.hexagon.V6.vgtb.and.128B"]
512    fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
513    #[link_name = "llvm.hexagon.V6.vgtb.or.128B"]
514    fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
515    #[link_name = "llvm.hexagon.V6.vgtb.xor.128B"]
516    fn vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
517    #[link_name = "llvm.hexagon.V6.vgth.128B"]
518    fn vgth(_: HvxVector, _: HvxVector) -> HvxVector;
519    #[link_name = "llvm.hexagon.V6.vgth.and.128B"]
520    fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
521    #[link_name = "llvm.hexagon.V6.vgth.or.128B"]
522    fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
523    #[link_name = "llvm.hexagon.V6.vgth.xor.128B"]
524    fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
525    #[link_name = "llvm.hexagon.V6.vgthf.128B"]
526    fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector;
527    #[link_name = "llvm.hexagon.V6.vgthf.and.128B"]
528    fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
529    #[link_name = "llvm.hexagon.V6.vgthf.or.128B"]
530    fn vgthf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
531    #[link_name = "llvm.hexagon.V6.vgthf.xor.128B"]
532    fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
533    #[link_name = "llvm.hexagon.V6.vgtsf.128B"]
534    fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector;
535    #[link_name = "llvm.hexagon.V6.vgtsf.and.128B"]
536    fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
537    #[link_name = "llvm.hexagon.V6.vgtsf.or.128B"]
538    fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
539    #[link_name = "llvm.hexagon.V6.vgtsf.xor.128B"]
540    fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
541    #[link_name = "llvm.hexagon.V6.vgtub.128B"]
542    fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector;
543    #[link_name = "llvm.hexagon.V6.vgtub.and.128B"]
544    fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
545    #[link_name = "llvm.hexagon.V6.vgtub.or.128B"]
546    fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
547    #[link_name = "llvm.hexagon.V6.vgtub.xor.128B"]
548    fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
549    #[link_name = "llvm.hexagon.V6.vgtuh.128B"]
550    fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector;
551    #[link_name = "llvm.hexagon.V6.vgtuh.and.128B"]
552    fn vgtuh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
553    #[link_name = "llvm.hexagon.V6.vgtuh.or.128B"]
554    fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
555    #[link_name = "llvm.hexagon.V6.vgtuh.xor.128B"]
556    fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
557    #[link_name = "llvm.hexagon.V6.vgtuw.128B"]
558    fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector;
559    #[link_name = "llvm.hexagon.V6.vgtuw.and.128B"]
560    fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
561    #[link_name = "llvm.hexagon.V6.vgtuw.or.128B"]
562    fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
563    #[link_name = "llvm.hexagon.V6.vgtuw.xor.128B"]
564    fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
565    #[link_name = "llvm.hexagon.V6.vgtw.128B"]
566    fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector;
567    #[link_name = "llvm.hexagon.V6.vgtw.and.128B"]
568    fn vgtw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
569    #[link_name = "llvm.hexagon.V6.vgtw.or.128B"]
570    fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
571    #[link_name = "llvm.hexagon.V6.vgtw.xor.128B"]
572    fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
573    #[link_name = "llvm.hexagon.V6.vinsertwr.128B"]
574    fn vinsertwr(_: HvxVector, _: i32) -> HvxVector;
575    #[link_name = "llvm.hexagon.V6.vlalignb.128B"]
576    fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
577    #[link_name = "llvm.hexagon.V6.vlalignbi.128B"]
578    fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
579    #[link_name = "llvm.hexagon.V6.vlsrb.128B"]
580    fn vlsrb(_: HvxVector, _: i32) -> HvxVector;
581    #[link_name = "llvm.hexagon.V6.vlsrh.128B"]
582    fn vlsrh(_: HvxVector, _: i32) -> HvxVector;
583    #[link_name = "llvm.hexagon.V6.vlsrhv.128B"]
584    fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector;
585    #[link_name = "llvm.hexagon.V6.vlsrw.128B"]
586    fn vlsrw(_: HvxVector, _: i32) -> HvxVector;
587    #[link_name = "llvm.hexagon.V6.vlsrwv.128B"]
588    fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector;
589    #[link_name = "llvm.hexagon.V6.vlutvvb.128B"]
590    fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
591    #[link_name = "llvm.hexagon.V6.vlutvvb.nm.128B"]
592    fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
593    #[link_name = "llvm.hexagon.V6.vlutvvb.oracc.128B"]
594    fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector;
595    #[link_name = "llvm.hexagon.V6.vlutvvb.oracci.128B"]
596    fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector;
597    #[link_name = "llvm.hexagon.V6.vlutvvbi.128B"]
598    fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
599    #[link_name = "llvm.hexagon.V6.vlutvwh.128B"]
600    fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
601    #[link_name = "llvm.hexagon.V6.vlutvwh.nm.128B"]
602    fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
603    #[link_name = "llvm.hexagon.V6.vlutvwh.oracc.128B"]
604    fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
605    #[link_name = "llvm.hexagon.V6.vlutvwh.oracci.128B"]
606    fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
607    #[link_name = "llvm.hexagon.V6.vlutvwhi.128B"]
608    fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
609    #[link_name = "llvm.hexagon.V6.vmax.hf.128B"]
610    fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector;
611    #[link_name = "llvm.hexagon.V6.vmax.sf.128B"]
612    fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector;
613    #[link_name = "llvm.hexagon.V6.vmaxb.128B"]
614    fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector;
615    #[link_name = "llvm.hexagon.V6.vmaxh.128B"]
616    fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector;
617    #[link_name = "llvm.hexagon.V6.vmaxub.128B"]
618    fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector;
619    #[link_name = "llvm.hexagon.V6.vmaxuh.128B"]
620    fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector;
621    #[link_name = "llvm.hexagon.V6.vmaxw.128B"]
622    fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector;
623    #[link_name = "llvm.hexagon.V6.vmin.hf.128B"]
624    fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector;
625    #[link_name = "llvm.hexagon.V6.vmin.sf.128B"]
626    fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector;
627    #[link_name = "llvm.hexagon.V6.vminb.128B"]
628    fn vminb(_: HvxVector, _: HvxVector) -> HvxVector;
629    #[link_name = "llvm.hexagon.V6.vminh.128B"]
630    fn vminh(_: HvxVector, _: HvxVector) -> HvxVector;
631    #[link_name = "llvm.hexagon.V6.vminub.128B"]
632    fn vminub(_: HvxVector, _: HvxVector) -> HvxVector;
633    #[link_name = "llvm.hexagon.V6.vminuh.128B"]
634    fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector;
635    #[link_name = "llvm.hexagon.V6.vminw.128B"]
636    fn vminw(_: HvxVector, _: HvxVector) -> HvxVector;
637    #[link_name = "llvm.hexagon.V6.vmpabus.128B"]
638    fn vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair;
639    #[link_name = "llvm.hexagon.V6.vmpabus.acc.128B"]
640    fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
641    #[link_name = "llvm.hexagon.V6.vmpabusv.128B"]
642    fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
643    #[link_name = "llvm.hexagon.V6.vmpabuu.128B"]
644    fn vmpabuu(_: HvxVectorPair, _: i32) -> HvxVectorPair;
645    #[link_name = "llvm.hexagon.V6.vmpabuu.acc.128B"]
646    fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
647    #[link_name = "llvm.hexagon.V6.vmpabuuv.128B"]
648    fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
649    #[link_name = "llvm.hexagon.V6.vmpahb.128B"]
650    fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
651    #[link_name = "llvm.hexagon.V6.vmpahb.acc.128B"]
652    fn vmpahb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
653    #[link_name = "llvm.hexagon.V6.vmpauhb.128B"]
654    fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
655    #[link_name = "llvm.hexagon.V6.vmpauhb.acc.128B"]
656    fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
657    #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.128B"]
658    fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
659    #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc.128B"]
660    fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
661    #[link_name = "llvm.hexagon.V6.vmpy.qf16.128B"]
662    fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector;
663    #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf.128B"]
664    fn vmpy_qf16_hf(_: HvxVector, _: HvxVector) -> HvxVector;
665    #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B"]
666    fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector;
667    #[link_name = "llvm.hexagon.V6.vmpy.qf32.128B"]
668    fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector;
669    #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf.128B"]
670    fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
671    #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B"]
672    fn vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
673    #[link_name = "llvm.hexagon.V6.vmpy.qf32.qf16.128B"]
674    fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair;
675    #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf.128B"]
676    fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector;
677    #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.128B"]
678    fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
679    #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc.128B"]
680    fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
681    #[link_name = "llvm.hexagon.V6.vmpy.sf.sf.128B"]
682    fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
683    #[link_name = "llvm.hexagon.V6.vmpybus.128B"]
684    fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair;
685    #[link_name = "llvm.hexagon.V6.vmpybus.acc.128B"]
686    fn vmpybus_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
687    #[link_name = "llvm.hexagon.V6.vmpybusv.128B"]
688    fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
689    #[link_name = "llvm.hexagon.V6.vmpybusv.acc.128B"]
690    fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
691    #[link_name = "llvm.hexagon.V6.vmpybv.128B"]
692    fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
693    #[link_name = "llvm.hexagon.V6.vmpybv.acc.128B"]
694    fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
695    #[link_name = "llvm.hexagon.V6.vmpyewuh.128B"]
696    fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector;
697    #[link_name = "llvm.hexagon.V6.vmpyewuh.64.128B"]
698    fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair;
699    #[link_name = "llvm.hexagon.V6.vmpyh.128B"]
700    fn vmpyh(_: HvxVector, _: i32) -> HvxVectorPair;
701    #[link_name = "llvm.hexagon.V6.vmpyh.acc.128B"]
702    fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
703    #[link_name = "llvm.hexagon.V6.vmpyhsat.acc.128B"]
704    fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
705    #[link_name = "llvm.hexagon.V6.vmpyhsrs.128B"]
706    fn vmpyhsrs(_: HvxVector, _: i32) -> HvxVector;
707    #[link_name = "llvm.hexagon.V6.vmpyhss.128B"]
708    fn vmpyhss(_: HvxVector, _: i32) -> HvxVector;
709    #[link_name = "llvm.hexagon.V6.vmpyhus.128B"]
710    fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair;
711    #[link_name = "llvm.hexagon.V6.vmpyhus.acc.128B"]
712    fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
713    #[link_name = "llvm.hexagon.V6.vmpyhv.128B"]
714    fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
715    #[link_name = "llvm.hexagon.V6.vmpyhv.acc.128B"]
716    fn vmpyhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
717    #[link_name = "llvm.hexagon.V6.vmpyhvsrs.128B"]
718    fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector;
719    #[link_name = "llvm.hexagon.V6.vmpyieoh.128B"]
720    fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector;
721    #[link_name = "llvm.hexagon.V6.vmpyiewh.acc.128B"]
722    fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
723    #[link_name = "llvm.hexagon.V6.vmpyiewuh.128B"]
724    fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector;
725    #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc.128B"]
726    fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
727    #[link_name = "llvm.hexagon.V6.vmpyih.128B"]
728    fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector;
729    #[link_name = "llvm.hexagon.V6.vmpyih.acc.128B"]
730    fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
731    #[link_name = "llvm.hexagon.V6.vmpyihb.128B"]
732    fn vmpyihb(_: HvxVector, _: i32) -> HvxVector;
733    #[link_name = "llvm.hexagon.V6.vmpyihb.acc.128B"]
734    fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
735    #[link_name = "llvm.hexagon.V6.vmpyiowh.128B"]
736    fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector;
737    #[link_name = "llvm.hexagon.V6.vmpyiwb.128B"]
738    fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector;
739    #[link_name = "llvm.hexagon.V6.vmpyiwb.acc.128B"]
740    fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
741    #[link_name = "llvm.hexagon.V6.vmpyiwh.128B"]
742    fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector;
743    #[link_name = "llvm.hexagon.V6.vmpyiwh.acc.128B"]
744    fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
745    #[link_name = "llvm.hexagon.V6.vmpyiwub.128B"]
746    fn vmpyiwub(_: HvxVector, _: i32) -> HvxVector;
747    #[link_name = "llvm.hexagon.V6.vmpyiwub.acc.128B"]
748    fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
749    #[link_name = "llvm.hexagon.V6.vmpyowh.128B"]
750    fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector;
751    #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc.128B"]
752    fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
753    #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.128B"]
754    fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector;
755    #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B"]
756    fn vmpyowh_rnd_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
757    #[link_name = "llvm.hexagon.V6.vmpyowh.sacc.128B"]
758    fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
759    #[link_name = "llvm.hexagon.V6.vmpyub.128B"]
760    fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair;
761    #[link_name = "llvm.hexagon.V6.vmpyub.acc.128B"]
762    fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
763    #[link_name = "llvm.hexagon.V6.vmpyubv.128B"]
764    fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
765    #[link_name = "llvm.hexagon.V6.vmpyubv.acc.128B"]
766    fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
767    #[link_name = "llvm.hexagon.V6.vmpyuh.128B"]
768    fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair;
769    #[link_name = "llvm.hexagon.V6.vmpyuh.acc.128B"]
770    fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
771    #[link_name = "llvm.hexagon.V6.vmpyuhe.128B"]
772    fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector;
773    #[link_name = "llvm.hexagon.V6.vmpyuhe.acc.128B"]
774    fn vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
775    #[link_name = "llvm.hexagon.V6.vmpyuhv.128B"]
776    fn vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
777    #[link_name = "llvm.hexagon.V6.vmpyuhv.acc.128B"]
778    fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
779    #[link_name = "llvm.hexagon.V6.vmpyuhvs.128B"]
780    fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector;
781    #[link_name = "llvm.hexagon.V6.vmux.128B"]
782    fn vmux(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
783    #[link_name = "llvm.hexagon.V6.vnavgb.128B"]
784    fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector;
785    #[link_name = "llvm.hexagon.V6.vnavgh.128B"]
786    fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector;
787    #[link_name = "llvm.hexagon.V6.vnavgub.128B"]
788    fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector;
789    #[link_name = "llvm.hexagon.V6.vnavgw.128B"]
790    fn vnavgw(_: HvxVector, _: HvxVector) -> HvxVector;
791    #[link_name = "llvm.hexagon.V6.vnormamth.128B"]
792    fn vnormamth(_: HvxVector) -> HvxVector;
793    #[link_name = "llvm.hexagon.V6.vnormamtw.128B"]
794    fn vnormamtw(_: HvxVector) -> HvxVector;
795    #[link_name = "llvm.hexagon.V6.vnot.128B"]
796    fn vnot(_: HvxVector) -> HvxVector;
797    #[link_name = "llvm.hexagon.V6.vor.128B"]
798    fn vor(_: HvxVector, _: HvxVector) -> HvxVector;
799    #[link_name = "llvm.hexagon.V6.vpackeb.128B"]
800    fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector;
801    #[link_name = "llvm.hexagon.V6.vpackeh.128B"]
802    fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector;
803    #[link_name = "llvm.hexagon.V6.vpackhb.sat.128B"]
804    fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector;
805    #[link_name = "llvm.hexagon.V6.vpackhub.sat.128B"]
806    fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector;
807    #[link_name = "llvm.hexagon.V6.vpackob.128B"]
808    fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector;
809    #[link_name = "llvm.hexagon.V6.vpackoh.128B"]
810    fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector;
811    #[link_name = "llvm.hexagon.V6.vpackwh.sat.128B"]
812    fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector;
813    #[link_name = "llvm.hexagon.V6.vpackwuh.sat.128B"]
814    fn vpackwuh_sat(_: HvxVector, _: HvxVector) -> HvxVector;
815    #[link_name = "llvm.hexagon.V6.vpopcounth.128B"]
816    fn vpopcounth(_: HvxVector) -> HvxVector;
817    #[link_name = "llvm.hexagon.V6.vprefixqb.128B"]
818    fn vprefixqb(_: HvxVector) -> HvxVector;
819    #[link_name = "llvm.hexagon.V6.vprefixqh.128B"]
820    fn vprefixqh(_: HvxVector) -> HvxVector;
821    #[link_name = "llvm.hexagon.V6.vprefixqw.128B"]
822    fn vprefixqw(_: HvxVector) -> HvxVector;
823    #[link_name = "llvm.hexagon.V6.vrdelta.128B"]
824    fn vrdelta(_: HvxVector, _: HvxVector) -> HvxVector;
825    #[link_name = "llvm.hexagon.V6.vrmpybus.128B"]
826    fn vrmpybus(_: HvxVector, _: i32) -> HvxVector;
827    #[link_name = "llvm.hexagon.V6.vrmpybus.acc.128B"]
828    fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
829    #[link_name = "llvm.hexagon.V6.vrmpybusi.128B"]
830    fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
831    #[link_name = "llvm.hexagon.V6.vrmpybusi.acc.128B"]
832    fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
833    #[link_name = "llvm.hexagon.V6.vrmpybusv.128B"]
834    fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector;
835    #[link_name = "llvm.hexagon.V6.vrmpybusv.acc.128B"]
836    fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
837    #[link_name = "llvm.hexagon.V6.vrmpybv.128B"]
838    fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector;
839    #[link_name = "llvm.hexagon.V6.vrmpybv.acc.128B"]
840    fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
841    #[link_name = "llvm.hexagon.V6.vrmpyub.128B"]
842    fn vrmpyub(_: HvxVector, _: i32) -> HvxVector;
843    #[link_name = "llvm.hexagon.V6.vrmpyub.acc.128B"]
844    fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
845    #[link_name = "llvm.hexagon.V6.vrmpyubi.128B"]
846    fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
847    #[link_name = "llvm.hexagon.V6.vrmpyubi.acc.128B"]
848    fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
849    #[link_name = "llvm.hexagon.V6.vrmpyubv.128B"]
850    fn vrmpyubv(_: HvxVector, _: HvxVector) -> HvxVector;
851    #[link_name = "llvm.hexagon.V6.vrmpyubv.acc.128B"]
852    fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
853    #[link_name = "llvm.hexagon.V6.vror.128B"]
854    fn vror(_: HvxVector, _: i32) -> HvxVector;
855    #[link_name = "llvm.hexagon.V6.vrotr.128B"]
856    fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector;
857    #[link_name = "llvm.hexagon.V6.vroundhb.128B"]
858    fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector;
859    #[link_name = "llvm.hexagon.V6.vroundhub.128B"]
860    fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector;
861    #[link_name = "llvm.hexagon.V6.vrounduhub.128B"]
862    fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector;
863    #[link_name = "llvm.hexagon.V6.vrounduwuh.128B"]
864    fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector;
865    #[link_name = "llvm.hexagon.V6.vroundwh.128B"]
866    fn vroundwh(_: HvxVector, _: HvxVector) -> HvxVector;
867    #[link_name = "llvm.hexagon.V6.vroundwuh.128B"]
868    fn vroundwuh(_: HvxVector, _: HvxVector) -> HvxVector;
869    #[link_name = "llvm.hexagon.V6.vrsadubi.128B"]
870    fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
871    #[link_name = "llvm.hexagon.V6.vrsadubi.acc.128B"]
872    fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
873    #[link_name = "llvm.hexagon.V6.vsatdw.128B"]
874    fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector;
875    #[link_name = "llvm.hexagon.V6.vsathub.128B"]
876    fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector;
877    #[link_name = "llvm.hexagon.V6.vsatuwuh.128B"]
878    fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector;
879    #[link_name = "llvm.hexagon.V6.vsatwh.128B"]
880    fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector;
881    #[link_name = "llvm.hexagon.V6.vsb.128B"]
882    fn vsb(_: HvxVector) -> HvxVectorPair;
883    #[link_name = "llvm.hexagon.V6.vscattermh.128B"]
884    fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
885    #[link_name = "llvm.hexagon.V6.vscattermh.add.128B"]
886    fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
887    #[link_name = "llvm.hexagon.V6.vscattermhq.128B"]
888    fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
889    #[link_name = "llvm.hexagon.V6.vscattermhw.128B"]
890    fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> ();
891    #[link_name = "llvm.hexagon.V6.vscattermhw.add.128B"]
892    fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> ();
893    #[link_name = "llvm.hexagon.V6.vscattermhwq.128B"]
894    fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> ();
895    #[link_name = "llvm.hexagon.V6.vscattermw.128B"]
896    fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
897    #[link_name = "llvm.hexagon.V6.vscattermw.add.128B"]
898    fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
899    #[link_name = "llvm.hexagon.V6.vscattermwq.128B"]
900    fn vscattermwq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
901    #[link_name = "llvm.hexagon.V6.vsh.128B"]
902    fn vsh(_: HvxVector) -> HvxVectorPair;
903    #[link_name = "llvm.hexagon.V6.vshufeh.128B"]
904    fn vshufeh(_: HvxVector, _: HvxVector) -> HvxVector;
905    #[link_name = "llvm.hexagon.V6.vshuffb.128B"]
906    fn vshuffb(_: HvxVector) -> HvxVector;
907    #[link_name = "llvm.hexagon.V6.vshuffeb.128B"]
908    fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector;
909    #[link_name = "llvm.hexagon.V6.vshuffh.128B"]
910    fn vshuffh(_: HvxVector) -> HvxVector;
911    #[link_name = "llvm.hexagon.V6.vshuffob.128B"]
912    fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector;
913    #[link_name = "llvm.hexagon.V6.vshuffvdd.128B"]
914    fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
915    #[link_name = "llvm.hexagon.V6.vshufoeb.128B"]
916    fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair;
917    #[link_name = "llvm.hexagon.V6.vshufoeh.128B"]
918    fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair;
919    #[link_name = "llvm.hexagon.V6.vshufoh.128B"]
920    fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector;
921    #[link_name = "llvm.hexagon.V6.vsub.hf.128B"]
922    fn vsub_hf(_: HvxVector, _: HvxVector) -> HvxVector;
923    #[link_name = "llvm.hexagon.V6.vsub.hf.hf.128B"]
924    fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
925    #[link_name = "llvm.hexagon.V6.vsub.qf16.128B"]
926    fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector;
927    #[link_name = "llvm.hexagon.V6.vsub.qf16.mix.128B"]
928    fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector;
929    #[link_name = "llvm.hexagon.V6.vsub.qf32.128B"]
930    fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector;
931    #[link_name = "llvm.hexagon.V6.vsub.qf32.mix.128B"]
932    fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector;
933    #[link_name = "llvm.hexagon.V6.vsub.sf.128B"]
934    fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector;
935    #[link_name = "llvm.hexagon.V6.vsub.sf.hf.128B"]
936    fn vsub_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
937    #[link_name = "llvm.hexagon.V6.vsub.sf.sf.128B"]
938    fn vsub_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
939    #[link_name = "llvm.hexagon.V6.vsubb.128B"]
940    fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector;
941    #[link_name = "llvm.hexagon.V6.vsubb.dv.128B"]
942    fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
943    #[link_name = "llvm.hexagon.V6.vsubbnq.128B"]
944    fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
945    #[link_name = "llvm.hexagon.V6.vsubbq.128B"]
946    fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
947    #[link_name = "llvm.hexagon.V6.vsubbsat.128B"]
948    fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector;
949    #[link_name = "llvm.hexagon.V6.vsubbsat.dv.128B"]
950    fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
951    #[link_name = "llvm.hexagon.V6.vsubh.128B"]
952    fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector;
953    #[link_name = "llvm.hexagon.V6.vsubh.dv.128B"]
954    fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
955    #[link_name = "llvm.hexagon.V6.vsubhnq.128B"]
956    fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
957    #[link_name = "llvm.hexagon.V6.vsubhq.128B"]
958    fn vsubhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
959    #[link_name = "llvm.hexagon.V6.vsubhsat.128B"]
960    fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector;
961    #[link_name = "llvm.hexagon.V6.vsubhsat.dv.128B"]
962    fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
963    #[link_name = "llvm.hexagon.V6.vsubhw.128B"]
964    fn vsubhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
965    #[link_name = "llvm.hexagon.V6.vsububh.128B"]
966    fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair;
967    #[link_name = "llvm.hexagon.V6.vsububsat.128B"]
968    fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector;
969    #[link_name = "llvm.hexagon.V6.vsububsat.dv.128B"]
970    fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
971    #[link_name = "llvm.hexagon.V6.vsubububb.sat.128B"]
972    fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector;
973    #[link_name = "llvm.hexagon.V6.vsubuhsat.128B"]
974    fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector;
975    #[link_name = "llvm.hexagon.V6.vsubuhsat.dv.128B"]
976    fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
977    #[link_name = "llvm.hexagon.V6.vsubuhw.128B"]
978    fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
979    #[link_name = "llvm.hexagon.V6.vsubuwsat.128B"]
980    fn vsubuwsat(_: HvxVector, _: HvxVector) -> HvxVector;
981    #[link_name = "llvm.hexagon.V6.vsubuwsat.dv.128B"]
982    fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
983    #[link_name = "llvm.hexagon.V6.vsubw.128B"]
984    fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector;
985    #[link_name = "llvm.hexagon.V6.vsubw.dv.128B"]
986    fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
987    #[link_name = "llvm.hexagon.V6.vsubwnq.128B"]
988    fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
989    #[link_name = "llvm.hexagon.V6.vsubwq.128B"]
990    fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
991    #[link_name = "llvm.hexagon.V6.vsubwsat.128B"]
992    fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector;
993    #[link_name = "llvm.hexagon.V6.vsubwsat.dv.128B"]
994    fn vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
995    #[link_name = "llvm.hexagon.V6.vswap.128B"]
996    fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair;
997    #[link_name = "llvm.hexagon.V6.vtmpyb.128B"]
998    fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
999    #[link_name = "llvm.hexagon.V6.vtmpyb.acc.128B"]
1000    fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
1001    #[link_name = "llvm.hexagon.V6.vtmpybus.128B"]
1002    fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair;
1003    #[link_name = "llvm.hexagon.V6.vtmpybus.acc.128B"]
1004    fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
1005    #[link_name = "llvm.hexagon.V6.vtmpyhb.128B"]
1006    fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
1007    #[link_name = "llvm.hexagon.V6.vtmpyhb.acc.128B"]
1008    fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
1009    #[link_name = "llvm.hexagon.V6.vunpackb.128B"]
1010    fn vunpackb(_: HvxVector) -> HvxVectorPair;
1011    #[link_name = "llvm.hexagon.V6.vunpackh.128B"]
1012    fn vunpackh(_: HvxVector) -> HvxVectorPair;
1013    #[link_name = "llvm.hexagon.V6.vunpackob.128B"]
1014    fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair;
1015    #[link_name = "llvm.hexagon.V6.vunpackoh.128B"]
1016    fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair;
1017    #[link_name = "llvm.hexagon.V6.vunpackub.128B"]
1018    fn vunpackub(_: HvxVector) -> HvxVectorPair;
1019    #[link_name = "llvm.hexagon.V6.vunpackuh.128B"]
1020    fn vunpackuh(_: HvxVector) -> HvxVectorPair;
1021    #[link_name = "llvm.hexagon.V6.vxor.128B"]
1022    fn vxor(_: HvxVector, _: HvxVector) -> HvxVector;
1023    #[link_name = "llvm.hexagon.V6.vzb.128B"]
1024    fn vzb(_: HvxVector) -> HvxVectorPair;
1025    #[link_name = "llvm.hexagon.V6.vzh.128B"]
1026    fn vzh(_: HvxVector) -> HvxVectorPair;
1027}
1028
/// `Rd32=vextract(Vu32,Rs32)`
///
/// Extracts a 32-bit scalar word from the HVX vector `vu` at the position
/// selected by the scalar register `rs`, per the HVX `vextract` instruction.
/// Forwards directly to the `extractw` intrinsic binding.
///
/// Instruction Type: LD
/// Execution Slots: SLOT0
///
/// Safety: requires HVX v60 support (`hvxv60` target feature), as declared
/// by the `target_feature` attribute below.
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(extractw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 {
    extractw(vu, rs)
}
1040
/// `Vd32=hi(Vss32)`
///
/// Returns the high vector of the vector pair `vss`, per the HVX `hi`
/// instruction. Forwards directly to the `hi` intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(hi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector {
    hi(vss)
}
1052
/// `Vd32=lo(Vss32)`
///
/// Returns the low vector of the vector pair `vss`, per the HVX `lo`
/// instruction. Forwards directly to the `lo` intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(lo))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector {
    lo(vss)
}
1064
/// `Vd32=vsplat(Rt32)`
///
/// Broadcasts ("splats") the 32-bit scalar `rt` into every word lane of an
/// HVX vector, per the HVX `vsplat` instruction (`lvsplatw` binding).
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(lvsplatw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector {
    lvsplatw(rt)
}
1076
/// `Vd32.uh=vabsdiff(Vu32.h,Vv32.h)`
///
/// Element-wise absolute difference of two vectors of signed halfwords,
/// producing unsigned halfword results (HVX `vabsdiffh`).
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffh(vu, vv)
}
1088
/// `Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)`
///
/// Element-wise absolute difference of two vectors of unsigned bytes,
/// producing unsigned byte results (HVX `vabsdiffub`).
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffub(vu, vv)
}
1100
/// `Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)`
///
/// Element-wise absolute difference of two vectors of unsigned halfwords,
/// producing unsigned halfword results (HVX `vabsdiffuh`).
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffuh(vu, vv)
}
1112
/// `Vd32.uw=vabsdiff(Vu32.w,Vv32.w)`
///
/// Element-wise absolute difference of two vectors of signed words,
/// producing unsigned word results (HVX `vabsdiffw`).
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffw(vu, vv)
}
1124
/// `Vd32.h=vabs(Vu32.h)`
///
/// Element-wise absolute value of a vector of signed halfwords
/// (HVX `vabsh`, non-saturating form).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector {
    vabsh(vu)
}
1136
/// `Vd32.h=vabs(Vu32.h):sat`
///
/// Element-wise absolute value of a vector of signed halfwords, saturating
/// variant (`:sat`) of `vabsh`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
///
/// Safety: requires HVX v60 support (`hvxv60` target feature).
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsh_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector {
    vabsh_sat(vu)
}
1148
/// `Vd32.w=vabs(Vu32.w)`
///
/// Lane-wise absolute value of signed word elements
/// (non-saturating; see the `:sat` variant for saturating behavior).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector {
    vabsw(vu)
}
1160
/// `Vd32.w=vabs(Vu32.w):sat`
///
/// Saturating lane-wise absolute value of signed word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsw_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector {
    vabsw_sat(vu)
}
1172
/// `Vd32.b=vadd(Vu32.b,Vv32.b)`
///
/// Lane-wise addition of byte elements (modular, non-saturating).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddb(vu, vv)
}
1184
/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b)`
///
/// Lane-wise byte addition over full 2048-bit vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddb_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddb_dv(vuu, vvv)
}
1196
/// `Vd32.h=vadd(Vu32.h,Vv32.h)`
///
/// Lane-wise addition of halfword elements (modular, non-saturating).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddh(vu, vv)
}
1208
/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h)`
///
/// Lane-wise halfword addition over full 2048-bit vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddh_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddh_dv(vuu, vvv)
}
1220
/// `Vd32.h=vadd(Vu32.h,Vv32.h):sat`
///
/// Saturating lane-wise addition of signed halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddhsat(vu, vv)
}
1232
/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat`
///
/// Saturating lane-wise halfword addition over 2048-bit vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddhsat_dv(vuu, vvv)
}
1244
/// `Vdd32.w=vadd(Vu32.h,Vv32.h)`
///
/// Widening add: sums halfword lanes into word lanes of a vector pair,
/// so no overflow is possible.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vaddhw(vu, vv)
}
1256
/// `Vdd32.h=vadd(Vu32.ub,Vv32.ub)`
///
/// Widening add: sums unsigned byte lanes into halfword lanes of a
/// vector pair, so no overflow is possible.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddubh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vaddubh(vu, vv)
}
1268
/// `Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat`
///
/// Saturating lane-wise addition of unsigned byte elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddubsat(vu, vv)
}
1280
/// `Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat`
///
/// Saturating lane-wise unsigned byte addition over 2048-bit vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddubsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddubsat_dv(vuu, vvv)
}
1292
/// `Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat`
///
/// Saturating lane-wise addition of unsigned halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vadduhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadduhsat(vu, vv)
}
1304
/// `Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat`
///
/// Saturating lane-wise unsigned halfword addition over 2048-bit vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vadduhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vadduhsat_dv(vuu, vvv)
}
1316
/// `Vdd32.w=vadd(Vu32.uh,Vv32.uh)`
///
/// Widening add: sums unsigned halfword lanes into word lanes of a
/// vector pair, so no overflow is possible.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vadduhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vadduhw(vu, vv)
}
1328
/// `Vd32.w=vadd(Vu32.w,Vv32.w)`
///
/// Lane-wise addition of word elements (modular, non-saturating).
/// Lowered via the generic `simd_add`, which maps directly onto the
/// 32 x i32 lane layout of `HvxVector`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_add(vu, vv)
}
1340
/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w)`
///
/// Lane-wise word addition over full 2048-bit vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddw_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddw_dv(vuu, vvv)
}
1352
/// `Vd32.w=vadd(Vu32.w,Vv32.w):sat`
///
/// Saturating lane-wise addition of signed word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddwsat(vu, vv)
}
1364
/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat`
///
/// Saturating lane-wise word addition over 2048-bit vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddwsat_dv(vuu, vvv)
}
1376
/// `Vd32=valign(Vu32,Vv32,Rt8)`
///
/// Byte alignment: extracts one vector from the concatenation of the
/// two inputs, shifted by a run-time byte offset taken from `rt`
/// (an `Rt8` operand, so only the low bits are significant).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(valignb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    valignb(vu, vv, rt)
}
1388
/// `Vd32=valign(Vu32,Vv32,#u3)`
///
/// Byte alignment with a compile-time offset: like the register form,
/// but the shift amount is the 3-bit immediate `iu3` (`#u3` operand).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(valignbi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector {
    valignbi(vu, vv, iu3)
}
1400
/// `Vd32=vand(Vu32,Vv32)`
///
/// Bitwise AND of two full vectors, lowered via the generic `simd_and`
/// (lane width is irrelevant for a pure bitwise operation).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vand))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_and(vu, vv)
}
1412
/// `Vd32.h=vasl(Vu32.h,Rt32)`
///
/// Arithmetic shift left of each halfword lane by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector {
    vaslh(vu, rt)
}
1424
/// `Vd32.h=vasl(Vu32.h,Vv32.h)`
///
/// Arithmetic shift left of each halfword lane of `vu` by the
/// per-lane count held in the corresponding lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaslhv(vu, vv)
}
1436
/// `Vd32.w=vasl(Vu32.w,Rt32)`
///
/// Arithmetic shift left of each word lane by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector {
    vaslw(vu, rt)
}
1448
/// `Vx32.w+=vasl(Vu32.w,Rt32)`
///
/// Shift-and-accumulate: shifts each word lane of `vu` left by `rt`
/// and adds the result into the corresponding lane of accumulator `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vaslw_acc(vx, vu, rt)
}
1460
/// `Vd32.w=vasl(Vu32.w,Vv32.w)`
///
/// Arithmetic shift left of each word lane of `vu` by the per-lane
/// count held in the corresponding lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslwv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaslwv(vu, vv)
}
1472
/// `Vd32.h=vasr(Vu32.h,Rt32)`
///
/// Arithmetic shift right of each halfword lane by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector {
    vasrh(vu, rt)
}
1484
/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat`
///
/// Narrowing shift: shifts the halfword lanes of both inputs right by
/// `rt` (Rt8 operand) with rounding, saturates, and packs the results
/// into the byte lanes of a single vector.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhbrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhbrndsat(vu, vv, rt)
}
1496
/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat`
///
/// Narrowing shift: shifts the halfword lanes of both inputs right by
/// `rt` with rounding, saturates, and packs into unsigned byte lanes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhubrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhubrndsat(vu, vv, rt)
}
1508
/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat`
///
/// Narrowing shift: shifts the halfword lanes of both inputs right by
/// `rt`, saturates (no rounding), and packs into unsigned byte lanes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhubsat(vu, vv, rt)
}
1520
/// `Vd32.h=vasr(Vu32.h,Vv32.h)`
///
/// Arithmetic shift right of each halfword lane of `vu` by the
/// per-lane count held in the corresponding lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vasrhv(vu, vv)
}
1532
/// `Vd32.w=vasr(Vu32.w,Rt32)`
///
/// Arithmetic shift right of each word lane by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector {
    vasrw(vu, rt)
}
1544
/// `Vx32.w+=vasr(Vu32.w,Rt32)`
///
/// Shift-and-accumulate: shifts each word lane of `vu` right by `rt`
/// and adds the result into the corresponding lane of accumulator `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vasrw_acc(vx, vu, rt)
}
1556
/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)`
///
/// Narrowing shift: shifts the word lanes of both inputs right by `rt`
/// (Rt8 operand) and packs the truncated results into halfword lanes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwh(vu, vv, rt)
}
1568
/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat`
///
/// Narrowing shift: shifts the word lanes of both inputs right by `rt`
/// with rounding, saturates, and packs into halfword lanes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwhrndsat(vu, vv, rt)
}
1580
/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat`
///
/// Narrowing shift: shifts the word lanes of both inputs right by `rt`,
/// saturates (no rounding), and packs into halfword lanes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwhsat(vu, vv, rt)
}
1592
/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat`
///
/// Narrowing shift: shifts the word lanes of both inputs right by `rt`,
/// saturates, and packs into unsigned halfword lanes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwuhsat(vu, vv, rt)
}
1604
/// `Vd32.w=vasr(Vu32.w,Vv32.w)`
///
/// Arithmetic shift right of each word lane of `vu` by the per-lane
/// count held in the corresponding lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vasrwv(vu, vv)
}
1616
/// `Vd32=Vu32`
///
/// Vector register copy (identity move of a single HVX vector).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vassign))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector {
    vassign(vu)
}
1628
/// `Vdd32=Vuu32`
///
/// Vector-pair register copy (identity move of a 2048-bit pair).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vassignp))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair {
    vassignp(vuu)
}
1640
/// `Vd32.h=vavg(Vu32.h,Vv32.h)`
///
/// Lane-wise average of signed halfword elements (truncating; the
/// `:rnd` variant rounds instead).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgh(vu, vv)
}
1652
/// `Vd32.h=vavg(Vu32.h,Vv32.h):rnd`
///
/// Rounding lane-wise average of signed halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavghrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavghrnd(vu, vv)
}
1664
/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub)`
///
/// Lane-wise average of unsigned byte elements (truncating; the
/// `:rnd` variant rounds instead).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgub(vu, vv)
}
1676
/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd`
///
/// Rounding lane-wise average of unsigned byte elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgubrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgubrnd(vu, vv)
}
1688
/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh)`
///
/// Lane-wise average of unsigned halfword elements (truncating; the
/// `:rnd` variant rounds instead).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavguh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguh(vu, vv)
}
1700
/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd`
///
/// Rounding lane-wise average of unsigned halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavguhrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguhrnd(vu, vv)
}
1712
/// `Vd32.w=vavg(Vu32.w,Vv32.w)`
///
/// Lane-wise average of signed word elements (truncating; the `:rnd`
/// variant rounds instead).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgw(vu, vv)
}
1724
/// `Vd32.w=vavg(Vu32.w,Vv32.w):rnd`
///
/// Rounding lane-wise average of signed word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgwrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgwrnd(vu, vv)
}
1736
/// `Vd32.uh=vcl0(Vu32.uh)`
///
/// Counts leading zero bits in each unsigned halfword lane.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vcl0h))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector {
    vcl0h(vu)
}
1748
/// `Vd32.uw=vcl0(Vu32.uw)`
///
/// Counts leading zero bits in each unsigned word lane.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vcl0w))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector {
    vcl0w(vu)
}
1760
/// `Vdd32=vcombine(Vu32,Vv32)`
///
/// Concatenates two single vectors into one 2048-bit vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vcombine))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vcombine(vu, vv)
}
1772
/// `Vd32=#0`
///
/// Returns an all-zero HVX vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vd0))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vzero() -> HvxVector {
    vd0()
}
1784
/// `Vd32.b=vdeal(Vu32.b)`
///
/// Deals (de-interleaves) the byte lanes within a single vector.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector {
    vdealb(vu)
}
1796
/// `Vd32.b=vdeale(Vu32.b,Vv32.b)`
///
/// Byte-lane `vdeale`: deals (de-interleaves) byte elements drawn from
/// both input vectors into a single result vector.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealb4w))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdealb4w(vu, vv)
}
1808
/// `Vd32.h=vdeal(Vu32.h)`
///
/// Deals (de-interleaves) the halfword lanes within a single vector.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector {
    vdealh(vu)
}
1820
/// `Vdd32=vdeal(Vu32,Vv32,Rt8)`
///
/// Deals elements across both input vectors under run-time control
/// `rt` (Rt8 operand), producing a full vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealvdd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vdealvdd(vu, vv, rt)
}
1832
/// `Vd32=vdelta(Vu32,Vv32)`
///
/// Permutes the elements of `vu` under the control pattern in `vv`
/// (the HVX `vdelta` permute network).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdelta))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdelta(vu, vv)
}
1844
/// `Vd32.h=vdmpy(Vu32.ub,Rt32.b)`
///
/// Dual multiply: multiplies unsigned byte lanes of `vu` by signed
/// bytes of scalar `rt`, summing element pairs into halfword lanes.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector {
    vdmpybus(vu, rt)
}
1856
/// `Vx32.h+=vdmpy(Vu32.ub,Rt32.b)`
///
/// Accumulating form of `vdmpybus`: the dual-multiply result is added
/// into the halfword lanes of accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vdmpybus_acc(vx, vu, rt)
}
1868
/// `Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)`
///
/// Vector-pair form of `vdmpybus`: dual multiply of unsigned byte
/// lanes of the pair `vuu` by the bytes of scalar `rt`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vdmpybus_dv(vuu, rt)
}
1880
/// `Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)`
///
/// Accumulating vector-pair form of `vdmpybus`: the dual-multiply
/// result is added into the halfword lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus_dv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vdmpyacc_whwubrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vdmpybus_dv_acc(vxx, vuu, rt)
}
1896
/// `Vd32.w=vdmpy(Vu32.h,Rt32.b)`
///
/// Dual multiply: multiplies halfword lanes of `vu` by signed bytes of
/// scalar `rt`, summing element pairs into word lanes.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhb(vu, rt)
}
1908
/// `Vx32.w+=vdmpy(Vu32.h,Rt32.b)`
///
/// Accumulating form of `vdmpyhb`: the dual-multiply result is added
/// into the word lanes of accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhb_acc(vx, vu, rt)
}
1920
/// `Vdd32.w=vdmpy(Vuu32.h,Rt32.b)`
///
/// Vector-pair form of `vdmpyhb`: dual multiply of halfword lanes of
/// the pair `vuu` by the bytes of scalar `rt`, into word lanes.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vdmpyhb_dv(vuu, rt)
}
1932
/// `Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)`
///
/// Accumulating vector-pair form of `vdmpyhb`: the dual-multiply
/// result is added into the word lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vdmpyacc_wwwhrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vdmpyhb_dv_acc(vxx, vuu, rt)
}
1948
/// `Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat`
///
/// Dual multiply over a vector pair: halfword lanes of `vuu` times the
/// halfwords of scalar `rt`, with the word result saturated into a
/// single vector.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhisat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector {
    vdmpyhisat(vuu, rt)
}
1960
/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat`
///
/// Accumulating form of `vdmpyhisat`: the saturated dual-multiply
/// result is added into the word lanes of accumulator `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhisat_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector {
    vdmpyhisat_acc(vx, vuu, rt)
}
1972
/// `Vd32.w=vdmpy(Vu32.h,Rt32.h):sat`
///
/// Dual multiply: halfword lanes of `vu` times the halfwords of scalar
/// `rt`, with the word result saturated.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhsat(vu, rt)
}
1984
/// `Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat`
///
/// Accumulating form of `vdmpyhsat`: the saturated dual-multiply
/// result is added into the word lanes of accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsat_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhsat_acc(vx, vu, rt)
}
1996
/// `Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat`
///
/// Dual multiply over a vector pair: signed halfword lanes of `vuu`
/// times the unsigned halfwords of scalar `rt` (with the `#1` option
/// from the pseudo-code), saturating into word lanes.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsuisat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector {
    vdmpyhsuisat(vuu, rt)
}
2008
/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat`
///
/// Accumulating form of `vdmpyhsuisat`: the saturated dual-multiply
/// result is added into the word lanes of accumulator `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector {
    vdmpyhsuisat_acc(vx, vuu, rt)
}
2020
/// `Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat`
///
/// Dual multiply: signed halfword lanes of `vu` times the unsigned
/// halfwords of scalar `rt`, with the word result saturated.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsusat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhsusat(vu, rt)
}
2032
2033/// `Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat`
2034///
2035/// Instruction Type: CVI_VX
2036/// Execution Slots: SLOT23
2037#[inline(always)]
2038#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2039#[cfg_attr(test, assert_instr(vdmpyhsusat_acc))]
2040#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2041pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
2042    vdmpyhsusat_acc(vx, vu, rt)
2043}
2044
2045/// `Vd32.w=vdmpy(Vu32.h,Vv32.h):sat`
2046///
2047/// Instruction Type: CVI_VX
2048/// Execution Slots: SLOT23
2049#[inline(always)]
2050#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2051#[cfg_attr(test, assert_instr(vdmpyhvsat))]
2052#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2053pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
2054    vdmpyhvsat(vu, vv)
2055}
2056
2057/// `Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat`
2058///
2059/// Instruction Type: CVI_VX_DV
2060/// Execution Slots: SLOT23
2061#[inline(always)]
2062#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2063#[cfg_attr(test, assert_instr(vdmpyhvsat_acc))]
2064#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2065pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
2066    vdmpyhvsat_acc(vx, vu, vv)
2067}
2068
/// `Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)`
///
/// `vdsad` over the unsigned-halfword lanes of the vector pair `vuu` against
/// the unsigned halfwords in `rt`, producing unsigned-word results in a
/// vector pair. Forwards to the `vdsaduh` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdsaduh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vdsaduh(vuu, rt)
}

/// `Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)`
///
/// Accumulating form (`+=`): adds the `vdsad` result into the vector pair
/// `vxx`. Forwards to the `vdsaduh_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdsaduh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vdsaduh_acc(vxx, vuu, rt)
}
2096
/// `Vx32.w=vinsert(Rt32)`
///
/// Inserts the scalar `rt` into the vector `vx` (read-modify-write of `vx`,
/// per the `Vx32` operand). Forwards to the `vinsertwr` intrinsic binding.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vinsertwr))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector {
    vinsertwr(vx, rt)
}

/// `Vd32=vlalign(Vu32,Vv32,Rt8)`
///
/// `vlalign` of `vu` and `vv` with the alignment amount taken from the
/// register `rt` (`Rt8`). Forwards to the `vlalignb` intrinsic binding.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlalignb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vlalignb(vu, vv, rt)
}

/// `Vd32=vlalign(Vu32,Vv32,#u3)`
///
/// Immediate variant of `vlalign`: `iu3` supplies the 3-bit immediate `#u3`.
/// Forwards to the `vlalignbi` intrinsic binding.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlalignbi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector {
    vlalignbi(vu, vv, iu3)
}
2132
/// `Vd32.uh=vlsr(Vu32.uh,Rt32)`
///
/// Logical shift right of each unsigned-halfword lane of `vu` by an amount
/// from the scalar `rt`. Forwards to the `vlsrh` intrinsic binding.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector {
    vlsrh(vu, rt)
}

/// `Vd32.h=vlsr(Vu32.h,Vv32.h)`
///
/// Logical shift right of each halfword lane of `vu` with a per-lane shift
/// amount taken from the matching lane of `vv`. Forwards to `vlsrhv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vlsrhv(vu, vv)
}

/// `Vd32.uw=vlsr(Vu32.uw,Rt32)`
///
/// Logical shift right of each unsigned-word lane of `vu` by an amount from
/// the scalar `rt`. Forwards to the `vlsrw` intrinsic binding.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector {
    vlsrw(vu, rt)
}

/// `Vd32.w=vlsr(Vu32.w,Vv32.w)`
///
/// Logical shift right of each word lane of `vu` with a per-lane shift amount
/// from the matching lane of `vv`. Forwards to the `vlsrwv` intrinsic binding.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrwv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vlsrwv(vu, vv)
}
2180
/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)`
///
/// `vlut32` table lookup: byte indices from `vu` select byte entries from the
/// table in `vv`, with `rt` (`Rt8`) selecting the table portion. Forwards to
/// the `vlutvvb` intrinsic binding.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvvb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vlutvvb(vu, vv, rt)
}

/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)`
///
/// OR-accumulating form (`|=`): ORs the `vlut32` lookup result into `vx`.
/// Forwards to the `vlutvvb_oracc` intrinsic binding.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvvb_oracc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vlut32or_vbvbvbr(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
    rt: i32,
) -> HvxVector {
    vlutvvb_oracc(vx, vu, vv, rt)
}

/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)`
///
/// `vlut16` table lookup: byte indices from `vu` select halfword entries from
/// the table in `vv`, producing a vector pair; `rt` (`Rt8`) selects the table
/// portion. Forwards to the `vlutvwh` intrinsic binding.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vlutvwh(vu, vv, rt)
}

/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)`
///
/// OR-accumulating form (`|=`): ORs the `vlut16` lookup result into the
/// vector pair `vxx`. Forwards to the `vlutvwh_oracc` intrinsic binding.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvwh_oracc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vlut16or_whvbvhr(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vlutvwh_oracc(vxx, vu, vv, rt)
}
2238
/// `Vd32.h=vmax(Vu32.h,Vv32.h)`
///
/// Lane-wise maximum of signed halfword lanes. Forwards to the `vmaxh`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxh(vu, vv)
}

/// `Vd32.ub=vmax(Vu32.ub,Vv32.ub)`
///
/// Lane-wise maximum of unsigned byte lanes. Forwards to the `vmaxub`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxub(vu, vv)
}

/// `Vd32.uh=vmax(Vu32.uh,Vv32.uh)`
///
/// Lane-wise maximum of unsigned halfword lanes. Forwards to the `vmaxuh`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxuh(vu, vv)
}

/// `Vd32.w=vmax(Vu32.w,Vv32.w)`
///
/// Lane-wise maximum of signed word lanes. Forwards to the `vmaxw`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxw(vu, vv)
}

/// `Vd32.h=vmin(Vu32.h,Vv32.h)`
///
/// Lane-wise minimum of signed halfword lanes. Forwards to the `vminh`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminh(vu, vv)
}

/// `Vd32.ub=vmin(Vu32.ub,Vv32.ub)`
///
/// Lane-wise minimum of unsigned byte lanes. Forwards to the `vminub`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminub(vu, vv)
}

/// `Vd32.uh=vmin(Vu32.uh,Vv32.uh)`
///
/// Lane-wise minimum of unsigned halfword lanes. Forwards to the `vminuh`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminuh(vu, vv)
}

/// `Vd32.w=vmin(Vu32.w,Vv32.w)`
///
/// Lane-wise minimum of signed word lanes. Forwards to the `vminw`
/// intrinsic binding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminw(vu, vv)
}
2334
/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.b)`
///
/// `vmpa` of the unsigned-byte lanes of the vector pair `vuu` with signed
/// bytes from the scalar `rt`, producing halfword results in a vector pair.
/// Forwards to the `vmpabus` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vmpabus(vuu, rt)
}

/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)`
///
/// Accumulating form (`+=`): adds the `vmpa` result into the vector pair
/// `vxx`. Forwards to the `vmpabus_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpaacc_whwubrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vmpabus_acc(vxx, vuu, rt)
}

/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)`
///
/// Vector-pair-by-vector-pair `vmpa`: unsigned bytes of `vuu` with signed
/// bytes of `vvv`, halfword results. Forwards to the `vmpabusv` binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabusv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vmpabusv(vuu, vvv)
}

/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)`
///
/// Fully unsigned variant: unsigned bytes of `vuu` with unsigned bytes of
/// `vvv`, halfword results. Forwards to the `vmpabuuv` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabuuv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vmpabuuv(vuu, vvv)
}

/// `Vdd32.w=vmpa(Vuu32.h,Rt32.b)`
///
/// `vmpa` of the halfword lanes of `vuu` with signed bytes from `rt`,
/// producing word results in a vector pair. Forwards to `vmpahb`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpahb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vmpahb(vuu, rt)
}

/// `Vxx32.w+=vmpa(Vuu32.h,Rt32.b)`
///
/// Accumulating form (`+=`): adds the halfword-by-byte `vmpa` result into the
/// vector pair `vxx`. Forwards to the `vmpahb_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpahb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpaacc_wwwhrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vmpahb_acc(vxx, vuu, rt)
}
2414
/// `Vdd32.h=vmpy(Vu32.ub,Rt32.b)`
///
/// Widening multiply: unsigned-byte lanes of `vu` by signed bytes from `rt`,
/// producing halfword results in a vector pair. Forwards to the `vmpybus`
/// intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpybus(vu, rt)
}

/// `Vxx32.h+=vmpy(Vu32.ub,Rt32.b)`
///
/// Accumulating form (`+=`): adds the widening multiply result into the
/// vector pair `vxx`. Forwards to the `vmpybus_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpybus_acc(vxx, vu, rt)
}

/// `Vdd32.h=vmpy(Vu32.ub,Vv32.b)`
///
/// Vector-by-vector widening multiply: unsigned bytes of `vu` by signed bytes
/// of `vv`, halfword results in a vector pair. Forwards to `vmpybusv`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybusv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpybusv(vu, vv)
}

/// `Vxx32.h+=vmpy(Vu32.ub,Vv32.b)`
///
/// Accumulating form (`+=`): adds the mixed-sign vector widening multiply
/// into `vxx`. Forwards to the `vmpybusv_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybusv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpyacc_whvubvb(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpybusv_acc(vxx, vu, vv)
}

/// `Vdd32.h=vmpy(Vu32.b,Vv32.b)`
///
/// Signed-by-signed widening multiply of byte lanes, halfword results in a
/// vector pair. Forwards to the `vmpybv` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpybv(vu, vv)
}

/// `Vxx32.h+=vmpy(Vu32.b,Vv32.b)`
///
/// Accumulating form (`+=`): adds the signed byte widening multiply into
/// `vxx`. Forwards to the `vmpybv_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpyacc_whvbvb(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpybv_acc(vxx, vu, vv)
}
2494
/// `Vd32.w=vmpye(Vu32.w,Vv32.uh)`
///
/// `vmpye` of word lanes of `vu` with unsigned-halfword lanes of `vv`,
/// word results. Forwards to the `vmpyewuh` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyewuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyewuh(vu, vv)
}

/// `Vdd32.w=vmpy(Vu32.h,Rt32.h)`
///
/// Widening multiply: halfword lanes of `vu` by halfwords from `rt`, word
/// results in a vector pair. Forwards to the `vmpyh` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyh(vu, rt)
}

/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat`
///
/// Accumulating, saturating form (`+=`, `:sat`): adds the widening multiply
/// into the vector pair `vxx`. Forwards to `vmpyhsat_acc`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhsat_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat(
    vxx: HvxVectorPair,
    vu: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vmpyhsat_acc(vxx, vu, rt)
}

/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat`
///
/// Halfword multiply with left shift by 1 (`<<1`), rounding (`:rnd`) and
/// saturation (`:sat`). Forwards to the `vmpyhsrs` intrinsic binding.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhsrs))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyhsrs(vu, rt)
}

/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat`
///
/// As above but without rounding: left shift by 1 (`<<1`) and saturation
/// (`:sat`) only. Forwards to the `vmpyhss` intrinsic binding.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhss))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyhss(vu, rt)
}
2558
/// `Vdd32.w=vmpy(Vu32.h,Vv32.uh)`
///
/// Widening multiply: signed halfwords of `vu` by unsigned halfwords of `vv`,
/// word results in a vector pair. Forwards to the `vmpyhus` binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyhus(vu, vv)
}

/// `Vxx32.w+=vmpy(Vu32.h,Vv32.uh)`
///
/// Accumulating form (`+=`): adds the mixed-sign halfword widening multiply
/// into `vxx`. Forwards to the `vmpyhus_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpyacc_wwvhvuh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyhus_acc(vxx, vu, vv)
}

/// `Vdd32.w=vmpy(Vu32.h,Vv32.h)`
///
/// Signed-by-signed widening multiply of halfword lanes, word results in a
/// vector pair. Forwards to the `vmpyhv` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyhv(vu, vv)
}

/// `Vxx32.w+=vmpy(Vu32.h,Vv32.h)`
///
/// Accumulating form (`+=`): adds the signed halfword widening multiply into
/// `vxx`. Forwards to the `vmpyhv_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpyacc_wwvhvh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyhv_acc(vxx, vu, vv)
}

/// `Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat`
///
/// Halfword multiply keeping halfword results, with left shift by 1 (`<<1`),
/// rounding (`:rnd`) and saturation (`:sat`). Forwards to `vmpyhvsrs`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhvsrs))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyhvsrs(vu, vv)
}
2626
/// `Vd32.w=vmpyieo(Vu32.h,Vv32.h)`
///
/// `vmpyieo` of halfword lanes of `vu` and `vv`, word results. Forwards to
/// the `vmpyieoh` intrinsic binding.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyieoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyieoh(vu, vv)
}

/// `Vx32.w+=vmpyie(Vu32.w,Vv32.h)`
///
/// Accumulating form (`+=`): adds the `vmpyie` word-by-halfword result into
/// `vx`. Forwards to the `vmpyiewh_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiewh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiewh_acc(vx, vu, vv)
}

/// `Vd32.w=vmpyie(Vu32.w,Vv32.uh)`
///
/// `vmpyie` of word lanes of `vu` with unsigned halfwords of `vv`, word
/// results. Forwards to the `vmpyiewuh` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiewuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiewuh(vu, vv)
}

/// `Vx32.w+=vmpyie(Vu32.w,Vv32.uh)`
///
/// Accumulating form (`+=`): adds the word-by-unsigned-halfword `vmpyie`
/// result into `vx`. Forwards to the `vmpyiewuh_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiewuh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiewuh_acc(vx, vu, vv)
}
2674
/// `Vd32.h=vmpyi(Vu32.h,Vv32.h)`
///
/// `vmpyi` of halfword lanes of `vu` and `vv`, keeping halfword results (no
/// widening, per the `Vd32.h` destination). Forwards to `vmpyih`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyih))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyih(vu, vv)
}

/// `Vx32.h+=vmpyi(Vu32.h,Vv32.h)`
///
/// Accumulating form (`+=`): adds the halfword `vmpyi` result into `vx`.
/// Forwards to the `vmpyih_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyih_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyih_acc(vx, vu, vv)
}

/// `Vd32.h=vmpyi(Vu32.h,Rt32.b)`
///
/// `vmpyi` of halfword lanes of `vu` by signed bytes from the scalar `rt`,
/// halfword results. Forwards to the `vmpyihb` intrinsic binding.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyihb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyihb(vu, rt)
}

/// `Vx32.h+=vmpyi(Vu32.h,Rt32.b)`
///
/// Accumulating form (`+=`): adds the halfword-by-byte `vmpyi` result into
/// `vx`. Forwards to the `vmpyihb_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyihb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyihb_acc(vx, vu, rt)
}
2722
/// `Vd32.w=vmpyio(Vu32.w,Vv32.h)`
///
/// `vmpyio` of word lanes of `vu` with halfword lanes of `vv`, word results.
/// Forwards to the `vmpyiowh` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiowh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiowh(vu, vv)
}

/// `Vd32.w=vmpyi(Vu32.w,Rt32.b)`
///
/// `vmpyi` of word lanes of `vu` by signed bytes from the scalar `rt`, word
/// results. Forwards to the `vmpyiwb` intrinsic binding.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwb(vu, rt)
}

/// `Vx32.w+=vmpyi(Vu32.w,Rt32.b)`
///
/// Accumulating form (`+=`): adds the word-by-byte `vmpyi` result into `vx`.
/// Forwards to the `vmpyiwb_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwb_acc(vx, vu, rt)
}

/// `Vd32.w=vmpyi(Vu32.w,Rt32.h)`
///
/// `vmpyi` of word lanes of `vu` by halfwords from the scalar `rt`, word
/// results. Forwards to the `vmpyiwh` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwh(vu, rt)
}

/// `Vx32.w+=vmpyi(Vu32.w,Rt32.h)`
///
/// Accumulating form (`+=`): adds the word-by-halfword `vmpyi` result into
/// `vx`. Forwards to the `vmpyiwh_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwh_acc(vx, vu, rt)
}
2782
/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat`
///
/// `vmpyo` of word lanes of `vu` with halfword lanes of `vv`, left shift by 1
/// (`<<1`) and saturation (`:sat`). Forwards to the `vmpyowh` binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyowh(vu, vv)
}

/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat`
///
/// Rounding variant: as above with rounding (`:rnd`) applied in addition to
/// `<<1` and `:sat`. Forwards to the `vmpyowh_rnd` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh_rnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyowh_rnd(vu, vv)
}

/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift`
///
/// Accumulating form (`+=`) with rounding, saturation and the `:shift`
/// modifier; accumulates into `vx`. Forwards to `vmpyowh_rnd_sacc`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVector {
    vmpyowh_rnd_sacc(vx, vu, vv)
}

/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift`
///
/// Accumulating form (`+=`) without rounding: `<<1`, `:sat` and `:shift`
/// only; accumulates into `vx`. Forwards to `vmpyowh_sacc`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh_sacc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVector {
    vmpyowh_sacc(vx, vu, vv)
}
2838
/// `Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)`
///
/// Unsigned widening multiply: unsigned-byte lanes of `vu` by unsigned bytes
/// from `rt`, unsigned-halfword results in a vector pair. Forwards to the
/// `vmpyub` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyub(vu, rt)
}

/// `Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)`
///
/// Accumulating form (`+=`): adds the unsigned widening multiply into the
/// vector pair `vxx`. Forwards to the `vmpyub_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyub_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vmpyacc_wuhvubrub(
    vxx: HvxVectorPair,
    vu: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vmpyub_acc(vxx, vu, rt)
}

/// `Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)`
///
/// Vector-by-vector unsigned widening multiply of byte lanes,
/// unsigned-halfword results in a vector pair. Forwards to `vmpyubv`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyubv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyubv(vu, vv)
}

/// `Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)`
///
/// Accumulating form (`+=`): adds the unsigned byte widening multiply into
/// `vxx`. Forwards to the `vmpyubv_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyubv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vmpyacc_wuhvubvub(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyubv_acc(vxx, vu, vv)
}
2894
/// `Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)`
///
/// Unsigned widening multiply: unsigned-halfword lanes of `vu` by unsigned
/// halfwords from `rt`, unsigned-word results in a vector pair. Forwards to
/// the `vmpyuh` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyuh(vu, rt)
}

/// `Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)`
///
/// Accumulating form (`+=`): adds the unsigned halfword widening multiply
/// into the vector pair `vxx`. Forwards to `vmpyuh_acc`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vmpyuh_acc(vxx, vu, rt)
}

/// `Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)`
///
/// Vector-by-vector unsigned widening multiply of halfword lanes,
/// unsigned-word results in a vector pair. Forwards to `vmpyuhv`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyuhv(vu, vv)
}

/// `Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)`
///
/// Accumulating form (`+=`): adds the unsigned halfword vector multiply into
/// `vxx`. Forwards to the `vmpyuhv_acc` intrinsic binding.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuhv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyuhv_acc(vxx, vu, vv)
}
2950
/// `Vd32.h=vnavg(Vu32.h,Vv32.h)`
///
/// Per-lane "negative average" of signed halfwords: the halved difference
/// of the two sources (see the HVX manual for the exact rounding behavior).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnavgh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgh(vu, vv)
}

/// `Vd32.b=vnavg(Vu32.ub,Vv32.ub)`
///
/// Per-lane "negative average" of unsigned bytes, producing signed byte
/// results (see the HVX manual for the exact rounding behavior).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnavgub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgub(vu, vv)
}

/// `Vd32.w=vnavg(Vu32.w,Vv32.w)`
///
/// Per-lane "negative average" of signed words: the halved difference of
/// the two sources (see the HVX manual for the exact rounding behavior).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnavgw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgw(vu, vv)
}
2986
/// `Vd32.h=vnormamt(Vu32.h)`
///
/// Per-lane normalization amount for signed halfwords — the shift count
/// needed to normalize each lane (see the HVX manual for the exact
/// definition, including the zero-input case).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnormamth))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector {
    vnormamth(vu)
}

/// `Vd32.w=vnormamt(Vu32.w)`
///
/// Per-lane normalization amount for signed words — the shift count needed
/// to normalize each lane (see the HVX manual for the exact definition).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnormamtw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector {
    vnormamtw(vu)
}

/// `Vd32=vnot(Vu32)`
///
/// Bitwise complement of every bit of the 1024-bit vector `vu`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnot))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector {
    vnot(vu)
}

/// `Vd32=vor(Vu32,Vv32)`
///
/// Bitwise OR of the two vectors. Lowered through the portable `simd_or`
/// intrinsic rather than a target-specific builtin; codegen is still
/// checked against the `vor` instruction.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vor))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_or(vu, vv)
}
3034
/// `Vd32.b=vpacke(Vu32.h,Vv32.h)`
///
/// Packs the even (low-order) bytes of each halfword lane from the two
/// sources into one byte vector (see the HVX manual for lane ordering).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackeb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackeb(vu, vv)
}

/// `Vd32.h=vpacke(Vu32.w,Vv32.w)`
///
/// Packs the even (low-order) halfwords of each word lane from the two
/// sources into one halfword vector (see the HVX manual for lane ordering).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackeh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackeh(vu, vv)
}

/// `Vd32.b=vpack(Vu32.h,Vv32.h):sat`
///
/// Packs the halfword lanes of both sources into signed bytes with
/// signed saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackhb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackhb_sat(vu, vv)
}

/// `Vd32.ub=vpack(Vu32.h,Vv32.h):sat`
///
/// Packs the halfword lanes of both sources into unsigned bytes with
/// unsigned saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackhub_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackhub_sat(vu, vv)
}

/// `Vd32.b=vpacko(Vu32.h,Vv32.h)`
///
/// Packs the odd (high-order) bytes of each halfword lane from the two
/// sources into one byte vector (see the HVX manual for lane ordering).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackob))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackob(vu, vv)
}

/// `Vd32.h=vpacko(Vu32.w,Vv32.w)`
///
/// Packs the odd (high-order) halfwords of each word lane from the two
/// sources into one halfword vector (see the HVX manual for lane ordering).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackoh(vu, vv)
}

/// `Vd32.h=vpack(Vu32.w,Vv32.w):sat`
///
/// Packs the word lanes of both sources into signed halfwords with
/// signed saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackwh_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackwh_sat(vu, vv)
}

/// `Vd32.uh=vpack(Vu32.w,Vv32.w):sat`
///
/// Packs the word lanes of both sources into unsigned halfwords with
/// unsigned saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackwuh_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackwuh_sat(vu, vv)
}
3130
/// `Vd32.h=vpopcount(Vu32.h)`
///
/// Counts the number of set bits in each halfword lane of `vu`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpopcounth))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector {
    vpopcounth(vu)
}

/// `Vd32=vrdelta(Vu32,Vv32)`
///
/// Permutes the bytes of `vu` through the reverse delta network, with `vv`
/// supplying the per-stage control bits (see the HVX manual for the
/// network topology).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrdelta))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrdelta(vu, vv)
}
3154
/// `Vd32.w=vrmpy(Vu32.ub,Rt32.b)`
///
/// Reducing multiply: per word lane, a dot product of four unsigned bytes
/// of `vu` with four signed bytes taken from the scalar `rt` (see the HVX
/// manual for the exact reduction pattern).
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector {
    vrmpybus(vu, rt)
}

/// `Vx32.w+=vrmpy(Vu32.ub,Rt32.b)`
///
/// Accumulating form of [`q6_vw_vrmpy_vubrb`]: the per-lane dot products
/// are added into the word lanes of the accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vrmpybus_acc(vx, vu, rt)
}

/// `Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)`
///
/// Vector-pair reducing multiply of unsigned bytes by signed scalar bytes;
/// the immediate `iu1` (0 or 1) selects the byte alignment used (see the
/// HVX manual).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair {
    vrmpybusi(vuu, rt, iu1)
}

/// `Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)`
///
/// Accumulating form of [`q6_ww_vrmpy_wubrbi`]: the dot products are added
/// into the word lanes of the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusi_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vrmpyacc_wwwubrbi(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
    iu1: i32,
) -> HvxVectorPair {
    vrmpybusi_acc(vxx, vuu, rt, iu1)
}

/// `Vd32.w=vrmpy(Vu32.ub,Vv32.b)`
///
/// Reducing multiply: per word lane, a dot product of four unsigned bytes
/// of `vu` with the corresponding four signed bytes of `vv`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybusv(vu, vv)
}

/// `Vx32.w+=vrmpy(Vu32.ub,Vv32.b)`
///
/// Accumulating form of [`q6_vw_vrmpy_vubvb`]: the per-lane dot products
/// are added into the word lanes of the accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybusv_acc(vx, vu, vv)
}

/// `Vd32.w=vrmpy(Vu32.b,Vv32.b)`
///
/// Reducing multiply: per word lane, a dot product of four signed bytes of
/// `vu` with the corresponding four signed bytes of `vv`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybv(vu, vv)
}

/// `Vx32.w+=vrmpy(Vu32.b,Vv32.b)`
///
/// Accumulating form of [`q6_vw_vrmpy_vbvb`]: the per-lane dot products
/// are added into the word lanes of the accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybv_acc(vx, vu, vv)
}
3255
/// `Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)`
///
/// Reducing multiply: per word lane, a dot product of four unsigned bytes
/// of `vu` with four unsigned bytes taken from the scalar `rt`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector {
    vrmpyub(vu, rt)
}

/// `Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)`
///
/// Accumulating form of [`q6_vuw_vrmpy_vubrub`]: the per-lane dot products
/// are added into the unsigned word lanes of the accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyub_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vrmpyub_acc(vx, vu, rt)
}

/// `Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)`
///
/// Vector-pair reducing multiply of unsigned bytes by unsigned scalar
/// bytes; the immediate `iu1` (0 or 1) selects the byte alignment used
/// (see the HVX manual).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair {
    vrmpyubi(vuu, rt, iu1)
}

/// `Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)`
///
/// Accumulating form of [`q6_wuw_vrmpy_wubrubi`]: the dot products are
/// added into the unsigned word lanes of the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubi_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
    iu1: i32,
) -> HvxVectorPair {
    vrmpyubi_acc(vxx, vuu, rt, iu1)
}

/// `Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)`
///
/// Reducing multiply: per word lane, a dot product of four unsigned bytes
/// of `vu` with the corresponding four unsigned bytes of `vv`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpyubv(vu, vv)
}

/// `Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)`
///
/// Accumulating form of [`q6_vuw_vrmpy_vubvub`]: the per-lane dot products
/// are added into the unsigned word lanes of the accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpyubv_acc(vx, vu, vv)
}
3332
/// `Vd32=vror(Vu32,Rt32)`
///
/// Rotates the 128-byte vector right by an amount taken from the scalar
/// `rt` (see the HVX manual for the rotation granularity and masking of
/// the amount).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vror))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector {
    vror(vu, rt)
}

/// `Vd32.b=vround(Vu32.h,Vv32.h):sat`
///
/// Rounds and packs the halfword lanes of both sources into signed bytes
/// with signed saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundhb(vu, vv)
}

/// `Vd32.ub=vround(Vu32.h,Vv32.h):sat`
///
/// Rounds and packs the halfword lanes of both sources into unsigned
/// bytes with unsigned saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundhub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundhub(vu, vv)
}

/// `Vd32.h=vround(Vu32.w,Vv32.w):sat`
///
/// Rounds and packs the word lanes of both sources into signed halfwords
/// with signed saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundwh(vu, vv)
}

/// `Vd32.uh=vround(Vu32.w,Vv32.w):sat`
///
/// Rounds and packs the word lanes of both sources into unsigned
/// halfwords with unsigned saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundwuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundwuh(vu, vv)
}
3392
/// `Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)`
///
/// Running sum-of-absolute-differences over unsigned bytes of the source
/// pair against bytes from the scalar `rt`; the immediate `iu1` (0 or 1)
/// selects the byte alignment (see the HVX manual).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrsadubi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair {
    vrsadubi(vuu, rt, iu1)
}

/// `Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)`
///
/// Accumulating form of [`q6_wuw_vrsad_wubrubi`]: the absolute-difference
/// sums are added into the unsigned word lanes of the accumulator pair
/// `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrsadubi_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
    iu1: i32,
) -> HvxVectorPair {
    vrsadubi_acc(vxx, vuu, rt, iu1)
}
3421
/// `Vd32.ub=vsat(Vu32.h,Vv32.h)`
///
/// Saturates the halfword lanes of both sources to the unsigned byte
/// range and packs them into a single byte vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsathub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsathub(vu, vv)
}

/// `Vd32.h=vsat(Vu32.w,Vv32.w)`
///
/// Saturates the word lanes of both sources to the signed halfword range
/// and packs them into a single halfword vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsatwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsatwh(vu, vv)
}
3445
/// `Vdd32.h=vsxt(Vu32.b)`
///
/// Sign-extends each signed byte lane of `vu` to a halfword, producing a
/// vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair {
    vsb(vu)
}

/// `Vdd32.w=vsxt(Vu32.h)`
///
/// Sign-extends each signed halfword lane of `vu` to a word, producing a
/// vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair {
    vsh(vu)
}
3469
/// `Vd32.h=vshuffe(Vu32.h,Vv32.h)`
///
/// Gathers the even-indexed halfword lanes of the two sources into one
/// vector (see the HVX manual for exact lane placement).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufeh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshufeh(vu, vv)
}

/// `Vd32.b=vshuff(Vu32.b)`
///
/// In-register shuffle of the byte lanes of `vu` (see the HVX manual for
/// the permutation pattern).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector {
    vshuffb(vu)
}

/// `Vd32.b=vshuffe(Vu32.b,Vv32.b)`
///
/// Gathers the even-indexed byte lanes of the two sources into one vector
/// (see the HVX manual for exact lane placement).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffeb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshuffeb(vu, vv)
}

/// `Vd32.h=vshuff(Vu32.h)`
///
/// In-register shuffle of the halfword lanes of `vu` (see the HVX manual
/// for the permutation pattern).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector {
    vshuffh(vu)
}

/// `Vd32.b=vshuffo(Vu32.b,Vv32.b)`
///
/// Gathers the odd-indexed byte lanes of the two sources into one vector
/// (see the HVX manual for exact lane placement).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffob))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshuffob(vu, vv)
}

/// `Vdd32=vshuff(Vu32,Vv32,Rt8)`
///
/// Shuffles the two source vectors into a vector pair, with the low byte
/// of `rt` (Rt8) controlling the element size/pattern (see the HVX
/// manual).
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffvdd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vshuffvdd(vu, vv, rt)
}

/// `Vdd32.b=vshuffoe(Vu32.b,Vv32.b)`
///
/// Produces both the odd and even byte shuffles of the two sources as a
/// vector pair (see the HVX manual for which half holds which result).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufoeb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vshufoeb(vu, vv)
}

/// `Vdd32.h=vshuffoe(Vu32.h,Vv32.h)`
///
/// Produces both the odd and even halfword shuffles of the two sources as
/// a vector pair (see the HVX manual for which half holds which result).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufoeh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vshufoeh(vu, vv)
}

/// `Vd32.h=vshuffo(Vu32.h,Vv32.h)`
///
/// Gathers the odd-indexed halfword lanes of the two sources into one
/// vector (see the HVX manual for exact lane placement).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshufoh(vu, vv)
}
3577
/// `Vd32.b=vsub(Vu32.b,Vv32.b)`
///
/// Lane-wise subtraction of signed byte lanes (`vu - vv`) without
/// saturation (see the `:sat` variants for saturating subtract).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubb(vu, vv)
}

/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b)`
///
/// Vector-pair form of byte subtraction: `vuu - vvv` on every byte lane
/// of the two pairs, without saturation.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubb_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubb_dv(vuu, vvv)
}

/// `Vd32.h=vsub(Vu32.h,Vv32.h)`
///
/// Lane-wise subtraction of signed halfword lanes (`vu - vv`) without
/// saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubh(vu, vv)
}

/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h)`
///
/// Vector-pair form of halfword subtraction: `vuu - vvv` on every
/// halfword lane of the two pairs, without saturation.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubh_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubh_dv(vuu, vvv)
}

/// `Vd32.h=vsub(Vu32.h,Vv32.h):sat`
///
/// Lane-wise subtraction of signed halfword lanes with signed saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubhsat(vu, vv)
}

/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat`
///
/// Vector-pair form of the saturating halfword subtraction.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubhsat_dv(vuu, vvv)
}

/// `Vdd32.w=vsub(Vu32.h,Vv32.h)`
///
/// Widening subtract: signed halfword lanes of `vv` are subtracted from
/// those of `vu`, producing word results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsubhw(vu, vv)
}
3661
/// `Vdd32.h=vsub(Vu32.ub,Vv32.ub)`
///
/// Widening subtract: unsigned byte lanes of `vv` are subtracted from
/// those of `vu`, producing halfword results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsububh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsububh(vu, vv)
}

/// `Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat`
///
/// Lane-wise subtraction of unsigned byte lanes with unsigned saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsububsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsububsat(vu, vv)
}

/// `Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat`
///
/// Vector-pair form of the saturating unsigned byte subtraction.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsububsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsububsat_dv(vuu, vvv)
}

/// `Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat`
///
/// Lane-wise subtraction of unsigned halfword lanes with unsigned
/// saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubuhsat(vu, vv)
}

/// `Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat`
///
/// Vector-pair form of the saturating unsigned halfword subtraction.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubuhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubuhsat_dv(vuu, vvv)
}

/// `Vdd32.w=vsub(Vu32.uh,Vv32.uh)`
///
/// Widening subtract: unsigned halfword lanes of `vv` are subtracted from
/// those of `vu`, producing word results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubuhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsubuhw(vu, vv)
}
3733
/// `Vd32.w=vsub(Vu32.w,Vv32.w)`
///
/// Lane-wise subtraction of the 32 word lanes (`vu - vv`) without
/// saturation. Lowered through the portable `simd_sub` intrinsic rather
/// than a target-specific builtin; codegen is still checked against the
/// `vsubw` instruction.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_sub(vu, vv)
}

/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w)`
///
/// Vector-pair form of word subtraction: `vuu - vvv` on every word lane
/// of the two pairs, without saturation.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubw_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubw_dv(vuu, vvv)
}

/// `Vd32.w=vsub(Vu32.w,Vv32.w):sat`
///
/// Lane-wise subtraction of signed word lanes with signed saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubwsat(vu, vv)
}

/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat`
///
/// Vector-pair form of the saturating word subtraction.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubwsat_dv(vuu, vvv)
}
3781
3782/// `Vdd32.h=vtmpy(Vuu32.b,Rt32.b)`
3783///
3784/// Instruction Type: CVI_VX_DV
3785/// Execution Slots: SLOT23
3786#[inline(always)]
3787#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
3788#[cfg_attr(test, assert_instr(vtmpyb))]
3789#[unstable(feature = "stdarch_hexagon", issue = "151523")]
3790pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
3791    vtmpyb(vuu, rt)
3792}
3793
/// `Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)`
///
/// Triple multiply of signed bytes by scalar byte coefficients, with the
/// halfword products accumulated into `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpyb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vtmpyacc_whwbrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vtmpyb_acc(vxx, vuu, rt)
}
3809
/// `Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)`
///
/// Triple multiply of unsigned bytes by signed scalar byte coefficients,
/// producing halfword results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vtmpybus(vuu, rt)
}
3821
/// `Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)`
///
/// Triple multiply of unsigned bytes by signed scalar byte coefficients, with
/// the halfword products accumulated into `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vtmpyacc_whwubrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vtmpybus_acc(vxx, vuu, rt)
}
3837
/// `Vdd32.w=vtmpy(Vuu32.h,Rt32.b)`
///
/// Triple multiply of signed halfwords by scalar byte coefficients, producing
/// word results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpyhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vtmpyhb(vuu, rt)
}
3849
/// `Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)`
///
/// Triple multiply of signed halfwords by scalar byte coefficients, with the
/// word products accumulated into `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpyhb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vtmpyacc_wwwhrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vtmpyhb_acc(vxx, vuu, rt)
}
3865
/// `Vdd32.h=vunpack(Vu32.b)`
///
/// Sign-extends each signed byte of `vu` to a halfword, widening the result
/// into a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair {
    vunpackb(vu)
}
3877
/// `Vdd32.w=vunpack(Vu32.h)`
///
/// Sign-extends each signed halfword of `vu` to a word, widening the result
/// into a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair {
    vunpackh(vu)
}
3889
/// `Vxx32.h|=vunpacko(Vu32.b)`
///
/// Unpacks the odd-indexed bytes of `vu` into halfword positions and ORs them
/// into the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackob))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair {
    vunpackob(vxx, vu)
}
3901
/// `Vxx32.w|=vunpacko(Vu32.h)`
///
/// Unpacks the odd-indexed halfwords of `vu` into word positions and ORs them
/// into the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair {
    vunpackoh(vxx, vu)
}
3913
/// `Vdd32.uh=vunpack(Vu32.ub)`
///
/// Zero-extends each unsigned byte of `vu` to an unsigned halfword, widening
/// the result into a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair {
    vunpackub(vu)
}
3925
/// `Vdd32.uw=vunpack(Vu32.uh)`
///
/// Zero-extends each unsigned halfword of `vu` to an unsigned word, widening
/// the result into a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair {
    vunpackuh(vu)
}
3937
/// `Vd32=vxor(Vu32,Vv32)`
///
/// Bitwise XOR of two full vectors (lane size is irrelevant).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vxor))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_xor(vu, vv)
}
3949
/// `Vdd32.uh=vzxt(Vu32.ub)`
///
/// Zero-extends each unsigned byte of `vu` to an unsigned halfword, producing
/// a vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vzb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair {
    vzb(vu)
}
3961
/// `Vdd32.uw=vzxt(Vu32.uh)`
///
/// Zero-extends each unsigned halfword of `vu` to an unsigned word, producing
/// a vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vzh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair {
    vzh(vu)
}
3973
/// `Vd32.b=vsplat(Rt32)`
///
/// Broadcasts the low byte of scalar `rt` to every byte lane of the result.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(lvsplatb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector {
    lvsplatb(rt)
}
3985
/// `Vd32.h=vsplat(Rt32)`
///
/// Broadcasts the low halfword of scalar `rt` to every halfword lane of the result.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(lvsplath))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector {
    lvsplath(rt)
}
3997
/// `Vd32.b=vadd(Vu32.b,Vv32.b):sat`
///
/// Element-wise addition of signed bytes with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddbsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddbsat(vu, vv)
}
4009
/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat`
///
/// Element-wise saturating addition of signed bytes across a full vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddbsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddbsat_dv(vuu, vvv)
}
4021
/// `Vd32.h=vadd(vclb(Vu32.h),Vv32.h)`
///
/// Adds the leading-bits count (vclb) of each halfword of `vu` to the
/// corresponding halfword of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddclbh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddclbh(vu, vv)
}
4033
/// `Vd32.w=vadd(vclb(Vu32.w),Vv32.w)`
///
/// Adds the leading-bits count (vclb) of each word of `vu` to the
/// corresponding word of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddclbw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddclbw(vu, vv)
}
4045
/// `Vxx32.w+=vadd(Vu32.h,Vv32.h)`
///
/// Widening add: halfword sums of `vu` and `vv` are accumulated into the word
/// lanes of `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddhw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vaddacc_wwvhvh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vaddhw_acc(vxx, vu, vv)
}
4061
/// `Vxx32.h+=vadd(Vu32.ub,Vv32.ub)`
///
/// Widening add: unsigned-byte sums of `vu` and `vv` are accumulated into the
/// halfword lanes of `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddubh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vaddacc_whvubvub(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vaddubh_acc(vxx, vu, vv)
}
4077
/// `Vd32.ub=vadd(Vu32.ub,Vv32.b):sat`
///
/// Adds signed bytes of `vv` to unsigned bytes of `vu`, saturating the result
/// to the unsigned byte range.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddububb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddububb_sat(vu, vv)
}
4089
/// `Vxx32.w+=vadd(Vu32.uh,Vv32.uh)`
///
/// Widening add: unsigned-halfword sums of `vu` and `vv` are accumulated into
/// the word lanes of `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vadduhw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vaddacc_wwvuhvuh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vadduhw_acc(vxx, vu, vv)
}
4105
/// `Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat`
///
/// Element-wise addition of unsigned words with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vadduwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadduwsat(vu, vv)
}
4117
/// `Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat`
///
/// Element-wise saturating addition of unsigned words across a full vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vadduwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vadduwsat_dv(vuu, vvv)
}
4129
/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat`
///
/// Arithmetic shift right of halfwords from both sources by the low bits of
/// `rt`, packed into signed bytes with saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vasrhbsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhbsat(vu, vv, rt)
}
4141
/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat`
///
/// Shifts unsigned words from both sources right by the low bits of `rt` with
/// rounding, packed into unsigned halfwords with saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vasruwuhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruwuhrndsat(vu, vv, rt)
}
4153
/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat`
///
/// Arithmetic shift right of signed words from both sources by the low bits of
/// `rt` with rounding, packed into unsigned halfwords with saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vasrwuhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwuhrndsat(vu, vv, rt)
}
4165
/// `Vd32.ub=vlsr(Vu32.ub,Rt32)`
///
/// Logical (zero-fill) shift right of each unsigned byte of `vu` by `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlsrb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector {
    vlsrb(vu, rt)
}
4177
/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch`
///
/// 32-entry byte table lookup of `vu` indices into table `vv`, with the table
/// bank selected by `rt` (no-match variant).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvvb_nm))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vlutvvb_nm(vu, vv, rt)
}
4189
/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)`
///
/// 32-entry byte table lookup of `vu` indices into table `vv`, with the table
/// bank selected by immediate `iu3`; results are ORed into `vx`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvvb_oracci))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vlut32or_vbvbvbi(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
    iu3: i32,
) -> HvxVector {
    vlutvvb_oracci(vx, vu, vv, iu3)
}
4206
/// `Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)`
///
/// 32-entry byte table lookup of `vu` indices into table `vv`, with the table
/// bank selected by immediate `iu3`.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvvbi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector {
    vlutvvbi(vu, vv, iu3)
}
4218
/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch`
///
/// 16-entry halfword table lookup of byte indices `vu` into table `vv`, with
/// the table bank selected by `rt` (no-match variant); produces a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvwh_nm))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vlutvwh_nm(vu, vv, rt)
}
4230
/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)`
///
/// 16-entry halfword table lookup of byte indices `vu` into table `vv`, with
/// the table bank selected by immediate `iu3`; results are ORed into `vxx`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvwh_oracci))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vlut16or_whvbvhi(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
    iu3: i32,
) -> HvxVectorPair {
    vlutvwh_oracci(vxx, vu, vv, iu3)
}
4247
/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)`
///
/// 16-entry halfword table lookup of byte indices `vu` into table `vv`, with
/// the table bank selected by immediate `iu3`; produces a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvwhi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair {
    vlutvwhi(vu, vv, iu3)
}
4259
/// `Vd32.b=vmax(Vu32.b,Vv32.b)`
///
/// Element-wise maximum of signed bytes.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmaxb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxb(vu, vv)
}
4271
/// `Vd32.b=vmin(Vu32.b,Vv32.b)`
///
/// Element-wise minimum of signed bytes.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vminb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminb(vu, vv)
}
4283
/// `Vdd32.w=vmpa(Vuu32.uh,Rt32.b)`
///
/// Multiply-accumulate (vmpa) of unsigned halfwords from the pair `vuu` by
/// scalar byte coefficients in `rt`, producing word results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpauhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vmpauhb(vuu, rt)
}
4295
/// `Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)`
///
/// Multiply-accumulate (vmpa) of unsigned halfwords by scalar byte
/// coefficients, with the word products accumulated into `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpauhb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpaacc_wwwuhrb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vmpauhb_acc(vxx, vuu, rt)
}
4311
/// `Vdd32=vmpye(Vu32.w,Vv32.uh)`
///
/// Even multiply: words of `vu` by the even unsigned halfwords of `vv`,
/// producing a full vector-pair result.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyewuh_64))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyewuh_64(vu, vv)
}
4323
/// `Vd32.w=vmpyi(Vu32.w,Rt32.ub)`
///
/// Integer multiply of each word of `vu` by an unsigned byte of scalar `rt`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyiwub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwub(vu, rt)
}
4335
/// `Vx32.w+=vmpyi(Vu32.w,Rt32.ub)`
///
/// Integer multiply of each word of `vu` by an unsigned byte of scalar `rt`,
/// accumulated into `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyiwub_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwub_acc(vx, vu, rt)
}
4347
/// `Vxx32+=vmpyo(Vu32.w,Vv32.h)`
///
/// Odd multiply: words of `vu` by the odd halfwords of `vv`, accumulated into
/// the vector pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyowh_64_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_w_vmpyoacc_wvwvh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyowh_64_acc(vxx, vu, vv)
}
4363
/// `Vd32.ub=vround(Vu32.uh,Vv32.uh):sat`
///
/// Rounds unsigned halfwords from both sources and packs them into unsigned
/// bytes with saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vrounduhub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrounduhub(vu, vv)
}
4375
/// `Vd32.uh=vround(Vu32.uw,Vv32.uw):sat`
///
/// Rounds unsigned words from both sources and packs them into unsigned
/// halfwords with saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vrounduwuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrounduwuh(vu, vv)
}
4387
/// `Vd32.uh=vsat(Vu32.uw,Vv32.uw)`
///
/// Packs unsigned words from both sources into unsigned halfwords with
/// saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsatuwuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsatuwuh(vu, vv)
}
4399
/// `Vd32.b=vsub(Vu32.b,Vv32.b):sat`
///
/// Element-wise subtraction of signed bytes with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubbsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubbsat(vu, vv)
}
4411
/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat`
///
/// Element-wise saturating subtraction of signed bytes across a full vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubbsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubbsat_dv(vuu, vvv)
}
4423
/// `Vd32.ub=vsub(Vu32.ub,Vv32.b):sat`
///
/// Subtracts signed bytes of `vv` from unsigned bytes of `vu`, saturating the
/// result to the unsigned byte range.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubububb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubububb_sat(vu, vv)
}
4435
/// `Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat`
///
/// Element-wise subtraction of unsigned words with saturation (clamps at zero).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubuwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubuwsat(vu, vv)
}
4447
/// `Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat`
///
/// Element-wise saturating subtraction of unsigned words across a full vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubuwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubuwsat_dv(vuu, vvv)
}
4459
/// `Vd32.b=vabs(Vu32.b)`
///
/// Element-wise absolute value of signed bytes (no saturation).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vabsb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector {
    vabsb(vu)
}
4471
/// `Vd32.b=vabs(Vu32.b):sat`
///
/// Element-wise absolute value of signed bytes with saturation, so |-128|
/// clamps to 127 instead of wrapping.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vabsb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector {
    vabsb_sat(vu)
}
4483
/// `Vx32.h+=vasl(Vu32.h,Rt32)`
///
/// Shifts each halfword of `vu` left by `rt` and accumulates the result into `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vaslh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vaslh_acc(vx, vu, rt)
}
4495
/// `Vx32.h+=vasr(Vu32.h,Rt32)`
///
/// Arithmetic shift right of each halfword of `vu` by `rt`, accumulated into `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasrh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vasrh_acc(vx, vu, rt)
}
4507
/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat`
///
/// Shifts unsigned halfwords from both sources right by the low bits of `rt`
/// with rounding, packed into unsigned bytes with saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasruhubrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruhubrndsat(vu, vv, rt)
}
4519
/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat`
///
/// Shifts unsigned halfwords from both sources right by the low bits of `rt`,
/// packed into unsigned bytes with saturation (no rounding).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasruhubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruhubsat(vu, vv, rt)
}
4531
/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat`
///
/// Shifts unsigned words from both sources right by the low bits of `rt`,
/// packed into unsigned halfwords with saturation (no rounding).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasruwuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruwuhsat(vu, vv, rt)
}
4543
/// `Vd32.b=vavg(Vu32.b,Vv32.b)`
///
/// Element-wise average of signed bytes (no rounding).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavgb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgb(vu, vv)
}
4555
/// `Vd32.b=vavg(Vu32.b,Vv32.b):rnd`
///
/// Element-wise average of signed bytes with rounding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavgbrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgbrnd(vu, vv)
}
4567
/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw)`
///
/// Element-wise average of unsigned words (no rounding).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavguw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguw(vu, vv)
}
4579
/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd`
///
/// Element-wise average of unsigned words with rounding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavguwrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguwrnd(vu, vv)
}
4591
/// `Vdd32=#0`
///
/// Returns an all-zero vector pair.
///
/// Instruction Type: MAPPING
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vdd0))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_w_vzero() -> HvxVectorPair {
    vdd0()
}
4603
/// `vtmp.h=vgather(Rt32,Mu2,Vv32.h).h`
///
/// Gathers halfwords from memory at base `rt` using per-lane halfword offsets
/// in `vv`; the gathered vector is delivered through `rs`.
/// NOTE(review): `rs` must be a valid, writable vector destination — confirm
/// exact alignment requirements against the HVX manual.
///
/// Instruction Type: CVI_GATHER
/// Execution Slots: SLOT01
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vgathermh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) {
    vgathermh(rs, rt, mu, vv)
}
4615
/// `vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h`
///
/// Gathers halfwords from memory at base `rt` using per-lane word offsets from
/// the pair `vvv`; the gathered vector is delivered through `rs`.
/// NOTE(review): `rs` must be a valid, writable vector destination — confirm
/// exact alignment requirements against the HVX manual.
///
/// Instruction Type: CVI_GATHER_DV
/// Execution Slots: SLOT01
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vgathermhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) {
    vgathermhw(rs, rt, mu, vvv)
}
4627
/// `vtmp.w=vgather(Rt32,Mu2,Vv32.w).w`
///
/// Gathers words from memory at base `rt` using per-lane word offsets in `vv`;
/// the gathered vector is delivered through `rs`.
/// NOTE(review): `rs` must be a valid, writable vector destination — confirm
/// exact alignment requirements against the HVX manual.
///
/// Instruction Type: CVI_GATHER
/// Execution Slots: SLOT01
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vgathermw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) {
    vgathermw(rs, rt, mu, vv)
}
4639
/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)`
///
/// Multiply-accumulate (vmpa) of unsigned bytes from the pair `vuu` by scalar
/// unsigned byte coefficients in `rt`, producing halfword results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpabuu))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vmpabuu(vuu, rt)
}
4651
/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)`
///
/// Multiply-accumulate (vmpa) of unsigned bytes by scalar unsigned byte
/// coefficients, with the halfword products accumulated into `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpabuu_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wh_vmpaacc_whwubrub(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vmpabuu_acc(vxx, vuu, rt)
}
4667
/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h)`
///
/// Widening multiply of halfwords of `vu` by a scalar halfword of `rt`, with
/// the word products accumulated into `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpyh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyh_acc(vxx, vu, rt)
}
4679
/// `Vd32.uw=vmpye(Vu32.uh,Rt32.uh)`
///
/// Even multiply of unsigned halfwords of `vu` by a scalar unsigned halfword,
/// producing unsigned word results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpyuhe))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyuhe(vu, rt)
}
4691
/// Accumulating form of the unsigned-halfword `vmpye`: the unsigned-word
/// products are added into the accumulator vector `vx`.
///
/// `Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpyuhe_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyuhe_acc(vx, vu, rt)
}
4703
/// Byte-lane `vnavg` of `vu` and `vv` (conventionally the halved difference
/// of the operands — confirm exact rounding in the HVX manual).
///
/// `Vd32.b=vnavg(Vu32.b,Vv32.b)`
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vnavgb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgb(vu, vv)
}
4715
/// Halfword scatter: stores halfword lanes of `vw` to memory addressed by
/// base `rt`, modifier `mu`, and per-lane halfword offsets in `vv`.
///
/// `vscatter(Rt32,Mu2,Vv32.h).h=Vw32`
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermh(rt, mu, vv, vw)
}
4727
/// Accumulating halfword scatter (`+=` form): adds halfword lanes of `vw`
/// into the memory locations addressed by `rt`, `mu`, and offsets in `vv`.
///
/// `vscatter(Rt32,Mu2,Vv32.h).h+=Vw32`
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermh_add))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermh_add(rt, mu, vv, vw)
}
4739
/// Halfword scatter with word offsets: stores halfword lanes of `vw` using
/// 32-bit offsets supplied by the vector pair `vvv` (hence the double-vector
/// CVI_SCATTER_DV form).
///
/// `vscatter(Rt32,Mu2,Vvv32.w).h=Vw32`
///
/// Instruction Type: CVI_SCATTER_DV
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) {
    vscattermhw(rt, mu, vvv, vw)
}
4751
/// Accumulating halfword scatter with word offsets (`+=` form): adds
/// halfword lanes of `vw` into memory addressed via the word-offset pair
/// `vvv`.
///
/// `vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32`
///
/// Instruction Type: CVI_SCATTER_DV
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermhw_add))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) {
    vscattermhw_add(rt, mu, vvv, vw)
}
4763
/// Word scatter: stores word lanes of `vw` to memory addressed by base
/// `rt`, modifier `mu`, and per-lane word offsets in `vv`.
///
/// `vscatter(Rt32,Mu2,Vv32.w).w=Vw32`
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermw(rt, mu, vv, vw)
}
4775
/// Accumulating word scatter (`+=` form): adds word lanes of `vw` into the
/// memory locations addressed by `rt`, `mu`, and word offsets in `vv`.
///
/// `vscatter(Rt32,Mu2,Vv32.w).w+=Vw32`
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermw_add))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermw_add(rt, mu, vv, vw)
}
4787
/// `vasrinto` on word lanes: combines `vu` and `vv` into the destination
/// pair `vxx` (an arithmetic-shift-right-and-insert style operation —
/// confirm exact lane placement in the HVX manual). Requires HVX v66.
///
/// `Vxx32.w=vasrinto(Vu32.w,Vv32.w)`
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
#[cfg_attr(test, assert_instr(vasr_into))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_vasrinto_wwvwvw(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vasr_into(vxx, vu, vv)
}
4803
/// Per-lane rotate-right of unsigned words in `vu` by the amounts held in
/// the corresponding lanes of `vv`. Requires HVX v66.
///
/// `Vd32.uw=vrotr(Vu32.uw,Vv32.uw)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
#[cfg_attr(test, assert_instr(vrotr))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrotr(vu, vv)
}
4815
/// `vsatdw` on word lanes: saturates the double-word quantity formed from
/// `vu`/`vv` down to 32-bit words (see the HVX manual for the exact pairing).
/// Requires HVX v66.
///
/// `Vd32.w=vsatdw(Vu32.w,Vv32.w)`
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
#[cfg_attr(test, assert_instr(vsatdw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsatdw(vu, vv)
}
4827
/// `v6mpy` horizontal (`:h`) form: multiplies unsigned bytes of pair `vuu`
/// against signed bytes of pair `vvv` with the 2-bit immediate `iu2`
/// selecting the variant (the `s10` mnemonic suggests 10-bit signed
/// operands — confirm in the HVX manual). Produces a word pair. Requires
/// HVX v68.
///
/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyhubs10))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_v6mpy_wubwbi_h(
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyhubs10(vuu, vvv, iu2)
}
4843
/// Accumulating `v6mpy` horizontal (`:h`) form: as the non-accumulating
/// variant, but the word products are added into the accumulator pair
/// `vxx`. Requires HVX v68.
///
/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyhubs10_vxx(vxx, vuu, vvv, iu2)
}
4860
/// `v6mpy` vertical (`:v`) form: multiplies unsigned bytes of pair `vuu`
/// against signed bytes of pair `vvv`, with immediate `iu2` selecting the
/// variant. Produces a word pair. Requires HVX v68.
///
/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyvubs10))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_v6mpy_wubwbi_v(
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyvubs10(vuu, vvv, iu2)
}
4876
/// Accumulating `v6mpy` vertical (`:v`) form: the word products are added
/// into the accumulator pair `vxx`. Requires HVX v68.
///
/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyvubs10_vxx(vxx, vuu, vvv, iu2)
}
4893
/// Lane-wise absolute value of half-precision float elements. Requires
/// HVX v68 (floating-point HVX).
///
/// `Vd32.hf=vabs(Vu32.hf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vabs_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector {
    vabs_hf(vu)
}
4905
/// Lane-wise absolute value of single-precision float elements. Requires
/// HVX v68 (floating-point HVX).
///
/// `Vd32.sf=vabs(Vu32.sf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vabs_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector {
    vabs_sf(vu)
}
4917
/// Adds half-precision float lanes of `vu` and `vv`, producing results in
/// the qf16 (Qualcomm float) format.
///
/// `Vd32.qf16=vadd(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_hf(vu, vv)
}
4929
/// Adds half-precision float lanes of `vu` and `vv`, producing
/// half-precision results.
///
/// `Vd32.hf=vadd(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_hf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_hf_hf(vu, vv)
}
4941
/// Adds qf16-format lanes of `vu` and `vv`, producing qf16 results.
///
/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf16(vu, vv)
}
4953
/// Mixed-format add: qf16 lanes of `vu` plus half-precision lanes of `vv`,
/// producing qf16 results.
///
/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf16_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf16_mix(vu, vv)
}
4965
/// Adds qf32-format lanes of `vu` and `vv`, producing qf32 results.
///
/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf32(vu, vv)
}
4977
/// Mixed-format add: qf32 lanes of `vu` plus single-precision lanes of
/// `vv`, producing qf32 results.
///
/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf32_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf32_mix(vu, vv)
}
4989
/// Adds single-precision float lanes of `vu` and `vv`, producing results in
/// the qf32 format.
///
/// `Vd32.qf32=vadd(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_sf(vu, vv)
}
5001
/// Widening add: half-precision lanes of `vu` and `vv` are added and the
/// single-precision results fill a vector pair.
///
/// `Vdd32.sf=vadd(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vadd_sf_hf(vu, vv)
}
5013
/// Adds single-precision float lanes of `vu` and `vv`, producing
/// single-precision results.
///
/// `Vd32.sf=vadd(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_sf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_sf_sf(vu, vv)
}
5025
/// `vfmv`: moves word lanes of `vu` to the destination (presumably a plain
/// copy routed through the floating-point pipeline, per the CVI_VX_LATE
/// class — confirm in the HVX manual).
///
/// `Vd32.w=vfmv(Vu32.w)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vassign_fp))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector {
    vassign_fp(vu)
}
5037
/// Converts qf16-format lanes of `vu` to standard half-precision floats.
///
/// `Vd32.hf=Vu32.qf16`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vconv_hf_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector {
    vconv_hf_qf16(vu)
}
5049
/// Narrowing conversion: qf32-format lanes of the vector pair `vuu` become
/// half-precision floats in a single vector.
///
/// `Vd32.hf=Vuu32.qf32`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vconv_hf_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector {
    vconv_hf_qf32(vuu)
}
5061
/// Converts qf32-format lanes of `vu` to standard single-precision floats.
///
/// `Vd32.sf=Vu32.qf32`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vconv_sf_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector {
    vconv_sf_qf32(vu)
}
5073
/// Narrowing conversion: half-precision lanes from the two source vectors
/// `vu` and `vv` are converted to signed bytes packed into one vector.
///
/// `Vd32.b=vcvt(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_b_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vcvt_b_hf(vu, vv)
}
5085
/// Converts half-precision float lanes of `vu` to signed halfword integers.
///
/// `Vd32.h=vcvt(Vu32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_h_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector {
    vcvt_h_hf(vu)
}
5097
/// Widening conversion: signed byte lanes of `vu` become half-precision
/// floats filling a vector pair.
///
/// `Vdd32.hf=vcvt(Vu32.b)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_b))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair {
    vcvt_hf_b(vu)
}
5109
/// Converts signed halfword integer lanes of `vu` to half-precision floats.
///
/// `Vd32.hf=vcvt(Vu32.h)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_h))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector {
    vcvt_hf_h(vu)
}
5121
/// Narrowing conversion: single-precision lanes from the two source
/// vectors `vu` and `vv` are converted to half-precision floats packed
/// into one vector.
///
/// `Vd32.hf=vcvt(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vcvt_hf_sf(vu, vv)
}
5133
/// Widening conversion: unsigned byte lanes of `vu` become half-precision
/// floats filling a vector pair.
///
/// `Vdd32.hf=vcvt(Vu32.ub)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_ub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair {
    vcvt_hf_ub(vu)
}
5145
/// Converts unsigned halfword integer lanes of `vu` to half-precision
/// floats.
///
/// `Vd32.hf=vcvt(Vu32.uh)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_uh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector {
    vcvt_hf_uh(vu)
}
5157
/// Widening conversion: half-precision lanes of `vu` become
/// single-precision floats filling a vector pair.
///
/// `Vdd32.sf=vcvt(Vu32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair {
    vcvt_sf_hf(vu)
}
5169
/// Narrowing conversion: half-precision lanes from the two source vectors
/// `vu` and `vv` are converted to unsigned bytes packed into one vector.
///
/// `Vd32.ub=vcvt(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_ub_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vcvt_ub_hf(vu, vv)
}
5181
/// Converts half-precision float lanes of `vu` to unsigned halfword
/// integers.
///
/// `Vd32.uh=vcvt(Vu32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_uh_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector {
    vcvt_uh_hf(vu)
}
5193
/// `vdmpy` of half-precision lanes: multiplies `vu` by `vv` and produces
/// single-precision results (a dual/paired multiply per the `vdmpy`
/// mnemonic — confirm lane pairing in the HVX manual).
///
/// `Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vdmpy_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdmpy_sf_hf(vu, vv)
}
5205
/// Accumulating `vdmpy` of half-precision lanes: the single-precision
/// products are added into the accumulator vector `vx`.
///
/// `Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdmpy_sf_hf_acc(vx, vu, vv)
}
5217
/// Lane-wise `vfmax` of half-precision floats (the `f` form; NaN handling
/// may differ from plain `vmax` — see the HVX manual).
///
/// `Vd32.hf=vfmax(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmax_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmax_hf(vu, vv)
}
5229
/// Lane-wise `vfmax` of single-precision floats (the `f` form; NaN
/// handling may differ from plain `vmax` — see the HVX manual).
///
/// `Vd32.sf=vfmax(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmax_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmax_sf(vu, vv)
}
5241
/// Lane-wise `vfmin` of half-precision floats (the `f` form; NaN handling
/// may differ from plain `vmin` — see the HVX manual).
///
/// `Vd32.hf=vfmin(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmin_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmin_hf(vu, vv)
}
5253
/// Lane-wise `vfmin` of single-precision floats (the `f` form; NaN
/// handling may differ from plain `vmin` — see the HVX manual).
///
/// `Vd32.sf=vfmin(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmin_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmin_sf(vu, vv)
}
5265
/// Lane-wise negation of half-precision float elements.
///
/// `Vd32.hf=vfneg(Vu32.hf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfneg_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector {
    vfneg_hf(vu)
}
5277
/// Lane-wise negation of single-precision float elements.
///
/// `Vd32.sf=vfneg(Vu32.sf)`
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfneg_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector {
    vfneg_sf(vu)
}
5289
/// Lane-wise maximum of half-precision float elements.
///
/// `Vd32.hf=vmax(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmax_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmax_hf(vu, vv)
}
5301
/// Lane-wise maximum of single-precision float elements.
///
/// `Vd32.sf=vmax(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmax_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmax_sf(vu, vv)
}
5313
/// Lane-wise minimum of half-precision float elements.
///
/// `Vd32.hf=vmin(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmin_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmin_hf(vu, vv)
}
5325
/// Lane-wise minimum of single-precision float elements.
///
/// `Vd32.sf=vmin(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmin_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmin_sf(vu, vv)
}
5337
/// Lane-wise multiply of half-precision floats, producing half-precision
/// results.
///
/// `Vd32.hf=vmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_hf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_hf_hf(vu, vv)
}
5349
/// Accumulating half-precision multiply: the products of `vu` and `vv` are
/// added into the accumulator vector `vx`.
///
/// `Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_hf_hf_acc(vx, vu, vv)
}
5361
/// Lane-wise multiply of qf16-format elements, producing qf16 results.
///
/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf16(vu, vv)
}
5373
/// Lane-wise multiply of half-precision floats, producing results in the
/// qf16 format.
///
/// `Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf16_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf16_hf(vu, vv)
}
5385
/// Mixed-format multiply: qf16 lanes of `vu` times half-precision lanes of
/// `vv`, producing qf16 results.
///
/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf16_mix_hf(vu, vv)
}
5397
/// Lane-wise multiply of qf32-format elements, producing qf32 results.
///
/// `Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf32(vu, vv)
}
5409
/// Widening multiply: half-precision lanes of `vu` and `vv` are multiplied
/// and the qf32 results fill a vector pair.
///
/// `Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_qf32_hf(vu, vv)
}
5421
/// Widening mixed-format multiply: qf16 lanes of `vu` times half-precision
/// lanes of `vv`, with qf32 results filling a vector pair.
///
/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_qf32_mix_hf(vu, vv)
}
5433
/// Widening multiply: qf16 lanes of `vu` and `vv` are multiplied and the
/// qf32 results fill a vector pair.
///
/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_qf32_qf16(vu, vv)
}
5445
/// Lane-wise multiply of single-precision floats, producing results in the
/// qf32 format.
///
/// `Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf32_sf(vu, vv)
}
5457
/// Widening multiply: half-precision lanes of `vu` and `vv` are multiplied
/// and the single-precision results fill a vector pair.
///
/// `Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_sf_hf(vu, vv)
}
5469
/// Accumulating widening multiply: the single-precision products of the
/// half-precision lanes of `vu` and `vv` are added into the accumulator
/// pair `vxx`.
///
/// `Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpy_sf_hf_acc(vxx, vu, vv)
}
5485
/// Lane-wise multiply of single-precision floats, producing
/// single-precision results.
///
/// `Vd32.sf=vmpy(Vu32.sf,Vv32.sf)`
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_sf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_sf_sf(vu, vv)
}
5497
/// Subtracts half-precision float lanes (`vu - vv`), producing results in
/// the qf16 format.
///
/// `Vd32.qf16=vsub(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_hf(vu, vv)
}
5509
/// Subtracts half-precision float lanes (`vu - vv`), producing
/// half-precision results.
///
/// `Vd32.hf=vsub(Vu32.hf,Vv32.hf)`
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_hf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_hf_hf(vu, vv)
}
5521
/// Subtracts qf16-format lanes (`vu - vv`), producing qf16 results.
///
/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf16(vu, vv)
}
5533
/// Mixed-format subtract: qf16 lanes of `vu` minus half-precision lanes of
/// `vv`, producing qf16 results.
///
/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf16_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf16_mix(vu, vv)
}
5545
/// Subtracts qf32-format lanes (`vu - vv`), producing qf32 results.
///
/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf32(vu, vv)
}
5557
/// Mixed-format subtract: qf32 lanes of `vu` minus single-precision lanes
/// of `vv`, producing qf32 results.
///
/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)`
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf32_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf32_mix(vu, vv)
}
5569
/// `Vd32.qf32=vsub(Vu32.sf,Vv32.sf)`
///
/// Lane-wise subtraction of single-precision (`sf`) elements with the
/// result delivered in `qf32` format. Wraps the `vsub_sf` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_sf(vu, vv)
}
5581
/// `Vdd32.sf=vsub(Vu32.hf,Vv32.hf)`
///
/// Widening subtraction: half-precision (`hf`) inputs produce a vector
/// pair of single-precision (`sf`) results. Wraps the `vsub_sf_hf`
/// intrinsic.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsub_sf_hf(vu, vv)
}
5593
/// `Vd32.sf=vsub(Vu32.sf,Vv32.sf)`
///
/// Lane-wise subtraction of single-precision float (`sf`) elements,
/// computing `vu - vv`. Thin wrapper over the `vsub_sf_sf` intrinsic.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_sf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_sf_sf(vu, vv)
}
5605
/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat`
///
/// Arithmetic shift right of the unsigned-halfword pair `vuu`, with shift
/// counts taken from `vv`, rounding and saturating the results down to
/// unsigned bytes. Wraps the `vasrvuhubrndsat` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvuhubrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvuhubrndsat(vuu, vv)
}
5617
/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat`
///
/// Arithmetic shift right of the unsigned-halfword pair `vuu`, with shift
/// counts taken from `vv`, saturating (without rounding) down to unsigned
/// bytes. Wraps the `vasrvuhubsat` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvuhubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvuhubsat(vuu, vv)
}
5629
/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat`
///
/// Arithmetic shift right of the word pair `vuu`, with shift counts taken
/// from `vv`, rounding and saturating the results down to unsigned
/// halfwords. Wraps the `vasrvwuhrndsat` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvwuhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvwuhrndsat(vuu, vv)
}
5641
/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat`
///
/// Arithmetic shift right of the word pair `vuu`, with shift counts taken
/// from `vv`, saturating (without rounding) down to unsigned halfwords.
/// Wraps the `vasrvwuhsat` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvwuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvwuhsat(vuu, vv)
}
5653
/// `Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16`
///
/// Lane-wise unsigned halfword multiply, keeping the upper 16 bits of each
/// product (logical shift right by 16). Wraps the `vmpyuhvs` intrinsic.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vmpyuhvs))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyuhvs(vu, vv)
}
5665
/// `Vd32.h=Vu32.hf`
///
/// Lane-wise conversion from half-precision float (`hf`) to signed
/// halfword (`h`). Wraps the `vconv_h_hf` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_h_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector {
    vconv_h_hf(vu)
}
5677
/// `Vd32.hf=Vu32.h`
///
/// Lane-wise conversion from signed halfword (`h`) to half-precision
/// float (`hf`). Wraps the `vconv_hf_h` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_hf_h))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector {
    vconv_hf_h(vu)
}
5689
/// `Vd32.sf=Vu32.w`
///
/// Lane-wise conversion from signed word (`w`) to single-precision float
/// (`sf`). Wraps the `vconv_sf_w` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_sf_w))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector {
    vconv_sf_w(vu)
}
5701
/// `Vd32.w=Vu32.sf`
///
/// Lane-wise conversion from single-precision float (`sf`) to signed word
/// (`w`). Wraps the `vconv_w_sf` intrinsic.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_w_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector {
    vconv_w_sf(vu)
}
5713
/// `Vd32=vgetqfext(Vu32.x,Rt32)`
///
/// Extracts qf-format extension data from `vu`, controlled by the scalar
/// `rt` (see the HVX manual for the control encoding). Wraps the
/// `get_qfext` intrinsic.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(get_qfext))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector {
    get_qfext(vu, rt)
}
5725
/// `Vd32.x=vsetqfext(Vu32,Rt32)`
///
/// Inserts qf-format extension data into `vu`, controlled by the scalar
/// `rt` (see the HVX manual for the control encoding). Wraps the
/// `set_qfext` intrinsic.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(set_qfext))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector {
    set_qfext(vu, rt)
}
5737
/// `Vd32.f8=vabs(Vu32.f8)`
///
/// Lane-wise absolute value of 8-bit float (`f8`) elements. Wraps the
/// `vabs_f8` intrinsic.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vabs_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector {
    vabs_f8(vu)
}
5749
/// `Vdd32.hf=vcvt2(Vu32.b)`
///
/// Widening conversion of signed bytes (`b`) to half-precision floats
/// (`hf`), producing a vector pair. Wraps the `vcvt2_hf_b` intrinsic.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vcvt2_hf_b))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair {
    vcvt2_hf_b(vu)
}
5761
/// `Vdd32.hf=vcvt2(Vu32.ub)`
///
/// Widening conversion of unsigned bytes (`ub`) to half-precision floats
/// (`hf`), producing a vector pair. Wraps the `vcvt2_hf_ub` intrinsic.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vcvt2_hf_ub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair {
    vcvt2_hf_ub(vu)
}
5773
/// `Vdd32.hf=vcvt(Vu32.f8)`
///
/// Widening conversion of 8-bit floats (`f8`) to half-precision floats
/// (`hf`), producing a vector pair. Wraps the `vcvt_hf_f8` intrinsic.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vcvt_hf_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair {
    vcvt_hf_f8(vu)
}
5785
/// `Vd32.f8=vfmax(Vu32.f8,Vv32.f8)`
///
/// Lane-wise maximum of 8-bit float (`f8`) elements. Wraps the `vfmax_f8`
/// intrinsic.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vfmax_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmax_f8(vu, vv)
}
5797
/// `Vd32.f8=vfmin(Vu32.f8,Vv32.f8)`
///
/// Lane-wise minimum of 8-bit float (`f8`) elements. Wraps the `vfmin_f8`
/// intrinsic.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vfmin_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmin_f8(vu, vv)
}
5809
/// `Vd32.f8=vfneg(Vu32.f8)`
///
/// Lane-wise negation of 8-bit float (`f8`) elements. Wraps the `vfneg_f8`
/// intrinsic.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vfneg_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector {
    vfneg_f8(vu)
}
5821
5822/// `Qd4=and(Qs4,Qt4)`
5823///
5824/// This is a compound operation composed of multiple HVX instructions.
5825/// Instruction Type: CVI_VA_DV
5826/// Execution Slots: SLOT0123
5827#[inline(always)]
5828#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5829#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5830pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
5831    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
5832        pred_and(
5833            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
5834            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
5835        ),
5836        -1,
5837    ))
5838}
5839
5840/// `Qd4=and(Qs4,!Qt4)`
5841///
5842/// This is a compound operation composed of multiple HVX instructions.
5843/// Instruction Type: CVI_VA_DV
5844/// Execution Slots: SLOT0123
5845#[inline(always)]
5846#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5847#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5848pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
5849    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
5850        pred_and_n(
5851            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
5852            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
5853        ),
5854        -1,
5855    ))
5856}
5857
5858/// `Qd4=not(Qs4)`
5859///
5860/// This is a compound operation composed of multiple HVX instructions.
5861/// Instruction Type: CVI_VA
5862/// Execution Slots: SLOT0123
5863#[inline(always)]
5864#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5865#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5866pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred {
5867    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
5868        pred_not(vandvrt(
5869            core::mem::transmute::<HvxVectorPred, HvxVector>(qs),
5870            -1,
5871        )),
5872        -1,
5873    ))
5874}
5875
5876/// `Qd4=or(Qs4,Qt4)`
5877///
5878/// This is a compound operation composed of multiple HVX instructions.
5879/// Instruction Type: CVI_VA_DV
5880/// Execution Slots: SLOT0123
5881#[inline(always)]
5882#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5883#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5884pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
5885    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
5886        pred_or(
5887            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
5888            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
5889        ),
5890        -1,
5891    ))
5892}
5893
5894/// `Qd4=or(Qs4,!Qt4)`
5895///
5896/// This is a compound operation composed of multiple HVX instructions.
5897/// Instruction Type: CVI_VA_DV
5898/// Execution Slots: SLOT0123
5899#[inline(always)]
5900#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5901#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5902pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
5903    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
5904        pred_or_n(
5905            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
5906            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
5907        ),
5908        -1,
5909    ))
5910}
5911
5912/// `Qd4=vsetq(Rt32)`
5913///
5914/// This is a compound operation composed of multiple HVX instructions.
5915/// Instruction Type: CVI_VP
5916/// Execution Slots: SLOT0123
5917#[inline(always)]
5918#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5919#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5920pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred {
5921    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(pred_scalar2(rt), -1))
5922}
5923
5924/// `Qd4=xor(Qs4,Qt4)`
5925///
5926/// This is a compound operation composed of multiple HVX instructions.
5927/// Instruction Type: CVI_VA_DV
5928/// Execution Slots: SLOT0123
5929#[inline(always)]
5930#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5931#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5932pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
5933    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
5934        pred_xor(
5935            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
5936            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
5937        ),
5938        -1,
5939    ))
5940}
5941
5942/// `if (!Qv4) vmem(Rt32+#s4)=Vs32`
5943///
5944/// This is a compound operation composed of multiple HVX instructions.
5945/// Instruction Type: CVI_VM_ST
5946/// Execution Slots: SLOT0
5947#[inline(always)]
5948#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5949#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5950pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
5951    vS32b_nqpred_ai(
5952        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
5953        rt,
5954        vs,
5955    )
5956}
5957
5958/// `if (!Qv4) vmem(Rt32+#s4):nt=Vs32`
5959///
5960/// This is a compound operation composed of multiple HVX instructions.
5961/// Instruction Type: CVI_VM_ST
5962/// Execution Slots: SLOT0
5963#[inline(always)]
5964#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5965#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5966pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
5967    vS32b_nt_nqpred_ai(
5968        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
5969        rt,
5970        vs,
5971    )
5972}
5973
5974/// `if (Qv4) vmem(Rt32+#s4):nt=Vs32`
5975///
5976/// This is a compound operation composed of multiple HVX instructions.
5977/// Instruction Type: CVI_VM_ST
5978/// Execution Slots: SLOT0
5979#[inline(always)]
5980#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5981#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5982pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
5983    vS32b_nt_qpred_ai(
5984        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
5985        rt,
5986        vs,
5987    )
5988}
5989
5990/// `if (Qv4) vmem(Rt32+#s4)=Vs32`
5991///
5992/// This is a compound operation composed of multiple HVX instructions.
5993/// Instruction Type: CVI_VM_ST
5994/// Execution Slots: SLOT0
5995#[inline(always)]
5996#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
5997#[unstable(feature = "stdarch_hexagon", issue = "151523")]
5998pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
5999    vS32b_qpred_ai(
6000        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6001        rt,
6002        vs,
6003    )
6004}
6005
6006/// `if (!Qv4) Vx32.b+=Vu32.b`
6007///
6008/// This is a compound operation composed of multiple HVX instructions.
6009/// Instruction Type: CVI_VA
6010/// Execution Slots: SLOT0123
6011#[inline(always)]
6012#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6013#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6014pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6015    vaddbnq(
6016        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6017        vx,
6018        vu,
6019    )
6020}
6021
6022/// `if (Qv4) Vx32.b+=Vu32.b`
6023///
6024/// This is a compound operation composed of multiple HVX instructions.
6025/// Instruction Type: CVI_VA
6026/// Execution Slots: SLOT0123
6027#[inline(always)]
6028#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6029#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6030pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6031    vaddbq(
6032        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6033        vx,
6034        vu,
6035    )
6036}
6037
6038/// `if (!Qv4) Vx32.h+=Vu32.h`
6039///
6040/// This is a compound operation composed of multiple HVX instructions.
6041/// Instruction Type: CVI_VA
6042/// Execution Slots: SLOT0123
6043#[inline(always)]
6044#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6045#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6046pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6047    vaddhnq(
6048        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6049        vx,
6050        vu,
6051    )
6052}
6053
6054/// `if (Qv4) Vx32.h+=Vu32.h`
6055///
6056/// This is a compound operation composed of multiple HVX instructions.
6057/// Instruction Type: CVI_VA
6058/// Execution Slots: SLOT0123
6059#[inline(always)]
6060#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6061#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6062pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6063    vaddhq(
6064        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6065        vx,
6066        vu,
6067    )
6068}
6069
6070/// `if (!Qv4) Vx32.w+=Vu32.w`
6071///
6072/// This is a compound operation composed of multiple HVX instructions.
6073/// Instruction Type: CVI_VA
6074/// Execution Slots: SLOT0123
6075#[inline(always)]
6076#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6077#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6078pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6079    vaddwnq(
6080        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6081        vx,
6082        vu,
6083    )
6084}
6085
6086/// `if (Qv4) Vx32.w+=Vu32.w`
6087///
6088/// This is a compound operation composed of multiple HVX instructions.
6089/// Instruction Type: CVI_VA
6090/// Execution Slots: SLOT0123
6091#[inline(always)]
6092#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6093#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6094pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6095    vaddwq(
6096        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6097        vx,
6098        vu,
6099    )
6100}
6101
6102/// `Vd32=vand(Qu4,Rt32)`
6103///
6104/// This is a compound operation composed of multiple HVX instructions.
6105/// Instruction Type: CVI_VX_LATE
6106/// Execution Slots: SLOT23
6107#[inline(always)]
6108#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6109#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6110pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector {
6111    vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)
6112}
6113
6114/// `Vx32|=vand(Qu4,Rt32)`
6115///
6116/// This is a compound operation composed of multiple HVX instructions.
6117/// Instruction Type: CVI_VX_LATE
6118/// Execution Slots: SLOT23
6119#[inline(always)]
6120#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6121#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6122pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector {
6123    vandvrt_acc(vx, core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)
6124}
6125
6126/// `Qd4=vand(Vu32,Rt32)`
6127///
6128/// This is a compound operation composed of multiple HVX instructions.
6129/// Instruction Type: CVI_VX_LATE
6130/// Execution Slots: SLOT23
6131#[inline(always)]
6132#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6133#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6134pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred {
6135    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vu, rt))
6136}
6137
6138/// `Qx4|=vand(Vu32,Rt32)`
6139///
6140/// This is a compound operation composed of multiple HVX instructions.
6141/// Instruction Type: CVI_VX_LATE
6142/// Execution Slots: SLOT23
6143#[inline(always)]
6144#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6145#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6146pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred {
6147    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt_acc(
6148        core::mem::transmute::<HvxVectorPred, HvxVector>(qx),
6149        vu,
6150        rt,
6151    ))
6152}
6153
6154/// `Qd4=vcmp.eq(Vu32.b,Vv32.b)`
6155///
6156/// This is a compound operation composed of multiple HVX instructions.
6157/// Instruction Type: CVI_VA
6158/// Execution Slots: SLOT0123
6159#[inline(always)]
6160#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6161#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6162pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6163    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqb(vu, vv), -1))
6164}
6165
6166/// `Qx4&=vcmp.eq(Vu32.b,Vv32.b)`
6167///
6168/// This is a compound operation composed of multiple HVX instructions.
6169/// Instruction Type: CVI_VA
6170/// Execution Slots: SLOT0123
6171#[inline(always)]
6172#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6173#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6174pub unsafe fn q6_q_vcmp_eqand_qvbvb(
6175    qx: HvxVectorPred,
6176    vu: HvxVector,
6177    vv: HvxVector,
6178) -> HvxVectorPred {
6179    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6180        veqb_and(
6181            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6182            vu,
6183            vv,
6184        ),
6185        -1,
6186    ))
6187}
6188
6189/// `Qx4|=vcmp.eq(Vu32.b,Vv32.b)`
6190///
6191/// This is a compound operation composed of multiple HVX instructions.
6192/// Instruction Type: CVI_VA
6193/// Execution Slots: SLOT0123
6194#[inline(always)]
6195#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6196#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6197pub unsafe fn q6_q_vcmp_eqor_qvbvb(
6198    qx: HvxVectorPred,
6199    vu: HvxVector,
6200    vv: HvxVector,
6201) -> HvxVectorPred {
6202    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6203        veqb_or(
6204            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6205            vu,
6206            vv,
6207        ),
6208        -1,
6209    ))
6210}
6211
6212/// `Qx4^=vcmp.eq(Vu32.b,Vv32.b)`
6213///
6214/// This is a compound operation composed of multiple HVX instructions.
6215/// Instruction Type: CVI_VA
6216/// Execution Slots: SLOT0123
6217#[inline(always)]
6218#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6219#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6220pub unsafe fn q6_q_vcmp_eqxacc_qvbvb(
6221    qx: HvxVectorPred,
6222    vu: HvxVector,
6223    vv: HvxVector,
6224) -> HvxVectorPred {
6225    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6226        veqb_xor(
6227            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6228            vu,
6229            vv,
6230        ),
6231        -1,
6232    ))
6233}
6234
6235/// `Qd4=vcmp.eq(Vu32.h,Vv32.h)`
6236///
6237/// This is a compound operation composed of multiple HVX instructions.
6238/// Instruction Type: CVI_VA
6239/// Execution Slots: SLOT0123
6240#[inline(always)]
6241#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6242#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6243pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6244    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqh(vu, vv), -1))
6245}
6246
6247/// `Qx4&=vcmp.eq(Vu32.h,Vv32.h)`
6248///
6249/// This is a compound operation composed of multiple HVX instructions.
6250/// Instruction Type: CVI_VA
6251/// Execution Slots: SLOT0123
6252#[inline(always)]
6253#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6254#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6255pub unsafe fn q6_q_vcmp_eqand_qvhvh(
6256    qx: HvxVectorPred,
6257    vu: HvxVector,
6258    vv: HvxVector,
6259) -> HvxVectorPred {
6260    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6261        veqh_and(
6262            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6263            vu,
6264            vv,
6265        ),
6266        -1,
6267    ))
6268}
6269
6270/// `Qx4|=vcmp.eq(Vu32.h,Vv32.h)`
6271///
6272/// This is a compound operation composed of multiple HVX instructions.
6273/// Instruction Type: CVI_VA
6274/// Execution Slots: SLOT0123
6275#[inline(always)]
6276#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6277#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6278pub unsafe fn q6_q_vcmp_eqor_qvhvh(
6279    qx: HvxVectorPred,
6280    vu: HvxVector,
6281    vv: HvxVector,
6282) -> HvxVectorPred {
6283    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6284        veqh_or(
6285            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6286            vu,
6287            vv,
6288        ),
6289        -1,
6290    ))
6291}
6292
6293/// `Qx4^=vcmp.eq(Vu32.h,Vv32.h)`
6294///
6295/// This is a compound operation composed of multiple HVX instructions.
6296/// Instruction Type: CVI_VA
6297/// Execution Slots: SLOT0123
6298#[inline(always)]
6299#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6300#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6301pub unsafe fn q6_q_vcmp_eqxacc_qvhvh(
6302    qx: HvxVectorPred,
6303    vu: HvxVector,
6304    vv: HvxVector,
6305) -> HvxVectorPred {
6306    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6307        veqh_xor(
6308            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6309            vu,
6310            vv,
6311        ),
6312        -1,
6313    ))
6314}
6315
6316/// `Qd4=vcmp.eq(Vu32.w,Vv32.w)`
6317///
6318/// This is a compound operation composed of multiple HVX instructions.
6319/// Instruction Type: CVI_VA
6320/// Execution Slots: SLOT0123
6321#[inline(always)]
6322#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6323#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6324pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6325    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqw(vu, vv), -1))
6326}
6327
6328/// `Qx4&=vcmp.eq(Vu32.w,Vv32.w)`
6329///
6330/// This is a compound operation composed of multiple HVX instructions.
6331/// Instruction Type: CVI_VA
6332/// Execution Slots: SLOT0123
6333#[inline(always)]
6334#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6335#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6336pub unsafe fn q6_q_vcmp_eqand_qvwvw(
6337    qx: HvxVectorPred,
6338    vu: HvxVector,
6339    vv: HvxVector,
6340) -> HvxVectorPred {
6341    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6342        veqw_and(
6343            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6344            vu,
6345            vv,
6346        ),
6347        -1,
6348    ))
6349}
6350
6351/// `Qx4|=vcmp.eq(Vu32.w,Vv32.w)`
6352///
6353/// This is a compound operation composed of multiple HVX instructions.
6354/// Instruction Type: CVI_VA
6355/// Execution Slots: SLOT0123
6356#[inline(always)]
6357#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6358#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6359pub unsafe fn q6_q_vcmp_eqor_qvwvw(
6360    qx: HvxVectorPred,
6361    vu: HvxVector,
6362    vv: HvxVector,
6363) -> HvxVectorPred {
6364    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6365        veqw_or(
6366            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6367            vu,
6368            vv,
6369        ),
6370        -1,
6371    ))
6372}
6373
6374/// `Qx4^=vcmp.eq(Vu32.w,Vv32.w)`
6375///
6376/// This is a compound operation composed of multiple HVX instructions.
6377/// Instruction Type: CVI_VA
6378/// Execution Slots: SLOT0123
6379#[inline(always)]
6380#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6381#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6382pub unsafe fn q6_q_vcmp_eqxacc_qvwvw(
6383    qx: HvxVectorPred,
6384    vu: HvxVector,
6385    vv: HvxVector,
6386) -> HvxVectorPred {
6387    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6388        veqw_xor(
6389            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6390            vu,
6391            vv,
6392        ),
6393        -1,
6394    ))
6395}
6396
6397/// `Qd4=vcmp.gt(Vu32.b,Vv32.b)`
6398///
6399/// This is a compound operation composed of multiple HVX instructions.
6400/// Instruction Type: CVI_VA
6401/// Execution Slots: SLOT0123
6402#[inline(always)]
6403#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6404#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6405pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6406    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtb(vu, vv), -1))
6407}
6408
6409/// `Qx4&=vcmp.gt(Vu32.b,Vv32.b)`
6410///
6411/// This is a compound operation composed of multiple HVX instructions.
6412/// Instruction Type: CVI_VA
6413/// Execution Slots: SLOT0123
6414#[inline(always)]
6415#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6416#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6417pub unsafe fn q6_q_vcmp_gtand_qvbvb(
6418    qx: HvxVectorPred,
6419    vu: HvxVector,
6420    vv: HvxVector,
6421) -> HvxVectorPred {
6422    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6423        vgtb_and(
6424            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6425            vu,
6426            vv,
6427        ),
6428        -1,
6429    ))
6430}
6431
6432/// `Qx4|=vcmp.gt(Vu32.b,Vv32.b)`
6433///
6434/// This is a compound operation composed of multiple HVX instructions.
6435/// Instruction Type: CVI_VA
6436/// Execution Slots: SLOT0123
6437#[inline(always)]
6438#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6439#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6440pub unsafe fn q6_q_vcmp_gtor_qvbvb(
6441    qx: HvxVectorPred,
6442    vu: HvxVector,
6443    vv: HvxVector,
6444) -> HvxVectorPred {
6445    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6446        vgtb_or(
6447            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6448            vu,
6449            vv,
6450        ),
6451        -1,
6452    ))
6453}
6454
6455/// `Qx4^=vcmp.gt(Vu32.b,Vv32.b)`
6456///
6457/// This is a compound operation composed of multiple HVX instructions.
6458/// Instruction Type: CVI_VA
6459/// Execution Slots: SLOT0123
6460#[inline(always)]
6461#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6462#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6463pub unsafe fn q6_q_vcmp_gtxacc_qvbvb(
6464    qx: HvxVectorPred,
6465    vu: HvxVector,
6466    vv: HvxVector,
6467) -> HvxVectorPred {
6468    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6469        vgtb_xor(
6470            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6471            vu,
6472            vv,
6473        ),
6474        -1,
6475    ))
6476}
6477
6478/// `Qd4=vcmp.gt(Vu32.h,Vv32.h)`
6479///
6480/// This is a compound operation composed of multiple HVX instructions.
6481/// Instruction Type: CVI_VA
6482/// Execution Slots: SLOT0123
6483#[inline(always)]
6484#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6485#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6486pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6487    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgth(vu, vv), -1))
6488}
6489
6490/// `Qx4&=vcmp.gt(Vu32.h,Vv32.h)`
6491///
6492/// This is a compound operation composed of multiple HVX instructions.
6493/// Instruction Type: CVI_VA
6494/// Execution Slots: SLOT0123
6495#[inline(always)]
6496#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6497#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6498pub unsafe fn q6_q_vcmp_gtand_qvhvh(
6499    qx: HvxVectorPred,
6500    vu: HvxVector,
6501    vv: HvxVector,
6502) -> HvxVectorPred {
6503    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6504        vgth_and(
6505            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6506            vu,
6507            vv,
6508        ),
6509        -1,
6510    ))
6511}
6512
6513/// `Qx4|=vcmp.gt(Vu32.h,Vv32.h)`
6514///
6515/// This is a compound operation composed of multiple HVX instructions.
6516/// Instruction Type: CVI_VA
6517/// Execution Slots: SLOT0123
6518#[inline(always)]
6519#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6520#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6521pub unsafe fn q6_q_vcmp_gtor_qvhvh(
6522    qx: HvxVectorPred,
6523    vu: HvxVector,
6524    vv: HvxVector,
6525) -> HvxVectorPred {
6526    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6527        vgth_or(
6528            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6529            vu,
6530            vv,
6531        ),
6532        -1,
6533    ))
6534}
6535
6536/// `Qx4^=vcmp.gt(Vu32.h,Vv32.h)`
6537///
6538/// This is a compound operation composed of multiple HVX instructions.
6539/// Instruction Type: CVI_VA
6540/// Execution Slots: SLOT0123
6541#[inline(always)]
6542#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6543#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6544pub unsafe fn q6_q_vcmp_gtxacc_qvhvh(
6545    qx: HvxVectorPred,
6546    vu: HvxVector,
6547    vv: HvxVector,
6548) -> HvxVectorPred {
6549    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6550        vgth_xor(
6551            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6552            vu,
6553            vv,
6554        ),
6555        -1,
6556    ))
6557}
6558
6559/// `Qd4=vcmp.gt(Vu32.ub,Vv32.ub)`
6560///
6561/// This is a compound operation composed of multiple HVX instructions.
6562/// Instruction Type: CVI_VA
6563/// Execution Slots: SLOT0123
6564#[inline(always)]
6565#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6566#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6567pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6568    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtub(vu, vv), -1))
6569}
6570
6571/// `Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)`
6572///
6573/// This is a compound operation composed of multiple HVX instructions.
6574/// Instruction Type: CVI_VA
6575/// Execution Slots: SLOT0123
6576#[inline(always)]
6577#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6578#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6579pub unsafe fn q6_q_vcmp_gtand_qvubvub(
6580    qx: HvxVectorPred,
6581    vu: HvxVector,
6582    vv: HvxVector,
6583) -> HvxVectorPred {
6584    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6585        vgtub_and(
6586            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6587            vu,
6588            vv,
6589        ),
6590        -1,
6591    ))
6592}
6593
6594/// `Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)`
6595///
6596/// This is a compound operation composed of multiple HVX instructions.
6597/// Instruction Type: CVI_VA
6598/// Execution Slots: SLOT0123
6599#[inline(always)]
6600#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6601#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6602pub unsafe fn q6_q_vcmp_gtor_qvubvub(
6603    qx: HvxVectorPred,
6604    vu: HvxVector,
6605    vv: HvxVector,
6606) -> HvxVectorPred {
6607    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6608        vgtub_or(
6609            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6610            vu,
6611            vv,
6612        ),
6613        -1,
6614    ))
6615}
6616
6617/// `Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)`
6618///
6619/// This is a compound operation composed of multiple HVX instructions.
6620/// Instruction Type: CVI_VA
6621/// Execution Slots: SLOT0123
6622#[inline(always)]
6623#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6624#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6625pub unsafe fn q6_q_vcmp_gtxacc_qvubvub(
6626    qx: HvxVectorPred,
6627    vu: HvxVector,
6628    vv: HvxVector,
6629) -> HvxVectorPred {
6630    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6631        vgtub_xor(
6632            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6633            vu,
6634            vv,
6635        ),
6636        -1,
6637    ))
6638}
6639
6640/// `Qd4=vcmp.gt(Vu32.uh,Vv32.uh)`
6641///
6642/// This is a compound operation composed of multiple HVX instructions.
6643/// Instruction Type: CVI_VA
6644/// Execution Slots: SLOT0123
6645#[inline(always)]
6646#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6647#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6648pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6649    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtuh(vu, vv), -1))
6650}
6651
6652/// `Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)`
6653///
6654/// This is a compound operation composed of multiple HVX instructions.
6655/// Instruction Type: CVI_VA
6656/// Execution Slots: SLOT0123
6657#[inline(always)]
6658#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6659#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6660pub unsafe fn q6_q_vcmp_gtand_qvuhvuh(
6661    qx: HvxVectorPred,
6662    vu: HvxVector,
6663    vv: HvxVector,
6664) -> HvxVectorPred {
6665    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6666        vgtuh_and(
6667            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6668            vu,
6669            vv,
6670        ),
6671        -1,
6672    ))
6673}
6674
6675/// `Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)`
6676///
6677/// This is a compound operation composed of multiple HVX instructions.
6678/// Instruction Type: CVI_VA
6679/// Execution Slots: SLOT0123
6680#[inline(always)]
6681#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6682#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6683pub unsafe fn q6_q_vcmp_gtor_qvuhvuh(
6684    qx: HvxVectorPred,
6685    vu: HvxVector,
6686    vv: HvxVector,
6687) -> HvxVectorPred {
6688    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6689        vgtuh_or(
6690            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6691            vu,
6692            vv,
6693        ),
6694        -1,
6695    ))
6696}
6697
6698/// `Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)`
6699///
6700/// This is a compound operation composed of multiple HVX instructions.
6701/// Instruction Type: CVI_VA
6702/// Execution Slots: SLOT0123
6703#[inline(always)]
6704#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6705#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6706pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh(
6707    qx: HvxVectorPred,
6708    vu: HvxVector,
6709    vv: HvxVector,
6710) -> HvxVectorPred {
6711    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6712        vgtuh_xor(
6713            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6714            vu,
6715            vv,
6716        ),
6717        -1,
6718    ))
6719}
6720
6721/// `Qd4=vcmp.gt(Vu32.uw,Vv32.uw)`
6722///
6723/// This is a compound operation composed of multiple HVX instructions.
6724/// Instruction Type: CVI_VA
6725/// Execution Slots: SLOT0123
6726#[inline(always)]
6727#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6728#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6729pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6730    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtuw(vu, vv), -1))
6731}
6732
6733/// `Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)`
6734///
6735/// This is a compound operation composed of multiple HVX instructions.
6736/// Instruction Type: CVI_VA
6737/// Execution Slots: SLOT0123
6738#[inline(always)]
6739#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6740#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6741pub unsafe fn q6_q_vcmp_gtand_qvuwvuw(
6742    qx: HvxVectorPred,
6743    vu: HvxVector,
6744    vv: HvxVector,
6745) -> HvxVectorPred {
6746    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6747        vgtuw_and(
6748            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6749            vu,
6750            vv,
6751        ),
6752        -1,
6753    ))
6754}
6755
6756/// `Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)`
6757///
6758/// This is a compound operation composed of multiple HVX instructions.
6759/// Instruction Type: CVI_VA
6760/// Execution Slots: SLOT0123
6761#[inline(always)]
6762#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6763#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6764pub unsafe fn q6_q_vcmp_gtor_qvuwvuw(
6765    qx: HvxVectorPred,
6766    vu: HvxVector,
6767    vv: HvxVector,
6768) -> HvxVectorPred {
6769    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6770        vgtuw_or(
6771            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6772            vu,
6773            vv,
6774        ),
6775        -1,
6776    ))
6777}
6778
6779/// `Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)`
6780///
6781/// This is a compound operation composed of multiple HVX instructions.
6782/// Instruction Type: CVI_VA
6783/// Execution Slots: SLOT0123
6784#[inline(always)]
6785#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6786#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6787pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw(
6788    qx: HvxVectorPred,
6789    vu: HvxVector,
6790    vv: HvxVector,
6791) -> HvxVectorPred {
6792    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6793        vgtuw_xor(
6794            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6795            vu,
6796            vv,
6797        ),
6798        -1,
6799    ))
6800}
6801
6802/// `Qd4=vcmp.gt(Vu32.w,Vv32.w)`
6803///
6804/// This is a compound operation composed of multiple HVX instructions.
6805/// Instruction Type: CVI_VA
6806/// Execution Slots: SLOT0123
6807#[inline(always)]
6808#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6809#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6810pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6811    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtw(vu, vv), -1))
6812}
6813
6814/// `Qx4&=vcmp.gt(Vu32.w,Vv32.w)`
6815///
6816/// This is a compound operation composed of multiple HVX instructions.
6817/// Instruction Type: CVI_VA
6818/// Execution Slots: SLOT0123
6819#[inline(always)]
6820#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6821#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6822pub unsafe fn q6_q_vcmp_gtand_qvwvw(
6823    qx: HvxVectorPred,
6824    vu: HvxVector,
6825    vv: HvxVector,
6826) -> HvxVectorPred {
6827    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6828        vgtw_and(
6829            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6830            vu,
6831            vv,
6832        ),
6833        -1,
6834    ))
6835}
6836
6837/// `Qx4|=vcmp.gt(Vu32.w,Vv32.w)`
6838///
6839/// This is a compound operation composed of multiple HVX instructions.
6840/// Instruction Type: CVI_VA
6841/// Execution Slots: SLOT0123
6842#[inline(always)]
6843#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6844#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6845pub unsafe fn q6_q_vcmp_gtor_qvwvw(
6846    qx: HvxVectorPred,
6847    vu: HvxVector,
6848    vv: HvxVector,
6849) -> HvxVectorPred {
6850    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6851        vgtw_or(
6852            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6853            vu,
6854            vv,
6855        ),
6856        -1,
6857    ))
6858}
6859
6860/// `Qx4^=vcmp.gt(Vu32.w,Vv32.w)`
6861///
6862/// This is a compound operation composed of multiple HVX instructions.
6863/// Instruction Type: CVI_VA
6864/// Execution Slots: SLOT0123
6865#[inline(always)]
6866#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6867#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6868pub unsafe fn q6_q_vcmp_gtxacc_qvwvw(
6869    qx: HvxVectorPred,
6870    vu: HvxVector,
6871    vv: HvxVector,
6872) -> HvxVectorPred {
6873    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6874        vgtw_xor(
6875            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6876            vu,
6877            vv,
6878        ),
6879        -1,
6880    ))
6881}
6882
6883/// `Vd32=vmux(Qt4,Vu32,Vv32)`
6884///
6885/// This is a compound operation composed of multiple HVX instructions.
6886/// Instruction Type: CVI_VA
6887/// Execution Slots: SLOT0123
6888#[inline(always)]
6889#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6890#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6891pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector {
6892    vmux(
6893        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
6894        vu,
6895        vv,
6896    )
6897}
6898
6899/// `if (!Qv4) Vx32.b-=Vu32.b`
6900///
6901/// This is a compound operation composed of multiple HVX instructions.
6902/// Instruction Type: CVI_VA
6903/// Execution Slots: SLOT0123
6904#[inline(always)]
6905#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6906#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6907pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6908    vsubbnq(
6909        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6910        vx,
6911        vu,
6912    )
6913}
6914
6915/// `if (Qv4) Vx32.b-=Vu32.b`
6916///
6917/// This is a compound operation composed of multiple HVX instructions.
6918/// Instruction Type: CVI_VA
6919/// Execution Slots: SLOT0123
6920#[inline(always)]
6921#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6922#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6923pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6924    vsubbq(
6925        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6926        vx,
6927        vu,
6928    )
6929}
6930
6931/// `if (!Qv4) Vx32.h-=Vu32.h`
6932///
6933/// This is a compound operation composed of multiple HVX instructions.
6934/// Instruction Type: CVI_VA
6935/// Execution Slots: SLOT0123
6936#[inline(always)]
6937#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6938#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6939pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6940    vsubhnq(
6941        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6942        vx,
6943        vu,
6944    )
6945}
6946
6947/// `if (Qv4) Vx32.h-=Vu32.h`
6948///
6949/// This is a compound operation composed of multiple HVX instructions.
6950/// Instruction Type: CVI_VA
6951/// Execution Slots: SLOT0123
6952#[inline(always)]
6953#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6954#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6955pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6956    vsubhq(
6957        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6958        vx,
6959        vu,
6960    )
6961}
6962
6963/// `if (!Qv4) Vx32.w-=Vu32.w`
6964///
6965/// This is a compound operation composed of multiple HVX instructions.
6966/// Instruction Type: CVI_VA
6967/// Execution Slots: SLOT0123
6968#[inline(always)]
6969#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6970#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6971pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6972    vsubwnq(
6973        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6974        vx,
6975        vu,
6976    )
6977}
6978
6979/// `if (Qv4) Vx32.w-=Vu32.w`
6980///
6981/// This is a compound operation composed of multiple HVX instructions.
6982/// Instruction Type: CVI_VA
6983/// Execution Slots: SLOT0123
6984#[inline(always)]
6985#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6986#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6987pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6988    vsubwq(
6989        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6990        vx,
6991        vu,
6992    )
6993}
6994
6995/// `Vdd32=vswap(Qt4,Vu32,Vv32)`
6996///
6997/// This is a compound operation composed of multiple HVX instructions.
6998/// Instruction Type: CVI_VA_DV
6999/// Execution Slots: SLOT0123
7000#[inline(always)]
7001#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
7002#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7003pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
7004    vswap(
7005        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
7006        vu,
7007        vv,
7008    )
7009}
7010
7011/// `Qd4=vsetq2(Rt32)`
7012///
7013/// This is a compound operation composed of multiple HVX instructions.
7014/// Instruction Type: CVI_VP
7015/// Execution Slots: SLOT0123
7016#[inline(always)]
7017#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7018#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7019pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred {
7020    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(pred_scalar2v2(rt), -1))
7021}
7022
7023/// `Qd4.b=vshuffe(Qs4.h,Qt4.h)`
7024///
7025/// This is a compound operation composed of multiple HVX instructions.
7026/// Instruction Type: CVI_VA_DV
7027/// Execution Slots: SLOT0123
7028#[inline(always)]
7029#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7030#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7031pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
7032    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7033        shuffeqh(
7034            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7035            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
7036        ),
7037        -1,
7038    ))
7039}
7040
7041/// `Qd4.h=vshuffe(Qs4.w,Qt4.w)`
7042///
7043/// This is a compound operation composed of multiple HVX instructions.
7044/// Instruction Type: CVI_VA_DV
7045/// Execution Slots: SLOT0123
7046#[inline(always)]
7047#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7048#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7049pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
7050    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7051        shuffeqw(
7052            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7053            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
7054        ),
7055        -1,
7056    ))
7057}
7058
7059/// `Vd32=vand(!Qu4,Rt32)`
7060///
7061/// This is a compound operation composed of multiple HVX instructions.
7062/// Instruction Type: CVI_VX_LATE
7063/// Execution Slots: SLOT23
7064#[inline(always)]
7065#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7066#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7067pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector {
7068    vandnqrt(
7069        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), -1),
7070        rt,
7071    )
7072}
7073
7074/// `Vx32|=vand(!Qu4,Rt32)`
7075///
7076/// This is a compound operation composed of multiple HVX instructions.
7077/// Instruction Type: CVI_VX_LATE
7078/// Execution Slots: SLOT23
7079#[inline(always)]
7080#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7081#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7082pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector {
7083    vandnqrt_acc(
7084        vx,
7085        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), -1),
7086        rt,
7087    )
7088}
7089
7090/// `Vd32=vand(!Qv4,Vu32)`
7091///
7092/// This is a compound operation composed of multiple HVX instructions.
7093/// Instruction Type: CVI_VA
7094/// Execution Slots: SLOT0123
7095#[inline(always)]
7096#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7097#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7098pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector {
7099    vandvnqv(
7100        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
7101        vu,
7102    )
7103}
7104
7105/// `Vd32=vand(Qv4,Vu32)`
7106///
7107/// This is a compound operation composed of multiple HVX instructions.
7108/// Instruction Type: CVI_VA
7109/// Execution Slots: SLOT0123
7110#[inline(always)]
7111#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7112#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7113pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector {
7114    vandvqv(
7115        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
7116        vu,
7117    )
7118}
7119
7120/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h`
7121///
7122/// This is a compound operation composed of multiple HVX instructions.
7123/// Instruction Type: CVI_GATHER
7124/// Execution Slots: SLOT01
7125#[inline(always)]
7126#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7127#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7128pub unsafe fn q6_vgather_aqrmvh(
7129    rs: *mut HvxVector,
7130    qs: HvxVectorPred,
7131    rt: i32,
7132    mu: i32,
7133    vv: HvxVector,
7134) {
7135    vgathermhq(
7136        rs,
7137        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7138        rt,
7139        mu,
7140        vv,
7141    )
7142}
7143
7144/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h`
7145///
7146/// This is a compound operation composed of multiple HVX instructions.
7147/// Instruction Type: CVI_GATHER_DV
7148/// Execution Slots: SLOT01
7149#[inline(always)]
7150#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7151#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7152pub unsafe fn q6_vgather_aqrmww(
7153    rs: *mut HvxVector,
7154    qs: HvxVectorPred,
7155    rt: i32,
7156    mu: i32,
7157    vvv: HvxVectorPair,
7158) {
7159    vgathermhwq(
7160        rs,
7161        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7162        rt,
7163        mu,
7164        vvv,
7165    )
7166}
7167
7168/// `if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w`
7169///
7170/// This is a compound operation composed of multiple HVX instructions.
7171/// Instruction Type: CVI_GATHER
7172/// Execution Slots: SLOT01
7173#[inline(always)]
7174#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7175#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7176pub unsafe fn q6_vgather_aqrmvw(
7177    rs: *mut HvxVector,
7178    qs: HvxVectorPred,
7179    rt: i32,
7180    mu: i32,
7181    vv: HvxVector,
7182) {
7183    vgathermwq(
7184        rs,
7185        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7186        rt,
7187        mu,
7188        vv,
7189    )
7190}
7191
7192/// `Vd32.b=prefixsum(Qv4)`
7193///
7194/// This is a compound operation composed of multiple HVX instructions.
7195/// Instruction Type: CVI_VS
7196/// Execution Slots: SLOT0123
7197#[inline(always)]
7198#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7199#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7200pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
7201    vprefixqb(vandvrt(
7202        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
7203        -1,
7204    ))
7205}
7206
7207/// `Vd32.h=prefixsum(Qv4)`
7208///
7209/// This is a compound operation composed of multiple HVX instructions.
7210/// Instruction Type: CVI_VS
7211/// Execution Slots: SLOT0123
7212#[inline(always)]
7213#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7214#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7215pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
7216    vprefixqh(vandvrt(
7217        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
7218        -1,
7219    ))
7220}
7221
7222/// `Vd32.w=prefixsum(Qv4)`
7223///
7224/// This is a compound operation composed of multiple HVX instructions.
7225/// Instruction Type: CVI_VS
7226/// Execution Slots: SLOT0123
7227#[inline(always)]
7228#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7229#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7230pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector {
7231    vprefixqw(vandvrt(
7232        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
7233        -1,
7234    ))
7235}
7236
7237/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32`
7238///
7239/// This is a compound operation composed of multiple HVX instructions.
7240/// Instruction Type: CVI_SCATTER
7241/// Execution Slots: SLOT0
7242#[inline(always)]
7243#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7244#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7245pub unsafe fn q6_vscatter_qrmvhv(
7246    qs: HvxVectorPred,
7247    rt: i32,
7248    mu: i32,
7249    vv: HvxVector,
7250    vw: HvxVector,
7251) {
7252    vscattermhq(
7253        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7254        rt,
7255        mu,
7256        vv,
7257        vw,
7258    )
7259}
7260
7261/// `if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32`
7262///
7263/// This is a compound operation composed of multiple HVX instructions.
7264/// Instruction Type: CVI_SCATTER_DV
7265/// Execution Slots: SLOT0
7266#[inline(always)]
7267#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7268#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7269pub unsafe fn q6_vscatter_qrmwwv(
7270    qs: HvxVectorPred,
7271    rt: i32,
7272    mu: i32,
7273    vvv: HvxVectorPair,
7274    vw: HvxVector,
7275) {
7276    vscattermhwq(
7277        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7278        rt,
7279        mu,
7280        vvv,
7281        vw,
7282    )
7283}
7284
7285/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32`
7286///
7287/// This is a compound operation composed of multiple HVX instructions.
7288/// Instruction Type: CVI_SCATTER
7289/// Execution Slots: SLOT0
7290#[inline(always)]
7291#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7292#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7293pub unsafe fn q6_vscatter_qrmvwv(
7294    qs: HvxVectorPred,
7295    rt: i32,
7296    mu: i32,
7297    vv: HvxVector,
7298    vw: HvxVector,
7299) {
7300    vscattermwq(
7301        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7302        rt,
7303        mu,
7304        vv,
7305        vw,
7306    )
7307}
7308
7309/// `Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat`
7310///
7311/// This is a compound operation composed of multiple HVX instructions.
7312/// Instruction Type: CVI_VA
7313/// Execution Slots: SLOT0123
7314#[inline(always)]
7315#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
7316#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7317pub unsafe fn q6_vw_vadd_vwvwq_carry_sat(
7318    vu: HvxVector,
7319    vv: HvxVector,
7320    qs: HvxVectorPred,
7321) -> HvxVector {
7322    vaddcarrysat(
7323        vu,
7324        vv,
7325        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7326    )
7327}
7328
7329/// `Qd4=vcmp.gt(Vu32.hf,Vv32.hf)`
7330///
7331/// This is a compound operation composed of multiple HVX instructions.
7332/// Instruction Type: CVI_VA
7333/// Execution Slots: SLOT0123
7334#[inline(always)]
7335#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7336#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7337pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
7338    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgthf(vu, vv), -1))
7339}
7340
7341/// `Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)`
7342///
7343/// This is a compound operation composed of multiple HVX instructions.
7344/// Instruction Type: CVI_VA
7345/// Execution Slots: SLOT0123
7346#[inline(always)]
7347#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7348#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7349pub unsafe fn q6_q_vcmp_gtand_qvhfvhf(
7350    qx: HvxVectorPred,
7351    vu: HvxVector,
7352    vv: HvxVector,
7353) -> HvxVectorPred {
7354    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7355        vgthf_and(
7356            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7357            vu,
7358            vv,
7359        ),
7360        -1,
7361    ))
7362}
7363
7364/// `Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)`
7365///
7366/// This is a compound operation composed of multiple HVX instructions.
7367/// Instruction Type: CVI_VA
7368/// Execution Slots: SLOT0123
7369#[inline(always)]
7370#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7371#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7372pub unsafe fn q6_q_vcmp_gtor_qvhfvhf(
7373    qx: HvxVectorPred,
7374    vu: HvxVector,
7375    vv: HvxVector,
7376) -> HvxVectorPred {
7377    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7378        vgthf_or(
7379            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7380            vu,
7381            vv,
7382        ),
7383        -1,
7384    ))
7385}
7386
7387/// `Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)`
7388///
7389/// This is a compound operation composed of multiple HVX instructions.
7390/// Instruction Type: CVI_VA
7391/// Execution Slots: SLOT0123
7392#[inline(always)]
7393#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7394#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7395pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf(
7396    qx: HvxVectorPred,
7397    vu: HvxVector,
7398    vv: HvxVector,
7399) -> HvxVectorPred {
7400    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7401        vgthf_xor(
7402            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7403            vu,
7404            vv,
7405        ),
7406        -1,
7407    ))
7408}
7409
7410/// `Qd4=vcmp.gt(Vu32.sf,Vv32.sf)`
7411///
7412/// This is a compound operation composed of multiple HVX instructions.
7413/// Instruction Type: CVI_VA
7414/// Execution Slots: SLOT0123
7415#[inline(always)]
7416#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7417#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7418pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
7419    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtsf(vu, vv), -1))
7420}
7421
7422/// `Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)`
7423///
7424/// This is a compound operation composed of multiple HVX instructions.
7425/// Instruction Type: CVI_VA
7426/// Execution Slots: SLOT0123
7427#[inline(always)]
7428#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7429#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7430pub unsafe fn q6_q_vcmp_gtand_qvsfvsf(
7431    qx: HvxVectorPred,
7432    vu: HvxVector,
7433    vv: HvxVector,
7434) -> HvxVectorPred {
7435    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7436        vgtsf_and(
7437            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7438            vu,
7439            vv,
7440        ),
7441        -1,
7442    ))
7443}
7444
7445/// `Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)`
7446///
7447/// This is a compound operation composed of multiple HVX instructions.
7448/// Instruction Type: CVI_VA
7449/// Execution Slots: SLOT0123
7450#[inline(always)]
7451#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7452#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7453pub unsafe fn q6_q_vcmp_gtor_qvsfvsf(
7454    qx: HvxVectorPred,
7455    vu: HvxVector,
7456    vv: HvxVector,
7457) -> HvxVectorPred {
7458    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7459        vgtsf_or(
7460            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7461            vu,
7462            vv,
7463        ),
7464        -1,
7465    ))
7466}
7467
7468/// `Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)`
7469///
7470/// This is a compound operation composed of multiple HVX instructions.
7471/// Instruction Type: CVI_VA
7472/// Execution Slots: SLOT0123
7473#[inline(always)]
7474#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7475#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7476pub unsafe fn q6_q_vcmp_gtxacc_qvsfvsf(
7477    qx: HvxVectorPred,
7478    vu: HvxVector,
7479    vv: HvxVector,
7480) -> HvxVectorPred {
7481    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7482        vgtsf_xor(
7483            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7484            vu,
7485            vv,
7486        ),
7487        -1,
7488    ))
7489}