// Source path: core/stdarch/crates/core_arch/src/hexagon/v128.rs

1//! Hexagon HVX 128-byte vector mode intrinsics
2//!
3//! This module provides intrinsics for the Hexagon Vector Extensions (HVX)
4//! in 128-byte vector mode (1024-bit vectors).
5//!
6//! HVX is a wide vector extension designed for high-performance signal processing.
7//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61)
8//!
9//! ## Vector Types
10//!
11//! In 128-byte mode:
12//! - `HvxVector` is 1024 bits (128 bytes) containing 32 x 32-bit values
13//! - `HvxVectorPair` is 2048 bits (256 bytes)
14//! - `HvxVectorPred` is 1024 bits (128 bytes) for predicate operations
15//!
16//! To use this module, compile with `-C target-feature=+hvx-length128b`.
17//!
18//! ## Naming Convention
19//!
20//! Function names preserve the original Q6 naming case because the convention
21//! uses case to distinguish register types:
22//! - `W` (uppercase) = vector pair (`HvxVectorPair`)
23//! - `V` (uppercase) = vector (`HvxVector`)
24//! - `Q` (uppercase) = predicate (`HvxVectorPred`)
25//! - `R` = scalar register (`i32`)
26//!
27//! For example, `Q6_W_vcombine_VV` operates on a vector pair while
28//! `Q6_V_hi_W` extracts a vector from a pair.
29//!
30//! ## Architecture Versions
31//!
32//! Different intrinsics require different HVX architecture versions. Use the
33//! appropriate target feature to enable the required version:
34//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations)
35//! - HVX v62: `-C target-feature=+hvxv62`
36//! - HVX v65: `-C target-feature=+hvxv65` (includes floating-point support)
37//! - HVX v66: `-C target-feature=+hvxv66`
38//! - HVX v68: `-C target-feature=+hvxv68`
39//! - HVX v69: `-C target-feature=+hvxv69`
40//! - HVX v73: `-C target-feature=+hvxv73`
41//! - HVX v79: `-C target-feature=+hvxv79`
42//!
43//! Each version includes all features from previous versions.
44
45#![allow(non_camel_case_types)]
46#![allow(non_snake_case)]
47
48#[cfg(test)]
49use stdarch_test::assert_instr;
50
51use crate::intrinsics::simd::{simd_add, simd_and, simd_or, simd_sub, simd_xor};
52
53// HVX type definitions for 128-byte vector mode
types! {
    #![unstable(feature = "stdarch_hexagon", issue = "151523")]

    /// HVX vector type (1024 bits / 128 bytes)
    ///
    /// This type represents a single HVX vector register containing 32 x 32-bit values.
    ///
    /// NOTE(review): `32 x i32` is the Rust-side storage layout only; the
    /// intrinsics in this module reinterpret the lanes as bytes, halfwords,
    /// words, or float formats (`b`/`h`/`w`/`hf`/`sf` suffixes) per
    /// instruction — confirm per-intrinsic element semantics against the
    /// HVX Programmer's Reference Manual.
    pub struct HvxVector(32 x i32);

    /// HVX vector pair type (2048 bits / 256 bytes)
    ///
    /// This type represents a pair of HVX vector registers, often used for
    /// operations that produce double-width results (e.g. the widening adds
    /// in this module such as `vaddhw`/`vaddubh`, which take two `HvxVector`
    /// inputs and return an `HvxVectorPair`).
    pub struct HvxVectorPair(64 x i32);

    /// HVX vector predicate type (1024 bits / 128 bytes)
    ///
    /// This type represents a predicate vector used for conditional operations.
    /// Each bit corresponds to a lane in the vector.
    ///
    /// NOTE(review): declared with the same `32 x i32` storage layout as
    /// `HvxVector`; how the predicate bits map to lanes depends on the
    /// consuming instruction's element width — verify against the manual.
    pub struct HvxVectorPred(32 x i32);
}
74
75// LLVM intrinsic declarations for 128-byte vector mode
76#[allow(improper_ctypes)]
77unsafe extern "unadjusted" {
78    #[link_name = "llvm.hexagon.V6.extractw.128B"]
79    fn extractw(_: HvxVector, _: i32) -> i32;
80    #[link_name = "llvm.hexagon.V6.get.qfext.128B"]
81    fn get_qfext(_: HvxVector, _: i32) -> HvxVector;
82    #[link_name = "llvm.hexagon.V6.hi.128B"]
83    fn hi(_: HvxVectorPair) -> HvxVector;
84    #[link_name = "llvm.hexagon.V6.lo.128B"]
85    fn lo(_: HvxVectorPair) -> HvxVector;
86    #[link_name = "llvm.hexagon.V6.lvsplatb.128B"]
87    fn lvsplatb(_: i32) -> HvxVector;
88    #[link_name = "llvm.hexagon.V6.lvsplath.128B"]
89    fn lvsplath(_: i32) -> HvxVector;
90    #[link_name = "llvm.hexagon.V6.lvsplatw.128B"]
91    fn lvsplatw(_: i32) -> HvxVector;
92    #[link_name = "llvm.hexagon.V6.pred.and.128B"]
93    fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector;
94    #[link_name = "llvm.hexagon.V6.pred.and.n.128B"]
95    fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector;
96    #[link_name = "llvm.hexagon.V6.pred.not.128B"]
97    fn pred_not(_: HvxVector) -> HvxVector;
98    #[link_name = "llvm.hexagon.V6.pred.or.128B"]
99    fn pred_or(_: HvxVector, _: HvxVector) -> HvxVector;
100    #[link_name = "llvm.hexagon.V6.pred.or.n.128B"]
101    fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector;
102    #[link_name = "llvm.hexagon.V6.pred.scalar2.128B"]
103    fn pred_scalar2(_: i32) -> HvxVector;
104    #[link_name = "llvm.hexagon.V6.pred.scalar2v2.128B"]
105    fn pred_scalar2v2(_: i32) -> HvxVector;
106    #[link_name = "llvm.hexagon.V6.pred.xor.128B"]
107    fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector;
108    #[link_name = "llvm.hexagon.V6.set.qfext.128B"]
109    fn set_qfext(_: HvxVector, _: i32) -> HvxVector;
110    #[link_name = "llvm.hexagon.V6.shuffeqh.128B"]
111    fn shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector;
112    #[link_name = "llvm.hexagon.V6.shuffeqw.128B"]
113    fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector;
114    #[link_name = "llvm.hexagon.V6.v6mpyhubs10.128B"]
115    fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
116    #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx.128B"]
117    fn v6mpyhubs10_vxx(
118        _: HvxVectorPair,
119        _: HvxVectorPair,
120        _: HvxVectorPair,
121        _: i32,
122    ) -> HvxVectorPair;
123    #[link_name = "llvm.hexagon.V6.v6mpyvubs10.128B"]
124    fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
125    #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx.128B"]
126    fn v6mpyvubs10_vxx(
127        _: HvxVectorPair,
128        _: HvxVectorPair,
129        _: HvxVectorPair,
130        _: i32,
131    ) -> HvxVectorPair;
132    #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai.128B"]
133    fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
134    #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B"]
135    fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
136    #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B"]
137    fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
138    #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai.128B"]
139    fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> ();
140    #[link_name = "llvm.hexagon.V6.vabs.f8.128B"]
141    fn vabs_f8(_: HvxVector) -> HvxVector;
142    #[link_name = "llvm.hexagon.V6.vabs.hf.128B"]
143    fn vabs_hf(_: HvxVector) -> HvxVector;
144    #[link_name = "llvm.hexagon.V6.vabs.sf.128B"]
145    fn vabs_sf(_: HvxVector) -> HvxVector;
146    #[link_name = "llvm.hexagon.V6.vabsb.128B"]
147    fn vabsb(_: HvxVector) -> HvxVector;
148    #[link_name = "llvm.hexagon.V6.vabsb.sat.128B"]
149    fn vabsb_sat(_: HvxVector) -> HvxVector;
150    #[link_name = "llvm.hexagon.V6.vabsdiffh.128B"]
151    fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector;
152    #[link_name = "llvm.hexagon.V6.vabsdiffub.128B"]
153    fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector;
154    #[link_name = "llvm.hexagon.V6.vabsdiffuh.128B"]
155    fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector;
156    #[link_name = "llvm.hexagon.V6.vabsdiffw.128B"]
157    fn vabsdiffw(_: HvxVector, _: HvxVector) -> HvxVector;
158    #[link_name = "llvm.hexagon.V6.vabsh.128B"]
159    fn vabsh(_: HvxVector) -> HvxVector;
160    #[link_name = "llvm.hexagon.V6.vabsh.sat.128B"]
161    fn vabsh_sat(_: HvxVector) -> HvxVector;
162    #[link_name = "llvm.hexagon.V6.vabsw.128B"]
163    fn vabsw(_: HvxVector) -> HvxVector;
164    #[link_name = "llvm.hexagon.V6.vabsw.sat.128B"]
165    fn vabsw_sat(_: HvxVector) -> HvxVector;
166    #[link_name = "llvm.hexagon.V6.vadd.hf.128B"]
167    fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector;
168    #[link_name = "llvm.hexagon.V6.vadd.hf.hf.128B"]
169    fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
170    #[link_name = "llvm.hexagon.V6.vadd.qf16.128B"]
171    fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector;
172    #[link_name = "llvm.hexagon.V6.vadd.qf16.mix.128B"]
173    fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector;
174    #[link_name = "llvm.hexagon.V6.vadd.qf32.128B"]
175    fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector;
176    #[link_name = "llvm.hexagon.V6.vadd.qf32.mix.128B"]
177    fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector;
178    #[link_name = "llvm.hexagon.V6.vadd.sf.128B"]
179    fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector;
180    #[link_name = "llvm.hexagon.V6.vadd.sf.hf.128B"]
181    fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
182    #[link_name = "llvm.hexagon.V6.vadd.sf.sf.128B"]
183    fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
184    #[link_name = "llvm.hexagon.V6.vaddb.128B"]
185    fn vaddb(_: HvxVector, _: HvxVector) -> HvxVector;
186    #[link_name = "llvm.hexagon.V6.vaddb.dv.128B"]
187    fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
188    #[link_name = "llvm.hexagon.V6.vaddbnq.128B"]
189    fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
190    #[link_name = "llvm.hexagon.V6.vaddbq.128B"]
191    fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
192    #[link_name = "llvm.hexagon.V6.vaddbsat.128B"]
193    fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector;
194    #[link_name = "llvm.hexagon.V6.vaddbsat.dv.128B"]
195    fn vaddbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
196    #[link_name = "llvm.hexagon.V6.vaddcarrysat.128B"]
197    fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
198    #[link_name = "llvm.hexagon.V6.vaddclbh.128B"]
199    fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector;
200    #[link_name = "llvm.hexagon.V6.vaddclbw.128B"]
201    fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector;
202    #[link_name = "llvm.hexagon.V6.vaddh.128B"]
203    fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector;
204    #[link_name = "llvm.hexagon.V6.vaddh.dv.128B"]
205    fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
206    #[link_name = "llvm.hexagon.V6.vaddhnq.128B"]
207    fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
208    #[link_name = "llvm.hexagon.V6.vaddhq.128B"]
209    fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
210    #[link_name = "llvm.hexagon.V6.vaddhsat.128B"]
211    fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector;
212    #[link_name = "llvm.hexagon.V6.vaddhsat.dv.128B"]
213    fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
214    #[link_name = "llvm.hexagon.V6.vaddhw.128B"]
215    fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
216    #[link_name = "llvm.hexagon.V6.vaddhw.acc.128B"]
217    fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
218    #[link_name = "llvm.hexagon.V6.vaddubh.128B"]
219    fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair;
220    #[link_name = "llvm.hexagon.V6.vaddubh.acc.128B"]
221    fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
222    #[link_name = "llvm.hexagon.V6.vaddubsat.128B"]
223    fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector;
224    #[link_name = "llvm.hexagon.V6.vaddubsat.dv.128B"]
225    fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
226    #[link_name = "llvm.hexagon.V6.vaddububb.sat.128B"]
227    fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector;
228    #[link_name = "llvm.hexagon.V6.vadduhsat.128B"]
229    fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector;
230    #[link_name = "llvm.hexagon.V6.vadduhsat.dv.128B"]
231    fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
232    #[link_name = "llvm.hexagon.V6.vadduhw.128B"]
233    fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
234    #[link_name = "llvm.hexagon.V6.vadduhw.acc.128B"]
235    fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
236    #[link_name = "llvm.hexagon.V6.vadduwsat.128B"]
237    fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector;
238    #[link_name = "llvm.hexagon.V6.vadduwsat.dv.128B"]
239    fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
240    #[link_name = "llvm.hexagon.V6.vaddw.128B"]
241    fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector;
242    #[link_name = "llvm.hexagon.V6.vaddw.dv.128B"]
243    fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
244    #[link_name = "llvm.hexagon.V6.vaddwnq.128B"]
245    fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
246    #[link_name = "llvm.hexagon.V6.vaddwq.128B"]
247    fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
248    #[link_name = "llvm.hexagon.V6.vaddwsat.128B"]
249    fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector;
250    #[link_name = "llvm.hexagon.V6.vaddwsat.dv.128B"]
251    fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
252    #[link_name = "llvm.hexagon.V6.valignb.128B"]
253    fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
254    #[link_name = "llvm.hexagon.V6.valignbi.128B"]
255    fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
256    #[link_name = "llvm.hexagon.V6.vand.128B"]
257    fn vand(_: HvxVector, _: HvxVector) -> HvxVector;
258    #[link_name = "llvm.hexagon.V6.vandnqrt.128B"]
259    fn vandnqrt(_: HvxVector, _: i32) -> HvxVector;
260    #[link_name = "llvm.hexagon.V6.vandnqrt.acc.128B"]
261    fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
262    #[link_name = "llvm.hexagon.V6.vandqrt.128B"]
263    fn vandqrt(_: HvxVector, _: i32) -> HvxVector;
264    #[link_name = "llvm.hexagon.V6.vandqrt.acc.128B"]
265    fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
266    #[link_name = "llvm.hexagon.V6.vandvnqv.128B"]
267    fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector;
268    #[link_name = "llvm.hexagon.V6.vandvqv.128B"]
269    fn vandvqv(_: HvxVector, _: HvxVector) -> HvxVector;
270    #[link_name = "llvm.hexagon.V6.vandvrt.128B"]
271    fn vandvrt(_: HvxVector, _: i32) -> HvxVector;
272    #[link_name = "llvm.hexagon.V6.vandvrt.acc.128B"]
273    fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
274    #[link_name = "llvm.hexagon.V6.vaslh.128B"]
275    fn vaslh(_: HvxVector, _: i32) -> HvxVector;
276    #[link_name = "llvm.hexagon.V6.vaslh.acc.128B"]
277    fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
278    #[link_name = "llvm.hexagon.V6.vaslhv.128B"]
279    fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector;
280    #[link_name = "llvm.hexagon.V6.vaslw.128B"]
281    fn vaslw(_: HvxVector, _: i32) -> HvxVector;
282    #[link_name = "llvm.hexagon.V6.vaslw.acc.128B"]
283    fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
284    #[link_name = "llvm.hexagon.V6.vaslwv.128B"]
285    fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector;
286    #[link_name = "llvm.hexagon.V6.vasr.into.128B"]
287    fn vasr_into(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
288    #[link_name = "llvm.hexagon.V6.vasrh.128B"]
289    fn vasrh(_: HvxVector, _: i32) -> HvxVector;
290    #[link_name = "llvm.hexagon.V6.vasrh.acc.128B"]
291    fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
292    #[link_name = "llvm.hexagon.V6.vasrhbrndsat.128B"]
293    fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
294    #[link_name = "llvm.hexagon.V6.vasrhbsat.128B"]
295    fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
296    #[link_name = "llvm.hexagon.V6.vasrhubrndsat.128B"]
297    fn vasrhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
298    #[link_name = "llvm.hexagon.V6.vasrhubsat.128B"]
299    fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
300    #[link_name = "llvm.hexagon.V6.vasrhv.128B"]
301    fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector;
302    #[link_name = "llvm.hexagon.V6.vasruhubrndsat.128B"]
303    fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
304    #[link_name = "llvm.hexagon.V6.vasruhubsat.128B"]
305    fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
306    #[link_name = "llvm.hexagon.V6.vasruwuhrndsat.128B"]
307    fn vasruwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
308    #[link_name = "llvm.hexagon.V6.vasruwuhsat.128B"]
309    fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
310    #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat.128B"]
311    fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
312    #[link_name = "llvm.hexagon.V6.vasrvuhubsat.128B"]
313    fn vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
314    #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat.128B"]
315    fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
316    #[link_name = "llvm.hexagon.V6.vasrvwuhsat.128B"]
317    fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector;
318    #[link_name = "llvm.hexagon.V6.vasrw.128B"]
319    fn vasrw(_: HvxVector, _: i32) -> HvxVector;
320    #[link_name = "llvm.hexagon.V6.vasrw.acc.128B"]
321    fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
322    #[link_name = "llvm.hexagon.V6.vasrwh.128B"]
323    fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
324    #[link_name = "llvm.hexagon.V6.vasrwhrndsat.128B"]
325    fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
326    #[link_name = "llvm.hexagon.V6.vasrwhsat.128B"]
327    fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
328    #[link_name = "llvm.hexagon.V6.vasrwuhrndsat.128B"]
329    fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
330    #[link_name = "llvm.hexagon.V6.vasrwuhsat.128B"]
331    fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
332    #[link_name = "llvm.hexagon.V6.vasrwv.128B"]
333    fn vasrwv(_: HvxVector, _: HvxVector) -> HvxVector;
334    #[link_name = "llvm.hexagon.V6.vassign.128B"]
335    fn vassign(_: HvxVector) -> HvxVector;
336    #[link_name = "llvm.hexagon.V6.vassign.fp.128B"]
337    fn vassign_fp(_: HvxVector) -> HvxVector;
338    #[link_name = "llvm.hexagon.V6.vassignp.128B"]
339    fn vassignp(_: HvxVectorPair) -> HvxVectorPair;
340    #[link_name = "llvm.hexagon.V6.vavgb.128B"]
341    fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector;
342    #[link_name = "llvm.hexagon.V6.vavgbrnd.128B"]
343    fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector;
344    #[link_name = "llvm.hexagon.V6.vavgh.128B"]
345    fn vavgh(_: HvxVector, _: HvxVector) -> HvxVector;
346    #[link_name = "llvm.hexagon.V6.vavghrnd.128B"]
347    fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector;
348    #[link_name = "llvm.hexagon.V6.vavgub.128B"]
349    fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector;
350    #[link_name = "llvm.hexagon.V6.vavgubrnd.128B"]
351    fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector;
352    #[link_name = "llvm.hexagon.V6.vavguh.128B"]
353    fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector;
354    #[link_name = "llvm.hexagon.V6.vavguhrnd.128B"]
355    fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector;
356    #[link_name = "llvm.hexagon.V6.vavguw.128B"]
357    fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector;
358    #[link_name = "llvm.hexagon.V6.vavguwrnd.128B"]
359    fn vavguwrnd(_: HvxVector, _: HvxVector) -> HvxVector;
360    #[link_name = "llvm.hexagon.V6.vavgw.128B"]
361    fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector;
362    #[link_name = "llvm.hexagon.V6.vavgwrnd.128B"]
363    fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector;
364    #[link_name = "llvm.hexagon.V6.vcl0h.128B"]
365    fn vcl0h(_: HvxVector) -> HvxVector;
366    #[link_name = "llvm.hexagon.V6.vcl0w.128B"]
367    fn vcl0w(_: HvxVector) -> HvxVector;
368    #[link_name = "llvm.hexagon.V6.vcombine.128B"]
369    fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair;
370    #[link_name = "llvm.hexagon.V6.vconv.h.hf.128B"]
371    fn vconv_h_hf(_: HvxVector) -> HvxVector;
372    #[link_name = "llvm.hexagon.V6.vconv.hf.h.128B"]
373    fn vconv_hf_h(_: HvxVector) -> HvxVector;
374    #[link_name = "llvm.hexagon.V6.vconv.hf.qf16.128B"]
375    fn vconv_hf_qf16(_: HvxVector) -> HvxVector;
376    #[link_name = "llvm.hexagon.V6.vconv.hf.qf32.128B"]
377    fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector;
378    #[link_name = "llvm.hexagon.V6.vconv.sf.qf32.128B"]
379    fn vconv_sf_qf32(_: HvxVector) -> HvxVector;
380    #[link_name = "llvm.hexagon.V6.vconv.sf.w.128B"]
381    fn vconv_sf_w(_: HvxVector) -> HvxVector;
382    #[link_name = "llvm.hexagon.V6.vconv.w.sf.128B"]
383    fn vconv_w_sf(_: HvxVector) -> HvxVector;
384    #[link_name = "llvm.hexagon.V6.vcvt2.hf.b.128B"]
385    fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair;
386    #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub.128B"]
387    fn vcvt2_hf_ub(_: HvxVector) -> HvxVectorPair;
388    #[link_name = "llvm.hexagon.V6.vcvt.b.hf.128B"]
389    fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector;
390    #[link_name = "llvm.hexagon.V6.vcvt.h.hf.128B"]
391    fn vcvt_h_hf(_: HvxVector) -> HvxVector;
392    #[link_name = "llvm.hexagon.V6.vcvt.hf.b.128B"]
393    fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair;
394    #[link_name = "llvm.hexagon.V6.vcvt.hf.f8.128B"]
395    fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair;
396    #[link_name = "llvm.hexagon.V6.vcvt.hf.h.128B"]
397    fn vcvt_hf_h(_: HvxVector) -> HvxVector;
398    #[link_name = "llvm.hexagon.V6.vcvt.hf.sf.128B"]
399    fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
400    #[link_name = "llvm.hexagon.V6.vcvt.hf.ub.128B"]
401    fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair;
402    #[link_name = "llvm.hexagon.V6.vcvt.hf.uh.128B"]
403    fn vcvt_hf_uh(_: HvxVector) -> HvxVector;
404    #[link_name = "llvm.hexagon.V6.vcvt.sf.hf.128B"]
405    fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair;
406    #[link_name = "llvm.hexagon.V6.vcvt.ub.hf.128B"]
407    fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector;
408    #[link_name = "llvm.hexagon.V6.vcvt.uh.hf.128B"]
409    fn vcvt_uh_hf(_: HvxVector) -> HvxVector;
410    #[link_name = "llvm.hexagon.V6.vd0.128B"]
411    fn vd0() -> HvxVector;
412    #[link_name = "llvm.hexagon.V6.vdd0.128B"]
413    fn vdd0() -> HvxVectorPair;
414    #[link_name = "llvm.hexagon.V6.vdealb.128B"]
415    fn vdealb(_: HvxVector) -> HvxVector;
416    #[link_name = "llvm.hexagon.V6.vdealb4w.128B"]
417    fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector;
418    #[link_name = "llvm.hexagon.V6.vdealh.128B"]
419    fn vdealh(_: HvxVector) -> HvxVector;
420    #[link_name = "llvm.hexagon.V6.vdealvdd.128B"]
421    fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
422    #[link_name = "llvm.hexagon.V6.vdelta.128B"]
423    fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector;
424    #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.128B"]
425    fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
426    #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B"]
427    fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
428    #[link_name = "llvm.hexagon.V6.vdmpybus.128B"]
429    fn vdmpybus(_: HvxVector, _: i32) -> HvxVector;
430    #[link_name = "llvm.hexagon.V6.vdmpybus.acc.128B"]
431    fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
432    #[link_name = "llvm.hexagon.V6.vdmpybus.dv.128B"]
433    fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair;
434    #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc.128B"]
435    fn vdmpybus_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
436    #[link_name = "llvm.hexagon.V6.vdmpyhb.128B"]
437    fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector;
438    #[link_name = "llvm.hexagon.V6.vdmpyhb.acc.128B"]
439    fn vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
440    #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.128B"]
441    fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair;
442    #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc.128B"]
443    fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
444    #[link_name = "llvm.hexagon.V6.vdmpyhisat.128B"]
445    fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector;
446    #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc.128B"]
447    fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector;
448    #[link_name = "llvm.hexagon.V6.vdmpyhsat.128B"]
449    fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector;
450    #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc.128B"]
451    fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
452    #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.128B"]
453    fn vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector;
454    #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc.128B"]
455    fn vdmpyhsuisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector;
456    #[link_name = "llvm.hexagon.V6.vdmpyhsusat.128B"]
457    fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector;
458    #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc.128B"]
459    fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
460    #[link_name = "llvm.hexagon.V6.vdmpyhvsat.128B"]
461    fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector;
462    #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc.128B"]
463    fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
464    #[link_name = "llvm.hexagon.V6.vdsaduh.128B"]
465    fn vdsaduh(_: HvxVectorPair, _: i32) -> HvxVectorPair;
466    #[link_name = "llvm.hexagon.V6.vdsaduh.acc.128B"]
467    fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
468    #[link_name = "llvm.hexagon.V6.veqb.128B"]
469    fn veqb(_: HvxVector, _: HvxVector) -> HvxVector;
470    #[link_name = "llvm.hexagon.V6.veqb.and.128B"]
471    fn veqb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
472    #[link_name = "llvm.hexagon.V6.veqb.or.128B"]
473    fn veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
474    #[link_name = "llvm.hexagon.V6.veqb.xor.128B"]
475    fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
476    #[link_name = "llvm.hexagon.V6.veqh.128B"]
477    fn veqh(_: HvxVector, _: HvxVector) -> HvxVector;
478    #[link_name = "llvm.hexagon.V6.veqh.and.128B"]
479    fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
480    #[link_name = "llvm.hexagon.V6.veqh.or.128B"]
481    fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
482    #[link_name = "llvm.hexagon.V6.veqh.xor.128B"]
483    fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
484    #[link_name = "llvm.hexagon.V6.veqw.128B"]
485    fn veqw(_: HvxVector, _: HvxVector) -> HvxVector;
486    #[link_name = "llvm.hexagon.V6.veqw.and.128B"]
487    fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
488    #[link_name = "llvm.hexagon.V6.veqw.or.128B"]
489    fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
490    #[link_name = "llvm.hexagon.V6.veqw.xor.128B"]
491    fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
492    #[link_name = "llvm.hexagon.V6.vfmax.f8.128B"]
493    fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector;
494    #[link_name = "llvm.hexagon.V6.vfmax.hf.128B"]
495    fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector;
496    #[link_name = "llvm.hexagon.V6.vfmax.sf.128B"]
497    fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector;
498    #[link_name = "llvm.hexagon.V6.vfmin.f8.128B"]
499    fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector;
500    #[link_name = "llvm.hexagon.V6.vfmin.hf.128B"]
501    fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector;
502    #[link_name = "llvm.hexagon.V6.vfmin.sf.128B"]
503    fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector;
504    #[link_name = "llvm.hexagon.V6.vfneg.f8.128B"]
505    fn vfneg_f8(_: HvxVector) -> HvxVector;
506    #[link_name = "llvm.hexagon.V6.vfneg.hf.128B"]
507    fn vfneg_hf(_: HvxVector) -> HvxVector;
508    #[link_name = "llvm.hexagon.V6.vfneg.sf.128B"]
509    fn vfneg_sf(_: HvxVector) -> HvxVector;
510    #[link_name = "llvm.hexagon.V6.vgathermh.128B"]
511    fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> ();
512    #[link_name = "llvm.hexagon.V6.vgathermhq.128B"]
513    fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> ();
514    #[link_name = "llvm.hexagon.V6.vgathermhw.128B"]
515    fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> ();
516    #[link_name = "llvm.hexagon.V6.vgathermhwq.128B"]
517    fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> ();
518    #[link_name = "llvm.hexagon.V6.vgathermw.128B"]
519    fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> ();
520    #[link_name = "llvm.hexagon.V6.vgathermwq.128B"]
521    fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> ();
522    #[link_name = "llvm.hexagon.V6.vgtb.128B"]
523    fn vgtb(_: HvxVector, _: HvxVector) -> HvxVector;
524    #[link_name = "llvm.hexagon.V6.vgtb.and.128B"]
525    fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
526    #[link_name = "llvm.hexagon.V6.vgtb.or.128B"]
527    fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
528    #[link_name = "llvm.hexagon.V6.vgtb.xor.128B"]
529    fn vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
530    #[link_name = "llvm.hexagon.V6.vgth.128B"]
531    fn vgth(_: HvxVector, _: HvxVector) -> HvxVector;
532    #[link_name = "llvm.hexagon.V6.vgth.and.128B"]
533    fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
534    #[link_name = "llvm.hexagon.V6.vgth.or.128B"]
535    fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
536    #[link_name = "llvm.hexagon.V6.vgth.xor.128B"]
537    fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
538    #[link_name = "llvm.hexagon.V6.vgthf.128B"]
539    fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector;
540    #[link_name = "llvm.hexagon.V6.vgthf.and.128B"]
541    fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
542    #[link_name = "llvm.hexagon.V6.vgthf.or.128B"]
543    fn vgthf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
544    #[link_name = "llvm.hexagon.V6.vgthf.xor.128B"]
545    fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
546    #[link_name = "llvm.hexagon.V6.vgtsf.128B"]
547    fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector;
548    #[link_name = "llvm.hexagon.V6.vgtsf.and.128B"]
549    fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
550    #[link_name = "llvm.hexagon.V6.vgtsf.or.128B"]
551    fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
552    #[link_name = "llvm.hexagon.V6.vgtsf.xor.128B"]
553    fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
554    #[link_name = "llvm.hexagon.V6.vgtub.128B"]
555    fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector;
556    #[link_name = "llvm.hexagon.V6.vgtub.and.128B"]
557    fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
558    #[link_name = "llvm.hexagon.V6.vgtub.or.128B"]
559    fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
560    #[link_name = "llvm.hexagon.V6.vgtub.xor.128B"]
561    fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
562    #[link_name = "llvm.hexagon.V6.vgtuh.128B"]
563    fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector;
564    #[link_name = "llvm.hexagon.V6.vgtuh.and.128B"]
565    fn vgtuh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
566    #[link_name = "llvm.hexagon.V6.vgtuh.or.128B"]
567    fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
568    #[link_name = "llvm.hexagon.V6.vgtuh.xor.128B"]
569    fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
570    #[link_name = "llvm.hexagon.V6.vgtuw.128B"]
571    fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector;
572    #[link_name = "llvm.hexagon.V6.vgtuw.and.128B"]
573    fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
574    #[link_name = "llvm.hexagon.V6.vgtuw.or.128B"]
575    fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
576    #[link_name = "llvm.hexagon.V6.vgtuw.xor.128B"]
577    fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
578    #[link_name = "llvm.hexagon.V6.vgtw.128B"]
579    fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector;
580    #[link_name = "llvm.hexagon.V6.vgtw.and.128B"]
581    fn vgtw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
582    #[link_name = "llvm.hexagon.V6.vgtw.or.128B"]
583    fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
584    #[link_name = "llvm.hexagon.V6.vgtw.xor.128B"]
585    fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
586    #[link_name = "llvm.hexagon.V6.vinsertwr.128B"]
587    fn vinsertwr(_: HvxVector, _: i32) -> HvxVector;
588    #[link_name = "llvm.hexagon.V6.vlalignb.128B"]
589    fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
590    #[link_name = "llvm.hexagon.V6.vlalignbi.128B"]
591    fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
592    #[link_name = "llvm.hexagon.V6.vlsrb.128B"]
593    fn vlsrb(_: HvxVector, _: i32) -> HvxVector;
594    #[link_name = "llvm.hexagon.V6.vlsrh.128B"]
595    fn vlsrh(_: HvxVector, _: i32) -> HvxVector;
596    #[link_name = "llvm.hexagon.V6.vlsrhv.128B"]
597    fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector;
598    #[link_name = "llvm.hexagon.V6.vlsrw.128B"]
599    fn vlsrw(_: HvxVector, _: i32) -> HvxVector;
600    #[link_name = "llvm.hexagon.V6.vlsrwv.128B"]
601    fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector;
602    #[link_name = "llvm.hexagon.V6.vlutvvb.128B"]
603    fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
604    #[link_name = "llvm.hexagon.V6.vlutvvb.nm.128B"]
605    fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
606    #[link_name = "llvm.hexagon.V6.vlutvvb.oracc.128B"]
607    fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector;
608    #[link_name = "llvm.hexagon.V6.vlutvvb.oracci.128B"]
609    fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector;
610    #[link_name = "llvm.hexagon.V6.vlutvvbi.128B"]
611    fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
612    #[link_name = "llvm.hexagon.V6.vlutvwh.128B"]
613    fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
614    #[link_name = "llvm.hexagon.V6.vlutvwh.nm.128B"]
615    fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
616    #[link_name = "llvm.hexagon.V6.vlutvwh.oracc.128B"]
617    fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
618    #[link_name = "llvm.hexagon.V6.vlutvwh.oracci.128B"]
619    fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
620    #[link_name = "llvm.hexagon.V6.vlutvwhi.128B"]
621    fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
622    #[link_name = "llvm.hexagon.V6.vmax.hf.128B"]
623    fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector;
624    #[link_name = "llvm.hexagon.V6.vmax.sf.128B"]
625    fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector;
626    #[link_name = "llvm.hexagon.V6.vmaxb.128B"]
627    fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector;
628    #[link_name = "llvm.hexagon.V6.vmaxh.128B"]
629    fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector;
630    #[link_name = "llvm.hexagon.V6.vmaxub.128B"]
631    fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector;
632    #[link_name = "llvm.hexagon.V6.vmaxuh.128B"]
633    fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector;
634    #[link_name = "llvm.hexagon.V6.vmaxw.128B"]
635    fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector;
636    #[link_name = "llvm.hexagon.V6.vmin.hf.128B"]
637    fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector;
638    #[link_name = "llvm.hexagon.V6.vmin.sf.128B"]
639    fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector;
640    #[link_name = "llvm.hexagon.V6.vminb.128B"]
641    fn vminb(_: HvxVector, _: HvxVector) -> HvxVector;
642    #[link_name = "llvm.hexagon.V6.vminh.128B"]
643    fn vminh(_: HvxVector, _: HvxVector) -> HvxVector;
644    #[link_name = "llvm.hexagon.V6.vminub.128B"]
645    fn vminub(_: HvxVector, _: HvxVector) -> HvxVector;
646    #[link_name = "llvm.hexagon.V6.vminuh.128B"]
647    fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector;
648    #[link_name = "llvm.hexagon.V6.vminw.128B"]
649    fn vminw(_: HvxVector, _: HvxVector) -> HvxVector;
650    #[link_name = "llvm.hexagon.V6.vmpabus.128B"]
651    fn vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair;
652    #[link_name = "llvm.hexagon.V6.vmpabus.acc.128B"]
653    fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
654    #[link_name = "llvm.hexagon.V6.vmpabusv.128B"]
655    fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
656    #[link_name = "llvm.hexagon.V6.vmpabuu.128B"]
657    fn vmpabuu(_: HvxVectorPair, _: i32) -> HvxVectorPair;
658    #[link_name = "llvm.hexagon.V6.vmpabuu.acc.128B"]
659    fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
660    #[link_name = "llvm.hexagon.V6.vmpabuuv.128B"]
661    fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
662    #[link_name = "llvm.hexagon.V6.vmpahb.128B"]
663    fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
664    #[link_name = "llvm.hexagon.V6.vmpahb.acc.128B"]
665    fn vmpahb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
666    #[link_name = "llvm.hexagon.V6.vmpauhb.128B"]
667    fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
668    #[link_name = "llvm.hexagon.V6.vmpauhb.acc.128B"]
669    fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
670    #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.128B"]
671    fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
672    #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc.128B"]
673    fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
674    #[link_name = "llvm.hexagon.V6.vmpy.qf16.128B"]
675    fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector;
676    #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf.128B"]
677    fn vmpy_qf16_hf(_: HvxVector, _: HvxVector) -> HvxVector;
678    #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B"]
679    fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector;
680    #[link_name = "llvm.hexagon.V6.vmpy.qf32.128B"]
681    fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector;
682    #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf.128B"]
683    fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
684    #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B"]
685    fn vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
686    #[link_name = "llvm.hexagon.V6.vmpy.qf32.qf16.128B"]
687    fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair;
688    #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf.128B"]
689    fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector;
690    #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.128B"]
691    fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
692    #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc.128B"]
693    fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
694    #[link_name = "llvm.hexagon.V6.vmpy.sf.sf.128B"]
695    fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
696    #[link_name = "llvm.hexagon.V6.vmpybus.128B"]
697    fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair;
698    #[link_name = "llvm.hexagon.V6.vmpybus.acc.128B"]
699    fn vmpybus_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
700    #[link_name = "llvm.hexagon.V6.vmpybusv.128B"]
701    fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
702    #[link_name = "llvm.hexagon.V6.vmpybusv.acc.128B"]
703    fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
704    #[link_name = "llvm.hexagon.V6.vmpybv.128B"]
705    fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
706    #[link_name = "llvm.hexagon.V6.vmpybv.acc.128B"]
707    fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
708    #[link_name = "llvm.hexagon.V6.vmpyewuh.128B"]
709    fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector;
710    #[link_name = "llvm.hexagon.V6.vmpyewuh.64.128B"]
711    fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair;
712    #[link_name = "llvm.hexagon.V6.vmpyh.128B"]
713    fn vmpyh(_: HvxVector, _: i32) -> HvxVectorPair;
714    #[link_name = "llvm.hexagon.V6.vmpyh.acc.128B"]
715    fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
716    #[link_name = "llvm.hexagon.V6.vmpyhsat.acc.128B"]
717    fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
718    #[link_name = "llvm.hexagon.V6.vmpyhsrs.128B"]
719    fn vmpyhsrs(_: HvxVector, _: i32) -> HvxVector;
720    #[link_name = "llvm.hexagon.V6.vmpyhss.128B"]
721    fn vmpyhss(_: HvxVector, _: i32) -> HvxVector;
722    #[link_name = "llvm.hexagon.V6.vmpyhus.128B"]
723    fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair;
724    #[link_name = "llvm.hexagon.V6.vmpyhus.acc.128B"]
725    fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
726    #[link_name = "llvm.hexagon.V6.vmpyhv.128B"]
727    fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
728    #[link_name = "llvm.hexagon.V6.vmpyhv.acc.128B"]
729    fn vmpyhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
730    #[link_name = "llvm.hexagon.V6.vmpyhvsrs.128B"]
731    fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector;
732    #[link_name = "llvm.hexagon.V6.vmpyieoh.128B"]
733    fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector;
734    #[link_name = "llvm.hexagon.V6.vmpyiewh.acc.128B"]
735    fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
736    #[link_name = "llvm.hexagon.V6.vmpyiewuh.128B"]
737    fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector;
738    #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc.128B"]
739    fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
740    #[link_name = "llvm.hexagon.V6.vmpyih.128B"]
741    fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector;
742    #[link_name = "llvm.hexagon.V6.vmpyih.acc.128B"]
743    fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
744    #[link_name = "llvm.hexagon.V6.vmpyihb.128B"]
745    fn vmpyihb(_: HvxVector, _: i32) -> HvxVector;
746    #[link_name = "llvm.hexagon.V6.vmpyihb.acc.128B"]
747    fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
748    #[link_name = "llvm.hexagon.V6.vmpyiowh.128B"]
749    fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector;
750    #[link_name = "llvm.hexagon.V6.vmpyiwb.128B"]
751    fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector;
752    #[link_name = "llvm.hexagon.V6.vmpyiwb.acc.128B"]
753    fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
754    #[link_name = "llvm.hexagon.V6.vmpyiwh.128B"]
755    fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector;
756    #[link_name = "llvm.hexagon.V6.vmpyiwh.acc.128B"]
757    fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
758    #[link_name = "llvm.hexagon.V6.vmpyiwub.128B"]
759    fn vmpyiwub(_: HvxVector, _: i32) -> HvxVector;
760    #[link_name = "llvm.hexagon.V6.vmpyiwub.acc.128B"]
761    fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
762    #[link_name = "llvm.hexagon.V6.vmpyowh.128B"]
763    fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector;
764    #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc.128B"]
765    fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
766    #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.128B"]
767    fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector;
768    #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B"]
769    fn vmpyowh_rnd_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
770    #[link_name = "llvm.hexagon.V6.vmpyowh.sacc.128B"]
771    fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
772    #[link_name = "llvm.hexagon.V6.vmpyub.128B"]
773    fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair;
774    #[link_name = "llvm.hexagon.V6.vmpyub.acc.128B"]
775    fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
776    #[link_name = "llvm.hexagon.V6.vmpyubv.128B"]
777    fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
778    #[link_name = "llvm.hexagon.V6.vmpyubv.acc.128B"]
779    fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
780    #[link_name = "llvm.hexagon.V6.vmpyuh.128B"]
781    fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair;
782    #[link_name = "llvm.hexagon.V6.vmpyuh.acc.128B"]
783    fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair;
784    #[link_name = "llvm.hexagon.V6.vmpyuhe.128B"]
785    fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector;
786    #[link_name = "llvm.hexagon.V6.vmpyuhe.acc.128B"]
787    fn vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
788    #[link_name = "llvm.hexagon.V6.vmpyuhv.128B"]
789    fn vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair;
790    #[link_name = "llvm.hexagon.V6.vmpyuhv.acc.128B"]
791    fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair;
792    #[link_name = "llvm.hexagon.V6.vmpyuhvs.128B"]
793    fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector;
794    #[link_name = "llvm.hexagon.V6.vmux.128B"]
795    fn vmux(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
796    #[link_name = "llvm.hexagon.V6.vnavgb.128B"]
797    fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector;
798    #[link_name = "llvm.hexagon.V6.vnavgh.128B"]
799    fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector;
800    #[link_name = "llvm.hexagon.V6.vnavgub.128B"]
801    fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector;
802    #[link_name = "llvm.hexagon.V6.vnavgw.128B"]
803    fn vnavgw(_: HvxVector, _: HvxVector) -> HvxVector;
804    #[link_name = "llvm.hexagon.V6.vnormamth.128B"]
805    fn vnormamth(_: HvxVector) -> HvxVector;
806    #[link_name = "llvm.hexagon.V6.vnormamtw.128B"]
807    fn vnormamtw(_: HvxVector) -> HvxVector;
808    #[link_name = "llvm.hexagon.V6.vnot.128B"]
809    fn vnot(_: HvxVector) -> HvxVector;
810    #[link_name = "llvm.hexagon.V6.vor.128B"]
811    fn vor(_: HvxVector, _: HvxVector) -> HvxVector;
812    #[link_name = "llvm.hexagon.V6.vpackeb.128B"]
813    fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector;
814    #[link_name = "llvm.hexagon.V6.vpackeh.128B"]
815    fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector;
816    #[link_name = "llvm.hexagon.V6.vpackhb.sat.128B"]
817    fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector;
818    #[link_name = "llvm.hexagon.V6.vpackhub.sat.128B"]
819    fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector;
820    #[link_name = "llvm.hexagon.V6.vpackob.128B"]
821    fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector;
822    #[link_name = "llvm.hexagon.V6.vpackoh.128B"]
823    fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector;
824    #[link_name = "llvm.hexagon.V6.vpackwh.sat.128B"]
825    fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector;
826    #[link_name = "llvm.hexagon.V6.vpackwuh.sat.128B"]
827    fn vpackwuh_sat(_: HvxVector, _: HvxVector) -> HvxVector;
828    #[link_name = "llvm.hexagon.V6.vpopcounth.128B"]
829    fn vpopcounth(_: HvxVector) -> HvxVector;
830    #[link_name = "llvm.hexagon.V6.vprefixqb.128B"]
831    fn vprefixqb(_: HvxVector) -> HvxVector;
832    #[link_name = "llvm.hexagon.V6.vprefixqh.128B"]
833    fn vprefixqh(_: HvxVector) -> HvxVector;
834    #[link_name = "llvm.hexagon.V6.vprefixqw.128B"]
835    fn vprefixqw(_: HvxVector) -> HvxVector;
836    #[link_name = "llvm.hexagon.V6.vrdelta.128B"]
837    fn vrdelta(_: HvxVector, _: HvxVector) -> HvxVector;
838    #[link_name = "llvm.hexagon.V6.vrmpybus.128B"]
839    fn vrmpybus(_: HvxVector, _: i32) -> HvxVector;
840    #[link_name = "llvm.hexagon.V6.vrmpybus.acc.128B"]
841    fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
842    #[link_name = "llvm.hexagon.V6.vrmpybusi.128B"]
843    fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
844    #[link_name = "llvm.hexagon.V6.vrmpybusi.acc.128B"]
845    fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
846    #[link_name = "llvm.hexagon.V6.vrmpybusv.128B"]
847    fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector;
848    #[link_name = "llvm.hexagon.V6.vrmpybusv.acc.128B"]
849    fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
850    #[link_name = "llvm.hexagon.V6.vrmpybv.128B"]
851    fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector;
852    #[link_name = "llvm.hexagon.V6.vrmpybv.acc.128B"]
853    fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
854    #[link_name = "llvm.hexagon.V6.vrmpyub.128B"]
855    fn vrmpyub(_: HvxVector, _: i32) -> HvxVector;
856    #[link_name = "llvm.hexagon.V6.vrmpyub.acc.128B"]
857    fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector;
858    #[link_name = "llvm.hexagon.V6.vrmpyubi.128B"]
859    fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
860    #[link_name = "llvm.hexagon.V6.vrmpyubi.acc.128B"]
861    fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
862    #[link_name = "llvm.hexagon.V6.vrmpyubv.128B"]
863    fn vrmpyubv(_: HvxVector, _: HvxVector) -> HvxVector;
864    #[link_name = "llvm.hexagon.V6.vrmpyubv.acc.128B"]
865    fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
866    #[link_name = "llvm.hexagon.V6.vror.128B"]
867    fn vror(_: HvxVector, _: i32) -> HvxVector;
868    #[link_name = "llvm.hexagon.V6.vrotr.128B"]
869    fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector;
870    #[link_name = "llvm.hexagon.V6.vroundhb.128B"]
871    fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector;
872    #[link_name = "llvm.hexagon.V6.vroundhub.128B"]
873    fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector;
874    #[link_name = "llvm.hexagon.V6.vrounduhub.128B"]
875    fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector;
876    #[link_name = "llvm.hexagon.V6.vrounduwuh.128B"]
877    fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector;
878    #[link_name = "llvm.hexagon.V6.vroundwh.128B"]
879    fn vroundwh(_: HvxVector, _: HvxVector) -> HvxVector;
880    #[link_name = "llvm.hexagon.V6.vroundwuh.128B"]
881    fn vroundwuh(_: HvxVector, _: HvxVector) -> HvxVector;
882    #[link_name = "llvm.hexagon.V6.vrsadubi.128B"]
883    fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
884    #[link_name = "llvm.hexagon.V6.vrsadubi.acc.128B"]
885    fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair;
886    #[link_name = "llvm.hexagon.V6.vsatdw.128B"]
887    fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector;
888    #[link_name = "llvm.hexagon.V6.vsathub.128B"]
889    fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector;
890    #[link_name = "llvm.hexagon.V6.vsatuwuh.128B"]
891    fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector;
892    #[link_name = "llvm.hexagon.V6.vsatwh.128B"]
893    fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector;
894    #[link_name = "llvm.hexagon.V6.vsb.128B"]
895    fn vsb(_: HvxVector) -> HvxVectorPair;
896    #[link_name = "llvm.hexagon.V6.vscattermh.128B"]
897    fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
898    #[link_name = "llvm.hexagon.V6.vscattermh.add.128B"]
899    fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
900    #[link_name = "llvm.hexagon.V6.vscattermhq.128B"]
901    fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
902    #[link_name = "llvm.hexagon.V6.vscattermhw.128B"]
903    fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> ();
904    #[link_name = "llvm.hexagon.V6.vscattermhw.add.128B"]
905    fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> ();
906    #[link_name = "llvm.hexagon.V6.vscattermhwq.128B"]
907    fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> ();
908    #[link_name = "llvm.hexagon.V6.vscattermw.128B"]
909    fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
910    #[link_name = "llvm.hexagon.V6.vscattermw.add.128B"]
911    fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
912    #[link_name = "llvm.hexagon.V6.vscattermwq.128B"]
913    fn vscattermwq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> ();
914    #[link_name = "llvm.hexagon.V6.vsh.128B"]
915    fn vsh(_: HvxVector) -> HvxVectorPair;
916    #[link_name = "llvm.hexagon.V6.vshufeh.128B"]
917    fn vshufeh(_: HvxVector, _: HvxVector) -> HvxVector;
918    #[link_name = "llvm.hexagon.V6.vshuffb.128B"]
919    fn vshuffb(_: HvxVector) -> HvxVector;
920    #[link_name = "llvm.hexagon.V6.vshuffeb.128B"]
921    fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector;
922    #[link_name = "llvm.hexagon.V6.vshuffh.128B"]
923    fn vshuffh(_: HvxVector) -> HvxVector;
924    #[link_name = "llvm.hexagon.V6.vshuffob.128B"]
925    fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector;
926    #[link_name = "llvm.hexagon.V6.vshuffvdd.128B"]
927    fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair;
928    #[link_name = "llvm.hexagon.V6.vshufoeb.128B"]
929    fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair;
930    #[link_name = "llvm.hexagon.V6.vshufoeh.128B"]
931    fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair;
932    #[link_name = "llvm.hexagon.V6.vshufoh.128B"]
933    fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector;
934    #[link_name = "llvm.hexagon.V6.vsub.hf.128B"]
935    fn vsub_hf(_: HvxVector, _: HvxVector) -> HvxVector;
936    #[link_name = "llvm.hexagon.V6.vsub.hf.hf.128B"]
937    fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector;
938    #[link_name = "llvm.hexagon.V6.vsub.qf16.128B"]
939    fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector;
940    #[link_name = "llvm.hexagon.V6.vsub.qf16.mix.128B"]
941    fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector;
942    #[link_name = "llvm.hexagon.V6.vsub.qf32.128B"]
943    fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector;
944    #[link_name = "llvm.hexagon.V6.vsub.qf32.mix.128B"]
945    fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector;
946    #[link_name = "llvm.hexagon.V6.vsub.sf.128B"]
947    fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector;
948    #[link_name = "llvm.hexagon.V6.vsub.sf.hf.128B"]
949    fn vsub_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair;
950    #[link_name = "llvm.hexagon.V6.vsub.sf.sf.128B"]
951    fn vsub_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector;
952    #[link_name = "llvm.hexagon.V6.vsubb.128B"]
953    fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector;
954    #[link_name = "llvm.hexagon.V6.vsubb.dv.128B"]
955    fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
956    #[link_name = "llvm.hexagon.V6.vsubbnq.128B"]
957    fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
958    #[link_name = "llvm.hexagon.V6.vsubbq.128B"]
959    fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
960    #[link_name = "llvm.hexagon.V6.vsubbsat.128B"]
961    fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector;
962    #[link_name = "llvm.hexagon.V6.vsubbsat.dv.128B"]
963    fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
964    #[link_name = "llvm.hexagon.V6.vsubh.128B"]
965    fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector;
966    #[link_name = "llvm.hexagon.V6.vsubh.dv.128B"]
967    fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
968    #[link_name = "llvm.hexagon.V6.vsubhnq.128B"]
969    fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
970    #[link_name = "llvm.hexagon.V6.vsubhq.128B"]
971    fn vsubhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
972    #[link_name = "llvm.hexagon.V6.vsubhsat.128B"]
973    fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector;
974    #[link_name = "llvm.hexagon.V6.vsubhsat.dv.128B"]
975    fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
976    #[link_name = "llvm.hexagon.V6.vsubhw.128B"]
977    fn vsubhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
978    #[link_name = "llvm.hexagon.V6.vsububh.128B"]
979    fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair;
980    #[link_name = "llvm.hexagon.V6.vsububsat.128B"]
981    fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector;
982    #[link_name = "llvm.hexagon.V6.vsububsat.dv.128B"]
983    fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
984    #[link_name = "llvm.hexagon.V6.vsubububb.sat.128B"]
985    fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector;
986    #[link_name = "llvm.hexagon.V6.vsubuhsat.128B"]
987    fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector;
988    #[link_name = "llvm.hexagon.V6.vsubuhsat.dv.128B"]
989    fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
990    #[link_name = "llvm.hexagon.V6.vsubuhw.128B"]
991    fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair;
992    #[link_name = "llvm.hexagon.V6.vsubuwsat.128B"]
993    fn vsubuwsat(_: HvxVector, _: HvxVector) -> HvxVector;
994    #[link_name = "llvm.hexagon.V6.vsubuwsat.dv.128B"]
995    fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
996    #[link_name = "llvm.hexagon.V6.vsubw.128B"]
997    fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector;
998    #[link_name = "llvm.hexagon.V6.vsubw.dv.128B"]
999    fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
1000    #[link_name = "llvm.hexagon.V6.vsubwnq.128B"]
1001    fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
1002    #[link_name = "llvm.hexagon.V6.vsubwq.128B"]
1003    fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector;
1004    #[link_name = "llvm.hexagon.V6.vsubwsat.128B"]
1005    fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector;
1006    #[link_name = "llvm.hexagon.V6.vsubwsat.dv.128B"]
1007    fn vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair;
1008    #[link_name = "llvm.hexagon.V6.vswap.128B"]
1009    fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair;
1010    #[link_name = "llvm.hexagon.V6.vtmpyb.128B"]
1011    fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
1012    #[link_name = "llvm.hexagon.V6.vtmpyb.acc.128B"]
1013    fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
1014    #[link_name = "llvm.hexagon.V6.vtmpybus.128B"]
1015    fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair;
1016    #[link_name = "llvm.hexagon.V6.vtmpybus.acc.128B"]
1017    fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
1018    #[link_name = "llvm.hexagon.V6.vtmpyhb.128B"]
1019    fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair;
1020    #[link_name = "llvm.hexagon.V6.vtmpyhb.acc.128B"]
1021    fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair;
1022    #[link_name = "llvm.hexagon.V6.vunpackb.128B"]
1023    fn vunpackb(_: HvxVector) -> HvxVectorPair;
1024    #[link_name = "llvm.hexagon.V6.vunpackh.128B"]
1025    fn vunpackh(_: HvxVector) -> HvxVectorPair;
1026    #[link_name = "llvm.hexagon.V6.vunpackob.128B"]
1027    fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair;
1028    #[link_name = "llvm.hexagon.V6.vunpackoh.128B"]
1029    fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair;
1030    #[link_name = "llvm.hexagon.V6.vunpackub.128B"]
1031    fn vunpackub(_: HvxVector) -> HvxVectorPair;
1032    #[link_name = "llvm.hexagon.V6.vunpackuh.128B"]
1033    fn vunpackuh(_: HvxVector) -> HvxVectorPair;
1034    #[link_name = "llvm.hexagon.V6.vxor.128B"]
1035    fn vxor(_: HvxVector, _: HvxVector) -> HvxVector;
1036    #[link_name = "llvm.hexagon.V6.vzb.128B"]
1037    fn vzb(_: HvxVector) -> HvxVectorPair;
1038    #[link_name = "llvm.hexagon.V6.vzh.128B"]
1039    fn vzh(_: HvxVector) -> HvxVectorPair;
1040}
1041
/// `Rd32=vextract(Vu32,Rs32)`
///
/// Extracts a 32-bit word from vector `vu` into a scalar register; the
/// word is selected by scalar `rs` (HVX `extractw` instruction — see the
/// HVX Programmer's Reference Manual for the exact lane-selection rule).
///
/// Instruction Type: LD
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(extractw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_R_vextract_VR(vu: HvxVector, rs: i32) -> i32 {
    extractw(vu, rs)
}
1053
/// `Vd32=hi(Vss32)`
///
/// Returns the high (upper) vector of the vector pair `vss`
/// (the `W` suffix denotes an `HvxVectorPair` operand).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(hi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_hi_W(vss: HvxVectorPair) -> HvxVector {
    hi(vss)
}
1065
/// `Vd32=lo(Vss32)`
///
/// Returns the low (lower) vector of the vector pair `vss`
/// (the `W` suffix denotes an `HvxVectorPair` operand).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(lo))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_lo_W(vss: HvxVectorPair) -> HvxVector {
    lo(vss)
}
1077
/// `Vd32=vsplat(Rt32)`
///
/// Broadcasts ("splats") the 32-bit scalar `rt` across the result vector
/// (HVX `lvsplatw` — splat word — instruction).
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(lvsplatw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vsplat_R(rt: i32) -> HvxVector {
    lvsplatw(rt)
}
1089
/// `Vd32.uh=vabsdiff(Vu32.h,Vv32.h)`
///
/// Element-wise absolute difference of signed halfwords in `vu` and `vv`,
/// producing unsigned halfword results (per the `.h` → `.uh` mnemonic).
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vabsdiff_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffh(vu, vv)
}
1101
/// `Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)`
///
/// Element-wise absolute difference of unsigned bytes in `vu` and `vv`,
/// producing unsigned byte results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vabsdiff_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffub(vu, vv)
}
1113
/// `Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)`
///
/// Element-wise absolute difference of unsigned halfwords in `vu` and `vv`,
/// producing unsigned halfword results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vabsdiff_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffuh(vu, vv)
}
1125
/// `Vd32.uw=vabsdiff(Vu32.w,Vv32.w)`
///
/// Element-wise absolute difference of signed words in `vu` and `vv`,
/// producing unsigned word results (per the `.w` → `.uw` mnemonic).
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsdiffw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vabsdiff_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vabsdiffw(vu, vv)
}
1137
/// `Vd32.h=vabs(Vu32.h)`
///
/// Element-wise absolute value of the signed halfwords in `vu`.
/// For overflow behavior on the most negative value, see the
/// saturating variant [`Q6_Vh_vabs_Vh_sat`].
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vabs_Vh(vu: HvxVector) -> HvxVector {
    vabsh(vu)
}
1149
/// `Vd32.h=vabs(Vu32.h):sat`
///
/// Element-wise saturating absolute value of signed halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsh_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vabs_Vh_sat(vu: HvxVector) -> HvxVector {
    vabsh_sat(vu)
}
1161
/// `Vd32.w=vabs(Vu32.w)`
///
/// Element-wise absolute value of signed word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vabs_Vw(vu: HvxVector) -> HvxVector {
    vabsw(vu)
}
1173
/// `Vd32.w=vabs(Vu32.w):sat`
///
/// Element-wise saturating absolute value of signed word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vabsw_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vabs_Vw_sat(vu: HvxVector) -> HvxVector {
    vabsw_sat(vu)
}
1185
/// `Vd32.b=vadd(Vu32.b,Vv32.b)`
///
/// Element-wise addition of byte elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vadd_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddb(vu, vv)
}
1197
/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b)`
///
/// Element-wise addition of byte elements across full vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddb_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wb_vadd_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddb_dv(vuu, vvv)
}
1209
/// `Vd32.h=vadd(Vu32.h,Vv32.h)`
///
/// Element-wise addition of halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddh(vu, vv)
}
1221
/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h)`
///
/// Element-wise addition of halfword elements across full vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddh_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vadd_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddh_dv(vuu, vvv)
}
1233
/// `Vd32.h=vadd(Vu32.h,Vv32.h):sat`
///
/// Element-wise saturating addition of signed halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vadd_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddhsat(vu, vv)
}
1245
/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat`
///
/// Element-wise saturating addition of signed halfword elements across
/// full vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vadd_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddhsat_dv(vuu, vvv)
}
1257
/// `Vdd32.w=vadd(Vu32.h,Vv32.h)`
///
/// Widening addition: adds halfword elements and produces word results
/// in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vaddhw(vu, vv)
}
1269
/// `Vdd32.h=vadd(Vu32.ub,Vv32.ub)`
///
/// Widening addition: adds unsigned byte elements and produces halfword
/// results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddubh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vadd_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vaddubh(vu, vv)
}
1281
/// `Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat`
///
/// Element-wise saturating addition of unsigned byte elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vadd_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddubsat(vu, vv)
}
1293
/// `Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat`
///
/// Element-wise saturating addition of unsigned byte elements across
/// full vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddubsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wub_vadd_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddubsat_dv(vuu, vvv)
}
1305
/// `Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat`
///
/// Element-wise saturating addition of unsigned halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vadduhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vadd_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadduhsat(vu, vv)
}
1317
/// `Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat`
///
/// Element-wise saturating addition of unsigned halfword elements across
/// full vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vadduhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vadd_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vadduhsat_dv(vuu, vvv)
}
1329
/// `Vdd32.w=vadd(Vu32.uh,Vv32.uh)`
///
/// Widening addition: adds unsigned halfword elements and produces word
/// results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vadduhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vadd_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vadduhw(vu, vv)
}
1341
/// `Vd32.w=vadd(Vu32.w,Vv32.w)`
///
/// Element-wise addition of word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vadd_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    // Implemented via the generic SIMD add rather than a named intrinsic;
    // `assert_instr(vaddw)` above verifies it still lowers to `vaddw`.
    simd_add(vu, vv)
}
1353
/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w)`
///
/// Element-wise addition of word elements across full vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddw_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vadd_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddw_dv(vuu, vvv)
}
1365
/// `Vd32.w=vadd(Vu32.w,Vv32.w):sat`
///
/// Element-wise saturating addition of signed word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vadd_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddwsat(vu, vv)
}
1377
/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat`
///
/// Element-wise saturating addition of signed word elements across full
/// vector pairs.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaddwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vadd_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddwsat_dv(vuu, vvv)
}
1389
/// `Vd32=valign(Vu32,Vv32,Rt8)`
///
/// Byte alignment of the two input vectors, with the byte shift amount
/// taken from the scalar register `rt`.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(valignb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_valign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    valignb(vu, vv, rt)
}
1401
/// `Vd32=valign(Vu32,Vv32,#u3)`
///
/// Byte alignment of the two input vectors, with the byte shift amount
/// given as a 3-bit immediate (`iu3`).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(valignbi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_valign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector {
    valignbi(vu, vv, iu3)
}
1413
/// `Vd32=vand(Vu32,Vv32)`
///
/// Bitwise AND of two full vectors.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vand))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vand_VV(vu: HvxVector, vv: HvxVector) -> HvxVector {
    // Implemented via the generic SIMD AND rather than a named intrinsic;
    // `assert_instr(vand)` above verifies it still lowers to `vand`.
    simd_and(vu, vv)
}
1425
/// `Vd32.h=vasl(Vu32.h,Rt32)`
///
/// Arithmetic shift left of halfword elements by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasl_VhR(vu: HvxVector, rt: i32) -> HvxVector {
    vaslh(vu, rt)
}
1437
/// `Vd32.h=vasl(Vu32.h,Vv32.h)`
///
/// Arithmetic shift left of halfword elements by per-element amounts
/// taken from `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasl_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaslhv(vu, vv)
}
1449
/// `Vd32.w=vasl(Vu32.w,Rt32)`
///
/// Arithmetic shift left of word elements by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vasl_VwR(vu: HvxVector, rt: i32) -> HvxVector {
    vaslw(vu, rt)
}
1461
/// `Vx32.w+=vasl(Vu32.w,Rt32)`
///
/// Shift-and-accumulate: arithmetically shifts word elements of `vu`
/// left by `rt` and adds the result to the accumulator `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vaslacc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vaslw_acc(vx, vu, rt)
}
1473
/// `Vd32.w=vasl(Vu32.w,Vv32.w)`
///
/// Arithmetic shift left of word elements by per-element amounts taken
/// from `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vaslwv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vasl_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaslwv(vu, vv)
}
1485
/// `Vd32.h=vasr(Vu32.h,Rt32)`
///
/// Arithmetic shift right of halfword elements by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasr_VhR(vu: HvxVector, rt: i32) -> HvxVector {
    vasrh(vu, rt)
}
1497
/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat`
///
/// Narrowing shift: arithmetically shifts halfword elements of `vu` and
/// `vv` right by `rt` with rounding and saturation, packing the results
/// into one vector of signed bytes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhbrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhbrndsat(vu, vv, rt)
}
1509
/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat`
///
/// Narrowing shift: arithmetically shifts halfword elements of `vu` and
/// `vv` right by `rt` with rounding and saturation, packing the results
/// into one vector of unsigned bytes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhubrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhubrndsat(vu, vv, rt)
}
1521
/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat`
///
/// Narrowing shift: arithmetically shifts halfword elements of `vu` and
/// `vv` right by `rt` with saturation (no rounding), packing the results
/// into one vector of unsigned bytes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhubsat(vu, vv, rt)
}
1533
/// `Vd32.h=vasr(Vu32.h,Vv32.h)`
///
/// Arithmetic shift right of halfword elements by per-element amounts
/// taken from `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vasrhv(vu, vv)
}
1545
/// `Vd32.w=vasr(Vu32.w,Rt32)`
///
/// Arithmetic shift right of word elements by the scalar amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vasr_VwR(vu: HvxVector, rt: i32) -> HvxVector {
    vasrw(vu, rt)
}
1557
/// `Vx32.w+=vasr(Vu32.w,Rt32)`
///
/// Shift-and-accumulate: arithmetically shifts word elements of `vu`
/// right by `rt` and adds the result to the accumulator `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vasracc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vasrw_acc(vx, vu, rt)
}
1569
/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)`
///
/// Narrowing shift: arithmetically shifts word elements of `vu` and `vv`
/// right by `rt`, packing the results into one vector of halfwords.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasr_VwVwR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwh(vu, vv, rt)
}
1581
/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat`
///
/// Narrowing shift: arithmetically shifts word elements of `vu` and `vv`
/// right by `rt` with rounding and saturation, packing the results into
/// one vector of signed halfwords.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwhrndsat(vu, vv, rt)
}
1593
/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat`
///
/// Narrowing shift: arithmetically shifts word elements of `vu` and `vv`
/// right by `rt` with saturation (no rounding), packing the results into
/// one vector of signed halfwords.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwhsat(vu, vv, rt)
}
1605
/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat`
///
/// Narrowing shift: arithmetically shifts word elements of `vu` and `vv`
/// right by `rt` with saturation, packing the results into one vector of
/// unsigned halfwords.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwuhsat(vu, vv, rt)
}
1617
/// `Vd32.w=vasr(Vu32.w,Vv32.w)`
///
/// Arithmetic shift right of word elements by per-element amounts taken
/// from `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vasrwv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vasr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vasrwv(vu, vv)
}
1629
/// `Vd32=Vu32`
///
/// Vector copy: returns `vu` unchanged.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vassign))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_equals_V(vu: HvxVector) -> HvxVector {
    vassign(vu)
}
1641
/// `Vdd32=Vuu32`
///
/// Vector-pair copy: returns `vuu` unchanged.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vassignp))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_W_equals_W(vuu: HvxVectorPair) -> HvxVectorPair {
    vassignp(vuu)
}
1653
/// `Vd32.h=vavg(Vu32.h,Vv32.h)`
///
/// Element-wise average of signed halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgh(vu, vv)
}
1665
/// `Vd32.h=vavg(Vu32.h,Vv32.h):rnd`
///
/// Element-wise average of signed halfword elements, with rounding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavghrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vavg_VhVh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavghrnd(vu, vv)
}
1677
/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub)`
///
/// Element-wise average of unsigned byte elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgub(vu, vv)
}
1689
/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd`
///
/// Element-wise average of unsigned byte elements, with rounding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgubrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vavg_VubVub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgubrnd(vu, vv)
}
1701
/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh)`
///
/// Element-wise average of unsigned halfword elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavguh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vavg_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguh(vu, vv)
}
1713
/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd`
///
/// Element-wise average of unsigned halfword elements, with rounding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavguhrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vavg_VuhVuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguhrnd(vu, vv)
}
1725
/// `Vd32.w=vavg(Vu32.w,Vv32.w)`
///
/// Element-wise average of signed word elements.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgw(vu, vv)
}
1737
/// `Vd32.w=vavg(Vu32.w,Vv32.w):rnd`
///
/// Element-wise average of signed word elements, with rounding.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vavgwrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vavg_VwVw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgwrnd(vu, vv)
}
1749
/// `Vd32.uh=vcl0(Vu32.uh)`
///
/// Count-leading-zeros (`cl0`) of each unsigned halfword element.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vcl0h))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vcl0_Vuh(vu: HvxVector) -> HvxVector {
    vcl0h(vu)
}
1761
/// `Vd32.uw=vcl0(Vu32.uw)`
///
/// Count-leading-zeros (`cl0`) of each unsigned word element.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vcl0w))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vcl0_Vuw(vu: HvxVector) -> HvxVector {
    vcl0w(vu)
}
1773
/// `Vdd32=vcombine(Vu32,Vv32)`
///
/// Combines two single vectors into one vector pair (per the Q6 operand
/// order, `vu` supplies the high vector and `vv` the low — see the
/// companion `Q6_V_hi_W`/`Q6_V_lo_W` extractors).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vcombine))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_W_vcombine_VV(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vcombine(vu, vv)
}
1785
/// `Vd32=#0`
///
/// Returns an all-zero vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vd0))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vzero() -> HvxVector {
    vd0()
}
1797
/// `Vd32.b=vdeal(Vu32.b)`
///
/// Deal (deinterleave) permutation of the byte elements of `vu`.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vdeal_Vb(vu: HvxVector) -> HvxVector {
    vdealb(vu)
}
1809
/// `Vd32.b=vdeale(Vu32.b,Vv32.b)`
///
/// Deal-even: gathers byte elements from `vu` and `vv` into one vector
/// (the `e` suffix selects the even elements — see the HVX manual).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealb4w))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vdeale_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdealb4w(vu, vv)
}
1821
/// `Vd32.h=vdeal(Vu32.h)`
///
/// Deal (deinterleave) permutation of the halfword elements of `vu`.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vdeal_Vh(vu: HvxVector) -> HvxVector {
    vdealh(vu)
}
1833
/// `Vdd32=vdeal(Vu32,Vv32,Rt8)`
///
/// Deals (deinterleaves) the two input vectors into a vector pair,
/// with the element granularity controlled by the scalar `rt`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdealvdd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_W_vdeal_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vdealvdd(vu, vv, rt)
}
1845
/// `Vd32=vdelta(Vu32,Vv32)`
///
/// Permutes the bytes of `vu` through the HVX delta network, with the
/// routing controls taken from `vv`.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdelta))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdelta(vu, vv)
}
1857
/// `Vd32.h=vdmpy(Vu32.ub,Rt32.b)`
///
/// Dual multiply: multiplies unsigned byte elements of `vu` by signed
/// byte lanes of the scalar `rt`, producing halfword results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vdmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector {
    vdmpybus(vu, rt)
}
1869
/// `Vx32.h+=vdmpy(Vu32.ub,Rt32.b)`
///
/// Dual multiply-accumulate: multiplies unsigned byte elements of `vu`
/// by signed byte lanes of the scalar `rt` and adds the halfword results
/// to the accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vdmpyacc_VhVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vdmpybus_acc(vx, vu, rt)
}
1881
/// `Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)`
///
/// Dual multiply over a vector pair: multiplies unsigned byte elements
/// of `vuu` by signed byte lanes of the scalar `rt`, producing halfword
/// results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vdmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vdmpybus_dv(vuu, rt)
}
1893
/// `Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)`
///
/// Dual multiply-accumulate over a vector pair: multiplies unsigned byte
/// elements of `vuu` by signed byte lanes of the scalar `rt` and adds
/// the halfword results to the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpybus_dv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vdmpyacc_WhWubRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vdmpybus_dv_acc(vxx, vuu, rt)
}
1909
/// `Vd32.w=vdmpy(Vu32.h,Rt32.b)`
///
/// Dual multiply: multiplies signed halfword elements of `vu` by signed
/// byte lanes of the scalar `rt`, producing word results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vdmpy_VhRb(vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhb(vu, rt)
}
1921
/// `Vx32.w+=vdmpy(Vu32.h,Rt32.b)`
///
/// Dual multiply-accumulate: multiplies signed halfword elements of `vu`
/// by signed byte lanes of the scalar `rt` and adds the word results to
/// the accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vdmpyacc_VwVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhb_acc(vx, vu, rt)
}
1933
/// `Vdd32.w=vdmpy(Vuu32.h,Rt32.b)`
///
/// Dual multiply over a vector pair: multiplies signed halfword elements
/// of `vuu` by signed byte lanes of the scalar `rt`, producing word
/// results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vdmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vdmpyhb_dv(vuu, rt)
}
1945
/// `Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)`
///
/// Dual multiply-accumulate over a vector pair: multiplies signed
/// halfword elements of `vuu` by signed byte lanes of the scalar `rt`
/// and adds the word results to the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vdmpyacc_WwWhRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vdmpyhb_dv_acc(vxx, vuu, rt)
}
1961
/// `Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat`
///
/// Dual multiply with saturation: multiplies signed halfword elements of
/// the pair `vuu` by halfword lanes of the scalar `rt`, reducing to a
/// single vector of saturated word results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhisat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vdmpy_WhRh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector {
    vdmpyhisat(vuu, rt)
}
1973
/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat`
///
/// Dual multiply-accumulate with saturation: multiplies signed halfword
/// elements of the pair `vuu` by halfword lanes of the scalar `rt` and
/// accumulates the saturated word results into `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhisat_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vdmpyacc_VwWhRh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector {
    vdmpyhisat_acc(vx, vuu, rt)
}
1985
/// `Vd32.w=vdmpy(Vu32.h,Rt32.h):sat`
///
/// Dual multiply with saturation: multiplies signed halfword elements of
/// `vu` by halfword lanes of the scalar `rt`, producing saturated word
/// results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vdmpy_VhRh_sat(vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhsat(vu, rt)
}
1997
/// `Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat`
///
/// Dual multiply-accumulate with saturation: multiplies signed halfword
/// elements of `vu` by halfword lanes of the scalar `rt` and accumulates
/// the saturated word results into `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsat_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vdmpyacc_VwVhRh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vdmpyhsat_acc(vx, vu, rt)
}
2009
/// `Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat`
///
/// Dual multiply with saturation: multiplies signed halfword elements of
/// the pair `vuu` by unsigned halfword lanes of the scalar `rt`,
/// reducing to a single vector of saturated word results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdmpyhsuisat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vdmpy_WhRuh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector {
    vdmpyhsuisat(vuu, rt)
}
2021
2022/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat`
2023///
2024/// Instruction Type: CVI_VX_DV
2025/// Execution Slots: SLOT23
2026#[inline(always)]
2027#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2028#[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))]
2029#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2030pub unsafe fn Q6_Vw_vdmpyacc_VwWhRuh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector {
2031    vdmpyhsuisat_acc(vx, vuu, rt)
2032}
2033
2034/// `Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat`
2035///
2036/// Instruction Type: CVI_VX
2037/// Execution Slots: SLOT23
2038#[inline(always)]
2039#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2040#[cfg_attr(test, assert_instr(vdmpyhsusat))]
2041#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2042pub unsafe fn Q6_Vw_vdmpy_VhRuh_sat(vu: HvxVector, rt: i32) -> HvxVector {
2043    vdmpyhsusat(vu, rt)
2044}
2045
2046/// `Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat`
2047///
2048/// Instruction Type: CVI_VX
2049/// Execution Slots: SLOT23
2050#[inline(always)]
2051#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2052#[cfg_attr(test, assert_instr(vdmpyhsusat_acc))]
2053#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2054pub unsafe fn Q6_Vw_vdmpyacc_VwVhRuh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
2055    vdmpyhsusat_acc(vx, vu, rt)
2056}
2057
2058/// `Vd32.w=vdmpy(Vu32.h,Vv32.h):sat`
2059///
2060/// Instruction Type: CVI_VX
2061/// Execution Slots: SLOT23
2062#[inline(always)]
2063#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2064#[cfg_attr(test, assert_instr(vdmpyhvsat))]
2065#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2066pub unsafe fn Q6_Vw_vdmpy_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
2067    vdmpyhvsat(vu, vv)
2068}
2069
2070/// `Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat`
2071///
2072/// Instruction Type: CVI_VX_DV
2073/// Execution Slots: SLOT23
2074#[inline(always)]
2075#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
2076#[cfg_attr(test, assert_instr(vdmpyhvsat_acc))]
2077#[unstable(feature = "stdarch_hexagon", issue = "151523")]
2078pub unsafe fn Q6_Vw_vdmpyacc_VwVhVh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
2079    vdmpyhvsat_acc(vx, vu, vv)
2080}
2081
/// `Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)`
///
/// Dual sum-of-absolute-differences (`vdsad`) of the unsigned-halfword lanes
/// of vector pair `vuu` against scalar `rt`, producing an unsigned-word pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdsaduh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vdsad_WuhRuh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vdsaduh(vuu, rt)
}

/// `Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)`
///
/// Accumulating form: the `vdsad` result is added into the unsigned-word
/// lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vdsaduh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vdsadacc_WuwWuhRuh(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vdsaduh_acc(vxx, vuu, rt)
}
2109
/// `Vx32.w=vinsert(Rt32)`
///
/// Inserts scalar `rt` into a word lane of `vx` (read-modify-write form
/// `Vx32.w=...`); lane selection is per the HVX manual — confirm before use.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vinsertwr))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vinsert_VwR(vx: HvxVector, rt: i32) -> HvxVector {
    vinsertwr(vx, rt)
}

/// `Vd32=vlalign(Vu32,Vv32,Rt8)`
///
/// Byte-wise left-align (`vlalign`) of vectors `vu` and `vv`; the alignment
/// amount comes from the low byte of scalar `rt` (`Rt8`).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlalignb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vlalign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vlalignb(vu, vv, rt)
}

/// `Vd32=vlalign(Vu32,Vv32,#u3)`
///
/// Immediate variant of `vlalign`: the alignment amount is the 3-bit
/// immediate `iu3` (`#u3`) instead of a scalar register.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlalignbi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vlalign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector {
    vlalignbi(vu, vv, iu3)
}
2145
/// `Vd32.uh=vlsr(Vu32.uh,Rt32)`
///
/// Logical shift right of each unsigned-halfword lane of `vu` by the shift
/// amount in scalar `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vlsr_VuhR(vu: HvxVector, rt: i32) -> HvxVector {
    vlsrh(vu, rt)
}

/// `Vd32.h=vlsr(Vu32.h,Vv32.h)`
///
/// Logical shift right of each halfword lane of `vu`, with a per-lane shift
/// count taken from the corresponding halfword lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vlsr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vlsrhv(vu, vv)
}

/// `Vd32.uw=vlsr(Vu32.uw,Rt32)`
///
/// Logical shift right of each unsigned-word lane of `vu` by the shift amount
/// in scalar `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vlsr_VuwR(vu: HvxVector, rt: i32) -> HvxVector {
    vlsrw(vu, rt)
}

/// `Vd32.w=vlsr(Vu32.w,Vv32.w)`
///
/// Logical shift right of each word lane of `vu`, with a per-lane shift count
/// taken from the corresponding word lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlsrwv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vlsr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vlsrwv(vu, vv)
}
2193
/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)`
///
/// 32-entry byte table lookup (`vlut32`): `vu` supplies indices, `vv` the
/// table bytes, and the low byte of `rt` (`Rt8`) selects the table portion.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvvb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vlut32_VbVbR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vlutvvb(vu, vv, rt)
}

/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)`
///
/// OR-accumulating form (`|=`): the `vlut32` lookup result is bitwise-ORed
/// into the byte lanes of `vx`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvvb_oracc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vlut32or_VbVbVbR(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
    rt: i32,
) -> HvxVector {
    vlutvvb_oracc(vx, vu, vv, rt)
}

/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)`
///
/// 16-entry halfword table lookup (`vlut16`): byte indices from `vu`,
/// halfword table entries from `vv`; the result fills a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vlut16_VbVhR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vlutvwh(vu, vv, rt)
}

/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)`
///
/// OR-accumulating form (`|=`): the `vlut16` lookup result is bitwise-ORed
/// into the halfword lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vlutvwh_oracc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vlut16or_WhVbVhR(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vlutvwh_oracc(vxx, vu, vv, rt)
}
2251
/// `Vd32.h=vmax(Vu32.h,Vv32.h)`
///
/// Lane-wise maximum of the signed halfword lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmax_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxh(vu, vv)
}

/// `Vd32.ub=vmax(Vu32.ub,Vv32.ub)`
///
/// Lane-wise maximum of the unsigned byte lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vmax_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxub(vu, vv)
}

/// `Vd32.uh=vmax(Vu32.uh,Vv32.uh)`
///
/// Lane-wise maximum of the unsigned halfword lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vmax_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxuh(vu, vv)
}

/// `Vd32.w=vmax(Vu32.w,Vv32.w)`
///
/// Lane-wise maximum of the signed word lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmaxw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmax_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxw(vu, vv)
}

/// `Vd32.h=vmin(Vu32.h,Vv32.h)`
///
/// Lane-wise minimum of the signed halfword lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmin_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminh(vu, vv)
}

/// `Vd32.ub=vmin(Vu32.ub,Vv32.ub)`
///
/// Lane-wise minimum of the unsigned byte lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vmin_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminub(vu, vv)
}

/// `Vd32.uh=vmin(Vu32.uh,Vv32.uh)`
///
/// Lane-wise minimum of the unsigned halfword lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vmin_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminuh(vu, vv)
}

/// `Vd32.w=vmin(Vu32.w,Vv32.w)`
///
/// Lane-wise minimum of the signed word lanes of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vminw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmin_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminw(vu, vv)
}
2347
/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.b)`
///
/// Multiply-add (`vmpa`) of the unsigned-byte lanes of vector pair `vuu` by
/// the signed bytes of scalar `rt`, producing a halfword vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpa_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vmpabus(vuu, rt)
}

/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)`
///
/// Accumulating form: the `vmpa` result is added into the halfword lanes of
/// accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpaacc_WhWubRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vmpabus_acc(vxx, vuu, rt)
}

/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)`
///
/// Pair-by-pair `vmpa`: unsigned-byte lanes of `vuu` by the signed-byte lanes
/// of pair `vvv`, producing a halfword vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabusv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpa_WubWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vmpabusv(vuu, vvv)
}

/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)`
///
/// Pair-by-pair `vmpa` with both operands unsigned bytes, producing a
/// halfword vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpabuuv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpa_WubWub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vmpabuuv(vuu, vvv)
}

/// `Vdd32.w=vmpa(Vuu32.h,Rt32.b)`
///
/// `vmpa` of the halfword lanes of pair `vuu` by the signed bytes of scalar
/// `rt`, producing a word vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpahb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpa_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vmpahb(vuu, rt)
}

/// `Vxx32.w+=vmpa(Vuu32.h,Rt32.b)`
///
/// Accumulating form: the halfword-by-byte `vmpa` result is added into the
/// word lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpahb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpaacc_WwWhRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vmpahb_acc(vxx, vuu, rt)
}
2427
/// `Vdd32.h=vmpy(Vu32.ub,Rt32.b)`
///
/// Widening multiply: unsigned bytes of `vu` by the signed bytes of scalar
/// `rt`; halfword products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpybus(vu, rt)
}

/// `Vxx32.h+=vmpy(Vu32.ub,Rt32.b)`
///
/// Accumulating form: the widening byte products are added into the halfword
/// lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpyacc_WhVubRb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpybus_acc(vxx, vu, rt)
}

/// `Vdd32.h=vmpy(Vu32.ub,Vv32.b)`
///
/// Widening vector multiply: unsigned bytes of `vu` by signed bytes of `vv`;
/// halfword products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybusv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpybusv(vu, vv)
}

/// `Vxx32.h+=vmpy(Vu32.ub,Vv32.b)`
///
/// Accumulating form of the unsigned-by-signed byte vector multiply; products
/// are added into the halfword lanes of `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybusv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpyacc_WhVubVb(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpybusv_acc(vxx, vu, vv)
}

/// `Vdd32.h=vmpy(Vu32.b,Vv32.b)`
///
/// Widening vector multiply of signed bytes of `vu` by signed bytes of `vv`;
/// halfword products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpybv(vu, vv)
}

/// `Vxx32.h+=vmpy(Vu32.b,Vv32.b)`
///
/// Accumulating form of the signed-byte vector multiply; products are added
/// into the halfword lanes of `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpybv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vmpyacc_WhVbVb(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpybv_acc(vxx, vu, vv)
}
2507
/// `Vd32.w=vmpye(Vu32.w,Vv32.uh)`
///
/// `vmpye` ("even" multiply form — see the HVX manual): word lanes of `vu`
/// by unsigned halfwords of `vv`, word result.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyewuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyewuh(vu, vv)
}

/// `Vdd32.w=vmpy(Vu32.h,Rt32.h)`
///
/// Widening multiply of the halfword lanes of `vu` by the halfwords of
/// scalar `rt`; word products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpy_VhRh(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyh(vu, rt)
}

/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat`
///
/// Saturating accumulate form: widening halfword-by-scalar products are added
/// into the word lanes of pair `vxx` with saturation (`:sat`).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhsat_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpyacc_WwVhRh_sat(
    vxx: HvxVectorPair,
    vu: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vmpyhsat_acc(vxx, vu, rt)
}

/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat`
///
/// Halfword multiply by scalar with left-shift-by-one, rounding, and
/// saturation (`:<<1:rnd:sat`); halfword result.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhsrs))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmpy_VhRh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyhsrs(vu, rt)
}

/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat`
///
/// Halfword multiply by scalar with left-shift-by-one and saturation
/// (`:<<1:sat`, no rounding); halfword result.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhss))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmpy_VhRh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyhss(vu, rt)
}
2571
/// `Vdd32.w=vmpy(Vu32.h,Vv32.uh)`
///
/// Widening vector multiply: signed halfwords of `vu` by unsigned halfwords
/// of `vv`; word products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpy_VhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyhus(vu, vv)
}

/// `Vxx32.w+=vmpy(Vu32.h,Vv32.uh)`
///
/// Accumulating form: the signed-by-unsigned halfword products are added into
/// the word lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpyacc_WwVhVuh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyhus_acc(vxx, vu, vv)
}

/// `Vdd32.w=vmpy(Vu32.h,Vv32.h)`
///
/// Widening vector multiply of the signed halfword lanes of `vu` and `vv`;
/// word products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpy_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyhv(vu, vv)
}

/// `Vxx32.w+=vmpy(Vu32.h,Vv32.h)`
///
/// Accumulating form: the signed-halfword products are added into the word
/// lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpyacc_WwVhVh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyhv_acc(vxx, vu, vv)
}

/// `Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat`
///
/// Halfword vector multiply with left-shift-by-one, rounding, and saturation
/// (`:<<1:rnd:sat`); halfword result.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyhvsrs))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmpy_VhVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyhvsrs(vu, vv)
}
2639
/// `Vd32.w=vmpyieo(Vu32.h,Vv32.h)`
///
/// `vmpyieo` (integer multiply, even/odd form — see the HVX manual) of the
/// halfword lanes of `vu` and `vv`; word result.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyieoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyieo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyieoh(vu, vv)
}

/// `Vx32.w+=vmpyie(Vu32.w,Vv32.h)`
///
/// Accumulating `vmpyie`: word lanes of `vu` multiplied by halfwords of `vv`,
/// result added into the word lanes of `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiewh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyieacc_VwVwVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiewh_acc(vx, vu, vv)
}

/// `Vd32.w=vmpyie(Vu32.w,Vv32.uh)`
///
/// `vmpyie` of the word lanes of `vu` by the unsigned halfwords of `vv`;
/// word result.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiewuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyie_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiewuh(vu, vv)
}

/// `Vx32.w+=vmpyie(Vu32.w,Vv32.uh)`
///
/// Accumulating form of the word-by-unsigned-halfword `vmpyie`; result added
/// into the word lanes of `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiewuh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyieacc_VwVwVuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiewuh_acc(vx, vu, vv)
}
2687
/// `Vd32.h=vmpyi(Vu32.h,Vv32.h)`
///
/// Non-widening integer multiply (`vmpyi`) of the halfword lanes of `vu` and
/// `vv`; the result stays halfword-per-lane.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyih))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmpyi_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyih(vu, vv)
}

/// `Vx32.h+=vmpyi(Vu32.h,Vv32.h)`
///
/// Accumulating form: the non-widening halfword products are added into the
/// halfword lanes of `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyih_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmpyiacc_VhVhVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyih_acc(vx, vu, vv)
}

/// `Vd32.h=vmpyi(Vu32.h,Rt32.b)`
///
/// Non-widening multiply of the halfword lanes of `vu` by the signed bytes of
/// scalar `rt`; halfword result.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyihb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmpyi_VhRb(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyihb(vu, rt)
}

/// `Vx32.h+=vmpyi(Vu32.h,Rt32.b)`
///
/// Accumulating form: the halfword-by-scalar-byte products are added into the
/// halfword lanes of `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyihb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vmpyiacc_VhVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyihb_acc(vx, vu, rt)
}

/// `Vd32.w=vmpyio(Vu32.w,Vv32.h)`
///
/// `vmpyio` (integer multiply, odd form — see the HVX manual): word lanes of
/// `vu` by halfwords of `vv`; word result.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiowh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyio_VwVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyiowh(vu, vv)
}
2747
/// `Vd32.w=vmpyi(Vu32.w,Rt32.b)`
///
/// Non-widening multiply of the word lanes of `vu` by the signed bytes of
/// scalar `rt`; word result.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyi_VwRb(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwb(vu, rt)
}

/// `Vx32.w+=vmpyi(Vu32.w,Rt32.b)`
///
/// Accumulating form: the word-by-scalar-byte products are added into the
/// word lanes of `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyiacc_VwVwRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwb_acc(vx, vu, rt)
}

/// `Vd32.w=vmpyi(Vu32.w,Rt32.h)`
///
/// Non-widening multiply of the word lanes of `vu` by the halfwords of scalar
/// `rt`; word result.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyi_VwRh(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwh(vu, rt)
}

/// `Vx32.w+=vmpyi(Vu32.w,Rt32.h)`
///
/// Accumulating form: the word-by-scalar-halfword products are added into the
/// word lanes of `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyiwh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyiacc_VwVwRh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwh_acc(vx, vu, rt)
}
2795
/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat`
///
/// `vmpyo` (odd multiply form — see the HVX manual): word lanes of `vu` by
/// halfwords of `vv`, with left-shift-by-one and saturation (`:<<1:sat`).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyowh(vu, vv)
}

/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat`
///
/// Rounding variant of the word-by-halfword `vmpyo`, with left-shift-by-one,
/// rounding, and saturation (`:<<1:rnd:sat`).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh_rnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyowh_rnd(vu, vv)
}

/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift`
///
/// Shift-accumulate variant (`:shift`): the rounded, saturated `vmpyo` result
/// is combined into accumulator `vx` (see the HVX manual for exact shifting).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVector {
    vmpyowh_rnd_sacc(vx, vu, vv)
}

/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift`
///
/// Shift-accumulate variant without rounding (`:<<1:sat:shift`); the
/// saturated `vmpyo` result is combined into accumulator `vx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyowh_sacc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVector {
    vmpyowh_sacc(vx, vu, vv)
}
2851
/// `Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)`
///
/// Widening unsigned multiply: unsigned bytes of `vu` by the unsigned bytes
/// of scalar `rt`; unsigned-halfword products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyub(vu, rt)
}

/// `Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)`
///
/// Accumulating form: the unsigned byte-by-scalar products are added into the
/// unsigned-halfword lanes of pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyub_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vmpyacc_WuhVubRub(
    vxx: HvxVectorPair,
    vu: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vmpyub_acc(vxx, vu, rt)
}

/// `Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)`
///
/// Widening unsigned vector multiply of the byte lanes of `vu` and `vv`;
/// unsigned-halfword products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyubv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyubv(vu, vv)
}

/// `Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)`
///
/// Accumulating form: the unsigned byte products are added into the
/// unsigned-halfword lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyubv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vmpyacc_WuhVubVub(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyubv_acc(vxx, vu, vv)
}
2907
/// `Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)`
///
/// Widening unsigned multiply: unsigned halfwords of `vu` by the unsigned
/// halfwords of scalar `rt`; unsigned-word products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vmpy_VuhRuh(vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyuh(vu, rt)
}

/// `Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)`
///
/// Accumulating form: the unsigned halfword-by-scalar products are added into
/// the unsigned-word lanes of pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhRuh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    rt: i32,
) -> HvxVectorPair {
    vmpyuh_acc(vxx, vu, rt)
}

/// `Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)`
///
/// Widening unsigned vector multiply of the halfword lanes of `vu` and `vv`;
/// unsigned-word products fill a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuhv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vmpy_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyuhv(vu, vv)
}

/// `Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)`
///
/// Accumulating form: the unsigned halfword products are added into the
/// unsigned-word lanes of accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vmpyuhv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhVuh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyuhv_acc(vxx, vu, vv)
}
2963
/// `Vd32.h=vnavg(Vu32.h,Vv32.h)`
///
/// Per the HVX manual, `vnavg` computes the halved difference of
/// corresponding lanes — here signed halfwords.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnavgh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vnavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgh(vu, vv)
}

/// `Vd32.b=vnavg(Vu32.ub,Vv32.ub)`
///
/// Per the HVX manual, `vnavg` computes the halved difference of
/// corresponding lanes; unsigned-byte inputs yield signed-byte results.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnavgub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vnavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgub(vu, vv)
}

/// `Vd32.w=vnavg(Vu32.w,Vv32.w)`
///
/// Per the HVX manual, `vnavg` computes the halved difference of
/// corresponding lanes — here signed words.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnavgw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vnavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgw(vu, vv)
}
2999
/// `Vd32.h=vnormamt(Vu32.h)`
///
/// Per the HVX manual, `vnormamt` returns the normalization amount of each
/// signed halfword lane (the count of redundant sign bits).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnormamth))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vnormamt_Vh(vu: HvxVector) -> HvxVector {
    vnormamth(vu)
}

/// `Vd32.w=vnormamt(Vu32.w)`
///
/// Per the HVX manual, `vnormamt` returns the normalization amount of each
/// signed word lane (the count of redundant sign bits).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnormamtw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vnormamt_Vw(vu: HvxVector) -> HvxVector {
    vnormamtw(vu)
}

/// `Vd32=vnot(Vu32)`
///
/// Bitwise complement of every bit in the vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vnot))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vnot_V(vu: HvxVector) -> HvxVector {
    vnot(vu)
}

/// `Vd32=vor(Vu32,Vv32)`
///
/// Bitwise OR of the two vectors. Lowered through the generic `simd_or`
/// intrinsic rather than a target-specific one; codegen still emits `vor`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vor))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_or(vu, vv)
}
3047
/// `Vd32.b=vpacke(Vu32.h,Vv32.h)`
///
/// Per the HVX manual, `vpacke` packs the even (low) bytes of the halfword
/// lanes from both source vectors into one byte-lane vector.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackeb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vpacke_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackeb(vu, vv)
}

/// `Vd32.h=vpacke(Vu32.w,Vv32.w)`
///
/// Per the HVX manual, `vpacke` packs the even (low) halfwords of the word
/// lanes from both source vectors into one halfword-lane vector.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackeh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vpacke_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackeh(vu, vv)
}

/// `Vd32.b=vpack(Vu32.h,Vv32.h):sat`
///
/// Saturating pack: halfword lanes from both sources are narrowed to signed
/// bytes with saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackhb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackhb_sat(vu, vv)
}

/// `Vd32.ub=vpack(Vu32.h,Vv32.h):sat`
///
/// Saturating pack: halfword lanes from both sources are narrowed to
/// unsigned bytes with saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackhub_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackhub_sat(vu, vv)
}

/// `Vd32.b=vpacko(Vu32.h,Vv32.h)`
///
/// Per the HVX manual, `vpacko` packs the odd (high) bytes of the halfword
/// lanes from both source vectors into one byte-lane vector.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackob))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vpacko_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackob(vu, vv)
}

/// `Vd32.h=vpacko(Vu32.w,Vv32.w)`
///
/// Per the HVX manual, `vpacko` packs the odd (high) halfwords of the word
/// lanes from both source vectors into one halfword-lane vector.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vpacko_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackoh(vu, vv)
}

/// `Vd32.h=vpack(Vu32.w,Vv32.w):sat`
///
/// Saturating pack: word lanes from both sources are narrowed to signed
/// halfwords with saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackwh_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackwh_sat(vu, vv)
}

/// `Vd32.uh=vpack(Vu32.w,Vv32.w):sat`
///
/// Saturating pack: word lanes from both sources are narrowed to unsigned
/// halfwords with saturation.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpackwuh_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vpackwuh_sat(vu, vv)
}
3143
/// `Vd32.h=vpopcount(Vu32.h)`
///
/// Counts the set bits within each halfword lane.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vpopcounth))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vpopcount_Vh(vu: HvxVector) -> HvxVector {
    vpopcounth(vu)
}

/// `Vd32=vrdelta(Vu32,Vv32)`
///
/// Per the HVX manual, applies the reverse-delta permutation network to
/// `vu`, with per-element control values supplied by `vv`.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrdelta))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vrdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrdelta(vu, vv)
}
3167
/// `Vd32.w=vrmpy(Vu32.ub,Rt32.b)`
///
/// Per the HVX manual, `vrmpy` is a reducing multiply: within each word
/// lane, unsigned bytes of `vu` are multiplied by signed-byte coefficients
/// from scalar `rt` and summed into a signed word.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vrmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector {
    vrmpybus(vu, rt)
}

/// `Vx32.w+=vrmpy(Vu32.ub,Rt32.b)`
///
/// Accumulating form: the per-word reducing-multiply sums are added into
/// `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vrmpyacc_VwVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vrmpybus_acc(vx, vu, rt)
}

/// `Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)`
///
/// Vector-pair form of the unsigned-byte-by-signed-byte reducing multiply;
/// `iu1` is the 1-bit `#u1` immediate selecting the alignment variant (see
/// the HVX manual).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vrmpy_WubRbI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair {
    vrmpybusi(vuu, rt, iu1)
}

/// `Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)`
///
/// Accumulating vector-pair form: reducing-multiply sums are added into
/// `vxx`; `iu1` is the 1-bit `#u1` alignment immediate.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusi_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vrmpyacc_WwWubRbI(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
    iu1: i32,
) -> HvxVectorPair {
    vrmpybusi_acc(vxx, vuu, rt, iu1)
}

/// `Vd32.w=vrmpy(Vu32.ub,Vv32.b)`
///
/// Reducing multiply: within each word lane, unsigned bytes of `vu` are
/// multiplied by the corresponding signed bytes of `vv` and summed into a
/// signed word.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vrmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybusv(vu, vv)
}

/// `Vx32.w+=vrmpy(Vu32.ub,Vv32.b)`
///
/// Accumulating form: the per-word reducing-multiply sums are added into
/// `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybusv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vrmpyacc_VwVubVb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybusv_acc(vx, vu, vv)
}

/// `Vd32.w=vrmpy(Vu32.b,Vv32.b)`
///
/// Reducing multiply: within each word lane, signed bytes of `vu` are
/// multiplied by the corresponding signed bytes of `vv` and summed into a
/// signed word.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vrmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybv(vu, vv)
}

/// `Vx32.w+=vrmpy(Vu32.b,Vv32.b)`
///
/// Accumulating form: the per-word reducing-multiply sums are added into
/// `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpybv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vrmpyacc_VwVbVb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpybv_acc(vx, vu, vv)
}

/// `Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)`
///
/// Reducing multiply: within each word lane, unsigned bytes of `vu` are
/// multiplied by unsigned-byte coefficients from scalar `rt` and summed into
/// an unsigned word.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vrmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVector {
    vrmpyub(vu, rt)
}

/// `Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)`
///
/// Accumulating form: the per-word reducing-multiply sums are added into
/// `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyub_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubRub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vrmpyub_acc(vx, vu, rt)
}

/// `Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)`
///
/// Vector-pair form of the unsigned-byte reducing multiply; `iu1` is the
/// 1-bit `#u1` immediate selecting the alignment variant (see the HVX
/// manual).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vrmpy_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair {
    vrmpyubi(vuu, rt, iu1)
}

/// `Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)`
///
/// Accumulating vector-pair form: reducing-multiply sums are added into
/// `vxx`; `iu1` is the 1-bit `#u1` alignment immediate.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubi_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vrmpyacc_WuwWubRubI(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
    iu1: i32,
) -> HvxVectorPair {
    vrmpyubi_acc(vxx, vuu, rt, iu1)
}

/// `Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)`
///
/// Reducing multiply: within each word lane, unsigned bytes of `vu` are
/// multiplied by the corresponding unsigned bytes of `vv` and summed into an
/// unsigned word.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vrmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpyubv(vu, vv)
}

/// `Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)`
///
/// Accumulating form: the per-word reducing-multiply sums are added into
/// `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrmpyubv_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubVub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrmpyubv_acc(vx, vu, vv)
}
3345
/// `Vd32=vror(Vu32,Rt32)`
///
/// Per the HVX manual, rotates the entire vector right by the byte amount
/// given in scalar `rt`.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vror))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vror_VR(vu: HvxVector, rt: i32) -> HvxVector {
    vror(vu, rt)
}

/// `Vd32.b=vround(Vu32.h,Vv32.h):sat`
///
/// Rounding, saturating narrow: halfword lanes from both sources are
/// rounded and saturated down to signed bytes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundhb(vu, vv)
}

/// `Vd32.ub=vround(Vu32.h,Vv32.h):sat`
///
/// Rounding, saturating narrow: halfword lanes from both sources are
/// rounded and saturated down to unsigned bytes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundhub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundhub(vu, vv)
}

/// `Vd32.h=vround(Vu32.w,Vv32.w):sat`
///
/// Rounding, saturating narrow: word lanes from both sources are rounded
/// and saturated down to signed halfwords.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundwh(vu, vv)
}

/// `Vd32.uh=vround(Vu32.w,Vv32.w):sat`
///
/// Rounding, saturating narrow: word lanes from both sources are rounded
/// and saturated down to unsigned halfwords.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vroundwuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vroundwuh(vu, vv)
}
3405
/// `Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)`
///
/// Per the HVX manual, `vrsad` is a reducing sum of absolute differences
/// over unsigned bytes of the vector pair against bytes from scalar `rt`;
/// `iu1` is the 1-bit `#u1` alignment immediate.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrsadubi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vrsad_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair {
    vrsadubi(vuu, rt, iu1)
}

/// `Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)`
///
/// Accumulating form: the per-word sum-of-absolute-differences results are
/// added into the vector pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vrsadubi_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vrsadacc_WuwWubRubI(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
    iu1: i32,
) -> HvxVectorPair {
    vrsadubi_acc(vxx, vuu, rt, iu1)
}
3434
/// `Vd32.ub=vsat(Vu32.h,Vv32.h)`
///
/// Saturating narrow: halfword lanes from both source vectors are saturated
/// down to unsigned bytes in a single vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsathub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vsat_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsathub(vu, vv)
}

/// `Vd32.h=vsat(Vu32.w,Vv32.w)`
///
/// Saturating narrow: word lanes from both source vectors are saturated
/// down to signed halfwords in a single vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsatwh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vsat_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsatwh(vu, vv)
}
3458
/// `Vdd32.h=vsxt(Vu32.b)`
///
/// Sign-extends each signed byte lane of `vu` to a halfword, producing a
/// vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vsxt_Vb(vu: HvxVector) -> HvxVectorPair {
    vsb(vu)
}

/// `Vdd32.w=vsxt(Vu32.h)`
///
/// Sign-extends each signed halfword lane of `vu` to a word, producing a
/// vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vsxt_Vh(vu: HvxVector) -> HvxVectorPair {
    vsh(vu)
}
3482
/// `Vd32.h=vshuffe(Vu32.h,Vv32.h)`
///
/// Per the HVX manual, `vshuffe` gathers the even halfword lanes of the two
/// source vectors into one vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufeh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vshuffe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshufeh(vu, vv)
}

/// `Vd32.b=vshuff(Vu32.b)`
///
/// Intra-vector byte shuffle of `vu`; the fixed interleave pattern is
/// defined in the HVX manual.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vshuff_Vb(vu: HvxVector) -> HvxVector {
    vshuffb(vu)
}

/// `Vd32.b=vshuffe(Vu32.b,Vv32.b)`
///
/// Per the HVX manual, `vshuffe` gathers the even byte lanes of the two
/// source vectors into one vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffeb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vshuffe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshuffeb(vu, vv)
}

/// `Vd32.h=vshuff(Vu32.h)`
///
/// Intra-vector halfword shuffle of `vu`; the fixed interleave pattern is
/// defined in the HVX manual.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vshuff_Vh(vu: HvxVector) -> HvxVector {
    vshuffh(vu)
}

/// `Vd32.b=vshuffo(Vu32.b,Vv32.b)`
///
/// Per the HVX manual, `vshuffo` gathers the odd byte lanes of the two
/// source vectors into one vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffob))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vshuffo_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshuffob(vu, vv)
}

/// `Vdd32=vshuff(Vu32,Vv32,Rt8)`
///
/// Two-vector shuffle producing a vector pair; the element size/pattern is
/// controlled by the low byte (`Rt8`) of scalar `rt` — see the HVX manual.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshuffvdd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_W_vshuff_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vshuffvdd(vu, vv, rt)
}

/// `Vdd32.b=vshuffoe(Vu32.b,Vv32.b)`
///
/// Per the HVX manual, `vshuffoe` produces both the odd- and even-byte
/// shuffles of the two sources as a vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufoeb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wb_vshuffoe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vshufoeb(vu, vv)
}

/// `Vdd32.h=vshuffoe(Vu32.h,Vv32.h)`
///
/// Per the HVX manual, `vshuffoe` produces both the odd- and even-halfword
/// shuffles of the two sources as a vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufoeh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vshuffoe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vshufoeh(vu, vv)
}

/// `Vd32.h=vshuffo(Vu32.h,Vv32.h)`
///
/// Per the HVX manual, `vshuffo` gathers the odd halfword lanes of the two
/// source vectors into one vector.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vshufoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vshuffo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vshufoh(vu, vv)
}
3590
/// `Vd32.b=vsub(Vu32.b,Vv32.b)`
///
/// Lane-wise subtraction of signed bytes: `vu - vv`, wrapping on overflow.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vsub_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubb(vu, vv)
}

/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b)`
///
/// Vector-pair form: lane-wise signed-byte subtraction `vuu - vvv` across
/// both vectors of the pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubb_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wb_vsub_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubb_dv(vuu, vvv)
}

/// `Vd32.h=vsub(Vu32.h,Vv32.h)`
///
/// Lane-wise subtraction of signed halfwords: `vu - vv`, wrapping on
/// overflow.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubh(vu, vv)
}

/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h)`
///
/// Vector-pair form: lane-wise signed-halfword subtraction `vuu - vvv`
/// across both vectors of the pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubh_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vsub_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubh_dv(vuu, vvv)
}

/// `Vd32.h=vsub(Vu32.h,Vv32.h):sat`
///
/// Lane-wise signed-halfword subtraction `vu - vv` with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vsub_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubhsat(vu, vv)
}

/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat`
///
/// Vector-pair form: saturating signed-halfword subtraction `vuu - vvv`.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vsub_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubhsat_dv(vuu, vvv)
}

/// `Vdd32.w=vsub(Vu32.h,Vv32.h)`
///
/// Widening subtraction: signed-halfword lanes `vu - vv` produce signed
/// word results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsubhw(vu, vv)
}

/// `Vdd32.h=vsub(Vu32.ub,Vv32.ub)`
///
/// Widening subtraction: unsigned-byte lanes `vu - vv` produce signed
/// halfword results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsububh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vsub_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsububh(vu, vv)
}

/// `Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat`
///
/// Lane-wise unsigned-byte subtraction `vu - vv` with saturation (results
/// clamp at zero).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsububsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vsub_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsububsat(vu, vv)
}

/// `Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat`
///
/// Vector-pair form: saturating unsigned-byte subtraction `vuu - vvv`.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsububsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wub_vsub_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsububsat_dv(vuu, vvv)
}

/// `Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat`
///
/// Lane-wise unsigned-halfword subtraction `vu - vv` with saturation
/// (results clamp at zero).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vsub_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubuhsat(vu, vv)
}

/// `Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat`
///
/// Vector-pair form: saturating unsigned-halfword subtraction `vuu - vvv`.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubuhsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vsub_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubuhsat_dv(vuu, vvv)
}

/// `Vdd32.w=vsub(Vu32.uh,Vv32.uh)`
///
/// Widening subtraction: unsigned-halfword lanes `vu - vv` produce signed
/// word results in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubuhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vsub_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsubuhw(vu, vv)
}

/// `Vd32.w=vsub(Vu32.w,Vv32.w)`
///
/// Lane-wise subtraction of signed words: `vu - vv`, wrapping on overflow.
/// Lowered through the generic `simd_sub` intrinsic; codegen still emits
/// `vsubw`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vsub_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_sub(vu, vv)
}

/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w)`
///
/// Vector-pair form: lane-wise signed-word subtraction `vuu - vvv` across
/// both vectors of the pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubw_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vsub_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubw_dv(vuu, vvv)
}

/// `Vd32.w=vsub(Vu32.w,Vv32.w):sat`
///
/// Lane-wise signed-word subtraction `vu - vv` with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vsub_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubwsat(vu, vv)
}

/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat`
///
/// Vector-pair form: saturating signed-word subtraction `vuu - vvv`.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vsubwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vsub_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubwsat_dv(vuu, vvv)
}
3794
/// `Vdd32.h=vtmpy(Vuu32.b,Rt32.b)`
///
/// `vtmpy` multiply of the signed-byte lanes of the vector pair `vuu` by
/// byte coefficients packed in the scalar `rt`, widening to halfword
/// results (see the HVX manual for the exact tap arrangement).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpyb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vtmpy_WbRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vtmpyb(vuu, rt)
}

/// `Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)`
///
/// Accumulating form of [`Q6_Wh_vtmpy_WbRb`]: the halfword products are
/// added into the accumulator pair `vxx` (`+=` in the assembly syntax).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpyb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vtmpyacc_WhWbRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vtmpyb_acc(vxx, vuu, rt)
}

/// `Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)`
///
/// `vtmpy` multiply with unsigned-byte vector lanes and signed-byte
/// scalar coefficients, widening to halfword results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpybus))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vtmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vtmpybus(vuu, rt)
}

/// `Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)`
///
/// Accumulating form of [`Q6_Wh_vtmpy_WubRb`]: products are added into
/// the halfword accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpybus_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vtmpyacc_WhWubRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vtmpybus_acc(vxx, vuu, rt)
}

/// `Vdd32.w=vtmpy(Vuu32.h,Rt32.b)`
///
/// `vtmpy` multiply of signed-halfword lanes by signed-byte scalar
/// coefficients, widening to word results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpyhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vtmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vtmpyhb(vuu, rt)
}

/// `Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)`
///
/// Accumulating form of [`Q6_Ww_vtmpy_WhRb`]: products are added into
/// the word accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vtmpyhb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vtmpyacc_WwWhRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vtmpyhb_acc(vxx, vuu, rt)
}
3878
/// `Vdd32.h=vunpack(Vu32.b)`
///
/// Widens each signed byte lane of `vu` to a halfword (sign extension,
/// per the `.b` source type), producing a full vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vunpack_Vb(vu: HvxVector) -> HvxVectorPair {
    vunpackb(vu)
}

/// `Vdd32.w=vunpack(Vu32.h)`
///
/// Widens each signed halfword lane of `vu` to a word (sign extension),
/// producing a full vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vunpack_Vh(vu: HvxVector) -> HvxVectorPair {
    vunpackh(vu)
}

/// `Vxx32.h|=vunpacko(Vu32.b)`
///
/// OR-accumulates the odd-lane byte unpack of `vu` into the halfword
/// accumulator pair `vxx` (`|=` form of `vunpacko`; see the manual for
/// lane placement).
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackob))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vunpackoor_WhVb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair {
    vunpackob(vxx, vu)
}

/// `Vxx32.w|=vunpacko(Vu32.h)`
///
/// OR-accumulates the odd-lane halfword unpack of `vu` into the word
/// accumulator pair `vxx` (`|=` form of `vunpacko`).
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackoh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vunpackoor_WwVh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair {
    vunpackoh(vxx, vu)
}

/// `Vdd32.uh=vunpack(Vu32.ub)`
///
/// Widens each unsigned byte lane of `vu` to an unsigned halfword
/// (zero extension, per the `.ub` source type), producing a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vunpack_Vub(vu: HvxVector) -> HvxVectorPair {
    vunpackub(vu)
}

/// `Vdd32.uw=vunpack(Vu32.uh)`
///
/// Widens each unsigned halfword lane of `vu` to an unsigned word
/// (zero extension), producing a vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vunpackuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vunpack_Vuh(vu: HvxVector) -> HvxVectorPair {
    vunpackuh(vu)
}
3950
/// `Vd32=vxor(Vu32,Vv32)`
///
/// Bitwise XOR of the two vectors; element type is irrelevant (untyped
/// `Vd32` destination). Lowered through the generic `simd_xor`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vxor))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vxor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector {
    simd_xor(vu, vv)
}

/// `Vdd32.uh=vzxt(Vu32.ub)`
///
/// Zero-extends (`vzxt`) each unsigned byte lane of `vu` to an unsigned
/// halfword, producing a full vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vzb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuh_vzxt_Vub(vu: HvxVector) -> HvxVectorPair {
    vzb(vu)
}

/// `Vdd32.uw=vzxt(Vu32.uh)`
///
/// Zero-extends (`vzxt`) each unsigned halfword lane of `vu` to an
/// unsigned word, producing a full vector pair.
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[cfg_attr(test, assert_instr(vzh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vzxt_Vuh(vu: HvxVector) -> HvxVectorPair {
    vzh(vu)
}
3986
/// `Vd32.b=vsplat(Rt32)`
///
/// Broadcasts a byte taken from the scalar `rt` into every byte lane of
/// the result. Requires HVX v62.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(lvsplatb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vsplat_R(rt: i32) -> HvxVector {
    lvsplatb(rt)
}

/// `Vd32.h=vsplat(Rt32)`
///
/// Broadcasts a halfword taken from the scalar `rt` into every halfword
/// lane of the result. Requires HVX v62.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(lvsplath))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vsplat_R(rt: i32) -> HvxVector {
    lvsplath(rt)
}
4010
/// `Vd32.b=vadd(Vu32.b,Vv32.b):sat`
///
/// Lane-wise signed byte addition with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddbsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vadd_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddbsat(vu, vv)
}

/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat`
///
/// Saturating signed byte addition over full vector pairs
/// (double-vector form of `vaddbsat`).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddbsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wb_vadd_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vaddbsat_dv(vuu, vvv)
}

/// `Vd32.h=vadd(vclb(Vu32.h),Vv32.h)`
///
/// Adds the count-of-leading-bits (`vclb`) of each halfword lane of `vu`
/// to the corresponding halfword lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddclbh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vadd_vclb_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddclbh(vu, vv)
}

/// `Vd32.w=vadd(vclb(Vu32.w),Vv32.w)`
///
/// Adds the count-of-leading-bits (`vclb`) of each word lane of `vu`
/// to the corresponding word lane of `vv`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddclbw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vadd_vclb_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddclbw(vu, vv)
}

/// `Vxx32.w+=vadd(Vu32.h,Vv32.h)`
///
/// Widening add with accumulation: signed halfword lanes of `vu` and
/// `vv` are added and the word results accumulated into the pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddhw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vaddacc_WwVhVh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vaddhw_acc(vxx, vu, vv)
}

/// `Vxx32.h+=vadd(Vu32.ub,Vv32.ub)`
///
/// Widening add with accumulation: unsigned byte lanes of `vu` and `vv`
/// are added and the halfword results accumulated into the pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddubh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vaddacc_WhVubVub(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vaddubh_acc(vxx, vu, vv)
}

/// `Vd32.ub=vadd(Vu32.ub,Vv32.b):sat`
///
/// Mixed-signedness add: unsigned byte lanes of `vu` plus signed byte
/// lanes of `vv`, saturated to the unsigned byte range.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vaddububb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vadd_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vaddububb_sat(vu, vv)
}

/// `Vxx32.w+=vadd(Vu32.uh,Vv32.uh)`
///
/// Widening add with accumulation: unsigned halfword lanes of `vu` and
/// `vv` are added and the word results accumulated into the pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vadduhw_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vaddacc_WwVuhVuh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vadduhw_acc(vxx, vu, vv)
}

/// `Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat`
///
/// Lane-wise unsigned word addition with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vadduwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vadd_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadduwsat(vu, vv)
}

/// `Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat`
///
/// Saturating unsigned word addition over full vector pairs
/// (double-vector form of `vadduwsat`).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vadduwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vadd_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vadduwsat_dv(vuu, vvv)
}
4142
/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat`
///
/// Narrowing arithmetic shift right: halfword lanes from the `(vu, vv)`
/// pair are shifted by the amount in the low bits of `rt` (Rt8) and
/// packed into saturated signed bytes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vasrhbsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrhbsat(vu, vv, rt)
}

/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat`
///
/// Narrowing shift right of unsigned word lanes from `(vu, vv)` into
/// unsigned halfwords, with round-to-nearest (`:rnd`) and saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vasruwuhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vasr_VuwVuwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruwuhrndsat(vu, vv, rt)
}

/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat`
///
/// Narrowing arithmetic shift right of signed word lanes from
/// `(vu, vv)` into unsigned halfwords, with rounding and saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vasrwuhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasrwuhrndsat(vu, vv, rt)
}

/// `Vd32.ub=vlsr(Vu32.ub,Rt32)`
///
/// Logical shift right of each unsigned byte lane of `vu` by the scalar
/// shift amount `rt`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlsrb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vlsr_VubR(vu: HvxVector, rt: i32) -> HvxVector {
    vlsrb(vu, rt)
}
4190
/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch`
///
/// Byte `vlut32` table lookup, `:nomatch` form, with the table bank
/// selected by `rt` (Rt8). See the HVX manual for operand roles and
/// `:nomatch` semantics.
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvvb_nm))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vlut32_VbVbR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vlutvvb_nm(vu, vv, rt)
}

/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)`
///
/// OR-accumulating byte `vlut32` lookup: the result is OR-ed into `vx`.
/// `iu3` is the `#u3` immediate bank select (expected range 0..=7).
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvvb_oracci))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vlut32or_VbVbVbI(
    vx: HvxVector,
    vu: HvxVector,
    vv: HvxVector,
    iu3: i32,
) -> HvxVector {
    vlutvvb_oracci(vx, vu, vv, iu3)
}

/// `Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)`
///
/// Byte `vlut32` lookup with the bank selected by the `#u3` immediate
/// `iu3` (expected range 0..=7).
///
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvvbi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vlut32_VbVbI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector {
    vlutvvbi(vu, vv, iu3)
}

/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch`
///
/// Halfword `vlut16` lookup (byte indices, halfword table entries),
/// `:nomatch` form; produces a full vector pair. Bank select in `rt`.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvwh_nm))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vlut16_VbVhR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair {
    vlutvwh_nm(vu, vv, rt)
}

/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)`
///
/// OR-accumulating halfword `vlut16` lookup: the result pair is OR-ed
/// into `vxx`. `iu3` is the `#u3` immediate bank select.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvwh_oracci))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vlut16or_WhVbVhI(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
    iu3: i32,
) -> HvxVectorPair {
    vlutvwh_oracci(vxx, vu, vv, iu3)
}

/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)`
///
/// Halfword `vlut16` lookup with the bank selected by the `#u3`
/// immediate `iu3`; produces a full vector pair.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vlutvwhi))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wh_vlut16_VbVhI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair {
    vlutvwhi(vu, vv, iu3)
}
4272
/// `Vd32.b=vmax(Vu32.b,Vv32.b)`
///
/// Lane-wise maximum of signed byte lanes. Requires HVX v62.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmaxb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vmax_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmaxb(vu, vv)
}

/// `Vd32.b=vmin(Vu32.b,Vv32.b)`
///
/// Lane-wise minimum of signed byte lanes. Requires HVX v62.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vminb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vmin_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vminb(vu, vv)
}
4296
/// `Vdd32.w=vmpa(Vuu32.uh,Rt32.b)`
///
/// `vmpa` multiply-accumulate: unsigned halfword lanes of the pair
/// `vuu` times signed byte coefficients packed in `rt`, producing word
/// results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpauhb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpa_WuhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
    vmpauhb(vuu, rt)
}

/// `Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)`
///
/// Accumulating form of [`Q6_Ww_vmpa_WuhRb`]: products are added into
/// the word accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpauhb_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpaacc_WwWuhRb(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    rt: i32,
) -> HvxVectorPair {
    vmpauhb_acc(vxx, vuu, rt)
}

/// `Vdd32=vmpye(Vu32.w,Vv32.uh)`
///
/// Even-lane multiply (`vmpye`) of signed word lanes of `vu` by
/// unsigned halfword lanes of `vv`, producing a full vector pair
/// (the `_64` wide-result form).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyewuh_64))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_W_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpyewuh_64(vu, vv)
}

/// `Vd32.w=vmpyi(Vu32.w,Rt32.ub)`
///
/// Integer multiply (`vmpyi`) of each word lane of `vu` by an unsigned
/// byte taken from the scalar `rt`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyiwub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyi_VwRub(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwub(vu, rt)
}

/// `Vx32.w+=vmpyi(Vu32.w,Rt32.ub)`
///
/// Accumulating form of [`Q6_Vw_vmpyi_VwRub`]: products are added into
/// the word accumulator `vx`.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyiwub_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vmpyiacc_VwVwRub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyiwub_acc(vx, vu, rt)
}

/// `Vxx32+=vmpyo(Vu32.w,Vv32.h)`
///
/// Odd-lane multiply (`vmpyo`) of word lanes of `vu` by halfword lanes
/// of `vv`, accumulated into the pair `vxx` (the `_64` wide-result
/// form).
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vmpyowh_64_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_W_vmpyoacc_WVwVh(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpyowh_64_acc(vxx, vu, vv)
}
4376
/// `Vd32.ub=vround(Vu32.uh,Vv32.uh):sat`
///
/// Rounds and narrows unsigned halfword lanes from the `(vu, vv)` pair
/// into saturated unsigned bytes.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vrounduhub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vround_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrounduhub(vu, vv)
}

/// `Vd32.uh=vround(Vu32.uw,Vv32.uw):sat`
///
/// Rounds and narrows unsigned word lanes from the `(vu, vv)` pair into
/// saturated unsigned halfwords.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vrounduwuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vround_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrounduwuh(vu, vv)
}

/// `Vd32.uh=vsat(Vu32.uw,Vv32.uw)`
///
/// Packs unsigned word lanes from the `(vu, vv)` pair into unsigned
/// halfwords with saturation (no rounding).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsatuwuh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vsat_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsatuwuh(vu, vv)
}
4412
/// `Vd32.b=vsub(Vu32.b,Vv32.b):sat`
///
/// Lane-wise signed byte subtraction with saturation.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubbsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vsub_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubbsat(vu, vv)
}

/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat`
///
/// Saturating signed byte subtraction over full vector pairs
/// (double-vector form of `vsubbsat`).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubbsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wb_vsub_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubbsat_dv(vuu, vvv)
}

/// `Vd32.ub=vsub(Vu32.ub,Vv32.b):sat`
///
/// Mixed-signedness subtraction: unsigned byte lanes of `vu` minus
/// signed byte lanes of `vv`, saturated to the unsigned byte range.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubububb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vsub_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubububb_sat(vu, vv)
}

/// `Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat`
///
/// Lane-wise unsigned word subtraction with saturation (clamps at zero
/// rather than wrapping).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubuwsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vsub_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsubuwsat(vu, vv)
}

/// `Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat`
///
/// Saturating unsigned word subtraction over full vector pairs
/// (double-vector form of `vsubuwsat`).
///
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
#[cfg_attr(test, assert_instr(vsubuwsat_dv))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wuw_vsub_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair {
    vsubuwsat_dv(vuu, vvv)
}
4472
/// `Vd32.b=vabs(Vu32.b)`
///
/// Lane-wise absolute value of signed byte lanes. Requires HVX v65.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vabsb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vabs_Vb(vu: HvxVector) -> HvxVector {
    vabsb(vu)
}

/// `Vd32.b=vabs(Vu32.b):sat`
///
/// Lane-wise absolute value of signed byte lanes, saturating form
/// (`:sat`) — handles the most-negative-byte case without wrapping.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vabsb_sat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vabs_Vb_sat(vu: HvxVector) -> HvxVector {
    vabsb_sat(vu)
}
4496
/// `Vx32.h+=vasl(Vu32.h,Rt32)`
///
/// Accumulating arithmetic shift left: each halfword lane of `vu` is
/// shifted left by `rt` and added into the accumulator `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vaslh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vaslacc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vaslh_acc(vx, vu, rt)
}

/// `Vx32.h+=vasr(Vu32.h,Rt32)`
///
/// Accumulating arithmetic shift right: each halfword lane of `vu` is
/// shifted right by `rt` and added into the accumulator `vx`.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasrh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vasracc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vasrh_acc(vx, vu, rt)
}

/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat`
///
/// Narrowing shift right of unsigned halfword lanes from `(vu, vv)`
/// into unsigned bytes, with round-to-nearest and saturation.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasruhubrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vasr_VuhVuhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruhubrndsat(vu, vv, rt)
}

/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat`
///
/// Narrowing shift right of unsigned halfword lanes from `(vu, vv)`
/// into saturated unsigned bytes (no rounding).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasruhubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vasr_VuhVuhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruhubsat(vu, vv, rt)
}

/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat`
///
/// Narrowing shift right of unsigned word lanes from `(vu, vv)` into
/// saturated unsigned halfwords (no rounding).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vasruwuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vasr_VuwVuwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector {
    vasruwuhsat(vu, vv, rt)
}
4556
/// `Vd32.b=vavg(Vu32.b,Vv32.b)`
///
/// Lane-wise average of signed byte lanes (truncating form; see the
/// `:rnd` variant for round-to-nearest).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavgb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgb(vu, vv)
}

/// `Vd32.b=vavg(Vu32.b,Vv32.b):rnd`
///
/// Lane-wise average of signed byte lanes with round-to-nearest
/// (`:rnd`).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavgbrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vavg_VbVb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavgbrnd(vu, vv)
}

/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw)`
///
/// Lane-wise average of unsigned word lanes (truncating form).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavguw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vavg_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguw(vu, vv)
}

/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd`
///
/// Lane-wise average of unsigned word lanes with round-to-nearest
/// (`:rnd`).
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vavguwrnd))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vavg_VuwVuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vavguwrnd(vu, vv)
}
4604
/// `Vdd32=#0`
///
/// Returns an all-zero vector pair (assembler mapping, not a distinct
/// execution-unit instruction).
///
/// Instruction Type: MAPPING
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vdd0))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_W_vzero() -> HvxVectorPair {
    vdd0()
}
4616
/// `vtmp.h=vgather(Rt32,Mu2,Vv32.h).h`
///
/// Gathers halfwords from memory (base address `rt`, modifier register
/// `mu`, per-element offsets in `vv`); the gathered vector is written
/// through `rs` (the architectural `vtmp` destination).
/// NOTE(review): `rs` must point to valid, writable, HVX-aligned
/// memory — confirm alignment requirements against the manual.
///
/// Instruction Type: CVI_GATHER
/// Execution Slots: SLOT01
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vgathermh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vgather_ARMVh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) {
    vgathermh(rs, rt, mu, vv)
}

/// `vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h`
///
/// Gathers halfwords using word-sized offsets held in the vector pair
/// `vvv` (double-vector index form); result written through `rs`.
///
/// Instruction Type: CVI_GATHER_DV
/// Execution Slots: SLOT01
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vgathermhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vgather_ARMWw(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) {
    vgathermhw(rs, rt, mu, vvv)
}

/// `vtmp.w=vgather(Rt32,Mu2,Vv32.w).w`
///
/// Gathers words from memory (base `rt`, modifier `mu`, per-element
/// offsets in `vv`); result written through `rs`.
///
/// Instruction Type: CVI_GATHER
/// Execution Slots: SLOT01
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vgathermw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vgather_ARMVw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) {
    vgathermw(rs, rt, mu, vv)
}
4652
4653/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)`
4654///
4655/// Instruction Type: CVI_VX_DV
4656/// Execution Slots: SLOT23
4657#[inline(always)]
4658#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
4659#[cfg_attr(test, assert_instr(vmpabuu))]
4660#[unstable(feature = "stdarch_hexagon", issue = "151523")]
4661pub unsafe fn Q6_Wh_vmpa_WubRub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair {
4662    vmpabuu(vuu, rt)
4663}
4664
4665/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)`
4666///
4667/// Instruction Type: CVI_VX_DV
4668/// Execution Slots: SLOT23
4669#[inline(always)]
4670#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
4671#[cfg_attr(test, assert_instr(vmpabuu_acc))]
4672#[unstable(feature = "stdarch_hexagon", issue = "151523")]
4673pub unsafe fn Q6_Wh_vmpaacc_WhWubRub(
4674    vxx: HvxVectorPair,
4675    vuu: HvxVectorPair,
4676    rt: i32,
4677) -> HvxVectorPair {
4678    vmpabuu_acc(vxx, vuu, rt)
4679}
4680
/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h)`
///
/// Multiplies halfwords of `vu` by the halfword scalar in `rt` and
/// accumulates the word products into the pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpyh_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vmpyacc_WwVhRh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair {
    vmpyh_acc(vxx, vu, rt)
}
4692
/// `Vd32.uw=vmpye(Vu32.uh,Rt32.uh)`
///
/// `vmpye`: multiplies (even) unsigned halfwords of `vu` by the unsigned
/// halfword scalar in `rt`, producing unsigned-word results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpyuhe))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vmpye_VuhRuh(vu: HvxVector, rt: i32) -> HvxVector {
    vmpyuhe(vu, rt)
}
4704
/// `Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)`
///
/// Accumulating form of [`Q6_Vuw_vmpye_VuhRuh`]: the products are added into
/// the accumulator `vx`, which is returned updated.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vmpyuhe_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vmpyeacc_VuwVuhRuh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector {
    vmpyuhe_acc(vx, vu, rt)
}
4716
/// `Vd32.b=vnavg(Vu32.b,Vv32.b)`
///
/// Byte-wise `vnavg` (negative average) of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vnavgb))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vnavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vnavgb(vu, vv)
}
4728
/// `vscatter(Rt32,Mu2,Vv32.h).h=Vw32`
///
/// Scatter: stores halfword elements of `vw` into the memory region based at
/// `rt` (with modifier `mu`), at the halfword offsets given in `vv`.
/// NOTE(review): writes to arbitrary offsets derived from `vv`; caller must
/// guarantee the whole target region is valid and writable.
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vscatter_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermh(rt, mu, vv, vw)
}
4740
/// `vscatter(Rt32,Mu2,Vv32.h).h+=Vw32`
///
/// Accumulating scatter (`+=` form of [`Q6_vscatter_RMVhV`]): each halfword
/// of `vw` is added into the addressed memory location.
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermh_add))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vscatteracc_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermh_add(rt, mu, vv, vw)
}
4752
/// `vscatter(Rt32,Mu2,Vvv32.w).h=Vw32`
///
/// Scatter of halfword elements from `vw` using the word offsets in the pair
/// `vvv`, relative to the region based at `rt` (with modifier `mu`).
/// NOTE(review): caller must guarantee the addressed region is writable.
///
/// Instruction Type: CVI_SCATTER_DV
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermhw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vscatter_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) {
    vscattermhw(rt, mu, vvv, vw)
}
4764
/// `vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32`
///
/// Accumulating form of [`Q6_vscatter_RMWwV`]: each halfword of `vw` is
/// added into the addressed memory location.
///
/// Instruction Type: CVI_SCATTER_DV
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermhw_add))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vscatteracc_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) {
    vscattermhw_add(rt, mu, vvv, vw)
}
4776
/// `vscatter(Rt32,Mu2,Vv32.w).w=Vw32`
///
/// Scatter: stores word elements of `vw` into the region based at `rt`
/// (with modifier `mu`), at the word offsets given in `vv`.
/// NOTE(review): caller must guarantee the addressed region is writable.
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vscatter_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermw(rt, mu, vv, vw)
}
4788
/// `vscatter(Rt32,Mu2,Vv32.w).w+=Vw32`
///
/// Accumulating form of [`Q6_vscatter_RMVwV`]: each word of `vw` is added
/// into the addressed memory location.
///
/// Instruction Type: CVI_SCATTER
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
#[cfg_attr(test, assert_instr(vscattermw_add))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vscatteracc_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) {
    vscattermw_add(rt, mu, vv, vw)
}
4800
/// `Vxx32.w=vasrinto(Vu32.w,Vv32.w)`
///
/// `vasrinto` (HVX v66): combines word elements of `vu` and `vv` into the
/// accumulator pair `vxx` via the arithmetic-shift-right-into operation.
///
/// Instruction Type: CVI_VP_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
#[cfg_attr(test, assert_instr(vasr_into))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_vasrinto_WwVwVw(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vasr_into(vxx, vu, vv)
}
4816
/// `Vd32.uw=vrotr(Vu32.uw,Vv32.uw)`
///
/// Word-wise rotate-right of the unsigned words in `vu` by the per-element
/// counts in `vv` (HVX v66).
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
#[cfg_attr(test, assert_instr(vrotr))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuw_vrotr_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vrotr(vu, vv)
}
4828
/// `Vd32.w=vsatdw(Vu32.w,Vv32.w)`
///
/// `vsatdw` (HVX v66): word-wise doubleword-saturate operation on the
/// corresponding elements of `vu` and `vv`.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
#[cfg_attr(test, assert_instr(vsatdw))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vsatdw_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsatdw(vu, vv)
}
4840
/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h`
///
/// `v6mpy` horizontal form (HVX v68): multiplies unsigned bytes of `vuu` by
/// signed coefficients in `vvv`, selected by the 2-bit immediate `iu2`.
/// NOTE(review): `iu2` maps to a `#u2` immediate — presumably only 0..=3 are
/// valid; confirm against the HVX manual.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyhubs10))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_v6mpy_WubWbI_h(
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyhubs10(vuu, vvv, iu2)
}
4856
/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h`
///
/// Accumulating form of [`Q6_Ww_v6mpy_WubWbI_h`]: the horizontal `v6mpy`
/// result is added into the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_h(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyhubs10_vxx(vxx, vuu, vvv, iu2)
}
4873
/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v`
///
/// `v6mpy` vertical form (HVX v68): multiplies unsigned bytes of `vuu` by
/// signed coefficients in `vvv`, selected by the 2-bit immediate `iu2`.
/// NOTE(review): `iu2` maps to a `#u2` immediate — presumably only 0..=3 are
/// valid; confirm against the HVX manual.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyvubs10))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_v6mpy_WubWbI_v(
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyvubs10(vuu, vvv, iu2)
}
4889
/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v`
///
/// Accumulating form of [`Q6_Ww_v6mpy_WubWbI_v`]: the vertical `v6mpy`
/// result is added into the accumulator pair `vxx`.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_v(
    vxx: HvxVectorPair,
    vuu: HvxVectorPair,
    vvv: HvxVectorPair,
    iu2: i32,
) -> HvxVectorPair {
    v6mpyvubs10_vxx(vxx, vuu, vvv, iu2)
}
4906
/// `Vd32.hf=vabs(Vu32.hf)`
///
/// Element-wise absolute value of half-float (`hf`) elements.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vabs_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vabs_Vhf(vu: HvxVector) -> HvxVector {
    vabs_hf(vu)
}
4918
/// `Vd32.sf=vabs(Vu32.sf)`
///
/// Element-wise absolute value of single-float (`sf`) elements.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vabs_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vabs_Vsf(vu: HvxVector) -> HvxVector {
    vabs_sf(vu)
}
4930
/// `Vd32.qf16=vadd(Vu32.hf,Vv32.hf)`
///
/// Element-wise add of two half-float vectors, producing results in the
/// `qf16` (Qualcomm 16-bit float) format.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_hf(vu, vv)
}
4942
/// `Vd32.hf=vadd(Vu32.hf,Vv32.hf)`
///
/// Element-wise add of two half-float vectors, producing half-float results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_hf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_hf_hf(vu, vv)
}
4954
/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)`
///
/// Element-wise add of two `qf16`-format vectors, producing `qf16` results.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vadd_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf16(vu, vv)
}
4966
/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)`
///
/// Mixed-format add: `qf16` elements of `vu` plus half-float elements of
/// `vv`, producing `qf16` results.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf16_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vadd_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf16_mix(vu, vv)
}
4978
/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)`
///
/// Element-wise add of two `qf32`-format vectors, producing `qf32` results.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vadd_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf32(vu, vv)
}
4990
/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)`
///
/// Mixed-format add: `qf32` elements of `vu` plus single-float elements of
/// `vv`, producing `qf32` results.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_qf32_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vadd_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_qf32_mix(vu, vv)
}
5002
/// `Vd32.qf32=vadd(Vu32.sf,Vv32.sf)`
///
/// Element-wise add of two single-float vectors, producing results in the
/// `qf32` (Qualcomm 32-bit float) format.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_sf(vu, vv)
}
5014
/// `Vdd32.sf=vadd(Vu32.hf,Vv32.hf)`
///
/// Widening add: two half-float vectors are added, with single-float results
/// delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wsf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vadd_sf_hf(vu, vv)
}
5026
/// `Vd32.sf=vadd(Vu32.sf,Vv32.sf)`
///
/// Element-wise add of two single-float vectors, producing single-float
/// results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vadd_sf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vadd_sf_sf(vu, vv)
}
5038
/// `Vd32.w=vfmv(Vu32.w)`
///
/// `vfmv`: copies `vu` through the floating-point assignment path
/// (lowered to `vassign_fp`).
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vassign_fp))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_vfmv_Vw(vu: HvxVector) -> HvxVector {
    vassign_fp(vu)
}
5050
/// `Vd32.hf=Vu32.qf16`
///
/// Converts `qf16`-format elements to IEEE half-float.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vconv_hf_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_equals_Vqf16(vu: HvxVector) -> HvxVector {
    vconv_hf_qf16(vu)
}
5062
/// `Vd32.hf=Vuu32.qf32`
///
/// Narrowing conversion: a pair of `qf32`-format vectors is converted into a
/// single half-float vector.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vconv_hf_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_equals_Wqf32(vuu: HvxVectorPair) -> HvxVector {
    vconv_hf_qf32(vuu)
}
5074
/// `Vd32.sf=Vu32.qf32`
///
/// Converts `qf32`-format elements to IEEE single-float.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vconv_sf_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_equals_Vqf32(vu: HvxVector) -> HvxVector {
    vconv_sf_qf32(vu)
}
5086
/// `Vd32.b=vcvt(Vu32.hf,Vv32.hf)`
///
/// Narrowing conversion: two half-float vectors are converted and packed
/// into a single vector of signed bytes.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_b_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vcvt_b_hf(vu, vv)
}
5098
/// `Vd32.h=vcvt(Vu32.hf)`
///
/// Converts half-float elements to signed halfwords.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_h_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_vcvt_Vhf(vu: HvxVector) -> HvxVector {
    vcvt_h_hf(vu)
}
5110
/// `Vdd32.hf=vcvt(Vu32.b)`
///
/// Widening conversion: signed bytes of `vu` become half-float elements,
/// delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_b))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Whf_vcvt_Vb(vu: HvxVector) -> HvxVectorPair {
    vcvt_hf_b(vu)
}
5122
/// `Vd32.hf=vcvt(Vu32.h)`
///
/// Converts signed halfwords to half-float elements.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_h))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vcvt_Vh(vu: HvxVector) -> HvxVector {
    vcvt_hf_h(vu)
}
5134
/// `Vd32.hf=vcvt(Vu32.sf,Vv32.sf)`
///
/// Narrowing conversion: two single-float vectors are converted and packed
/// into a single half-float vector.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vcvt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vcvt_hf_sf(vu, vv)
}
5146
/// `Vdd32.hf=vcvt(Vu32.ub)`
///
/// Widening conversion: unsigned bytes of `vu` become half-float elements,
/// delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_ub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Whf_vcvt_Vub(vu: HvxVector) -> HvxVectorPair {
    vcvt_hf_ub(vu)
}
5158
/// `Vd32.hf=vcvt(Vu32.uh)`
///
/// Converts unsigned halfwords to half-float elements.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_hf_uh))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vcvt_Vuh(vu: HvxVector) -> HvxVector {
    vcvt_hf_uh(vu)
}
5170
/// `Vdd32.sf=vcvt(Vu32.hf)`
///
/// Widening conversion: half-float elements of `vu` become single-float
/// elements, delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wsf_vcvt_Vhf(vu: HvxVector) -> HvxVectorPair {
    vcvt_sf_hf(vu)
}
5182
/// `Vd32.ub=vcvt(Vu32.hf,Vv32.hf)`
///
/// Narrowing conversion: two half-float vectors are converted and packed
/// into a single vector of unsigned bytes.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_ub_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vcvt_ub_hf(vu, vv)
}
5194
/// `Vd32.uh=vcvt(Vu32.hf)`
///
/// Converts half-float elements to unsigned halfwords.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vcvt_uh_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vcvt_Vhf(vu: HvxVector) -> HvxVector {
    vcvt_uh_hf(vu)
}
5206
/// `Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)`
///
/// `vdmpy` dual multiply of half-float elements of `vu` and `vv`, producing
/// single-float results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vdmpy_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vdmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdmpy_sf_hf(vu, vv)
}
5218
/// `Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)`
///
/// Accumulating form of [`Q6_Vsf_vdmpy_VhfVhf`]: the single-float products
/// are added into the accumulator `vx`, which is returned updated.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vdmpyacc_VsfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vdmpy_sf_hf_acc(vx, vu, vv)
}
5230
/// `Vd32.hf=vfmax(Vu32.hf,Vv32.hf)`
///
/// Element-wise `vfmax` (floating-point maximum) of two half-float vectors.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmax_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vfmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmax_hf(vu, vv)
}
5242
/// `Vd32.sf=vfmax(Vu32.sf,Vv32.sf)`
///
/// Element-wise `vfmax` (floating-point maximum) of two single-float
/// vectors.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmax_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vfmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmax_sf(vu, vv)
}
5254
/// `Vd32.hf=vfmin(Vu32.hf,Vv32.hf)`
///
/// Element-wise `vfmin` (floating-point minimum) of two half-float vectors.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmin_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vfmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmin_hf(vu, vv)
}
5266
/// `Vd32.sf=vfmin(Vu32.sf,Vv32.sf)`
///
/// Element-wise `vfmin` (floating-point minimum) of two single-float
/// vectors.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfmin_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vfmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmin_sf(vu, vv)
}
5278
/// `Vd32.hf=vfneg(Vu32.hf)`
///
/// Element-wise floating-point negation of half-float elements.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfneg_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vfneg_Vhf(vu: HvxVector) -> HvxVector {
    vfneg_hf(vu)
}
5290
/// `Vd32.sf=vfneg(Vu32.sf)`
///
/// Element-wise floating-point negation of single-float elements.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vfneg_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vfneg_Vsf(vu: HvxVector) -> HvxVector {
    vfneg_sf(vu)
}
5302
/// `Vd32.hf=vmax(Vu32.hf,Vv32.hf)`
///
/// Element-wise `vmax` (maximum) of two half-float vectors.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmax_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmax_hf(vu, vv)
}
5314
/// `Vd32.sf=vmax(Vu32.sf,Vv32.sf)`
///
/// Element-wise `vmax` (maximum) of two single-float vectors.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmax_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmax_sf(vu, vv)
}
5326
/// `Vd32.hf=vmin(Vu32.hf,Vv32.hf)`
///
/// Element-wise `vmin` (minimum) of two half-float vectors.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmin_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmin_hf(vu, vv)
}
5338
/// `Vd32.sf=vmin(Vu32.sf,Vv32.sf)`
///
/// Element-wise `vmin` (minimum) of two single-float vectors.
///
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmin_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmin_sf(vu, vv)
}
5350
/// `Vd32.hf=vmpy(Vu32.hf,Vv32.hf)`
///
/// Element-wise multiply of two half-float vectors, producing half-float
/// results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_hf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_hf_hf(vu, vv)
}
5362
/// `Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)`
///
/// Accumulating form of [`Q6_Vhf_vmpy_VhfVhf`]: the half-float products are
/// added into the accumulator `vx`, which is returned updated.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vmpyacc_VhfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_hf_hf_acc(vx, vu, vv)
}
5374
/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)`
///
/// Element-wise multiply of two `qf16`-format vectors, producing `qf16`
/// results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf16(vu, vv)
}
5386
/// `Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)`
///
/// Element-wise multiply of two half-float vectors, producing results in the
/// `qf16` format.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf16_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf16_hf(vu, vv)
}
5398
/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)`
///
/// Mixed-format multiply: `qf16` elements of `vu` times half-float elements
/// of `vv`, producing `qf16` results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf16_mix_hf(vu, vv)
}
5410
/// `Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)`
///
/// Element-wise multiply of two `qf32`-format vectors, producing `qf32`
/// results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vmpy_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf32(vu, vv)
}
5422
/// `Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)`
///
/// Widening multiply: two half-float vectors are multiplied, with `qf32`
/// results delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wqf32_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_qf32_hf(vu, vv)
}
5434
/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)`
///
/// Mixed-format widening multiply: `qf16` elements of `vu` times half-float
/// elements of `vv`, with `qf32` results delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_qf32_mix_hf(vu, vv)
}
5446
/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)`
///
/// Widening multiply: two `qf16`-format vectors are multiplied, with `qf32`
/// results delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_qf32_qf16(vu, vv)
}
5458
/// `Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)`
///
/// Element-wise multiply of two single-float vectors, producing results in
/// the `qf32` format.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_qf32_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_qf32_sf(vu, vv)
}
5470
/// `Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)`
///
/// Widening multiply: two half-float vectors are multiplied, with
/// single-float results delivered in a vector pair.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wsf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vmpy_sf_hf(vu, vv)
}
5482
/// `Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)`
///
/// Accumulating form of [`Q6_Wsf_vmpy_VhfVhf`]: the single-float products
/// are added into the accumulator pair `vxx`, which is returned updated.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wsf_vmpyacc_WsfVhfVhf(
    vxx: HvxVectorPair,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPair {
    vmpy_sf_hf_acc(vxx, vu, vv)
}
5498
/// `Vd32.sf=vmpy(Vu32.sf,Vv32.sf)`
///
/// Element-wise multiply of two single-float vectors, producing single-float
/// results.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vmpy_sf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpy_sf_sf(vu, vv)
}
5510
/// `Vd32.qf16=vsub(Vu32.hf,Vv32.hf)`
///
/// Element-wise subtract of two half-float vectors, producing results in the
/// `qf16` format.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_hf(vu, vv)
}
5522
/// `Vd32.hf=vsub(Vu32.hf,Vv32.hf)`
///
/// Element-wise subtract of two half-float vectors, producing half-float
/// results.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_hf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_hf_hf(vu, vv)
}
5534
/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)`
///
/// Element-wise subtract of two `qf16`-format vectors, producing `qf16`
/// results.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf16))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vsub_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf16(vu, vv)
}
5546
/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)`
///
/// Mixed-format subtract: `qf16` elements of `vu` minus half-float elements
/// of `vv`, producing `qf16` results.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf16_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf16_vsub_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf16_mix(vu, vv)
}
5558
/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)`
///
/// Element-wise subtract of two `qf32`-format vectors, producing `qf32`
/// results.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf32))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vsub_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf32(vu, vv)
}
5570
/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)`
///
/// Mixed-format subtraction: a qf32 vector minus a single-float vector,
/// producing a qf32 result. Requires HVX v68.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_qf32_mix))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vsub_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_qf32_mix(vu, vv)
}
5582
/// `Vd32.qf32=vsub(Vu32.sf,Vv32.sf)`
///
/// Element-wise subtraction of single-float vectors; the result is in the
/// qf32 (Qualcomm float) intermediate format. Requires HVX v68.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vqf32_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_sf(vu, vv)
}
5594
/// `Vdd32.sf=vsub(Vu32.hf,Vv32.hf)`
///
/// Widening subtraction: half-float inputs, single-float results delivered
/// in a vector pair (`W` prefix — see the module naming convention).
/// Requires HVX v68.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_sf_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Wsf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
    vsub_sf_hf(vu, vv)
}
5606
/// `Vd32.sf=vsub(Vu32.sf,Vv32.sf)`
///
/// Element-wise subtraction of single-float vectors with a single-float
/// result (no qf intermediate format). Requires HVX v68.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
#[cfg_attr(test, assert_instr(vsub_sf_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vsub_sf_sf(vu, vv)
}
5618
/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat`
///
/// Arithmetic shift right of the unsigned-halfword pair `vuu` by per-lane
/// amounts from `vv`, with rounding, saturated and packed into unsigned
/// bytes. Requires HVX v69.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvuhubrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vasr_WuhVub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvuhubrndsat(vuu, vv)
}
5630
/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat`
///
/// As [`Q6_Vub_vasr_WuhVub_rnd_sat`] but without rounding: shift right the
/// unsigned-halfword pair by per-lane amounts, saturating to unsigned bytes.
/// Requires HVX v69.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvuhubsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vub_vasr_WuhVub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvuhubsat(vuu, vv)
}
5642
/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat`
///
/// Arithmetic shift right of the word pair `vuu` by per-lane amounts from
/// `vv`, with rounding, saturated and packed into unsigned halfwords.
/// Requires HVX v69.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvwuhrndsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vasr_WwVuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvwuhrndsat(vuu, vv)
}
5654
/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat`
///
/// As [`Q6_Vuh_vasr_WwVuh_rnd_sat`] but without rounding: shift right the
/// word pair by per-lane amounts, saturating to unsigned halfwords.
/// Requires HVX v69.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vasrvwuhsat))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vasr_WwVuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector {
    vasrvwuhsat(vuu, vv)
}
5666
/// `Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16`
///
/// Element-wise unsigned-halfword multiply keeping the product shifted
/// right by 16 (the high half). Requires HVX v69.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))]
#[cfg_attr(test, assert_instr(vmpyuhvs))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vuh_vmpy_VuhVuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vmpyuhvs(vu, vv)
}
5678
/// `Vd32.h=Vu32.hf`
///
/// Converts each half-float lane to a signed halfword. Requires HVX v73.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_h_hf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_equals_Vhf(vu: HvxVector) -> HvxVector {
    vconv_h_hf(vu)
}
5690
/// `Vd32.hf=Vu32.h`
///
/// Converts each signed-halfword lane to a half-float. Requires HVX v73.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_hf_h))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vhf_equals_Vh(vu: HvxVector) -> HvxVector {
    vconv_hf_h(vu)
}
5702
/// `Vd32.sf=Vu32.w`
///
/// Converts each signed-word lane to a single-float. Requires HVX v73.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_sf_w))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vsf_equals_Vw(vu: HvxVector) -> HvxVector {
    vconv_sf_w(vu)
}
5714
/// `Vd32.w=Vu32.sf`
///
/// Converts each single-float lane to a signed word. Requires HVX v73.
///
/// Instruction Type: CVI_VS
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))]
#[cfg_attr(test, assert_instr(vconv_w_sf))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_equals_Vsf(vu: HvxVector) -> HvxVector {
    vconv_w_sf(vu)
}
5726
/// `Vd32=vgetqfext(Vu32.x,Rt32)`
///
/// Extracts the qfloat extension bits of `vu` as selected by the scalar
/// control word `rt` (see the HVX manual for the encoding). Requires HVX v79.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(get_qfext))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vgetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector {
    get_qfext(vu, rt)
}
5738
/// `Vd32.x=vsetqfext(Vu32,Rt32)`
///
/// Inverse of [`Q6_V_vgetqfext_VR`]: inserts qfloat extension bits into
/// `vu` as selected by the scalar control word `rt`. Requires HVX v79.
///
/// Instruction Type: CVI_VX
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(set_qfext))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vsetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector {
    set_qfext(vu, rt)
}
5750
/// `Vd32.f8=vabs(Vu32.f8)`
///
/// Element-wise absolute value of 8-bit-float (`f8`) lanes. Requires HVX v79.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vabs_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vabs_V(vu: HvxVector) -> HvxVector {
    vabs_f8(vu)
}
5762
/// `Vdd32.hf=vcvt2(Vu32.b)`
///
/// Widening conversion of signed bytes to half-floats; the doubled-width
/// result is delivered as a vector pair. Requires HVX v79.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vcvt2_hf_b))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Whf_vcvt2_Vb(vu: HvxVector) -> HvxVectorPair {
    vcvt2_hf_b(vu)
}
5774
/// `Vdd32.hf=vcvt2(Vu32.ub)`
///
/// Widening conversion of unsigned bytes to half-floats; the doubled-width
/// result is delivered as a vector pair. Requires HVX v79.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vcvt2_hf_ub))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Whf_vcvt2_Vub(vu: HvxVector) -> HvxVectorPair {
    vcvt2_hf_ub(vu)
}
5786
/// `Vdd32.hf=vcvt(Vu32.f8)`
///
/// Widening conversion of 8-bit-float (`f8`) lanes to half-floats; the
/// doubled-width result is delivered as a vector pair. Requires HVX v79.
///
/// Instruction Type: CVI_VX_DV
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vcvt_hf_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Whf_vcvt_V(vu: HvxVector) -> HvxVectorPair {
    vcvt_hf_f8(vu)
}
5798
/// `Vd32.f8=vfmax(Vu32.f8,Vv32.f8)`
///
/// Element-wise maximum of 8-bit-float (`f8`) lanes. Requires HVX v79.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vfmax_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vfmax_VV(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmax_f8(vu, vv)
}
5810
/// `Vd32.f8=vfmin(Vu32.f8,Vv32.f8)`
///
/// Element-wise minimum of 8-bit-float (`f8`) lanes. Requires HVX v79.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vfmin_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vfmin_VV(vu: HvxVector, vv: HvxVector) -> HvxVector {
    vfmin_f8(vu, vv)
}
5822
/// `Vd32.f8=vfneg(Vu32.f8)`
///
/// Element-wise negation of 8-bit-float (`f8`) lanes. Requires HVX v79.
///
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))]
#[cfg_attr(test, assert_instr(vfneg_f8))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vfneg_V(vu: HvxVector) -> HvxVector {
    vfneg_f8(vu)
}
5834
/// `Qd4=and(Qs4,Qt4)`
///
/// Logical AND of two predicate registers.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_and_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
    // Each predicate is bridged to vector form with `vandvrt(q, -1)`, the
    // predicate AND is applied, and `vandqrt(v, -1)` converts back. The
    // transmutes only reinterpret the 128-byte register between the two
    // same-sized wrapper types (see the module header).
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        pred_and(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
        ),
        -1,
    ))
}
5852
/// `Qd4=and(Qs4,!Qt4)`
///
/// Logical AND of a predicate with the complement of another predicate.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_and_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
    // Bridge predicates to vector form (`vandvrt`), apply AND-with-NOT,
    // then convert back (`vandqrt`); transmutes are pure reinterpretation.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        pred_and_n(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
        ),
        -1,
    ))
}
5870
/// `Qd4=not(Qs4)`
///
/// Logical complement of a predicate register.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_not_Q(qs: HvxVectorPred) -> HvxVectorPred {
    // Bridge the predicate to vector form (`vandvrt`), invert it, then
    // convert back (`vandqrt`); transmutes are pure reinterpretation.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        pred_not(vandvrt(
            core::mem::transmute::<HvxVectorPred, HvxVector>(qs),
            -1,
        )),
        -1,
    ))
}
5888
/// `Qd4=or(Qs4,Qt4)`
///
/// Logical OR of two predicate registers.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_or_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
    // Bridge predicates to vector form (`vandvrt`), OR them, then convert
    // back (`vandqrt`); transmutes are pure reinterpretation.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        pred_or(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
        ),
        -1,
    ))
}
5906
/// `Qd4=or(Qs4,!Qt4)`
///
/// Logical OR of a predicate with the complement of another predicate.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_or_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
    // Bridge predicates to vector form (`vandvrt`), apply OR-with-NOT,
    // then convert back (`vandqrt`); transmutes are pure reinterpretation.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        pred_or_n(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
        ),
        -1,
    ))
}
5924
/// `Qd4=vsetq(Rt32)`
///
/// Builds a predicate from the scalar `rt` (`pred_scalar2`), then converts
/// the vector-form mask into a predicate register via `vandqrt(.., -1)`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VP
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vsetq_R(rt: i32) -> HvxVectorPred {
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(pred_scalar2(rt), -1))
}
5936
/// `Qd4=xor(Qs4,Qt4)`
///
/// Logical XOR of two predicate registers.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA_DV
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_xor_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
    // Bridge predicates to vector form (`vandvrt`), XOR them, then convert
    // back (`vandqrt`); transmutes are pure reinterpretation.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        pred_xor(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
        ),
        -1,
    ))
}
5954
/// `if (!Qv4) vmem(Rt32+#s4)=Vs32`
///
/// Conditionally stores `vs` through `rt`: only lanes where the predicate
/// `qv` is *false* are written.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VM_ST
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vmem_QnRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
    vS32b_nqpred_ai(
        // The store intrinsic takes the predicate in vector form, hence the
        // `vandvrt(q, -1)` bridge.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        rt,
        vs,
    )
}
5970
/// `if (!Qv4) vmem(Rt32+#s4):nt=Vs32`
///
/// Non-temporal (`:nt`) variant of [`Q6_vmem_QnRIV`]: stores lanes of `vs`
/// where the predicate `qv` is *false*.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VM_ST
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vmem_QnRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
    vS32b_nt_nqpred_ai(
        // Bridge the predicate to vector form for the store intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        rt,
        vs,
    )
}
5986
/// `if (Qv4) vmem(Rt32+#s4):nt=Vs32`
///
/// Non-temporal (`:nt`) predicated store: stores lanes of `vs` where the
/// predicate `qv` is *true*.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VM_ST
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vmem_QRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
    vS32b_nt_qpred_ai(
        // Bridge the predicate to vector form for the store intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        rt,
        vs,
    )
}
6002
/// `if (Qv4) vmem(Rt32+#s4)=Vs32`
///
/// Predicated store: stores lanes of `vs` through `rt` where the predicate
/// `qv` is *true*.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VM_ST
/// Execution Slots: SLOT0
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_vmem_QRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
    vS32b_qpred_ai(
        // Bridge the predicate to vector form for the store intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        rt,
        vs,
    )
}
6018
/// `if (!Qv4) Vx32.b+=Vu32.b`
///
/// Conditional accumulate: byte lanes of `vx` where the predicate is *false*
/// gain the corresponding lanes of `vu`; the result is returned.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_condacc_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
    vaddbnq(
        // Bridge the predicate to vector form for the intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        vx,
        vu,
    )
}
6034
/// `if (Qv4) Vx32.b+=Vu32.b`
///
/// Conditional accumulate: byte lanes of `vx` where the predicate is *true*
/// gain the corresponding lanes of `vu`; the result is returned.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vb_condacc_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
    vaddbq(
        // Bridge the predicate to vector form for the intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        vx,
        vu,
    )
}
6050
/// `if (!Qv4) Vx32.h+=Vu32.h`
///
/// Conditional accumulate: halfword lanes of `vx` where the predicate is
/// *false* gain the corresponding lanes of `vu`; the result is returned.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_condacc_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
    vaddhnq(
        // Bridge the predicate to vector form for the intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        vx,
        vu,
    )
}
6066
/// `if (Qv4) Vx32.h+=Vu32.h`
///
/// Conditional accumulate: halfword lanes of `vx` where the predicate is
/// *true* gain the corresponding lanes of `vu`; the result is returned.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vh_condacc_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
    vaddhq(
        // Bridge the predicate to vector form for the intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        vx,
        vu,
    )
}
6082
/// `if (!Qv4) Vx32.w+=Vu32.w`
///
/// Conditional accumulate: word lanes of `vx` where the predicate is *false*
/// gain the corresponding lanes of `vu`; the result is returned.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_condacc_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
    vaddwnq(
        // Bridge the predicate to vector form for the intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        vx,
        vu,
    )
}
6098
/// `if (Qv4) Vx32.w+=Vu32.w`
///
/// Conditional accumulate: word lanes of `vx` where the predicate is *true*
/// gain the corresponding lanes of `vu`; the result is returned.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Vw_condacc_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
    vaddwq(
        // Bridge the predicate to vector form for the intrinsic.
        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
        vx,
        vu,
    )
}
6114
/// `Vd32=vand(Qu4,Rt32)`
///
/// Expands the predicate `qu` into a vector: lanes selected by `qu` take
/// the scalar `rt`, others are zero (per the `vandvrt` instruction).
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vand_QR(qu: HvxVectorPred, rt: i32) -> HvxVector {
    vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)
}
6126
/// `Vx32|=vand(Qu4,Rt32)`
///
/// Accumulating variant of [`Q6_V_vand_QR`]: ORs the expanded predicate
/// mask into `vx` and returns the result.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_V_vandor_VQR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector {
    vandvrt_acc(vx, core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)
}
6138
/// `Qd4=vand(Vu32,Rt32)`
///
/// Converts a vector into a predicate by ANDing each lane of `vu` with the
/// scalar `rt` (per the `vandqrt` instruction). Inverse of [`Q6_V_vand_QR`].
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vand_VR(vu: HvxVector, rt: i32) -> HvxVectorPred {
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vu, rt))
}
6150
/// `Qx4|=vand(Vu32,Rt32)`
///
/// Accumulating variant of [`Q6_Q_vand_VR`]: ORs the vector/scalar AND
/// result into the existing predicate `qx` and returns the result.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VX_LATE
/// Execution Slots: SLOT23
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vandor_QVR(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred {
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt_acc(
        core::mem::transmute::<HvxVectorPred, HvxVector>(qx),
        vu,
        rt,
    ))
}
6166
/// `Qd4=vcmp.eq(Vu32.b,Vv32.b)`
///
/// Per-byte equality compare; the vector-form flags from `veqb` are
/// converted into a predicate via `vandqrt(.., -1)`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eq_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqb(vu, vv), -1))
}
6178
/// `Qx4&=vcmp.eq(Vu32.b,Vv32.b)`
///
/// Per-byte equality compare ANDed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqand_QVbVb(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form (`vandvrt`), merge with the compare, then
    // convert the result back to a predicate (`vandqrt`).
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqb_and(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6201
/// `Qx4|=vcmp.eq(Vu32.b,Vv32.b)`
///
/// Per-byte equality compare ORed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqor_QVbVb(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqb_or(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6224
/// `Qx4^=vcmp.eq(Vu32.b,Vv32.b)`
///
/// Per-byte equality compare XORed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqxacc_QVbVb(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqb_xor(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6247
/// `Qd4=vcmp.eq(Vu32.h,Vv32.h)`
///
/// Per-halfword equality compare; the vector-form flags from `veqh` are
/// converted into a predicate via `vandqrt(.., -1)`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eq_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqh(vu, vv), -1))
}
6259
/// `Qx4&=vcmp.eq(Vu32.h,Vv32.h)`
///
/// Per-halfword equality compare ANDed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqand_QVhVh(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqh_and(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6282
/// `Qx4|=vcmp.eq(Vu32.h,Vv32.h)`
///
/// Per-halfword equality compare ORed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqor_QVhVh(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqh_or(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6305
/// `Qx4^=vcmp.eq(Vu32.h,Vv32.h)`
///
/// Per-halfword equality compare XORed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqxacc_QVhVh(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqh_xor(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6328
/// `Qd4=vcmp.eq(Vu32.w,Vv32.w)`
///
/// Per-word equality compare; the vector-form flags from `veqw` are
/// converted into a predicate via `vandqrt(.., -1)`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eq_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqw(vu, vv), -1))
}
6340
/// `Qx4&=vcmp.eq(Vu32.w,Vv32.w)`
///
/// Per-word equality compare ANDed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqand_QVwVw(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqw_and(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6363
/// `Qx4|=vcmp.eq(Vu32.w,Vv32.w)`
///
/// Per-word equality compare ORed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqor_QVwVw(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqw_or(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6386
/// `Qx4^=vcmp.eq(Vu32.w,Vv32.w)`
///
/// Per-word equality compare XORed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_eqxacc_QVwVw(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        veqw_xor(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6409
/// `Qd4=vcmp.gt(Vu32.b,Vv32.b)`
///
/// Per-byte signed greater-than compare; the vector-form flags from `vgtb`
/// are converted into a predicate via `vandqrt(.., -1)`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_gt_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtb(vu, vv), -1))
}
6421
/// `Qx4&=vcmp.gt(Vu32.b,Vv32.b)`
///
/// Per-byte greater-than compare ANDed into the existing predicate `qx`.
///
/// This is a compound operation composed of multiple HVX instructions.
/// Instruction Type: CVI_VA
/// Execution Slots: SLOT0123
#[inline(always)]
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
#[unstable(feature = "stdarch_hexagon", issue = "151523")]
pub unsafe fn Q6_Q_vcmp_gtand_QVbVb(
    qx: HvxVectorPred,
    vu: HvxVector,
    vv: HvxVector,
) -> HvxVectorPred {
    // Bridge `qx` to vector form, merge with the compare, convert back.
    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
        vgtb_and(
            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
            vu,
            vv,
        ),
        -1,
    ))
}
6444
6445/// `Qx4|=vcmp.gt(Vu32.b,Vv32.b)`
6446///
6447/// This is a compound operation composed of multiple HVX instructions.
6448/// Instruction Type: CVI_VA
6449/// Execution Slots: SLOT0123
6450#[inline(always)]
6451#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6452#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6453pub unsafe fn Q6_Q_vcmp_gtor_QVbVb(
6454    qx: HvxVectorPred,
6455    vu: HvxVector,
6456    vv: HvxVector,
6457) -> HvxVectorPred {
6458    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6459        vgtb_or(
6460            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6461            vu,
6462            vv,
6463        ),
6464        -1,
6465    ))
6466}
6467
6468/// `Qx4^=vcmp.gt(Vu32.b,Vv32.b)`
6469///
6470/// This is a compound operation composed of multiple HVX instructions.
6471/// Instruction Type: CVI_VA
6472/// Execution Slots: SLOT0123
6473#[inline(always)]
6474#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6475#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6476pub unsafe fn Q6_Q_vcmp_gtxacc_QVbVb(
6477    qx: HvxVectorPred,
6478    vu: HvxVector,
6479    vv: HvxVector,
6480) -> HvxVectorPred {
6481    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6482        vgtb_xor(
6483            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6484            vu,
6485            vv,
6486        ),
6487        -1,
6488    ))
6489}
6490
6491/// `Qd4=vcmp.gt(Vu32.h,Vv32.h)`
6492///
6493/// This is a compound operation composed of multiple HVX instructions.
6494/// Instruction Type: CVI_VA
6495/// Execution Slots: SLOT0123
6496#[inline(always)]
6497#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6498#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6499pub unsafe fn Q6_Q_vcmp_gt_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6500    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgth(vu, vv), -1))
6501}
6502
6503/// `Qx4&=vcmp.gt(Vu32.h,Vv32.h)`
6504///
6505/// This is a compound operation composed of multiple HVX instructions.
6506/// Instruction Type: CVI_VA
6507/// Execution Slots: SLOT0123
6508#[inline(always)]
6509#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6510#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6511pub unsafe fn Q6_Q_vcmp_gtand_QVhVh(
6512    qx: HvxVectorPred,
6513    vu: HvxVector,
6514    vv: HvxVector,
6515) -> HvxVectorPred {
6516    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6517        vgth_and(
6518            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6519            vu,
6520            vv,
6521        ),
6522        -1,
6523    ))
6524}
6525
6526/// `Qx4|=vcmp.gt(Vu32.h,Vv32.h)`
6527///
6528/// This is a compound operation composed of multiple HVX instructions.
6529/// Instruction Type: CVI_VA
6530/// Execution Slots: SLOT0123
6531#[inline(always)]
6532#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6533#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6534pub unsafe fn Q6_Q_vcmp_gtor_QVhVh(
6535    qx: HvxVectorPred,
6536    vu: HvxVector,
6537    vv: HvxVector,
6538) -> HvxVectorPred {
6539    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6540        vgth_or(
6541            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6542            vu,
6543            vv,
6544        ),
6545        -1,
6546    ))
6547}
6548
6549/// `Qx4^=vcmp.gt(Vu32.h,Vv32.h)`
6550///
6551/// This is a compound operation composed of multiple HVX instructions.
6552/// Instruction Type: CVI_VA
6553/// Execution Slots: SLOT0123
6554#[inline(always)]
6555#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6556#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6557pub unsafe fn Q6_Q_vcmp_gtxacc_QVhVh(
6558    qx: HvxVectorPred,
6559    vu: HvxVector,
6560    vv: HvxVector,
6561) -> HvxVectorPred {
6562    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6563        vgth_xor(
6564            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6565            vu,
6566            vv,
6567        ),
6568        -1,
6569    ))
6570}
6571
6572/// `Qd4=vcmp.gt(Vu32.ub,Vv32.ub)`
6573///
6574/// This is a compound operation composed of multiple HVX instructions.
6575/// Instruction Type: CVI_VA
6576/// Execution Slots: SLOT0123
6577#[inline(always)]
6578#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6579#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6580pub unsafe fn Q6_Q_vcmp_gt_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6581    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtub(vu, vv), -1))
6582}
6583
6584/// `Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)`
6585///
6586/// This is a compound operation composed of multiple HVX instructions.
6587/// Instruction Type: CVI_VA
6588/// Execution Slots: SLOT0123
6589#[inline(always)]
6590#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6591#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6592pub unsafe fn Q6_Q_vcmp_gtand_QVubVub(
6593    qx: HvxVectorPred,
6594    vu: HvxVector,
6595    vv: HvxVector,
6596) -> HvxVectorPred {
6597    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6598        vgtub_and(
6599            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6600            vu,
6601            vv,
6602        ),
6603        -1,
6604    ))
6605}
6606
6607/// `Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)`
6608///
6609/// This is a compound operation composed of multiple HVX instructions.
6610/// Instruction Type: CVI_VA
6611/// Execution Slots: SLOT0123
6612#[inline(always)]
6613#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6614#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6615pub unsafe fn Q6_Q_vcmp_gtor_QVubVub(
6616    qx: HvxVectorPred,
6617    vu: HvxVector,
6618    vv: HvxVector,
6619) -> HvxVectorPred {
6620    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6621        vgtub_or(
6622            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6623            vu,
6624            vv,
6625        ),
6626        -1,
6627    ))
6628}
6629
6630/// `Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)`
6631///
6632/// This is a compound operation composed of multiple HVX instructions.
6633/// Instruction Type: CVI_VA
6634/// Execution Slots: SLOT0123
6635#[inline(always)]
6636#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6637#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6638pub unsafe fn Q6_Q_vcmp_gtxacc_QVubVub(
6639    qx: HvxVectorPred,
6640    vu: HvxVector,
6641    vv: HvxVector,
6642) -> HvxVectorPred {
6643    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6644        vgtub_xor(
6645            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6646            vu,
6647            vv,
6648        ),
6649        -1,
6650    ))
6651}
6652
6653/// `Qd4=vcmp.gt(Vu32.uh,Vv32.uh)`
6654///
6655/// This is a compound operation composed of multiple HVX instructions.
6656/// Instruction Type: CVI_VA
6657/// Execution Slots: SLOT0123
6658#[inline(always)]
6659#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6660#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6661pub unsafe fn Q6_Q_vcmp_gt_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6662    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtuh(vu, vv), -1))
6663}
6664
6665/// `Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)`
6666///
6667/// This is a compound operation composed of multiple HVX instructions.
6668/// Instruction Type: CVI_VA
6669/// Execution Slots: SLOT0123
6670#[inline(always)]
6671#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6672#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6673pub unsafe fn Q6_Q_vcmp_gtand_QVuhVuh(
6674    qx: HvxVectorPred,
6675    vu: HvxVector,
6676    vv: HvxVector,
6677) -> HvxVectorPred {
6678    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6679        vgtuh_and(
6680            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6681            vu,
6682            vv,
6683        ),
6684        -1,
6685    ))
6686}
6687
6688/// `Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)`
6689///
6690/// This is a compound operation composed of multiple HVX instructions.
6691/// Instruction Type: CVI_VA
6692/// Execution Slots: SLOT0123
6693#[inline(always)]
6694#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6695#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6696pub unsafe fn Q6_Q_vcmp_gtor_QVuhVuh(
6697    qx: HvxVectorPred,
6698    vu: HvxVector,
6699    vv: HvxVector,
6700) -> HvxVectorPred {
6701    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6702        vgtuh_or(
6703            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6704            vu,
6705            vv,
6706        ),
6707        -1,
6708    ))
6709}
6710
6711/// `Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)`
6712///
6713/// This is a compound operation composed of multiple HVX instructions.
6714/// Instruction Type: CVI_VA
6715/// Execution Slots: SLOT0123
6716#[inline(always)]
6717#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6718#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6719pub unsafe fn Q6_Q_vcmp_gtxacc_QVuhVuh(
6720    qx: HvxVectorPred,
6721    vu: HvxVector,
6722    vv: HvxVector,
6723) -> HvxVectorPred {
6724    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6725        vgtuh_xor(
6726            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6727            vu,
6728            vv,
6729        ),
6730        -1,
6731    ))
6732}
6733
6734/// `Qd4=vcmp.gt(Vu32.uw,Vv32.uw)`
6735///
6736/// This is a compound operation composed of multiple HVX instructions.
6737/// Instruction Type: CVI_VA
6738/// Execution Slots: SLOT0123
6739#[inline(always)]
6740#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6741#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6742pub unsafe fn Q6_Q_vcmp_gt_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6743    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtuw(vu, vv), -1))
6744}
6745
6746/// `Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)`
6747///
6748/// This is a compound operation composed of multiple HVX instructions.
6749/// Instruction Type: CVI_VA
6750/// Execution Slots: SLOT0123
6751#[inline(always)]
6752#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6753#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6754pub unsafe fn Q6_Q_vcmp_gtand_QVuwVuw(
6755    qx: HvxVectorPred,
6756    vu: HvxVector,
6757    vv: HvxVector,
6758) -> HvxVectorPred {
6759    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6760        vgtuw_and(
6761            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6762            vu,
6763            vv,
6764        ),
6765        -1,
6766    ))
6767}
6768
6769/// `Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)`
6770///
6771/// This is a compound operation composed of multiple HVX instructions.
6772/// Instruction Type: CVI_VA
6773/// Execution Slots: SLOT0123
6774#[inline(always)]
6775#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6776#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6777pub unsafe fn Q6_Q_vcmp_gtor_QVuwVuw(
6778    qx: HvxVectorPred,
6779    vu: HvxVector,
6780    vv: HvxVector,
6781) -> HvxVectorPred {
6782    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6783        vgtuw_or(
6784            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6785            vu,
6786            vv,
6787        ),
6788        -1,
6789    ))
6790}
6791
6792/// `Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)`
6793///
6794/// This is a compound operation composed of multiple HVX instructions.
6795/// Instruction Type: CVI_VA
6796/// Execution Slots: SLOT0123
6797#[inline(always)]
6798#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6799#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6800pub unsafe fn Q6_Q_vcmp_gtxacc_QVuwVuw(
6801    qx: HvxVectorPred,
6802    vu: HvxVector,
6803    vv: HvxVector,
6804) -> HvxVectorPred {
6805    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6806        vgtuw_xor(
6807            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6808            vu,
6809            vv,
6810        ),
6811        -1,
6812    ))
6813}
6814
6815/// `Qd4=vcmp.gt(Vu32.w,Vv32.w)`
6816///
6817/// This is a compound operation composed of multiple HVX instructions.
6818/// Instruction Type: CVI_VA
6819/// Execution Slots: SLOT0123
6820#[inline(always)]
6821#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6822#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6823pub unsafe fn Q6_Q_vcmp_gt_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
6824    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtw(vu, vv), -1))
6825}
6826
6827/// `Qx4&=vcmp.gt(Vu32.w,Vv32.w)`
6828///
6829/// This is a compound operation composed of multiple HVX instructions.
6830/// Instruction Type: CVI_VA
6831/// Execution Slots: SLOT0123
6832#[inline(always)]
6833#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6834#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6835pub unsafe fn Q6_Q_vcmp_gtand_QVwVw(
6836    qx: HvxVectorPred,
6837    vu: HvxVector,
6838    vv: HvxVector,
6839) -> HvxVectorPred {
6840    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6841        vgtw_and(
6842            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6843            vu,
6844            vv,
6845        ),
6846        -1,
6847    ))
6848}
6849
6850/// `Qx4|=vcmp.gt(Vu32.w,Vv32.w)`
6851///
6852/// This is a compound operation composed of multiple HVX instructions.
6853/// Instruction Type: CVI_VA
6854/// Execution Slots: SLOT0123
6855#[inline(always)]
6856#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6857#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6858pub unsafe fn Q6_Q_vcmp_gtor_QVwVw(
6859    qx: HvxVectorPred,
6860    vu: HvxVector,
6861    vv: HvxVector,
6862) -> HvxVectorPred {
6863    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6864        vgtw_or(
6865            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6866            vu,
6867            vv,
6868        ),
6869        -1,
6870    ))
6871}
6872
6873/// `Qx4^=vcmp.gt(Vu32.w,Vv32.w)`
6874///
6875/// This is a compound operation composed of multiple HVX instructions.
6876/// Instruction Type: CVI_VA
6877/// Execution Slots: SLOT0123
6878#[inline(always)]
6879#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6880#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6881pub unsafe fn Q6_Q_vcmp_gtxacc_QVwVw(
6882    qx: HvxVectorPred,
6883    vu: HvxVector,
6884    vv: HvxVector,
6885) -> HvxVectorPred {
6886    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
6887        vgtw_xor(
6888            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
6889            vu,
6890            vv,
6891        ),
6892        -1,
6893    ))
6894}
6895
6896/// `Vd32=vmux(Qt4,Vu32,Vv32)`
6897///
6898/// This is a compound operation composed of multiple HVX instructions.
6899/// Instruction Type: CVI_VA
6900/// Execution Slots: SLOT0123
6901#[inline(always)]
6902#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6903#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6904pub unsafe fn Q6_V_vmux_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector {
6905    vmux(
6906        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
6907        vu,
6908        vv,
6909    )
6910}
6911
6912/// `if (!Qv4) Vx32.b-=Vu32.b`
6913///
6914/// This is a compound operation composed of multiple HVX instructions.
6915/// Instruction Type: CVI_VA
6916/// Execution Slots: SLOT0123
6917#[inline(always)]
6918#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6919#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6920pub unsafe fn Q6_Vb_condnac_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6921    vsubbnq(
6922        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6923        vx,
6924        vu,
6925    )
6926}
6927
6928/// `if (Qv4) Vx32.b-=Vu32.b`
6929///
6930/// This is a compound operation composed of multiple HVX instructions.
6931/// Instruction Type: CVI_VA
6932/// Execution Slots: SLOT0123
6933#[inline(always)]
6934#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6935#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6936pub unsafe fn Q6_Vb_condnac_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6937    vsubbq(
6938        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6939        vx,
6940        vu,
6941    )
6942}
6943
6944/// `if (!Qv4) Vx32.h-=Vu32.h`
6945///
6946/// This is a compound operation composed of multiple HVX instructions.
6947/// Instruction Type: CVI_VA
6948/// Execution Slots: SLOT0123
6949#[inline(always)]
6950#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6951#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6952pub unsafe fn Q6_Vh_condnac_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6953    vsubhnq(
6954        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6955        vx,
6956        vu,
6957    )
6958}
6959
6960/// `if (Qv4) Vx32.h-=Vu32.h`
6961///
6962/// This is a compound operation composed of multiple HVX instructions.
6963/// Instruction Type: CVI_VA
6964/// Execution Slots: SLOT0123
6965#[inline(always)]
6966#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6967#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6968pub unsafe fn Q6_Vh_condnac_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6969    vsubhq(
6970        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6971        vx,
6972        vu,
6973    )
6974}
6975
6976/// `if (!Qv4) Vx32.w-=Vu32.w`
6977///
6978/// This is a compound operation composed of multiple HVX instructions.
6979/// Instruction Type: CVI_VA
6980/// Execution Slots: SLOT0123
6981#[inline(always)]
6982#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6983#[unstable(feature = "stdarch_hexagon", issue = "151523")]
6984pub unsafe fn Q6_Vw_condnac_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
6985    vsubwnq(
6986        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
6987        vx,
6988        vu,
6989    )
6990}
6991
6992/// `if (Qv4) Vx32.w-=Vu32.w`
6993///
6994/// This is a compound operation composed of multiple HVX instructions.
6995/// Instruction Type: CVI_VA
6996/// Execution Slots: SLOT0123
6997#[inline(always)]
6998#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
6999#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7000pub unsafe fn Q6_Vw_condnac_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
7001    vsubwq(
7002        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
7003        vx,
7004        vu,
7005    )
7006}
7007
7008/// `Vdd32=vswap(Qt4,Vu32,Vv32)`
7009///
7010/// This is a compound operation composed of multiple HVX instructions.
7011/// Instruction Type: CVI_VA_DV
7012/// Execution Slots: SLOT0123
7013#[inline(always)]
7014#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
7015#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7016pub unsafe fn Q6_W_vswap_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair {
7017    vswap(
7018        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
7019        vu,
7020        vv,
7021    )
7022}
7023
7024/// `Qd4=vsetq2(Rt32)`
7025///
7026/// This is a compound operation composed of multiple HVX instructions.
7027/// Instruction Type: CVI_VP
7028/// Execution Slots: SLOT0123
7029#[inline(always)]
7030#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7031#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7032pub unsafe fn Q6_Q_vsetq2_R(rt: i32) -> HvxVectorPred {
7033    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(pred_scalar2v2(rt), -1))
7034}
7035
7036/// `Qd4.b=vshuffe(Qs4.h,Qt4.h)`
7037///
7038/// This is a compound operation composed of multiple HVX instructions.
7039/// Instruction Type: CVI_VA_DV
7040/// Execution Slots: SLOT0123
7041#[inline(always)]
7042#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7043#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7044pub unsafe fn Q6_Qb_vshuffe_QhQh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
7045    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7046        shuffeqh(
7047            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7048            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
7049        ),
7050        -1,
7051    ))
7052}
7053
7054/// `Qd4.h=vshuffe(Qs4.w,Qt4.w)`
7055///
7056/// This is a compound operation composed of multiple HVX instructions.
7057/// Instruction Type: CVI_VA_DV
7058/// Execution Slots: SLOT0123
7059#[inline(always)]
7060#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7061#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7062pub unsafe fn Q6_Qh_vshuffe_QwQw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
7063    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7064        shuffeqw(
7065            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7066            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
7067        ),
7068        -1,
7069    ))
7070}
7071
7072/// `Vd32=vand(!Qu4,Rt32)`
7073///
7074/// This is a compound operation composed of multiple HVX instructions.
7075/// Instruction Type: CVI_VX_LATE
7076/// Execution Slots: SLOT23
7077#[inline(always)]
7078#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7079#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7080pub unsafe fn Q6_V_vand_QnR(qu: HvxVectorPred, rt: i32) -> HvxVector {
7081    vandnqrt(
7082        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), -1),
7083        rt,
7084    )
7085}
7086
7087/// `Vx32|=vand(!Qu4,Rt32)`
7088///
7089/// This is a compound operation composed of multiple HVX instructions.
7090/// Instruction Type: CVI_VX_LATE
7091/// Execution Slots: SLOT23
7092#[inline(always)]
7093#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7094#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7095pub unsafe fn Q6_V_vandor_VQnR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector {
7096    vandnqrt_acc(
7097        vx,
7098        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), -1),
7099        rt,
7100    )
7101}
7102
7103/// `Vd32=vand(!Qv4,Vu32)`
7104///
7105/// This is a compound operation composed of multiple HVX instructions.
7106/// Instruction Type: CVI_VA
7107/// Execution Slots: SLOT0123
7108#[inline(always)]
7109#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7110#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7111pub unsafe fn Q6_V_vand_QnV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector {
7112    vandvnqv(
7113        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
7114        vu,
7115    )
7116}
7117
7118/// `Vd32=vand(Qv4,Vu32)`
7119///
7120/// This is a compound operation composed of multiple HVX instructions.
7121/// Instruction Type: CVI_VA
7122/// Execution Slots: SLOT0123
7123#[inline(always)]
7124#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))]
7125#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7126pub unsafe fn Q6_V_vand_QV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector {
7127    vandvqv(
7128        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
7129        vu,
7130    )
7131}
7132
7133/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h`
7134///
7135/// This is a compound operation composed of multiple HVX instructions.
7136/// Instruction Type: CVI_GATHER
7137/// Execution Slots: SLOT01
7138#[inline(always)]
7139#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7140#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7141pub unsafe fn Q6_vgather_AQRMVh(
7142    rs: *mut HvxVector,
7143    qs: HvxVectorPred,
7144    rt: i32,
7145    mu: i32,
7146    vv: HvxVector,
7147) {
7148    vgathermhq(
7149        rs,
7150        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7151        rt,
7152        mu,
7153        vv,
7154    )
7155}
7156
7157/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h`
7158///
7159/// This is a compound operation composed of multiple HVX instructions.
7160/// Instruction Type: CVI_GATHER_DV
7161/// Execution Slots: SLOT01
7162#[inline(always)]
7163#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7164#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7165pub unsafe fn Q6_vgather_AQRMWw(
7166    rs: *mut HvxVector,
7167    qs: HvxVectorPred,
7168    rt: i32,
7169    mu: i32,
7170    vvv: HvxVectorPair,
7171) {
7172    vgathermhwq(
7173        rs,
7174        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7175        rt,
7176        mu,
7177        vvv,
7178    )
7179}
7180
7181/// `if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w`
7182///
7183/// This is a compound operation composed of multiple HVX instructions.
7184/// Instruction Type: CVI_GATHER
7185/// Execution Slots: SLOT01
7186#[inline(always)]
7187#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7188#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7189pub unsafe fn Q6_vgather_AQRMVw(
7190    rs: *mut HvxVector,
7191    qs: HvxVectorPred,
7192    rt: i32,
7193    mu: i32,
7194    vv: HvxVector,
7195) {
7196    vgathermwq(
7197        rs,
7198        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7199        rt,
7200        mu,
7201        vv,
7202    )
7203}
7204
7205/// `Vd32.b=prefixsum(Qv4)`
7206///
7207/// This is a compound operation composed of multiple HVX instructions.
7208/// Instruction Type: CVI_VS
7209/// Execution Slots: SLOT0123
7210#[inline(always)]
7211#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7212#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7213pub unsafe fn Q6_Vb_prefixsum_Q(qv: HvxVectorPred) -> HvxVector {
7214    vprefixqb(vandvrt(
7215        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
7216        -1,
7217    ))
7218}
7219
7220/// `Vd32.h=prefixsum(Qv4)`
7221///
7222/// This is a compound operation composed of multiple HVX instructions.
7223/// Instruction Type: CVI_VS
7224/// Execution Slots: SLOT0123
7225#[inline(always)]
7226#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7227#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7228pub unsafe fn Q6_Vh_prefixsum_Q(qv: HvxVectorPred) -> HvxVector {
7229    vprefixqh(vandvrt(
7230        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
7231        -1,
7232    ))
7233}
7234
7235/// `Vd32.w=prefixsum(Qv4)`
7236///
7237/// This is a compound operation composed of multiple HVX instructions.
7238/// Instruction Type: CVI_VS
7239/// Execution Slots: SLOT0123
7240#[inline(always)]
7241#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7242#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7243pub unsafe fn Q6_Vw_prefixsum_Q(qv: HvxVectorPred) -> HvxVector {
7244    vprefixqw(vandvrt(
7245        core::mem::transmute::<HvxVectorPred, HvxVector>(qv),
7246        -1,
7247    ))
7248}
7249
7250/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32`
7251///
7252/// This is a compound operation composed of multiple HVX instructions.
7253/// Instruction Type: CVI_SCATTER
7254/// Execution Slots: SLOT0
7255#[inline(always)]
7256#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7257#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7258pub unsafe fn Q6_vscatter_QRMVhV(
7259    qs: HvxVectorPred,
7260    rt: i32,
7261    mu: i32,
7262    vv: HvxVector,
7263    vw: HvxVector,
7264) {
7265    vscattermhq(
7266        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7267        rt,
7268        mu,
7269        vv,
7270        vw,
7271    )
7272}
7273
7274/// `if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32`
7275///
7276/// This is a compound operation composed of multiple HVX instructions.
7277/// Instruction Type: CVI_SCATTER_DV
7278/// Execution Slots: SLOT0
7279#[inline(always)]
7280#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7281#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7282pub unsafe fn Q6_vscatter_QRMWwV(
7283    qs: HvxVectorPred,
7284    rt: i32,
7285    mu: i32,
7286    vvv: HvxVectorPair,
7287    vw: HvxVector,
7288) {
7289    vscattermhwq(
7290        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7291        rt,
7292        mu,
7293        vvv,
7294        vw,
7295    )
7296}
7297
7298/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32`
7299///
7300/// This is a compound operation composed of multiple HVX instructions.
7301/// Instruction Type: CVI_SCATTER
7302/// Execution Slots: SLOT0
7303#[inline(always)]
7304#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))]
7305#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7306pub unsafe fn Q6_vscatter_QRMVwV(
7307    qs: HvxVectorPred,
7308    rt: i32,
7309    mu: i32,
7310    vv: HvxVector,
7311    vw: HvxVector,
7312) {
7313    vscattermwq(
7314        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7315        rt,
7316        mu,
7317        vv,
7318        vw,
7319    )
7320}
7321
7322/// `Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat`
7323///
7324/// This is a compound operation composed of multiple HVX instructions.
7325/// Instruction Type: CVI_VA
7326/// Execution Slots: SLOT0123
7327#[inline(always)]
7328#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))]
7329#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7330pub unsafe fn Q6_Vw_vadd_VwVwQ_carry_sat(
7331    vu: HvxVector,
7332    vv: HvxVector,
7333    qs: HvxVectorPred,
7334) -> HvxVector {
7335    vaddcarrysat(
7336        vu,
7337        vv,
7338        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
7339    )
7340}
7341
7342/// `Qd4=vcmp.gt(Vu32.hf,Vv32.hf)`
7343///
7344/// This is a compound operation composed of multiple HVX instructions.
7345/// Instruction Type: CVI_VA
7346/// Execution Slots: SLOT0123
7347#[inline(always)]
7348#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7349#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7350pub unsafe fn Q6_Q_vcmp_gt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
7351    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgthf(vu, vv), -1))
7352}
7353
7354/// `Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)`
7355///
7356/// This is a compound operation composed of multiple HVX instructions.
7357/// Instruction Type: CVI_VA
7358/// Execution Slots: SLOT0123
7359#[inline(always)]
7360#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7361#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7362pub unsafe fn Q6_Q_vcmp_gtand_QVhfVhf(
7363    qx: HvxVectorPred,
7364    vu: HvxVector,
7365    vv: HvxVector,
7366) -> HvxVectorPred {
7367    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7368        vgthf_and(
7369            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7370            vu,
7371            vv,
7372        ),
7373        -1,
7374    ))
7375}
7376
7377/// `Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)`
7378///
7379/// This is a compound operation composed of multiple HVX instructions.
7380/// Instruction Type: CVI_VA
7381/// Execution Slots: SLOT0123
7382#[inline(always)]
7383#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7384#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7385pub unsafe fn Q6_Q_vcmp_gtor_QVhfVhf(
7386    qx: HvxVectorPred,
7387    vu: HvxVector,
7388    vv: HvxVector,
7389) -> HvxVectorPred {
7390    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7391        vgthf_or(
7392            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7393            vu,
7394            vv,
7395        ),
7396        -1,
7397    ))
7398}
7399
7400/// `Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)`
7401///
7402/// This is a compound operation composed of multiple HVX instructions.
7403/// Instruction Type: CVI_VA
7404/// Execution Slots: SLOT0123
7405#[inline(always)]
7406#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7407#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7408pub unsafe fn Q6_Q_vcmp_gtxacc_QVhfVhf(
7409    qx: HvxVectorPred,
7410    vu: HvxVector,
7411    vv: HvxVector,
7412) -> HvxVectorPred {
7413    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7414        vgthf_xor(
7415            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7416            vu,
7417            vv,
7418        ),
7419        -1,
7420    ))
7421}
7422
7423/// `Qd4=vcmp.gt(Vu32.sf,Vv32.sf)`
7424///
7425/// This is a compound operation composed of multiple HVX instructions.
7426/// Instruction Type: CVI_VA
7427/// Execution Slots: SLOT0123
7428#[inline(always)]
7429#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7430#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7431pub unsafe fn Q6_Q_vcmp_gt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
7432    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtsf(vu, vv), -1))
7433}
7434
7435/// `Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)`
7436///
7437/// This is a compound operation composed of multiple HVX instructions.
7438/// Instruction Type: CVI_VA
7439/// Execution Slots: SLOT0123
7440#[inline(always)]
7441#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7442#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7443pub unsafe fn Q6_Q_vcmp_gtand_QVsfVsf(
7444    qx: HvxVectorPred,
7445    vu: HvxVector,
7446    vv: HvxVector,
7447) -> HvxVectorPred {
7448    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7449        vgtsf_and(
7450            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7451            vu,
7452            vv,
7453        ),
7454        -1,
7455    ))
7456}
7457
7458/// `Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)`
7459///
7460/// This is a compound operation composed of multiple HVX instructions.
7461/// Instruction Type: CVI_VA
7462/// Execution Slots: SLOT0123
7463#[inline(always)]
7464#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7465#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7466pub unsafe fn Q6_Q_vcmp_gtor_QVsfVsf(
7467    qx: HvxVectorPred,
7468    vu: HvxVector,
7469    vv: HvxVector,
7470) -> HvxVectorPred {
7471    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7472        vgtsf_or(
7473            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7474            vu,
7475            vv,
7476        ),
7477        -1,
7478    ))
7479}
7480
7481/// `Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)`
7482///
7483/// This is a compound operation composed of multiple HVX instructions.
7484/// Instruction Type: CVI_VA
7485/// Execution Slots: SLOT0123
7486#[inline(always)]
7487#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))]
7488#[unstable(feature = "stdarch_hexagon", issue = "151523")]
7489pub unsafe fn Q6_Q_vcmp_gtxacc_QVsfVsf(
7490    qx: HvxVectorPred,
7491    vu: HvxVector,
7492    vv: HvxVector,
7493) -> HvxVectorPred {
7494    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
7495        vgtsf_xor(
7496            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
7497            vu,
7498            vv,
7499        ),
7500        -1,
7501    ))
7502}