1. Configuration-Setting Instructions (vsetvli/vsetivli/vsetvl)
1.1. vsetvli
- Mnemonic
vsetvli rd, rs1, vtypei
- Encoding
- Description
-
rd = new vl, rs1 = AVL, vtypei = new vtype setting
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsetvli.h
1.2. vsetivli
- Mnemonic
vsetivli rd, uimm, vtypei
- Encoding
- Description
-
rd = new vl, uimm = AVL, vtypei = new vtype setting
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsetivli.h
1.3. vsetvl
- Mnemonic
vsetvl rd, rs1, rs2
- Encoding
- Description
-
rd = new vl, rs1 = AVL, rs2 = new vtype value
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsetvl.h
1.4. vsetvl Intrinsic Functions
Details
size_t __riscv_vsetvl_e8mf8 (size_t avl);
size_t __riscv_vsetvl_e8mf4 (size_t avl);
size_t __riscv_vsetvl_e8mf2 (size_t avl);
size_t __riscv_vsetvl_e8m1 (size_t avl);
size_t __riscv_vsetvl_e8m2 (size_t avl);
size_t __riscv_vsetvl_e8m4 (size_t avl);
size_t __riscv_vsetvl_e8m8 (size_t avl);
size_t __riscv_vsetvl_e16mf4 (size_t avl);
size_t __riscv_vsetvl_e16mf2 (size_t avl);
size_t __riscv_vsetvl_e16m1 (size_t avl);
size_t __riscv_vsetvl_e16m2 (size_t avl);
size_t __riscv_vsetvl_e16m4 (size_t avl);
size_t __riscv_vsetvl_e16m8 (size_t avl);
size_t __riscv_vsetvl_e32mf2 (size_t avl);
size_t __riscv_vsetvl_e32m1 (size_t avl);
size_t __riscv_vsetvl_e32m2 (size_t avl);
size_t __riscv_vsetvl_e32m4 (size_t avl);
size_t __riscv_vsetvl_e32m8 (size_t avl);
size_t __riscv_vsetvl_e64m1 (size_t avl);
size_t __riscv_vsetvl_e64m2 (size_t avl);
size_t __riscv_vsetvl_e64m4 (size_t avl);
size_t __riscv_vsetvl_e64m8 (size_t avl);
size_t __riscv_vsetvlmax_e8mf8 ();
size_t __riscv_vsetvlmax_e8mf4 ();
size_t __riscv_vsetvlmax_e8mf2 ();
size_t __riscv_vsetvlmax_e8m1 ();
size_t __riscv_vsetvlmax_e8m2 ();
size_t __riscv_vsetvlmax_e8m4 ();
size_t __riscv_vsetvlmax_e8m8 ();
size_t __riscv_vsetvlmax_e16mf4 ();
size_t __riscv_vsetvlmax_e16mf2 ();
size_t __riscv_vsetvlmax_e16m1 ();
size_t __riscv_vsetvlmax_e16m2 ();
size_t __riscv_vsetvlmax_e16m4 ();
size_t __riscv_vsetvlmax_e16m8 ();
size_t __riscv_vsetvlmax_e32mf2 ();
size_t __riscv_vsetvlmax_e32m1 ();
size_t __riscv_vsetvlmax_e32m2 ();
size_t __riscv_vsetvlmax_e32m4 ();
size_t __riscv_vsetvlmax_e32m8 ();
size_t __riscv_vsetvlmax_e64m1 ();
size_t __riscv_vsetvlmax_e64m2 ();
size_t __riscv_vsetvlmax_e64m4 ();
size_t __riscv_vsetvlmax_e64m8 ();
2. Vector Unit-Stride Instructions
2.1. vle8.v
- Mnemonic
vle8.v vd, (rs1), vm
- Encoding
- Description
-
8-bit unit-stride load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle8_v.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vle8_v_i8mf8 (const int8_t *base, size_t vl);
vint8mf4_t __riscv_vle8_v_i8mf4 (const int8_t *base, size_t vl);
vint8mf2_t __riscv_vle8_v_i8mf2 (const int8_t *base, size_t vl);
vint8m1_t __riscv_vle8_v_i8m1 (const int8_t *base, size_t vl);
vint8m2_t __riscv_vle8_v_i8m2 (const int8_t *base, size_t vl);
vint8m4_t __riscv_vle8_v_i8m4 (const int8_t *base, size_t vl);
vint8m8_t __riscv_vle8_v_i8m8 (const int8_t *base, size_t vl);
vuint8mf8_t __riscv_vle8_v_u8mf8 (const uint8_t *base, size_t vl);
vuint8mf4_t __riscv_vle8_v_u8mf4 (const uint8_t *base, size_t vl);
vuint8mf2_t __riscv_vle8_v_u8mf2 (const uint8_t *base, size_t vl);
vuint8m1_t __riscv_vle8_v_u8m1 (const uint8_t *base, size_t vl);
vuint8m2_t __riscv_vle8_v_u8m2 (const uint8_t *base, size_t vl);
vuint8m4_t __riscv_vle8_v_u8m4 (const uint8_t *base, size_t vl);
vuint8m8_t __riscv_vle8_v_u8m8 (const uint8_t *base, size_t vl);
vint8mf8_t __riscv_vle8_v_i8mf8_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf4_t __riscv_vle8_v_i8mf4_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf2_t __riscv_vle8_v_i8mf2_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8m1_t __riscv_vle8_v_i8m1_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m2_t __riscv_vle8_v_i8m2_m (vbool4_t mask, const int8_t *base, size_t vl);
vint8m4_t __riscv_vle8_v_i8m4_m (vbool2_t mask, const int8_t *base, size_t vl);
vint8m8_t __riscv_vle8_v_i8m8_m (vbool1_t mask, const int8_t *base, size_t vl);
vuint8mf8_t __riscv_vle8_v_u8mf8_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf4_t __riscv_vle8_v_u8mf4_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf2_t __riscv_vle8_v_u8mf2_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8m1_t __riscv_vle8_v_u8m1_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m2_t __riscv_vle8_v_u8m2_m (vbool4_t mask, const uint8_t *base, size_t vl);
vuint8m4_t __riscv_vle8_v_u8m4_m (vbool2_t mask, const uint8_t *base, size_t vl);
vuint8m8_t __riscv_vle8_v_u8m8_m (vbool1_t mask, const uint8_t *base, size_t vl);
2.2. vle16.v
- Mnemonic
vle16.v vd, (rs1), vm
- Encoding
- Description
-
16-bit unit-stride load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle16_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vle16_v_f16mf4 (const float16_t *base, size_t vl);
vfloat16mf2_t __riscv_vle16_v_f16mf2 (const float16_t *base, size_t vl);
vfloat16m1_t __riscv_vle16_v_f16m1 (const float16_t *base, size_t vl);
vfloat16m2_t __riscv_vle16_v_f16m2 (const float16_t *base, size_t vl);
vfloat16m4_t __riscv_vle16_v_f16m4 (const float16_t *base, size_t vl);
vfloat16m8_t __riscv_vle16_v_f16m8 (const float16_t *base, size_t vl);
vint16mf4_t __riscv_vle16_v_i16mf4 (const int16_t *base, size_t vl);
vint16mf2_t __riscv_vle16_v_i16mf2 (const int16_t *base, size_t vl);
vint16m1_t __riscv_vle16_v_i16m1 (const int16_t *base, size_t vl);
vint16m2_t __riscv_vle16_v_i16m2 (const int16_t *base, size_t vl);
vint16m4_t __riscv_vle16_v_i16m4 (const int16_t *base, size_t vl);
vint16m8_t __riscv_vle16_v_i16m8 (const int16_t *base, size_t vl);
vuint16mf4_t __riscv_vle16_v_u16mf4 (const uint16_t *base, size_t vl);
vuint16mf2_t __riscv_vle16_v_u16mf2 (const uint16_t *base, size_t vl);
vuint16m1_t __riscv_vle16_v_u16m1 (const uint16_t *base, size_t vl);
vuint16m2_t __riscv_vle16_v_u16m2 (const uint16_t *base, size_t vl);
vuint16m4_t __riscv_vle16_v_u16m4 (const uint16_t *base, size_t vl);
vuint16m8_t __riscv_vle16_v_u16m8 (const uint16_t *base, size_t vl);
vfloat16mf4_t __riscv_vle16_v_f16mf4_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf2_t __riscv_vle16_v_f16mf2_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16m1_t __riscv_vle16_v_f16m1_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m2_t __riscv_vle16_v_f16m2_m (vbool8_t mask, const float16_t *base, size_t vl);
vfloat16m4_t __riscv_vle16_v_f16m4_m (vbool4_t mask, const float16_t *base, size_t vl);
vfloat16m8_t __riscv_vle16_v_f16m8_m (vbool2_t mask, const float16_t *base, size_t vl);
vint16mf4_t __riscv_vle16_v_i16mf4_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf2_t __riscv_vle16_v_i16mf2_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16m1_t __riscv_vle16_v_i16m1_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m2_t __riscv_vle16_v_i16m2_m (vbool8_t mask, const int16_t *base, size_t vl);
vint16m4_t __riscv_vle16_v_i16m4_m (vbool4_t mask, const int16_t *base, size_t vl);
vint16m8_t __riscv_vle16_v_i16m8_m (vbool2_t mask, const int16_t *base, size_t vl);
vuint16mf4_t __riscv_vle16_v_u16mf4_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf2_t __riscv_vle16_v_u16mf2_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16m1_t __riscv_vle16_v_u16m1_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m2_t __riscv_vle16_v_u16m2_m (vbool8_t mask, const uint16_t *base, size_t vl);
vuint16m4_t __riscv_vle16_v_u16m4_m (vbool4_t mask, const uint16_t *base, size_t vl);
vuint16m8_t __riscv_vle16_v_u16m8_m (vbool2_t mask, const uint16_t *base, size_t vl);
2.3. vle32.v
- Mnemonic
vle32.v vd, (rs1), vm
- Encoding
- Description
-
32-bit unit-stride load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle32_v.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vle32_v_f32mf2 (const float32_t *base, size_t vl);
vfloat32m1_t __riscv_vle32_v_f32m1 (const float32_t *base, size_t vl);
vfloat32m2_t __riscv_vle32_v_f32m2 (const float32_t *base, size_t vl);
vfloat32m4_t __riscv_vle32_v_f32m4 (const float32_t *base, size_t vl);
vfloat32m8_t __riscv_vle32_v_f32m8 (const float32_t *base, size_t vl);
vint32mf2_t __riscv_vle32_v_i32mf2 (const int32_t *base, size_t vl);
vint32m1_t __riscv_vle32_v_i32m1 (const int32_t *base, size_t vl);
vint32m2_t __riscv_vle32_v_i32m2 (const int32_t *base, size_t vl);
vint32m4_t __riscv_vle32_v_i32m4 (const int32_t *base, size_t vl);
vint32m8_t __riscv_vle32_v_i32m8 (const int32_t *base, size_t vl);
vuint32mf2_t __riscv_vle32_v_u32mf2 (const uint32_t *base, size_t vl);
vuint32m1_t __riscv_vle32_v_u32m1 (const uint32_t *base, size_t vl);
vuint32m2_t __riscv_vle32_v_u32m2 (const uint32_t *base, size_t vl);
vuint32m4_t __riscv_vle32_v_u32m4 (const uint32_t *base, size_t vl);
vuint32m8_t __riscv_vle32_v_u32m8 (const uint32_t *base, size_t vl);
vfloat32mf2_t __riscv_vle32_v_f32mf2_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32m1_t __riscv_vle32_v_f32m1_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m2_t __riscv_vle32_v_f32m2_m (vbool16_t mask, const float32_t *base, size_t vl);
vfloat32m4_t __riscv_vle32_v_f32m4_m (vbool8_t mask, const float32_t *base, size_t vl);
vfloat32m8_t __riscv_vle32_v_f32m8_m (vbool4_t mask, const float32_t *base, size_t vl);
vint32mf2_t __riscv_vle32_v_i32mf2_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32m1_t __riscv_vle32_v_i32m1_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m2_t __riscv_vle32_v_i32m2_m (vbool16_t mask, const int32_t *base, size_t vl);
vint32m4_t __riscv_vle32_v_i32m4_m (vbool8_t mask, const int32_t *base, size_t vl);
vint32m8_t __riscv_vle32_v_i32m8_m (vbool4_t mask, const int32_t *base, size_t vl);
vuint32mf2_t __riscv_vle32_v_u32mf2_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32m1_t __riscv_vle32_v_u32m1_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m2_t __riscv_vle32_v_u32m2_m (vbool16_t mask, const uint32_t *base, size_t vl);
vuint32m4_t __riscv_vle32_v_u32m4_m (vbool8_t mask, const uint32_t *base, size_t vl);
vuint32m8_t __riscv_vle32_v_u32m8_m (vbool4_t mask, const uint32_t *base, size_t vl);
2.4. vle64.v
- Mnemonic
vle64.v vd, (rs1), vm
- Encoding
- Description
-
64-bit unit-stride load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle64_v.h
- Intrinsic Functions
Details
vfloat64m1_t __riscv_vle64_v_f64m1 (const float64_t *base, size_t vl);
vfloat64m2_t __riscv_vle64_v_f64m2 (const float64_t *base, size_t vl);
vfloat64m4_t __riscv_vle64_v_f64m4 (const float64_t *base, size_t vl);
vfloat64m8_t __riscv_vle64_v_f64m8 (const float64_t *base, size_t vl);
vint64m1_t __riscv_vle64_v_i64m1 (const int64_t *base, size_t vl);
vint64m2_t __riscv_vle64_v_i64m2 (const int64_t *base, size_t vl);
vint64m4_t __riscv_vle64_v_i64m4 (const int64_t *base, size_t vl);
vint64m8_t __riscv_vle64_v_i64m8 (const int64_t *base, size_t vl);
vuint64m1_t __riscv_vle64_v_u64m1 (const uint64_t *base, size_t vl);
vuint64m2_t __riscv_vle64_v_u64m2 (const uint64_t *base, size_t vl);
vuint64m4_t __riscv_vle64_v_u64m4 (const uint64_t *base, size_t vl);
vuint64m8_t __riscv_vle64_v_u64m8 (const uint64_t *base, size_t vl);
vfloat64m1_t __riscv_vle64_v_f64m1_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m2_t __riscv_vle64_v_f64m2_m (vbool32_t mask, const float64_t *base, size_t vl);
vfloat64m4_t __riscv_vle64_v_f64m4_m (vbool16_t mask, const float64_t *base, size_t vl);
vfloat64m8_t __riscv_vle64_v_f64m8_m (vbool8_t mask, const float64_t *base, size_t vl);
vint64m1_t __riscv_vle64_v_i64m1_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m2_t __riscv_vle64_v_i64m2_m (vbool32_t mask, const int64_t *base, size_t vl);
vint64m4_t __riscv_vle64_v_i64m4_m (vbool16_t mask, const int64_t *base, size_t vl);
vint64m8_t __riscv_vle64_v_i64m8_m (vbool8_t mask, const int64_t *base, size_t vl);
vuint64m1_t __riscv_vle64_v_u64m1_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m2_t __riscv_vle64_v_u64m2_m (vbool32_t mask, const uint64_t *base, size_t vl);
vuint64m4_t __riscv_vle64_v_u64m4_m (vbool16_t mask, const uint64_t *base, size_t vl);
vuint64m8_t __riscv_vle64_v_u64m8_m (vbool8_t mask, const uint64_t *base, size_t vl);
2.5. vse8.v
- Mnemonic
vse8.v vs3, (rs1), vm
- Encoding
- Description
-
8-bit unit-stride store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse8_v.h
- Intrinsic Functions
Details
void __riscv_vse8_v_i8mf8 (int8_t *base, vint8mf8_t value, size_t vl);
void __riscv_vse8_v_i8mf4 (int8_t *base, vint8mf4_t value, size_t vl);
void __riscv_vse8_v_i8mf2 (int8_t *base, vint8mf2_t value, size_t vl);
void __riscv_vse8_v_i8m1 (int8_t *base, vint8m1_t value, size_t vl);
void __riscv_vse8_v_i8m2 (int8_t *base, vint8m2_t value, size_t vl);
void __riscv_vse8_v_i8m4 (int8_t *base, vint8m4_t value, size_t vl);
void __riscv_vse8_v_i8m8 (int8_t *base, vint8m8_t value, size_t vl);
void __riscv_vse8_v_u8mf8 (uint8_t *base, vuint8mf8_t value, size_t vl);
void __riscv_vse8_v_u8mf4 (uint8_t *base, vuint8mf4_t value, size_t vl);
void __riscv_vse8_v_u8mf2 (uint8_t *base, vuint8mf2_t value, size_t vl);
void __riscv_vse8_v_u8m1 (uint8_t *base, vuint8m1_t value, size_t vl);
void __riscv_vse8_v_u8m2 (uint8_t *base, vuint8m2_t value, size_t vl);
void __riscv_vse8_v_u8m4 (uint8_t *base, vuint8m4_t value, size_t vl);
void __riscv_vse8_v_u8m8 (uint8_t *base, vuint8m8_t value, size_t vl);
void __riscv_vse8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl);
void __riscv_vse8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl);
void __riscv_vse8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl);
void __riscv_vse8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl);
void __riscv_vse8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl);
void __riscv_vse8_v_i8m4_m (vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl);
void __riscv_vse8_v_i8m8_m (vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl);
void __riscv_vse8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl);
void __riscv_vse8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl);
void __riscv_vse8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl);
void __riscv_vse8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl);
void __riscv_vse8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl);
void __riscv_vse8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl);
void __riscv_vse8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl);
2.6. vse16.v
- Mnemonic
vse16.v vs3, (rs1), vm
- Encoding
- Description
-
16-bit unit-stride store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse16_v.h
- Intrinsic Functions
Details
void __riscv_vse16_v_f16mf4 (float16_t *base, vfloat16mf4_t value, size_t vl);
void __riscv_vse16_v_f16mf2 (float16_t *base, vfloat16mf2_t value, size_t vl);
void __riscv_vse16_v_f16m1 (float16_t *base, vfloat16m1_t value, size_t vl);
void __riscv_vse16_v_f16m2 (float16_t *base, vfloat16m2_t value, size_t vl);
void __riscv_vse16_v_f16m4 (float16_t *base, vfloat16m4_t value, size_t vl);
void __riscv_vse16_v_f16m8 (float16_t *base, vfloat16m8_t value, size_t vl);
void __riscv_vse16_v_i16mf4 (int16_t *base, vint16mf4_t value, size_t vl);
void __riscv_vse16_v_i16mf2 (int16_t *base, vint16mf2_t value, size_t vl);
void __riscv_vse16_v_i16m1 (int16_t *base, vint16m1_t value, size_t vl);
void __riscv_vse16_v_i16m2 (int16_t *base, vint16m2_t value, size_t vl);
void __riscv_vse16_v_i16m4 (int16_t *base, vint16m4_t value, size_t vl);
void __riscv_vse16_v_i16m8 (int16_t *base, vint16m8_t value, size_t vl);
void __riscv_vse16_v_u16mf4 (uint16_t *base, vuint16mf4_t value, size_t vl);
void __riscv_vse16_v_u16mf2 (uint16_t *base, vuint16mf2_t value, size_t vl);
void __riscv_vse16_v_u16m1 (uint16_t *base, vuint16m1_t value, size_t vl);
void __riscv_vse16_v_u16m2 (uint16_t *base, vuint16m2_t value, size_t vl);
void __riscv_vse16_v_u16m4 (uint16_t *base, vuint16m4_t value, size_t vl);
void __riscv_vse16_v_u16m8 (uint16_t *base, vuint16m8_t value, size_t vl);
void __riscv_vse16_v_f16mf4_m (vbool64_t mask, float16_t *base, vfloat16mf4_t value, size_t vl);
void __riscv_vse16_v_f16mf2_m (vbool32_t mask, float16_t *base, vfloat16mf2_t value, size_t vl);
void __riscv_vse16_v_f16m1_m (vbool16_t mask, float16_t *base, vfloat16m1_t value, size_t vl);
void __riscv_vse16_v_f16m2_m (vbool8_t mask, float16_t *base, vfloat16m2_t value, size_t vl);
void __riscv_vse16_v_f16m4_m (vbool4_t mask, float16_t *base, vfloat16m4_t value, size_t vl);
void __riscv_vse16_v_f16m8_m (vbool2_t mask, float16_t *base, vfloat16m8_t value, size_t vl);
void __riscv_vse16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl);
void __riscv_vse16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl);
void __riscv_vse16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl);
void __riscv_vse16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl);
void __riscv_vse16_v_i16m4_m (vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl);
void __riscv_vse16_v_i16m8_m (vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl);
void __riscv_vse16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl);
void __riscv_vse16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl);
void __riscv_vse16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl);
void __riscv_vse16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl);
void __riscv_vse16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl);
void __riscv_vse16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl);
2.7. vse32.v
- Mnemonic
vse32.v vs3, (rs1), vm
- Encoding
- Description
-
32-bit unit-stride store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse32_v.h
- Intrinsic Functions
Details
void __riscv_vse32_v_f32mf2 (float32_t *base, vfloat32mf2_t value, size_t vl);
void __riscv_vse32_v_f32m1 (float32_t *base, vfloat32m1_t value, size_t vl);
void __riscv_vse32_v_f32m2 (float32_t *base, vfloat32m2_t value, size_t vl);
void __riscv_vse32_v_f32m4 (float32_t *base, vfloat32m4_t value, size_t vl);
void __riscv_vse32_v_f32m8 (float32_t *base, vfloat32m8_t value, size_t vl);
void __riscv_vse32_v_i32mf2 (int32_t *base, vint32mf2_t value, size_t vl);
void __riscv_vse32_v_i32m1 (int32_t *base, vint32m1_t value, size_t vl);
void __riscv_vse32_v_i32m2 (int32_t *base, vint32m2_t value, size_t vl);
void __riscv_vse32_v_i32m4 (int32_t *base, vint32m4_t value, size_t vl);
void __riscv_vse32_v_i32m8 (int32_t *base, vint32m8_t value, size_t vl);
void __riscv_vse32_v_u32mf2 (uint32_t *base, vuint32mf2_t value, size_t vl);
void __riscv_vse32_v_u32m1 (uint32_t *base, vuint32m1_t value, size_t vl);
void __riscv_vse32_v_u32m2 (uint32_t *base, vuint32m2_t value, size_t vl);
void __riscv_vse32_v_u32m4 (uint32_t *base, vuint32m4_t value, size_t vl);
void __riscv_vse32_v_u32m8 (uint32_t *base, vuint32m8_t value, size_t vl);
void __riscv_vse32_v_f32mf2_m (vbool64_t mask, float32_t *base, vfloat32mf2_t value, size_t vl);
void __riscv_vse32_v_f32m1_m (vbool32_t mask, float32_t *base, vfloat32m1_t value, size_t vl);
void __riscv_vse32_v_f32m2_m (vbool16_t mask, float32_t *base, vfloat32m2_t value, size_t vl);
void __riscv_vse32_v_f32m4_m (vbool8_t mask, float32_t *base, vfloat32m4_t value, size_t vl);
void __riscv_vse32_v_f32m8_m (vbool4_t mask, float32_t *base, vfloat32m8_t value, size_t vl);
void __riscv_vse32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl);
void __riscv_vse32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl);
void __riscv_vse32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl);
void __riscv_vse32_v_i32m4_m (vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl);
void __riscv_vse32_v_i32m8_m (vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl);
void __riscv_vse32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl);
void __riscv_vse32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl);
void __riscv_vse32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl);
void __riscv_vse32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl);
void __riscv_vse32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl);
2.8. vse64.v
- Mnemonic
vse64.v vs3, (rs1), vm
- Encoding
- Description
-
64-bit unit-stride store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse64_v.h
- Intrinsic Functions
Details
void __riscv_vse64_v_f64m1 (float64_t *base, vfloat64m1_t value, size_t vl);
void __riscv_vse64_v_f64m2 (float64_t *base, vfloat64m2_t value, size_t vl);
void __riscv_vse64_v_f64m4 (float64_t *base, vfloat64m4_t value, size_t vl);
void __riscv_vse64_v_f64m8 (float64_t *base, vfloat64m8_t value, size_t vl);
void __riscv_vse64_v_i64m1 (int64_t *base, vint64m1_t value, size_t vl);
void __riscv_vse64_v_i64m2 (int64_t *base, vint64m2_t value, size_t vl);
void __riscv_vse64_v_i64m4 (int64_t *base, vint64m4_t value, size_t vl);
void __riscv_vse64_v_i64m8 (int64_t *base, vint64m8_t value, size_t vl);
void __riscv_vse64_v_u64m1 (uint64_t *base, vuint64m1_t value, size_t vl);
void __riscv_vse64_v_u64m2 (uint64_t *base, vuint64m2_t value, size_t vl);
void __riscv_vse64_v_u64m4 (uint64_t *base, vuint64m4_t value, size_t vl);
void __riscv_vse64_v_u64m8 (uint64_t *base, vuint64m8_t value, size_t vl);
void __riscv_vse64_v_f64m1_m (vbool64_t mask, float64_t *base, vfloat64m1_t value, size_t vl);
void __riscv_vse64_v_f64m2_m (vbool32_t mask, float64_t *base, vfloat64m2_t value, size_t vl);
void __riscv_vse64_v_f64m4_m (vbool16_t mask, float64_t *base, vfloat64m4_t value, size_t vl);
void __riscv_vse64_v_f64m8_m (vbool8_t mask, float64_t *base, vfloat64m8_t value, size_t vl);
void __riscv_vse64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl);
void __riscv_vse64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl);
void __riscv_vse64_v_i64m4_m (vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl);
void __riscv_vse64_v_i64m8_m (vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl);
void __riscv_vse64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl);
void __riscv_vse64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl);
void __riscv_vse64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl);
void __riscv_vse64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl);
2.9. vlm.v
- Mnemonic
vlm.v vd, (rs1)
- Encoding
- Description
-
Vector unit-stride mask load. Loads a byte vector of length ceil(vl/8).
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlm_v.h
- Intrinsic Functions
Details
vbool1_t __riscv_vlm_v_b1 (const uint8_t *base, size_t vl);
vbool2_t __riscv_vlm_v_b2 (const uint8_t *base, size_t vl);
vbool4_t __riscv_vlm_v_b4 (const uint8_t *base, size_t vl);
vbool8_t __riscv_vlm_v_b8 (const uint8_t *base, size_t vl);
vbool16_t __riscv_vlm_v_b16 (const uint8_t *base, size_t vl);
vbool32_t __riscv_vlm_v_b32 (const uint8_t *base, size_t vl);
vbool64_t __riscv_vlm_v_b64 (const uint8_t *base, size_t vl);
2.10. vsm.v
- Mnemonic
vsm.v vs3, (rs1)
- Encoding
- Description
-
Vector unit-stride mask store. Stores a byte vector of length ceil(vl/8).
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsm_v.h
- Intrinsic Functions
Details
void __riscv_vsm_v_b1 (uint8_t *base, vbool1_t value, size_t vl);
void __riscv_vsm_v_b2 (uint8_t *base, vbool2_t value, size_t vl);
void __riscv_vsm_v_b4 (uint8_t *base, vbool4_t value, size_t vl);
void __riscv_vsm_v_b8 (uint8_t *base, vbool8_t value, size_t vl);
void __riscv_vsm_v_b16 (uint8_t *base, vbool16_t value, size_t vl);
void __riscv_vsm_v_b32 (uint8_t *base, vbool32_t value, size_t vl);
void __riscv_vsm_v_b64 (uint8_t *base, vbool64_t value, size_t vl);
3. Vector Strided Instructions
3.1. vlse8.v
- Mnemonic
vlse8.v vd, (rs1), rs2, vm
- Encoding
- Description
-
8-bit strided load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse8_v.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vlse8_v_i8mf8 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4_t __riscv_vlse8_v_i8mf4 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2_t __riscv_vlse8_v_i8mf2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1_t __riscv_vlse8_v_i8m1 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2_t __riscv_vlse8_v_i8m2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m4_t __riscv_vlse8_v_i8m4 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m8_t __riscv_vlse8_v_i8m8 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8_t __riscv_vlse8_v_u8mf8 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4_t __riscv_vlse8_v_u8mf4 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2_t __riscv_vlse8_v_u8mf2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1_t __riscv_vlse8_v_u8m1 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2_t __riscv_vlse8_v_u8m2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m4_t __riscv_vlse8_v_u8m4 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m8_t __riscv_vlse8_v_u8m8 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8_t __riscv_vlse8_v_i8mf8_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4_t __riscv_vlse8_v_i8mf4_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2_t __riscv_vlse8_v_i8mf2_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1_t __riscv_vlse8_v_i8m1_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2_t __riscv_vlse8_v_i8m2_m (vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m4_t __riscv_vlse8_v_i8m4_m (vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m8_t __riscv_vlse8_v_i8m8_m (vbool1_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8_t __riscv_vlse8_v_u8mf8_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4_t __riscv_vlse8_v_u8mf4_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2_t __riscv_vlse8_v_u8mf2_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1_t __riscv_vlse8_v_u8m1_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2_t __riscv_vlse8_v_u8m2_m (vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m4_t __riscv_vlse8_v_u8m4_m (vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m8_t __riscv_vlse8_v_u8m8_m (vbool1_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
3.2. vlse16.v
- Mnemonic
vlse16.v vd, (rs1), rs2, vm
- Encoding
- Description
-
16-bit strided load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse16_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vlse16_v_f16mf4 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2_t __riscv_vlse16_v_f16mf2 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1_t __riscv_vlse16_v_f16m1 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2_t __riscv_vlse16_v_f16m2 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m4_t __riscv_vlse16_v_f16m4 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m8_t __riscv_vlse16_v_f16m8 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4_t __riscv_vlse16_v_i16mf4 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2_t __riscv_vlse16_v_i16mf2 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1_t __riscv_vlse16_v_i16m1 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2_t __riscv_vlse16_v_i16m2 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m4_t __riscv_vlse16_v_i16m4 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m8_t __riscv_vlse16_v_i16m8 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4_t __riscv_vlse16_v_u16mf4 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2_t __riscv_vlse16_v_u16mf2 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1_t __riscv_vlse16_v_u16m1 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2_t __riscv_vlse16_v_u16m2 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m4_t __riscv_vlse16_v_u16m4 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m8_t __riscv_vlse16_v_u16m8 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4_t __riscv_vlse16_v_f16mf4_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2_t __riscv_vlse16_v_f16mf2_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1_t __riscv_vlse16_v_f16m1_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2_t __riscv_vlse16_v_f16m2_m (vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m4_t __riscv_vlse16_v_f16m4_m (vbool4_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m8_t __riscv_vlse16_v_f16m8_m (vbool2_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4_t __riscv_vlse16_v_i16mf4_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2_t __riscv_vlse16_v_i16mf2_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1_t __riscv_vlse16_v_i16m1_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2_t __riscv_vlse16_v_i16m2_m (vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m4_t __riscv_vlse16_v_i16m4_m (vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m8_t __riscv_vlse16_v_i16m8_m (vbool2_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4_t __riscv_vlse16_v_u16mf4_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2_t __riscv_vlse16_v_u16mf2_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1_t __riscv_vlse16_v_u16m1_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2_t __riscv_vlse16_v_u16m2_m (vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m4_t __riscv_vlse16_v_u16m4_m (vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m8_t __riscv_vlse16_v_u16m8_m (vbool2_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
3.3. vlse32.v
- Mnemonic
vlse32.v vd, (rs1), rs2, vm
- Encoding
- Description
-
32-bit strided load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse32_v.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vlse32_v_f32mf2 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1_t __riscv_vlse32_v_f32m1 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2_t __riscv_vlse32_v_f32m2 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m4_t __riscv_vlse32_v_f32m4 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m8_t __riscv_vlse32_v_f32m8 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2_t __riscv_vlse32_v_i32mf2 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1_t __riscv_vlse32_v_i32m1 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2_t __riscv_vlse32_v_i32m2 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m4_t __riscv_vlse32_v_i32m4 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m8_t __riscv_vlse32_v_i32m8 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2_t __riscv_vlse32_v_u32mf2 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1_t __riscv_vlse32_v_u32m1 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2_t __riscv_vlse32_v_u32m2 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m4_t __riscv_vlse32_v_u32m4 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m8_t __riscv_vlse32_v_u32m8 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2_t __riscv_vlse32_v_f32mf2_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1_t __riscv_vlse32_v_f32m1_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2_t __riscv_vlse32_v_f32m2_m (vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m4_t __riscv_vlse32_v_f32m4_m (vbool8_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m8_t __riscv_vlse32_v_f32m8_m (vbool4_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2_t __riscv_vlse32_v_i32mf2_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1_t __riscv_vlse32_v_i32m1_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2_t __riscv_vlse32_v_i32m2_m (vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m4_t __riscv_vlse32_v_i32m4_m (vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m8_t __riscv_vlse32_v_i32m8_m (vbool4_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2_t __riscv_vlse32_v_u32mf2_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1_t __riscv_vlse32_v_u32m1_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2_t __riscv_vlse32_v_u32m2_m (vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m4_t __riscv_vlse32_v_u32m4_m (vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m8_t __riscv_vlse32_v_u32m8_m (vbool4_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
3.4. vlse64.v
- Mnemonic
vlse64.v vd, (rs1), rs2, vm
- Encoding
- Description
-
64-bit strided load (EEW=64); the byte stride between consecutive elements is taken from rs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse64_v.h
- Intrinsic Functions
Details
vfloat64m1_t __riscv_vlse64_v_f64m1 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2_t __riscv_vlse64_v_f64m2 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m4_t __riscv_vlse64_v_f64m4 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m8_t __riscv_vlse64_v_f64m8 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1_t __riscv_vlse64_v_i64m1 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2_t __riscv_vlse64_v_i64m2 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m4_t __riscv_vlse64_v_i64m4 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m8_t __riscv_vlse64_v_i64m8 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1_t __riscv_vlse64_v_u64m1 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2_t __riscv_vlse64_v_u64m2 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m4_t __riscv_vlse64_v_u64m4 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m8_t __riscv_vlse64_v_u64m8 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1_t __riscv_vlse64_v_f64m1_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2_t __riscv_vlse64_v_f64m2_m (vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m4_t __riscv_vlse64_v_f64m4_m (vbool16_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m8_t __riscv_vlse64_v_f64m8_m (vbool8_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1_t __riscv_vlse64_v_i64m1_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2_t __riscv_vlse64_v_i64m2_m (vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m4_t __riscv_vlse64_v_i64m4_m (vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m8_t __riscv_vlse64_v_i64m8_m (vbool8_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1_t __riscv_vlse64_v_u64m1_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2_t __riscv_vlse64_v_u64m2_m (vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m4_t __riscv_vlse64_v_u64m4_m (vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m8_t __riscv_vlse64_v_u64m8_m (vbool8_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
3.5. vsse8.v
- Mnemonic
vsse8.v vs3, (rs1), rs2, vm
- Encoding
- Description
-
8-bit strided store (EEW=8); the byte stride between consecutive elements is taken from rs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse8_v.h
- Intrinsic Functions
Details
void __riscv_vsse8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl);
void __riscv_vsse8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl);
void __riscv_vsse8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl);
void __riscv_vsse8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl);
void __riscv_vsse8_v_i8m2 (int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl);
void __riscv_vsse8_v_i8m4 (int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl);
void __riscv_vsse8_v_i8m8 (int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl);
void __riscv_vsse8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl);
void __riscv_vsse8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl);
void __riscv_vsse8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl);
void __riscv_vsse8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl);
void __riscv_vsse8_v_u8m2 (uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl);
void __riscv_vsse8_v_u8m4 (uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl);
void __riscv_vsse8_v_u8m8 (uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl);
void __riscv_vsse8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl);
void __riscv_vsse8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl);
void __riscv_vsse8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl);
void __riscv_vsse8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl);
void __riscv_vsse8_v_i8m2_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl);
void __riscv_vsse8_v_i8m4_m (vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl);
void __riscv_vsse8_v_i8m8_m (vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl);
void __riscv_vsse8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl);
void __riscv_vsse8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl);
void __riscv_vsse8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl);
void __riscv_vsse8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl);
void __riscv_vsse8_v_u8m2_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl);
void __riscv_vsse8_v_u8m4_m (vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl);
void __riscv_vsse8_v_u8m8_m (vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl);
3.6. vsse16.v
- Mnemonic
vsse16.v vs3, (rs1), rs2, vm
- Encoding
- Description
-
16-bit strided store (EEW=16); the byte stride between consecutive elements is taken from rs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse16_v.h
- Intrinsic Functions
Details
void __riscv_vsse16_v_f16mf4 (float16_t *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl);
void __riscv_vsse16_v_f16mf2 (float16_t *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl);
void __riscv_vsse16_v_f16m1 (float16_t *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl);
void __riscv_vsse16_v_f16m2 (float16_t *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl);
void __riscv_vsse16_v_f16m4 (float16_t *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl);
void __riscv_vsse16_v_f16m8 (float16_t *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl);
void __riscv_vsse16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl);
void __riscv_vsse16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl);
void __riscv_vsse16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl);
void __riscv_vsse16_v_i16m2 (int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl);
void __riscv_vsse16_v_i16m4 (int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl);
void __riscv_vsse16_v_i16m8 (int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl);
void __riscv_vsse16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl);
void __riscv_vsse16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl);
void __riscv_vsse16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl);
void __riscv_vsse16_v_u16m2 (uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl);
void __riscv_vsse16_v_u16m4 (uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl);
void __riscv_vsse16_v_u16m8 (uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl);
void __riscv_vsse16_v_f16mf4_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl);
void __riscv_vsse16_v_f16mf2_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl);
void __riscv_vsse16_v_f16m1_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl);
void __riscv_vsse16_v_f16m2_m (vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl);
void __riscv_vsse16_v_f16m4_m (vbool4_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl);
void __riscv_vsse16_v_f16m8_m (vbool2_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl);
void __riscv_vsse16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl);
void __riscv_vsse16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl);
void __riscv_vsse16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl);
void __riscv_vsse16_v_i16m2_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl);
void __riscv_vsse16_v_i16m4_m (vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl);
void __riscv_vsse16_v_i16m8_m (vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl);
void __riscv_vsse16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl);
void __riscv_vsse16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl);
void __riscv_vsse16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl);
void __riscv_vsse16_v_u16m2_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl);
void __riscv_vsse16_v_u16m4_m (vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl);
void __riscv_vsse16_v_u16m8_m (vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl);
3.7. vsse32.v
- Mnemonic
vsse32.v vs3, (rs1), rs2, vm
- Encoding
- Description
-
32-bit strided store (EEW=32); the byte stride between consecutive elements is taken from rs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse32_v.h
- Intrinsic Functions
Details
void __riscv_vsse32_v_f32mf2 (float32_t *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl);
void __riscv_vsse32_v_f32m1 (float32_t *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl);
void __riscv_vsse32_v_f32m2 (float32_t *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl);
void __riscv_vsse32_v_f32m4 (float32_t *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl);
void __riscv_vsse32_v_f32m8 (float32_t *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl);
void __riscv_vsse32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl);
void __riscv_vsse32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl);
void __riscv_vsse32_v_i32m2 (int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl);
void __riscv_vsse32_v_i32m4 (int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl);
void __riscv_vsse32_v_i32m8 (int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl);
void __riscv_vsse32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl);
void __riscv_vsse32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl);
void __riscv_vsse32_v_u32m2 (uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl);
void __riscv_vsse32_v_u32m4 (uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl);
void __riscv_vsse32_v_u32m8 (uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl);
void __riscv_vsse32_v_f32mf2_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl);
void __riscv_vsse32_v_f32m1_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl);
void __riscv_vsse32_v_f32m2_m (vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl);
void __riscv_vsse32_v_f32m4_m (vbool8_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl);
void __riscv_vsse32_v_f32m8_m (vbool4_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl);
void __riscv_vsse32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl);
void __riscv_vsse32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl);
void __riscv_vsse32_v_i32m2_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl);
void __riscv_vsse32_v_i32m4_m (vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl);
void __riscv_vsse32_v_i32m8_m (vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl);
void __riscv_vsse32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl);
void __riscv_vsse32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl);
void __riscv_vsse32_v_u32m2_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl);
void __riscv_vsse32_v_u32m4_m (vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl);
void __riscv_vsse32_v_u32m8_m (vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl);
3.8. vsse64.v
- Mnemonic
vsse64.v vs3, (rs1), rs2, vm
- Encoding
- Description
-
64-bit strided store (EEW=64); the byte stride between consecutive elements is taken from rs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse64_v.h
- Intrinsic Functions
Details
void __riscv_vsse64_v_f64m1 (float64_t *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl);
void __riscv_vsse64_v_f64m2 (float64_t *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl);
void __riscv_vsse64_v_f64m4 (float64_t *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl);
void __riscv_vsse64_v_f64m8 (float64_t *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl);
void __riscv_vsse64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl);
void __riscv_vsse64_v_i64m2 (int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl);
void __riscv_vsse64_v_i64m4 (int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl);
void __riscv_vsse64_v_i64m8 (int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl);
void __riscv_vsse64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl);
void __riscv_vsse64_v_u64m2 (uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl);
void __riscv_vsse64_v_u64m4 (uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl);
void __riscv_vsse64_v_u64m8 (uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl);
void __riscv_vsse64_v_f64m1_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl);
void __riscv_vsse64_v_f64m2_m (vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl);
void __riscv_vsse64_v_f64m4_m (vbool16_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl);
void __riscv_vsse64_v_f64m8_m (vbool8_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl);
void __riscv_vsse64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl);
void __riscv_vsse64_v_i64m2_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl);
void __riscv_vsse64_v_i64m4_m (vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl);
void __riscv_vsse64_v_i64m8_m (vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl);
void __riscv_vsse64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl);
void __riscv_vsse64_v_u64m2_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl);
void __riscv_vsse64_v_u64m4_m (vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl);
void __riscv_vsse64_v_u64m8_m (vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl);
4. Vector Indexed Instructions
4.1. vluxei8.v
- Mnemonic
vluxei8.v vd, (rs1), vs2, vm
- Encoding
- Description
-
unordered indexed load of SEW-wide elements; vs2 supplies 8-bit unsigned byte offsets added to the base address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei8_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vluxei8_v_f16mf4 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei8_v_f16mf2 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei8_v_f16m1 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei8_v_f16m2 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4_t __riscv_vluxei8_v_f16m4 (const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat16m8_t __riscv_vluxei8_v_f16m8 (const float16_t *base, vuint8m4_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei8_v_f32mf2 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei8_v_f32m1 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei8_v_f32m2 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei8_v_f32m4 (const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat32m8_t __riscv_vluxei8_v_f32m8 (const float32_t *base, vuint8m2_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei8_v_f64m1 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei8_v_f64m2 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei8_v_f64m4 (const float64_t *base, vuint8mf2_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei8_v_f64m8 (const float64_t *base, vuint8m1_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei8_v_i8mf8 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei8_v_i8mf4 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei8_v_i8mf2 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1_t __riscv_vluxei8_v_i8m1 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2_t __riscv_vluxei8_v_i8m2 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4_t __riscv_vluxei8_v_i8m4 (const int8_t *base, vuint8m4_t bindex, size_t vl);
vint8m8_t __riscv_vluxei8_v_i8m8 (const int8_t *base, vuint8m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei8_v_i16mf4 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei8_v_i16mf2 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1_t __riscv_vluxei8_v_i16m1 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2_t __riscv_vluxei8_v_i16m2 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4_t __riscv_vluxei8_v_i16m4 (const int16_t *base, vuint8m2_t bindex, size_t vl);
vint16m8_t __riscv_vluxei8_v_i16m8 (const int16_t *base, vuint8m4_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei8_v_i32mf2 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1_t __riscv_vluxei8_v_i32m1 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2_t __riscv_vluxei8_v_i32m2 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4_t __riscv_vluxei8_v_i32m4 (const int32_t *base, vuint8m1_t bindex, size_t vl);
vint32m8_t __riscv_vluxei8_v_i32m8 (const int32_t *base, vuint8m2_t bindex, size_t vl);
vint64m1_t __riscv_vluxei8_v_i64m1 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2_t __riscv_vluxei8_v_i64m2 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4_t __riscv_vluxei8_v_i64m4 (const int64_t *base, vuint8mf2_t bindex, size_t vl);
vint64m8_t __riscv_vluxei8_v_i64m8 (const int64_t *base, vuint8m1_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei8_v_u8mf8 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei8_v_u8mf4 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei8_v_u8mf2 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei8_v_u8m1 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2_t __riscv_vluxei8_v_u8m2 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4_t __riscv_vluxei8_v_u8m4 (const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint8m8_t __riscv_vluxei8_v_u8m8 (const uint8_t *base, vuint8m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei8_v_u16mf4 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei8_v_u16mf2 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei8_v_u16m1 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei8_v_u16m2 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4_t __riscv_vluxei8_v_u16m4 (const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint16m8_t __riscv_vluxei8_v_u16m8 (const uint16_t *base, vuint8m4_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei8_v_u32mf2 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei8_v_u32m1 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei8_v_u32m2 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei8_v_u32m4 (const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint32m8_t __riscv_vluxei8_v_u32m8 (const uint32_t *base, vuint8m2_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei8_v_u64m1 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei8_v_u64m2 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei8_v_u64m4 (const uint64_t *base, vuint8mf2_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei8_v_u64m8 (const uint64_t *base, vuint8m1_t bindex, size_t vl);
vfloat16mf4_t __riscv_vluxei8_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei8_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei8_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei8_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4_t __riscv_vluxei8_v_f16m4_m (vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat16m8_t __riscv_vluxei8_v_f16m8_m (vbool2_t mask, const float16_t *base, vuint8m4_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei8_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei8_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei8_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei8_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat32m8_t __riscv_vluxei8_v_f32m8_m (vbool4_t mask, const float32_t *base, vuint8m2_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei8_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei8_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei8_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei8_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint8m1_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei8_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei8_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei8_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1_t __riscv_vluxei8_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2_t __riscv_vluxei8_v_i8m2_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4_t __riscv_vluxei8_v_i8m4_m (vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl);
vint8m8_t __riscv_vluxei8_v_i8m8_m (vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei8_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei8_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1_t __riscv_vluxei8_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2_t __riscv_vluxei8_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4_t __riscv_vluxei8_v_i16m4_m (vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl);
vint16m8_t __riscv_vluxei8_v_i16m8_m (vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei8_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1_t __riscv_vluxei8_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2_t __riscv_vluxei8_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4_t __riscv_vluxei8_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl);
vint32m8_t __riscv_vluxei8_v_i32m8_m (vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl);
vint64m1_t __riscv_vluxei8_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2_t __riscv_vluxei8_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4_t __riscv_vluxei8_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl);
vint64m8_t __riscv_vluxei8_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei8_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei8_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei8_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei8_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2_t __riscv_vluxei8_v_u8m2_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4_t __riscv_vluxei8_v_u8m4_m (vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint8m8_t __riscv_vluxei8_v_u8m8_m (vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei8_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei8_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei8_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei8_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4_t __riscv_vluxei8_v_u16m4_m (vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint16m8_t __riscv_vluxei8_v_u16m8_m (vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei8_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei8_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei8_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei8_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint32m8_t __riscv_vluxei8_v_u32m8_m (vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei8_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei8_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei8_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei8_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl);
4.2. vluxei16.v
- Mnemonic
vluxei16.v vd, (rs1), vs2, vm
- Encoding
- Description
-
unordered indexed load of SEW-wide elements; vs2 supplies 16-bit unsigned byte offsets added to the base address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei16_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vluxei16_v_f16mf4 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei16_v_f16mf2 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei16_v_f16m1 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei16_v_f16m2 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4_t __riscv_vluxei16_v_f16m4 (const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat16m8_t __riscv_vluxei16_v_f16m8 (const float16_t *base, vuint16m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei16_v_f32mf2 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei16_v_f32m1 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei16_v_f32m2 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei16_v_f32m4 (const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat32m8_t __riscv_vluxei16_v_f32m8 (const float32_t *base, vuint16m4_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei16_v_f64m1 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei16_v_f64m2 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei16_v_f64m4 (const float64_t *base, vuint16m1_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei16_v_f64m8 (const float64_t *base, vuint16m2_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei16_v_i8mf8 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei16_v_i8mf4 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei16_v_i8mf2 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1_t __riscv_vluxei16_v_i8m1 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2_t __riscv_vluxei16_v_i8m2 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4_t __riscv_vluxei16_v_i8m4 (const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei16_v_i16mf4 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei16_v_i16mf2 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1_t __riscv_vluxei16_v_i16m1 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2_t __riscv_vluxei16_v_i16m2 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4_t __riscv_vluxei16_v_i16m4 (const int16_t *base, vuint16m4_t bindex, size_t vl);
vint16m8_t __riscv_vluxei16_v_i16m8 (const int16_t *base, vuint16m8_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei16_v_i32mf2 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1_t __riscv_vluxei16_v_i32m1 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2_t __riscv_vluxei16_v_i32m2 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4_t __riscv_vluxei16_v_i32m4 (const int32_t *base, vuint16m2_t bindex, size_t vl);
vint32m8_t __riscv_vluxei16_v_i32m8 (const int32_t *base, vuint16m4_t bindex, size_t vl);
vint64m1_t __riscv_vluxei16_v_i64m1 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2_t __riscv_vluxei16_v_i64m2 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4_t __riscv_vluxei16_v_i64m4 (const int64_t *base, vuint16m1_t bindex, size_t vl);
vint64m8_t __riscv_vluxei16_v_i64m8 (const int64_t *base, vuint16m2_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei16_v_u8mf8 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei16_v_u8mf4 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei16_v_u8mf2 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei16_v_u8m1 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2_t __riscv_vluxei16_v_u8m2 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4_t __riscv_vluxei16_v_u8m4 (const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei16_v_u16mf4 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei16_v_u16mf2 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei16_v_u16m1 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei16_v_u16m2 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4_t __riscv_vluxei16_v_u16m4 (const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint16m8_t __riscv_vluxei16_v_u16m8 (const uint16_t *base, vuint16m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei16_v_u32mf2 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei16_v_u32m1 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei16_v_u32m2 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei16_v_u32m4 (const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint32m8_t __riscv_vluxei16_v_u32m8 (const uint32_t *base, vuint16m4_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei16_v_u64m1 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei16_v_u64m2 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei16_v_u64m4 (const uint64_t *base, vuint16m1_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei16_v_u64m8 (const uint64_t *base, vuint16m2_t bindex, size_t vl);
vfloat16mf4_t __riscv_vluxei16_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei16_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei16_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei16_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4_t __riscv_vluxei16_v_f16m4_m (vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat16m8_t __riscv_vluxei16_v_f16m8_m (vbool2_t mask, const float16_t *base, vuint16m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei16_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei16_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei16_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei16_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat32m8_t __riscv_vluxei16_v_f32m8_m (vbool4_t mask, const float32_t *base, vuint16m4_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei16_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei16_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei16_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei16_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint16m2_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei16_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei16_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei16_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1_t __riscv_vluxei16_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2_t __riscv_vluxei16_v_i8m2_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4_t __riscv_vluxei16_v_i8m4_m (vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei16_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei16_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1_t __riscv_vluxei16_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2_t __riscv_vluxei16_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4_t __riscv_vluxei16_v_i16m4_m (vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl);
vint16m8_t __riscv_vluxei16_v_i16m8_m (vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei16_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1_t __riscv_vluxei16_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2_t __riscv_vluxei16_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4_t __riscv_vluxei16_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl);
vint32m8_t __riscv_vluxei16_v_i32m8_m (vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl);
vint64m1_t __riscv_vluxei16_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2_t __riscv_vluxei16_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4_t __riscv_vluxei16_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl);
vint64m8_t __riscv_vluxei16_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei16_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei16_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei16_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei16_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2_t __riscv_vluxei16_v_u8m2_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4_t __riscv_vluxei16_v_u8m4_m (vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei16_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei16_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei16_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei16_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4_t __riscv_vluxei16_v_u16m4_m (vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint16m8_t __riscv_vluxei16_v_u16m8_m (vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei16_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei16_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei16_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei16_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint32m8_t __riscv_vluxei16_v_u32m8_m (vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei16_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei16_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei16_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei16_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl);
4.3. vluxei32.v
- Mnemonic
vluxei32.v vd, (rs1), vs2, vm
- Encoding
- Description
-
unordered indexed load: loads SEW-wide data elements from memory, addressed by base plus 32-bit (EEW=32) byte-offset indices taken from vs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei32_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vluxei32_v_f16mf4 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei32_v_f16mf2 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei32_v_f16m1 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei32_v_f16m2 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4_t __riscv_vluxei32_v_f16m4 (const float16_t *base, vuint32m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei32_v_f32mf2 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei32_v_f32m1 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei32_v_f32m2 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei32_v_f32m4 (const float32_t *base, vuint32m4_t bindex, size_t vl);
vfloat32m8_t __riscv_vluxei32_v_f32m8 (const float32_t *base, vuint32m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei32_v_f64m1 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei32_v_f64m2 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei32_v_f64m4 (const float64_t *base, vuint32m2_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei32_v_f64m8 (const float64_t *base, vuint32m4_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei32_v_i8mf8 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei32_v_i8mf4 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei32_v_i8mf2 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1_t __riscv_vluxei32_v_i8m1 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2_t __riscv_vluxei32_v_i8m2 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei32_v_i16mf4 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei32_v_i16mf2 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1_t __riscv_vluxei32_v_i16m1 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2_t __riscv_vluxei32_v_i16m2 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4_t __riscv_vluxei32_v_i16m4 (const int16_t *base, vuint32m8_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei32_v_i32mf2 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1_t __riscv_vluxei32_v_i32m1 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2_t __riscv_vluxei32_v_i32m2 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4_t __riscv_vluxei32_v_i32m4 (const int32_t *base, vuint32m4_t bindex, size_t vl);
vint32m8_t __riscv_vluxei32_v_i32m8 (const int32_t *base, vuint32m8_t bindex, size_t vl);
vint64m1_t __riscv_vluxei32_v_i64m1 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2_t __riscv_vluxei32_v_i64m2 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4_t __riscv_vluxei32_v_i64m4 (const int64_t *base, vuint32m2_t bindex, size_t vl);
vint64m8_t __riscv_vluxei32_v_i64m8 (const int64_t *base, vuint32m4_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei32_v_u8mf8 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei32_v_u8mf4 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei32_v_u8mf2 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei32_v_u8m1 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2_t __riscv_vluxei32_v_u8m2 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei32_v_u16mf4 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei32_v_u16mf2 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei32_v_u16m1 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei32_v_u16m2 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4_t __riscv_vluxei32_v_u16m4 (const uint16_t *base, vuint32m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei32_v_u32mf2 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei32_v_u32m1 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei32_v_u32m2 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei32_v_u32m4 (const uint32_t *base, vuint32m4_t bindex, size_t vl);
vuint32m8_t __riscv_vluxei32_v_u32m8 (const uint32_t *base, vuint32m8_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei32_v_u64m1 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei32_v_u64m2 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei32_v_u64m4 (const uint64_t *base, vuint32m2_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei32_v_u64m8 (const uint64_t *base, vuint32m4_t bindex, size_t vl);
vfloat16mf4_t __riscv_vluxei32_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei32_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei32_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei32_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4_t __riscv_vluxei32_v_f16m4_m (vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei32_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei32_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei32_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei32_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl);
vfloat32m8_t __riscv_vluxei32_v_f32m8_m (vbool4_t mask, const float32_t *base, vuint32m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei32_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei32_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei32_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei32_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint32m4_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei32_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei32_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei32_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1_t __riscv_vluxei32_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2_t __riscv_vluxei32_v_i8m2_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei32_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei32_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1_t __riscv_vluxei32_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2_t __riscv_vluxei32_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4_t __riscv_vluxei32_v_i16m4_m (vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei32_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1_t __riscv_vluxei32_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2_t __riscv_vluxei32_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4_t __riscv_vluxei32_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl);
vint32m8_t __riscv_vluxei32_v_i32m8_m (vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl);
vint64m1_t __riscv_vluxei32_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2_t __riscv_vluxei32_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4_t __riscv_vluxei32_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl);
vint64m8_t __riscv_vluxei32_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei32_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei32_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei32_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei32_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2_t __riscv_vluxei32_v_u8m2_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei32_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei32_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei32_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei32_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4_t __riscv_vluxei32_v_u16m4_m (vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei32_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei32_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei32_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei32_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl);
vuint32m8_t __riscv_vluxei32_v_u32m8_m (vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei32_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei32_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei32_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei32_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl);
4.4. vluxei64.v
- Mnemonic
vluxei64.v vd, (rs1), vs2, vm
- Encoding
- Description
-
unordered indexed load: loads SEW-wide data elements from memory, addressed by base plus 64-bit (EEW=64) byte-offset indices taken from vs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei64_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vluxei64_v_f16mf4 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei64_v_f16mf2 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei64_v_f16m1 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei64_v_f16m2 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei64_v_f32mf2 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei64_v_f32m1 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei64_v_f32m2 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei64_v_f32m4 (const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei64_v_f64m1 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei64_v_f64m2 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei64_v_f64m4 (const float64_t *base, vuint64m4_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei64_v_f64m8 (const float64_t *base, vuint64m8_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei64_v_i8mf8 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei64_v_i8mf4 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei64_v_i8mf2 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1_t __riscv_vluxei64_v_i8m1 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei64_v_i16mf4 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei64_v_i16mf2 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1_t __riscv_vluxei64_v_i16m1 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2_t __riscv_vluxei64_v_i16m2 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei64_v_i32mf2 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1_t __riscv_vluxei64_v_i32m1 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2_t __riscv_vluxei64_v_i32m2 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4_t __riscv_vluxei64_v_i32m4 (const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1_t __riscv_vluxei64_v_i64m1 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2_t __riscv_vluxei64_v_i64m2 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4_t __riscv_vluxei64_v_i64m4 (const int64_t *base, vuint64m4_t bindex, size_t vl);
vint64m8_t __riscv_vluxei64_v_i64m8 (const int64_t *base, vuint64m8_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei64_v_u8mf8 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei64_v_u8mf4 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei64_v_u8mf2 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei64_v_u8m1 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei64_v_u16mf4 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei64_v_u16mf2 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei64_v_u16m1 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei64_v_u16m2 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei64_v_u32mf2 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei64_v_u32m1 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei64_v_u32m2 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei64_v_u32m4 (const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei64_v_u64m1 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei64_v_u64m2 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei64_v_u64m4 (const uint64_t *base, vuint64m4_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei64_v_u64m8 (const uint64_t *base, vuint64m8_t bindex, size_t vl);
vfloat16mf4_t __riscv_vluxei64_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2_t __riscv_vluxei64_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1_t __riscv_vluxei64_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2_t __riscv_vluxei64_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vluxei64_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1_t __riscv_vluxei64_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2_t __riscv_vluxei64_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4_t __riscv_vluxei64_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vluxei64_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2_t __riscv_vluxei64_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4_t __riscv_vluxei64_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl);
vfloat64m8_t __riscv_vluxei64_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint64m8_t bindex, size_t vl);
vint8mf8_t __riscv_vluxei64_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4_t __riscv_vluxei64_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2_t __riscv_vluxei64_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1_t __riscv_vluxei64_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4_t __riscv_vluxei64_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2_t __riscv_vluxei64_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1_t __riscv_vluxei64_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2_t __riscv_vluxei64_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2_t __riscv_vluxei64_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1_t __riscv_vluxei64_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2_t __riscv_vluxei64_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4_t __riscv_vluxei64_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1_t __riscv_vluxei64_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2_t __riscv_vluxei64_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4_t __riscv_vluxei64_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl);
vint64m8_t __riscv_vluxei64_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl);
vuint8mf8_t __riscv_vluxei64_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4_t __riscv_vluxei64_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2_t __riscv_vluxei64_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1_t __riscv_vluxei64_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vluxei64_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2_t __riscv_vluxei64_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1_t __riscv_vluxei64_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2_t __riscv_vluxei64_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vluxei64_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1_t __riscv_vluxei64_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2_t __riscv_vluxei64_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4_t __riscv_vluxei64_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1_t __riscv_vluxei64_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2_t __riscv_vluxei64_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4_t __riscv_vluxei64_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl);
vuint64m8_t __riscv_vluxei64_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl);
4.5. vloxei8.v
- Mnemonic
vloxei8.v vd, (rs1), vs2, vm
- Encoding
- Description
-
ordered indexed load: loads SEW-wide data elements from memory in element order, addressed by base plus 8-bit (EEW=8) byte-offset indices taken from vs2
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei8_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vloxei8_v_f16mf4 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei8_v_f16mf2 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei8_v_f16m1 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei8_v_f16m2 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4_t __riscv_vloxei8_v_f16m4 (const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat16m8_t __riscv_vloxei8_v_f16m8 (const float16_t *base, vuint8m4_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei8_v_f32mf2 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei8_v_f32m1 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei8_v_f32m2 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei8_v_f32m4 (const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat32m8_t __riscv_vloxei8_v_f32m8 (const float32_t *base, vuint8m2_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei8_v_f64m1 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei8_v_f64m2 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei8_v_f64m4 (const float64_t *base, vuint8mf2_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei8_v_f64m8 (const float64_t *base, vuint8m1_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei8_v_i8mf8 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei8_v_i8mf4 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei8_v_i8mf2 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1_t __riscv_vloxei8_v_i8m1 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2_t __riscv_vloxei8_v_i8m2 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4_t __riscv_vloxei8_v_i8m4 (const int8_t *base, vuint8m4_t bindex, size_t vl);
vint8m8_t __riscv_vloxei8_v_i8m8 (const int8_t *base, vuint8m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei8_v_i16mf4 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei8_v_i16mf2 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1_t __riscv_vloxei8_v_i16m1 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2_t __riscv_vloxei8_v_i16m2 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4_t __riscv_vloxei8_v_i16m4 (const int16_t *base, vuint8m2_t bindex, size_t vl);
vint16m8_t __riscv_vloxei8_v_i16m8 (const int16_t *base, vuint8m4_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei8_v_i32mf2 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1_t __riscv_vloxei8_v_i32m1 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2_t __riscv_vloxei8_v_i32m2 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4_t __riscv_vloxei8_v_i32m4 (const int32_t *base, vuint8m1_t bindex, size_t vl);
vint32m8_t __riscv_vloxei8_v_i32m8 (const int32_t *base, vuint8m2_t bindex, size_t vl);
vint64m1_t __riscv_vloxei8_v_i64m1 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2_t __riscv_vloxei8_v_i64m2 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4_t __riscv_vloxei8_v_i64m4 (const int64_t *base, vuint8mf2_t bindex, size_t vl);
vint64m8_t __riscv_vloxei8_v_i64m8 (const int64_t *base, vuint8m1_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei8_v_u8mf8 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei8_v_u8mf4 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei8_v_u8mf2 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei8_v_u8m1 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2_t __riscv_vloxei8_v_u8m2 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4_t __riscv_vloxei8_v_u8m4 (const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint8m8_t __riscv_vloxei8_v_u8m8 (const uint8_t *base, vuint8m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei8_v_u16mf4 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei8_v_u16mf2 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei8_v_u16m1 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei8_v_u16m2 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4_t __riscv_vloxei8_v_u16m4 (const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint16m8_t __riscv_vloxei8_v_u16m8 (const uint16_t *base, vuint8m4_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei8_v_u32mf2 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei8_v_u32m1 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei8_v_u32m2 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei8_v_u32m4 (const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint32m8_t __riscv_vloxei8_v_u32m8 (const uint32_t *base, vuint8m2_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei8_v_u64m1 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei8_v_u64m2 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei8_v_u64m4 (const uint64_t *base, vuint8mf2_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei8_v_u64m8 (const uint64_t *base, vuint8m1_t bindex, size_t vl);
vfloat16mf4_t __riscv_vloxei8_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei8_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei8_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei8_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4_t __riscv_vloxei8_v_f16m4_m (vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat16m8_t __riscv_vloxei8_v_f16m8_m (vbool2_t mask, const float16_t *base, vuint8m4_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei8_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei8_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei8_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei8_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat32m8_t __riscv_vloxei8_v_f32m8_m (vbool4_t mask, const float32_t *base, vuint8m2_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei8_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei8_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei8_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei8_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint8m1_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei8_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei8_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei8_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1_t __riscv_vloxei8_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2_t __riscv_vloxei8_v_i8m2_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4_t __riscv_vloxei8_v_i8m4_m (vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl);
vint8m8_t __riscv_vloxei8_v_i8m8_m (vbool1_t mask, const int8_t *base, vuint8m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei8_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei8_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1_t __riscv_vloxei8_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2_t __riscv_vloxei8_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4_t __riscv_vloxei8_v_i16m4_m (vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl);
vint16m8_t __riscv_vloxei8_v_i16m8_m (vbool2_t mask, const int16_t *base, vuint8m4_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei8_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1_t __riscv_vloxei8_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2_t __riscv_vloxei8_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4_t __riscv_vloxei8_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl);
vint32m8_t __riscv_vloxei8_v_i32m8_m (vbool4_t mask, const int32_t *base, vuint8m2_t bindex, size_t vl);
vint64m1_t __riscv_vloxei8_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2_t __riscv_vloxei8_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4_t __riscv_vloxei8_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl);
vint64m8_t __riscv_vloxei8_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint8m1_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei8_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei8_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei8_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei8_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2_t __riscv_vloxei8_v_u8m2_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4_t __riscv_vloxei8_v_u8m4_m (vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint8m8_t __riscv_vloxei8_v_u8m8_m (vbool1_t mask, const uint8_t *base, vuint8m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei8_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei8_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei8_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei8_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4_t __riscv_vloxei8_v_u16m4_m (vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint16m8_t __riscv_vloxei8_v_u16m8_m (vbool2_t mask, const uint16_t *base, vuint8m4_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei8_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei8_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei8_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei8_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint32m8_t __riscv_vloxei8_v_u32m8_m (vbool4_t mask, const uint32_t *base, vuint8m2_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei8_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei8_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei8_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei8_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint8m1_t bindex, size_t vl);
4.6. vloxei16.v
- Mnemonic
vloxei16.v vd, (rs1), vs2, vm
- Encoding
- Description
-
vector ordered-indexed load: indices are 16-bit unsigned byte offsets (EEW=16) taken from vs2; data elements of SEW bits are loaded from memory in element order
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei16_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vloxei16_v_f16mf4 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei16_v_f16mf2 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei16_v_f16m1 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei16_v_f16m2 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4_t __riscv_vloxei16_v_f16m4 (const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat16m8_t __riscv_vloxei16_v_f16m8 (const float16_t *base, vuint16m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei16_v_f32mf2 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei16_v_f32m1 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei16_v_f32m2 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei16_v_f32m4 (const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat32m8_t __riscv_vloxei16_v_f32m8 (const float32_t *base, vuint16m4_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei16_v_f64m1 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei16_v_f64m2 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei16_v_f64m4 (const float64_t *base, vuint16m1_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei16_v_f64m8 (const float64_t *base, vuint16m2_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei16_v_i8mf8 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei16_v_i8mf4 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei16_v_i8mf2 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1_t __riscv_vloxei16_v_i8m1 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2_t __riscv_vloxei16_v_i8m2 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4_t __riscv_vloxei16_v_i8m4 (const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei16_v_i16mf4 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei16_v_i16mf2 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1_t __riscv_vloxei16_v_i16m1 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2_t __riscv_vloxei16_v_i16m2 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4_t __riscv_vloxei16_v_i16m4 (const int16_t *base, vuint16m4_t bindex, size_t vl);
vint16m8_t __riscv_vloxei16_v_i16m8 (const int16_t *base, vuint16m8_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei16_v_i32mf2 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1_t __riscv_vloxei16_v_i32m1 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2_t __riscv_vloxei16_v_i32m2 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4_t __riscv_vloxei16_v_i32m4 (const int32_t *base, vuint16m2_t bindex, size_t vl);
vint32m8_t __riscv_vloxei16_v_i32m8 (const int32_t *base, vuint16m4_t bindex, size_t vl);
vint64m1_t __riscv_vloxei16_v_i64m1 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2_t __riscv_vloxei16_v_i64m2 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4_t __riscv_vloxei16_v_i64m4 (const int64_t *base, vuint16m1_t bindex, size_t vl);
vint64m8_t __riscv_vloxei16_v_i64m8 (const int64_t *base, vuint16m2_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei16_v_u8mf8 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei16_v_u8mf4 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei16_v_u8mf2 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei16_v_u8m1 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2_t __riscv_vloxei16_v_u8m2 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4_t __riscv_vloxei16_v_u8m4 (const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei16_v_u16mf4 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei16_v_u16mf2 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei16_v_u16m1 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei16_v_u16m2 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4_t __riscv_vloxei16_v_u16m4 (const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint16m8_t __riscv_vloxei16_v_u16m8 (const uint16_t *base, vuint16m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei16_v_u32mf2 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei16_v_u32m1 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei16_v_u32m2 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei16_v_u32m4 (const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint32m8_t __riscv_vloxei16_v_u32m8 (const uint32_t *base, vuint16m4_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei16_v_u64m1 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei16_v_u64m2 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei16_v_u64m4 (const uint64_t *base, vuint16m1_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei16_v_u64m8 (const uint64_t *base, vuint16m2_t bindex, size_t vl);
vfloat16mf4_t __riscv_vloxei16_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei16_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei16_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei16_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4_t __riscv_vloxei16_v_f16m4_m (vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat16m8_t __riscv_vloxei16_v_f16m8_m (vbool2_t mask, const float16_t *base, vuint16m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei16_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei16_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei16_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei16_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat32m8_t __riscv_vloxei16_v_f32m8_m (vbool4_t mask, const float32_t *base, vuint16m4_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei16_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei16_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei16_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei16_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint16m2_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei16_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei16_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei16_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1_t __riscv_vloxei16_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2_t __riscv_vloxei16_v_i8m2_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4_t __riscv_vloxei16_v_i8m4_m (vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei16_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei16_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1_t __riscv_vloxei16_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2_t __riscv_vloxei16_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4_t __riscv_vloxei16_v_i16m4_m (vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl);
vint16m8_t __riscv_vloxei16_v_i16m8_m (vbool2_t mask, const int16_t *base, vuint16m8_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei16_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1_t __riscv_vloxei16_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2_t __riscv_vloxei16_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4_t __riscv_vloxei16_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl);
vint32m8_t __riscv_vloxei16_v_i32m8_m (vbool4_t mask, const int32_t *base, vuint16m4_t bindex, size_t vl);
vint64m1_t __riscv_vloxei16_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2_t __riscv_vloxei16_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4_t __riscv_vloxei16_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl);
vint64m8_t __riscv_vloxei16_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint16m2_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei16_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei16_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei16_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei16_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2_t __riscv_vloxei16_v_u8m2_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4_t __riscv_vloxei16_v_u8m4_m (vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei16_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei16_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei16_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei16_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4_t __riscv_vloxei16_v_u16m4_m (vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint16m8_t __riscv_vloxei16_v_u16m8_m (vbool2_t mask, const uint16_t *base, vuint16m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei16_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei16_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei16_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei16_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint32m8_t __riscv_vloxei16_v_u32m8_m (vbool4_t mask, const uint32_t *base, vuint16m4_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei16_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei16_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei16_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei16_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint16m2_t bindex, size_t vl);
4.7. vloxei32.v
- Mnemonic
vloxei32.v vd, (rs1), vs2, vm
- Encoding
- Description
-
vector ordered-indexed load: indices are 32-bit unsigned byte offsets (EEW=32) taken from vs2; data elements of SEW bits are loaded from memory in element order
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei32_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vloxei32_v_f16mf4 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei32_v_f16mf2 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei32_v_f16m1 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei32_v_f16m2 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4_t __riscv_vloxei32_v_f16m4 (const float16_t *base, vuint32m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei32_v_f32mf2 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei32_v_f32m1 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei32_v_f32m2 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei32_v_f32m4 (const float32_t *base, vuint32m4_t bindex, size_t vl);
vfloat32m8_t __riscv_vloxei32_v_f32m8 (const float32_t *base, vuint32m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei32_v_f64m1 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei32_v_f64m2 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei32_v_f64m4 (const float64_t *base, vuint32m2_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei32_v_f64m8 (const float64_t *base, vuint32m4_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei32_v_i8mf8 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei32_v_i8mf4 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei32_v_i8mf2 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1_t __riscv_vloxei32_v_i8m1 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2_t __riscv_vloxei32_v_i8m2 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei32_v_i16mf4 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei32_v_i16mf2 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1_t __riscv_vloxei32_v_i16m1 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2_t __riscv_vloxei32_v_i16m2 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4_t __riscv_vloxei32_v_i16m4 (const int16_t *base, vuint32m8_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei32_v_i32mf2 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1_t __riscv_vloxei32_v_i32m1 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2_t __riscv_vloxei32_v_i32m2 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4_t __riscv_vloxei32_v_i32m4 (const int32_t *base, vuint32m4_t bindex, size_t vl);
vint32m8_t __riscv_vloxei32_v_i32m8 (const int32_t *base, vuint32m8_t bindex, size_t vl);
vint64m1_t __riscv_vloxei32_v_i64m1 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2_t __riscv_vloxei32_v_i64m2 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4_t __riscv_vloxei32_v_i64m4 (const int64_t *base, vuint32m2_t bindex, size_t vl);
vint64m8_t __riscv_vloxei32_v_i64m8 (const int64_t *base, vuint32m4_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei32_v_u8mf8 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei32_v_u8mf4 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei32_v_u8mf2 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei32_v_u8m1 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2_t __riscv_vloxei32_v_u8m2 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei32_v_u16mf4 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei32_v_u16mf2 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei32_v_u16m1 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei32_v_u16m2 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4_t __riscv_vloxei32_v_u16m4 (const uint16_t *base, vuint32m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei32_v_u32mf2 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei32_v_u32m1 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei32_v_u32m2 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei32_v_u32m4 (const uint32_t *base, vuint32m4_t bindex, size_t vl);
vuint32m8_t __riscv_vloxei32_v_u32m8 (const uint32_t *base, vuint32m8_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei32_v_u64m1 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei32_v_u64m2 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei32_v_u64m4 (const uint64_t *base, vuint32m2_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei32_v_u64m8 (const uint64_t *base, vuint32m4_t bindex, size_t vl);
vfloat16mf4_t __riscv_vloxei32_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei32_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei32_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei32_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4_t __riscv_vloxei32_v_f16m4_m (vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei32_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei32_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei32_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei32_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl);
vfloat32m8_t __riscv_vloxei32_v_f32m8_m (vbool4_t mask, const float32_t *base, vuint32m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei32_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei32_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei32_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei32_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint32m4_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei32_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei32_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei32_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1_t __riscv_vloxei32_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2_t __riscv_vloxei32_v_i8m2_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei32_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei32_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1_t __riscv_vloxei32_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2_t __riscv_vloxei32_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4_t __riscv_vloxei32_v_i16m4_m (vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei32_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1_t __riscv_vloxei32_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2_t __riscv_vloxei32_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4_t __riscv_vloxei32_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl);
vint32m8_t __riscv_vloxei32_v_i32m8_m (vbool4_t mask, const int32_t *base, vuint32m8_t bindex, size_t vl);
vint64m1_t __riscv_vloxei32_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2_t __riscv_vloxei32_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4_t __riscv_vloxei32_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl);
vint64m8_t __riscv_vloxei32_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint32m4_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei32_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei32_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei32_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei32_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2_t __riscv_vloxei32_v_u8m2_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei32_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei32_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei32_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei32_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4_t __riscv_vloxei32_v_u16m4_m (vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei32_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei32_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei32_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei32_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl);
vuint32m8_t __riscv_vloxei32_v_u32m8_m (vbool4_t mask, const uint32_t *base, vuint32m8_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei32_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei32_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei32_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei32_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint32m4_t bindex, size_t vl);
4.8. vloxei64.v
- Mnemonic
vloxei64.v vd, (rs1), vs2, vm
- Encoding
- Description
- Ordered 64-bit indexed load of SEW-wide data: each element of vs2 holds a 64-bit byte offset that is added to the base address in rs1.
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei64_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vloxei64_v_f16mf4 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei64_v_f16mf2 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei64_v_f16m1 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei64_v_f16m2 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei64_v_f32mf2 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei64_v_f32m1 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei64_v_f32m2 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei64_v_f32m4 (const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei64_v_f64m1 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei64_v_f64m2 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei64_v_f64m4 (const float64_t *base, vuint64m4_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei64_v_f64m8 (const float64_t *base, vuint64m8_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei64_v_i8mf8 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei64_v_i8mf4 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei64_v_i8mf2 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1_t __riscv_vloxei64_v_i8m1 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei64_v_i16mf4 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei64_v_i16mf2 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1_t __riscv_vloxei64_v_i16m1 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2_t __riscv_vloxei64_v_i16m2 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei64_v_i32mf2 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1_t __riscv_vloxei64_v_i32m1 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2_t __riscv_vloxei64_v_i32m2 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4_t __riscv_vloxei64_v_i32m4 (const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1_t __riscv_vloxei64_v_i64m1 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2_t __riscv_vloxei64_v_i64m2 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4_t __riscv_vloxei64_v_i64m4 (const int64_t *base, vuint64m4_t bindex, size_t vl);
vint64m8_t __riscv_vloxei64_v_i64m8 (const int64_t *base, vuint64m8_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei64_v_u8mf8 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei64_v_u8mf4 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei64_v_u8mf2 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei64_v_u8m1 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei64_v_u16mf4 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei64_v_u16mf2 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei64_v_u16m1 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei64_v_u16m2 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei64_v_u32mf2 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei64_v_u32m1 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei64_v_u32m2 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei64_v_u32m4 (const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei64_v_u64m1 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei64_v_u64m2 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei64_v_u64m4 (const uint64_t *base, vuint64m4_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei64_v_u64m8 (const uint64_t *base, vuint64m8_t bindex, size_t vl);
vfloat16mf4_t __riscv_vloxei64_v_f16mf4_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2_t __riscv_vloxei64_v_f16mf2_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1_t __riscv_vloxei64_v_f16m1_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2_t __riscv_vloxei64_v_f16m2_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2_t __riscv_vloxei64_v_f32mf2_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1_t __riscv_vloxei64_v_f32m1_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2_t __riscv_vloxei64_v_f32m2_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4_t __riscv_vloxei64_v_f32m4_m (vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1_t __riscv_vloxei64_v_f64m1_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2_t __riscv_vloxei64_v_f64m2_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4_t __riscv_vloxei64_v_f64m4_m (vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl);
vfloat64m8_t __riscv_vloxei64_v_f64m8_m (vbool8_t mask, const float64_t *base, vuint64m8_t bindex, size_t vl);
vint8mf8_t __riscv_vloxei64_v_i8mf8_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4_t __riscv_vloxei64_v_i8mf4_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2_t __riscv_vloxei64_v_i8mf2_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1_t __riscv_vloxei64_v_i8m1_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4_t __riscv_vloxei64_v_i16mf4_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2_t __riscv_vloxei64_v_i16mf2_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1_t __riscv_vloxei64_v_i16m1_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2_t __riscv_vloxei64_v_i16m2_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2_t __riscv_vloxei64_v_i32mf2_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1_t __riscv_vloxei64_v_i32m1_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2_t __riscv_vloxei64_v_i32m2_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4_t __riscv_vloxei64_v_i32m4_m (vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1_t __riscv_vloxei64_v_i64m1_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2_t __riscv_vloxei64_v_i64m2_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4_t __riscv_vloxei64_v_i64m4_m (vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl);
vint64m8_t __riscv_vloxei64_v_i64m8_m (vbool8_t mask, const int64_t *base, vuint64m8_t bindex, size_t vl);
vuint8mf8_t __riscv_vloxei64_v_u8mf8_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4_t __riscv_vloxei64_v_u8mf4_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2_t __riscv_vloxei64_v_u8mf2_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1_t __riscv_vloxei64_v_u8m1_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4_t __riscv_vloxei64_v_u16mf4_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2_t __riscv_vloxei64_v_u16mf2_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1_t __riscv_vloxei64_v_u16m1_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2_t __riscv_vloxei64_v_u16m2_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2_t __riscv_vloxei64_v_u32mf2_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1_t __riscv_vloxei64_v_u32m1_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2_t __riscv_vloxei64_v_u32m2_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4_t __riscv_vloxei64_v_u32m4_m (vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1_t __riscv_vloxei64_v_u64m1_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2_t __riscv_vloxei64_v_u64m2_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4_t __riscv_vloxei64_v_u64m4_m (vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl);
vuint64m8_t __riscv_vloxei64_v_u64m8_m (vbool8_t mask, const uint64_t *base, vuint64m8_t bindex, size_t vl);
4.9. vsuxei8.v
- Mnemonic
vsuxei8.v vs3, (rs1), vs2, vm
- Encoding
- Description
- Unordered 8-bit indexed store of SEW-wide data: each element of vs2 holds an 8-bit byte offset that is added to the base address in rs1.
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei8_v.h
- Intrinsic Functions
Details
void __riscv_vsuxei8_v_f16mf4 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei8_v_f16mf2 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei8_v_f16m1 (float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei8_v_f16m2 (float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei8_v_f16m4 (float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsuxei8_v_f16m8 (float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsuxei8_v_f32mf2 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei8_v_f32m1 (float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei8_v_f32m2 (float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei8_v_f32m4 (float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei8_v_f32m8 (float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsuxei8_v_f64m1 (float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei8_v_f64m2 (float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei8_v_f64m4 (float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei8_v_f64m8 (float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsuxei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsuxei8_v_i8m8 (int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl);
void __riscv_vsuxei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsuxei8_v_i16m8 (int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsuxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsuxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsuxei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsuxei8_v_u8m8 (uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl);
void __riscv_vsuxei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsuxei8_v_u16m8 (uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsuxei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsuxei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsuxei8_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei8_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei8_v_f16m1_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei8_v_f16m2_m (vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei8_v_f16m4_m (vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsuxei8_v_f16m8_m (vbool2_t mask, float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsuxei8_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei8_v_f32m1_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei8_v_f32m2_m (vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei8_v_f32m4_m (vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei8_v_f32m8_m (vbool4_t mask, float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsuxei8_v_f64m1_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei8_v_f64m2_m (vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei8_v_f64m4_m (vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei8_v_f64m8_m (vbool8_t mask, float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsuxei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsuxei8_v_i8m8_m (vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl);
void __riscv_vsuxei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsuxei8_v_i16m8_m (vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsuxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsuxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsuxei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsuxei8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl);
void __riscv_vsuxei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsuxei8_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsuxei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsuxei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl);
4.10. vsuxei16.v
- Mnemonic
vsuxei16.v vs3, (rs1), vs2, vm
- Encoding
- Description
- Unordered 16-bit indexed store of SEW-wide data: each element of vs2 holds a 16-bit byte offset that is added to the base address in rs1.
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei16_v.h
- Intrinsic Functions
Details
void __riscv_vsuxei16_v_f16mf4 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei16_v_f16mf2 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei16_v_f16m1 (float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei16_v_f16m2 (float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei16_v_f16m4 (float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsuxei16_v_f16m8 (float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsuxei16_v_f32mf2 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei16_v_f32m1 (float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei16_v_f32m2 (float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei16_v_f32m4 (float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei16_v_f32m8 (float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsuxei16_v_f64m1 (float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei16_v_f64m2 (float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei16_v_f64m4 (float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei16_v_f64m8 (float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsuxei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsuxei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsuxei16_v_i16m8 (int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsuxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsuxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsuxei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsuxei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsuxei16_v_u16m8 (uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsuxei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsuxei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsuxei16_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei16_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei16_v_f16m1_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei16_v_f16m2_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei16_v_f16m4_m (vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsuxei16_v_f16m8_m (vbool2_t mask, float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsuxei16_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei16_v_f32m1_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei16_v_f32m2_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei16_v_f32m4_m (vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei16_v_f32m8_m (vbool4_t mask, float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsuxei16_v_f64m1_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei16_v_f64m2_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei16_v_f64m4_m (vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei16_v_f64m8_m (vbool8_t mask, float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsuxei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsuxei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsuxei16_v_i16m8_m (vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsuxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsuxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsuxei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsuxei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsuxei16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsuxei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsuxei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl);
4.11. vsuxei32.v
- Mnemonic
vsuxei32.v vs3, (rs1), vs2, vm
- Encoding
- Description
-
unordered 32-bit indexed store of SEW data: each active SEW-wide element of vs3 is stored to the byte address rs1 + zero-extended 32-bit byte offset taken from the corresponding element of vs2; stores may be performed in any order
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei32_v.h
- Intrinsic Functions
Details
void __riscv_vsuxei32_v_f16mf4 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei32_v_f16mf2 (float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei32_v_f16m1 (float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei32_v_f16m2 (float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei32_v_f16m4 (float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsuxei32_v_f32mf2 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei32_v_f32m1 (float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei32_v_f32m2 (float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei32_v_f32m4 (float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei32_v_f32m8 (float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsuxei32_v_f64m1 (float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei32_v_f64m2 (float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei32_v_f64m4 (float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei32_v_f64m8 (float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsuxei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsuxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsuxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsuxei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsuxei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsuxei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsuxei32_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei32_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei32_v_f16m1_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei32_v_f16m2_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei32_v_f16m4_m (vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsuxei32_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei32_v_f32m1_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei32_v_f32m2_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei32_v_f32m4_m (vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei32_v_f32m8_m (vbool4_t mask, float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsuxei32_v_f64m1_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei32_v_f64m2_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei32_v_f64m4_m (vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei32_v_f64m8_m (vbool8_t mask, float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsuxei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsuxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsuxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsuxei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsuxei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsuxei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl);
4.12. vsuxei64.v
- Mnemonic
vsuxei64.v vs3, (rs1), vs2, vm
- Encoding
- Description
-
unordered 64-bit indexed store of SEW data: each active SEW-wide element of vs3 is stored to the byte address rs1 + 64-bit byte offset taken from the corresponding element of vs2; stores may be performed in any order
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei64_v.h
- Intrinsic Functions
Details
void __riscv_vsuxei64_v_f16mf4 (float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei64_v_f16mf2 (float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei64_v_f16m1 (float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei64_v_f16m2 (float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei64_v_f32mf2 (float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei64_v_f32m1 (float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei64_v_f32m2 (float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei64_v_f32m4 (float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei64_v_f64m1 (float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei64_v_f64m2 (float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei64_v_f64m4 (float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei64_v_f64m8 (float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsuxei64_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsuxei64_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsuxei64_v_f16m1_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsuxei64_v_f16m2_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsuxei64_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsuxei64_v_f32m1_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsuxei64_v_f32m2_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsuxei64_v_f32m4_m (vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsuxei64_v_f64m1_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsuxei64_v_f64m2_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsuxei64_v_f64m4_m (vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsuxei64_v_f64m8_m (vbool8_t mask, float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsuxei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsuxei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsuxei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsuxei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsuxei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsuxei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsuxei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsuxei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsuxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsuxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsuxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsuxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsuxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsuxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsuxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsuxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsuxei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsuxei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsuxei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsuxei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsuxei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsuxei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsuxei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsuxei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsuxei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsuxei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsuxei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsuxei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsuxei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsuxei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsuxei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsuxei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl);
4.13. vsoxei8.v
- Mnemonic
vsoxei8.v vs3, (rs1), vs2, vm
- Encoding
- Description
-
ordered 8-bit indexed store of SEW data: each active SEW-wide element of vs3 is stored to the byte address rs1 + zero-extended 8-bit byte offset taken from the corresponding element of vs2; stores are performed in element order
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei8_v.h
- Intrinsic Functions
Details
void __riscv_vsoxei8_v_f16mf4 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei8_v_f16mf2 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei8_v_f16m1 (float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei8_v_f16m2 (float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei8_v_f16m4 (float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsoxei8_v_f16m8 (float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsoxei8_v_f32mf2 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei8_v_f32m1 (float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei8_v_f32m2 (float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei8_v_f32m4 (float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei8_v_f32m8 (float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsoxei8_v_f64m1 (float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei8_v_f64m2 (float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei8_v_f64m4 (float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei8_v_f64m8 (float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsoxei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsoxei8_v_i8m8 (int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl);
void __riscv_vsoxei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsoxei8_v_i16m8 (int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsoxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsoxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsoxei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsoxei8_v_u8m8 (uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl);
void __riscv_vsoxei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsoxei8_v_u16m8 (uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsoxei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsoxei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsoxei8_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei8_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei8_v_f16m1_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei8_v_f16m2_m (vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei8_v_f16m4_m (vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsoxei8_v_f16m8_m (vbool2_t mask, float16_t *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsoxei8_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei8_v_f32m1_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei8_v_f32m2_m (vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei8_v_f32m4_m (vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei8_v_f32m8_m (vbool4_t mask, float32_t *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsoxei8_v_f64m1_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei8_v_f64m2_m (vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei8_v_f64m4_m (vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei8_v_f64m8_m (vbool8_t mask, float64_t *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsoxei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsoxei8_v_i8m8_m (vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl);
void __riscv_vsoxei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsoxei8_v_i16m8_m (vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsoxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsoxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsoxei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsoxei8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl);
void __riscv_vsoxei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsoxei8_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsoxei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsoxei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl);
4.14. vsoxei16.v
- Mnemonic
vsoxei16.v vs3, (rs1), vs2, vm
- Encoding
- Description
-
ordered 16-bit indexed store of SEW data
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei16_v.h
- Intrinsic Functions
Details
void __riscv_vsoxei16_v_f16mf4 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei16_v_f16mf2 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei16_v_f16m1 (float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei16_v_f16m2 (float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei16_v_f16m4 (float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsoxei16_v_f16m8 (float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsoxei16_v_f32mf2 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei16_v_f32m1 (float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei16_v_f32m2 (float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei16_v_f32m4 (float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei16_v_f32m8 (float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsoxei16_v_f64m1 (float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei16_v_f64m2 (float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei16_v_f64m4 (float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei16_v_f64m8 (float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsoxei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsoxei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsoxei16_v_i16m8 (int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsoxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsoxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsoxei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsoxei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsoxei16_v_u16m8 (uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsoxei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsoxei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsoxei16_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei16_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei16_v_f16m1_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei16_v_f16m2_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei16_v_f16m4_m (vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsoxei16_v_f16m8_m (vbool2_t mask, float16_t *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl);
void __riscv_vsoxei16_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei16_v_f32m1_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei16_v_f32m2_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei16_v_f32m4_m (vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei16_v_f32m8_m (vbool4_t mask, float32_t *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsoxei16_v_f64m1_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei16_v_f64m2_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei16_v_f64m4_m (vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei16_v_f64m8_m (vbool8_t mask, float64_t *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsoxei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl);
void __riscv_vsoxei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsoxei16_v_i16m8_m (vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl);
void __riscv_vsoxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsoxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsoxei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl);
void __riscv_vsoxei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsoxei16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl);
void __riscv_vsoxei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsoxei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl);
4.15. vsoxei32.v
- Mnemonic
vsoxei32.v vs3, (rs1), vs2, vm
- Encoding
- Description
-
ordered 32-bit indexed store of SEW data
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei32_v.h
- Intrinsic Functions
Details
void __riscv_vsoxei32_v_f16mf4 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei32_v_f16mf2 (float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei32_v_f16m1 (float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei32_v_f16m2 (float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei32_v_f16m4 (float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsoxei32_v_f32mf2 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei32_v_f32m1 (float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei32_v_f32m2 (float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei32_v_f32m4 (float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei32_v_f32m8 (float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsoxei32_v_f64m1 (float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei32_v_f64m2 (float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei32_v_f64m4 (float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei32_v_f64m8 (float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsoxei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsoxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsoxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsoxei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsoxei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsoxei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsoxei32_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei32_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei32_v_f16m1_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei32_v_f16m2_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei32_v_f16m4_m (vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl);
void __riscv_vsoxei32_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei32_v_f32m1_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei32_v_f32m2_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei32_v_f32m4_m (vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei32_v_f32m8_m (vbool4_t mask, float32_t *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl);
void __riscv_vsoxei32_v_f64m1_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei32_v_f64m2_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei32_v_f64m4_m (vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei32_v_f64m8_m (vbool8_t mask, float64_t *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl);
void __riscv_vsoxei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl);
void __riscv_vsoxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl);
void __riscv_vsoxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl);
void __riscv_vsoxei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl);
void __riscv_vsoxei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl);
void __riscv_vsoxei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl);
4.16. vsoxei64.v
- Mnemonic
vsoxei64.v vs3, (rs1), vs2, vm
- Encoding
- Description
-
ordered 64-bit indexed store of SEW data
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei64_v.h
- Intrinsic Functions
Details
void __riscv_vsoxei64_v_f16mf4 (float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei64_v_f16mf2 (float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei64_v_f16m1 (float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei64_v_f16m2 (float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei64_v_f32mf2 (float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei64_v_f32m1 (float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei64_v_f32m2 (float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei64_v_f32m4 (float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei64_v_f64m1 (float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei64_v_f64m2 (float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei64_v_f64m4 (float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei64_v_f64m8 (float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl);
void __riscv_vsoxei64_v_f16mf4_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl);
void __riscv_vsoxei64_v_f16mf2_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl);
void __riscv_vsoxei64_v_f16m1_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl);
void __riscv_vsoxei64_v_f16m2_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl);
void __riscv_vsoxei64_v_f32mf2_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl);
void __riscv_vsoxei64_v_f32m1_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl);
void __riscv_vsoxei64_v_f32m2_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl);
void __riscv_vsoxei64_v_f32m4_m (vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl);
void __riscv_vsoxei64_v_f64m1_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl);
void __riscv_vsoxei64_v_f64m2_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl);
void __riscv_vsoxei64_v_f64m4_m (vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl);
void __riscv_vsoxei64_v_f64m8_m (vbool8_t mask, float64_t *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl);
void __riscv_vsoxei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl);
void __riscv_vsoxei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl);
void __riscv_vsoxei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl);
void __riscv_vsoxei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl);
void __riscv_vsoxei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl);
void __riscv_vsoxei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl);
void __riscv_vsoxei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl);
void __riscv_vsoxei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl);
void __riscv_vsoxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl);
void __riscv_vsoxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl);
void __riscv_vsoxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl);
void __riscv_vsoxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl);
void __riscv_vsoxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl);
void __riscv_vsoxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl);
void __riscv_vsoxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl);
void __riscv_vsoxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl);
void __riscv_vsoxei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl);
void __riscv_vsoxei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl);
void __riscv_vsoxei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl);
void __riscv_vsoxei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl);
void __riscv_vsoxei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl);
void __riscv_vsoxei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl);
void __riscv_vsoxei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl);
void __riscv_vsoxei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl);
void __riscv_vsoxei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl);
void __riscv_vsoxei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl);
void __riscv_vsoxei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl);
void __riscv_vsoxei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl);
void __riscv_vsoxei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl);
void __riscv_vsoxei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl);
void __riscv_vsoxei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl);
void __riscv_vsoxei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl);
5. Unit-stride Fault-Only-First Loads
5.1. vle8ff.v
- Mnemonic
vle8ff.v vd, (rs1), vm
- Encoding
- Description
-
8-bit unit-stride fault-only-first load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle8ff_v.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vle8ff_v_i8mf8 (const int8_t *base, size_t *new_vl, size_t vl);
vint8mf4_t __riscv_vle8ff_v_i8mf4 (const int8_t *base, size_t *new_vl, size_t vl);
vint8mf2_t __riscv_vle8ff_v_i8mf2 (const int8_t *base, size_t *new_vl, size_t vl);
vint8m1_t __riscv_vle8ff_v_i8m1 (const int8_t *base, size_t *new_vl, size_t vl);
vint8m2_t __riscv_vle8ff_v_i8m2 (const int8_t *base, size_t *new_vl, size_t vl);
vint8m4_t __riscv_vle8ff_v_i8m4 (const int8_t *base, size_t *new_vl, size_t vl);
vint8m8_t __riscv_vle8ff_v_i8m8 (const int8_t *base, size_t *new_vl, size_t vl);
vuint8mf8_t __riscv_vle8ff_v_u8mf8 (const uint8_t *base, size_t *new_vl, size_t vl);
vuint8mf4_t __riscv_vle8ff_v_u8mf4 (const uint8_t *base, size_t *new_vl, size_t vl);
vuint8mf2_t __riscv_vle8ff_v_u8mf2 (const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m1_t __riscv_vle8ff_v_u8m1 (const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m2_t __riscv_vle8ff_v_u8m2 (const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m4_t __riscv_vle8ff_v_u8m4 (const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m8_t __riscv_vle8ff_v_u8m8 (const uint8_t *base, size_t *new_vl, size_t vl);
vint8mf8_t __riscv_vle8ff_v_i8mf8_m (vbool64_t mask, const int8_t *base, size_t *new_vl, size_t vl);
vint8mf4_t __riscv_vle8ff_v_i8mf4_m (vbool32_t mask, const int8_t *base, size_t *new_vl, size_t vl);
vint8mf2_t __riscv_vle8ff_v_i8mf2_m (vbool16_t mask, const int8_t *base, size_t *new_vl, size_t vl);
vint8m1_t __riscv_vle8ff_v_i8m1_m (vbool8_t mask, const int8_t *base, size_t *new_vl, size_t vl);
vint8m2_t __riscv_vle8ff_v_i8m2_m (vbool4_t mask, const int8_t *base, size_t *new_vl, size_t vl);
vint8m4_t __riscv_vle8ff_v_i8m4_m (vbool2_t mask, const int8_t *base, size_t *new_vl, size_t vl);
vint8m8_t __riscv_vle8ff_v_i8m8_m (vbool1_t mask, const int8_t *base, size_t *new_vl, size_t vl);
vuint8mf8_t __riscv_vle8ff_v_u8mf8_m (vbool64_t mask, const uint8_t *base, size_t *new_vl, size_t vl);
vuint8mf4_t __riscv_vle8ff_v_u8mf4_m (vbool32_t mask, const uint8_t *base, size_t *new_vl, size_t vl);
vuint8mf2_t __riscv_vle8ff_v_u8mf2_m (vbool16_t mask, const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m1_t __riscv_vle8ff_v_u8m1_m (vbool8_t mask, const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m2_t __riscv_vle8ff_v_u8m2_m (vbool4_t mask, const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m4_t __riscv_vle8ff_v_u8m4_m (vbool2_t mask, const uint8_t *base, size_t *new_vl, size_t vl);
vuint8m8_t __riscv_vle8ff_v_u8m8_m (vbool1_t mask, const uint8_t *base, size_t *new_vl, size_t vl);
5.2. vle16ff.v
- Mnemonic
vle16ff.v vd, (rs1), vm
- Encoding
- Description
-
16-bit unit-stride fault-only-first load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle16ff_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vle16ff_v_f16mf4 (const float16_t *base, size_t *new_vl, size_t vl);
vfloat16mf2_t __riscv_vle16ff_v_f16mf2 (const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m1_t __riscv_vle16ff_v_f16m1 (const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m2_t __riscv_vle16ff_v_f16m2 (const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m4_t __riscv_vle16ff_v_f16m4 (const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m8_t __riscv_vle16ff_v_f16m8 (const float16_t *base, size_t *new_vl, size_t vl);
vint16mf4_t __riscv_vle16ff_v_i16mf4 (const int16_t *base, size_t *new_vl, size_t vl);
vint16mf2_t __riscv_vle16ff_v_i16mf2 (const int16_t *base, size_t *new_vl, size_t vl);
vint16m1_t __riscv_vle16ff_v_i16m1 (const int16_t *base, size_t *new_vl, size_t vl);
vint16m2_t __riscv_vle16ff_v_i16m2 (const int16_t *base, size_t *new_vl, size_t vl);
vint16m4_t __riscv_vle16ff_v_i16m4 (const int16_t *base, size_t *new_vl, size_t vl);
vint16m8_t __riscv_vle16ff_v_i16m8 (const int16_t *base, size_t *new_vl, size_t vl);
vuint16mf4_t __riscv_vle16ff_v_u16mf4 (const uint16_t *base, size_t *new_vl, size_t vl);
vuint16mf2_t __riscv_vle16ff_v_u16mf2 (const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m1_t __riscv_vle16ff_v_u16m1 (const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m2_t __riscv_vle16ff_v_u16m2 (const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m4_t __riscv_vle16ff_v_u16m4 (const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m8_t __riscv_vle16ff_v_u16m8 (const uint16_t *base, size_t *new_vl, size_t vl);
vfloat16mf4_t __riscv_vle16ff_v_f16mf4_m (vbool64_t mask, const float16_t *base, size_t *new_vl, size_t vl);
vfloat16mf2_t __riscv_vle16ff_v_f16mf2_m (vbool32_t mask, const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m1_t __riscv_vle16ff_v_f16m1_m (vbool16_t mask, const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m2_t __riscv_vle16ff_v_f16m2_m (vbool8_t mask, const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m4_t __riscv_vle16ff_v_f16m4_m (vbool4_t mask, const float16_t *base, size_t *new_vl, size_t vl);
vfloat16m8_t __riscv_vle16ff_v_f16m8_m (vbool2_t mask, const float16_t *base, size_t *new_vl, size_t vl);
vint16mf4_t __riscv_vle16ff_v_i16mf4_m (vbool64_t mask, const int16_t *base, size_t *new_vl, size_t vl);
vint16mf2_t __riscv_vle16ff_v_i16mf2_m (vbool32_t mask, const int16_t *base, size_t *new_vl, size_t vl);
vint16m1_t __riscv_vle16ff_v_i16m1_m (vbool16_t mask, const int16_t *base, size_t *new_vl, size_t vl);
vint16m2_t __riscv_vle16ff_v_i16m2_m (vbool8_t mask, const int16_t *base, size_t *new_vl, size_t vl);
vint16m4_t __riscv_vle16ff_v_i16m4_m (vbool4_t mask, const int16_t *base, size_t *new_vl, size_t vl);
vint16m8_t __riscv_vle16ff_v_i16m8_m (vbool2_t mask, const int16_t *base, size_t *new_vl, size_t vl);
vuint16mf4_t __riscv_vle16ff_v_u16mf4_m (vbool64_t mask, const uint16_t *base, size_t *new_vl, size_t vl);
vuint16mf2_t __riscv_vle16ff_v_u16mf2_m (vbool32_t mask, const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m1_t __riscv_vle16ff_v_u16m1_m (vbool16_t mask, const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m2_t __riscv_vle16ff_v_u16m2_m (vbool8_t mask, const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m4_t __riscv_vle16ff_v_u16m4_m (vbool4_t mask, const uint16_t *base, size_t *new_vl, size_t vl);
vuint16m8_t __riscv_vle16ff_v_u16m8_m (vbool2_t mask, const uint16_t *base, size_t *new_vl, size_t vl);
5.3. vle32ff.v
- Mnemonic
vle32ff.v vd, (rs1), vm
- Encoding
- Description
-
32-bit unit-stride fault-only-first load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle32ff_v.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vle32ff_v_f32mf2 (const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m1_t __riscv_vle32ff_v_f32m1 (const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m2_t __riscv_vle32ff_v_f32m2 (const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m4_t __riscv_vle32ff_v_f32m4 (const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m8_t __riscv_vle32ff_v_f32m8 (const float32_t *base, size_t *new_vl, size_t vl);
vint32mf2_t __riscv_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t vl);
vint32m1_t __riscv_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl);
vint32m2_t __riscv_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl);
vint32m4_t __riscv_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl);
vint32m8_t __riscv_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl);
vuint32mf2_t __riscv_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m1_t __riscv_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m2_t __riscv_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m4_t __riscv_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m8_t __riscv_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t vl);
vfloat32mf2_t __riscv_vle32ff_v_f32mf2_m (vbool64_t mask, const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m1_t __riscv_vle32ff_v_f32m1_m (vbool32_t mask, const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m2_t __riscv_vle32ff_v_f32m2_m (vbool16_t mask, const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m4_t __riscv_vle32ff_v_f32m4_m (vbool8_t mask, const float32_t *base, size_t *new_vl, size_t vl);
vfloat32m8_t __riscv_vle32ff_v_f32m8_m (vbool4_t mask, const float32_t *base, size_t *new_vl, size_t vl);
vint32mf2_t __riscv_vle32ff_v_i32mf2_m (vbool64_t mask, const int32_t *base, size_t *new_vl, size_t vl);
vint32m1_t __riscv_vle32ff_v_i32m1_m (vbool32_t mask, const int32_t *base, size_t *new_vl, size_t vl);
vint32m2_t __riscv_vle32ff_v_i32m2_m (vbool16_t mask, const int32_t *base, size_t *new_vl, size_t vl);
vint32m4_t __riscv_vle32ff_v_i32m4_m (vbool8_t mask, const int32_t *base, size_t *new_vl, size_t vl);
vint32m8_t __riscv_vle32ff_v_i32m8_m (vbool4_t mask, const int32_t *base, size_t *new_vl, size_t vl);
vuint32mf2_t __riscv_vle32ff_v_u32mf2_m (vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m1_t __riscv_vle32ff_v_u32m1_m (vbool32_t mask, const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m2_t __riscv_vle32ff_v_u32m2_m (vbool16_t mask, const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m4_t __riscv_vle32ff_v_u32m4_m (vbool8_t mask, const uint32_t *base, size_t *new_vl, size_t vl);
vuint32m8_t __riscv_vle32ff_v_u32m8_m (vbool4_t mask, const uint32_t *base, size_t *new_vl, size_t vl);
5.4. vle64ff.v
- Mnemonic
vle64ff.v vd, (rs1), vm
- Encoding
- Description
-
64-bit unit-stride fault-only-first load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle64ff_v.h
- Intrinsic Functions
Details
vfloat64m1_t __riscv_vle64ff_v_f64m1 (const float64_t *base, size_t *new_vl, size_t vl);
vfloat64m2_t __riscv_vle64ff_v_f64m2 (const float64_t *base, size_t *new_vl, size_t vl);
vfloat64m4_t __riscv_vle64ff_v_f64m4 (const float64_t *base, size_t *new_vl, size_t vl);
vfloat64m8_t __riscv_vle64ff_v_f64m8 (const float64_t *base, size_t *new_vl, size_t vl);
vint64m1_t __riscv_vle64ff_v_i64m1 (const int64_t *base, size_t *new_vl, size_t vl);
vint64m2_t __riscv_vle64ff_v_i64m2 (const int64_t *base, size_t *new_vl, size_t vl);
vint64m4_t __riscv_vle64ff_v_i64m4 (const int64_t *base, size_t *new_vl, size_t vl);
vint64m8_t __riscv_vle64ff_v_i64m8 (const int64_t *base, size_t *new_vl, size_t vl);
vuint64m1_t __riscv_vle64ff_v_u64m1 (const uint64_t *base, size_t *new_vl, size_t vl);
vuint64m2_t __riscv_vle64ff_v_u64m2 (const uint64_t *base, size_t *new_vl, size_t vl);
vuint64m4_t __riscv_vle64ff_v_u64m4 (const uint64_t *base, size_t *new_vl, size_t vl);
vuint64m8_t __riscv_vle64ff_v_u64m8 (const uint64_t *base, size_t *new_vl, size_t vl);
vfloat64m1_t __riscv_vle64ff_v_f64m1_m (vbool64_t mask, const float64_t *base, size_t *new_vl, size_t vl);
vfloat64m2_t __riscv_vle64ff_v_f64m2_m (vbool32_t mask, const float64_t *base, size_t *new_vl, size_t vl);
vfloat64m4_t __riscv_vle64ff_v_f64m4_m (vbool16_t mask, const float64_t *base, size_t *new_vl, size_t vl);
vfloat64m8_t __riscv_vle64ff_v_f64m8_m (vbool8_t mask, const float64_t *base, size_t *new_vl, size_t vl);
vint64m1_t __riscv_vle64ff_v_i64m1_m (vbool64_t mask, const int64_t *base, size_t *new_vl, size_t vl);
vint64m2_t __riscv_vle64ff_v_i64m2_m (vbool32_t mask, const int64_t *base, size_t *new_vl, size_t vl);
vint64m4_t __riscv_vle64ff_v_i64m4_m (vbool16_t mask, const int64_t *base, size_t *new_vl, size_t vl);
vint64m8_t __riscv_vle64ff_v_i64m8_m (vbool8_t mask, const int64_t *base, size_t *new_vl, size_t vl);
vuint64m1_t __riscv_vle64ff_v_u64m1_m (vbool64_t mask, const uint64_t *base, size_t *new_vl, size_t vl);
vuint64m2_t __riscv_vle64ff_v_u64m2_m (vbool32_t mask, const uint64_t *base, size_t *new_vl, size_t vl);
vuint64m4_t __riscv_vle64ff_v_u64m4_m (vbool16_t mask, const uint64_t *base, size_t *new_vl, size_t vl);
vuint64m8_t __riscv_vle64ff_v_u64m8_m (vbool8_t mask, const uint64_t *base, size_t *new_vl, size_t vl);
6. Vector Unit-Stride Segment Loads and Stores
6.1. vlseg<nf>e8.v
- Mnemonic
vlseg2e8.v vd, (rs1), vm # nf=2
vlseg3e8.v vd, (rs1), vm # nf=3
vlseg4e8.v vd, (rs1), vm # nf=4
vlseg5e8.v vd, (rs1), vm # nf=5
vlseg6e8.v vd, (rs1), vm # nf=6
vlseg7e8.v vd, (rs1), vm # nf=7
vlseg8e8.v vd, (rs1), vm # nf=8
- Encoding
- Description
-
8-bit unit-stride segment load (nf = number of fields per segment)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle8_v.h
- Intrinsic Functions
Details
vint8mf8x2_t __riscv_vlseg2e8_v_i8mf8x2 (const int8_t *base, size_t vl);
vint8mf8x3_t __riscv_vlseg3e8_v_i8mf8x3 (const int8_t *base, size_t vl);
vint8mf8x4_t __riscv_vlseg4e8_v_i8mf8x4 (const int8_t *base, size_t vl);
vint8mf8x5_t __riscv_vlseg5e8_v_i8mf8x5 (const int8_t *base, size_t vl);
vint8mf8x6_t __riscv_vlseg6e8_v_i8mf8x6 (const int8_t *base, size_t vl);
vint8mf8x7_t __riscv_vlseg7e8_v_i8mf8x7 (const int8_t *base, size_t vl);
vint8mf8x8_t __riscv_vlseg8e8_v_i8mf8x8 (const int8_t *base, size_t vl);
vint8mf4x2_t __riscv_vlseg2e8_v_i8mf4x2 (const int8_t *base, size_t vl);
vint8mf4x3_t __riscv_vlseg3e8_v_i8mf4x3 (const int8_t *base, size_t vl);
vint8mf4x4_t __riscv_vlseg4e8_v_i8mf4x4 (const int8_t *base, size_t vl);
vint8mf4x5_t __riscv_vlseg5e8_v_i8mf4x5 (const int8_t *base, size_t vl);
vint8mf4x6_t __riscv_vlseg6e8_v_i8mf4x6 (const int8_t *base, size_t vl);
vint8mf4x7_t __riscv_vlseg7e8_v_i8mf4x7 (const int8_t *base, size_t vl);
vint8mf4x8_t __riscv_vlseg8e8_v_i8mf4x8 (const int8_t *base, size_t vl);
vint8mf2x2_t __riscv_vlseg2e8_v_i8mf2x2 (const int8_t *base, size_t vl);
vint8mf2x3_t __riscv_vlseg3e8_v_i8mf2x3 (const int8_t *base, size_t vl);
vint8mf2x4_t __riscv_vlseg4e8_v_i8mf2x4 (const int8_t *base, size_t vl);
vint8mf2x5_t __riscv_vlseg5e8_v_i8mf2x5 (const int8_t *base, size_t vl);
vint8mf2x6_t __riscv_vlseg6e8_v_i8mf2x6 (const int8_t *base, size_t vl);
vint8mf2x7_t __riscv_vlseg7e8_v_i8mf2x7 (const int8_t *base, size_t vl);
vint8mf2x8_t __riscv_vlseg8e8_v_i8mf2x8 (const int8_t *base, size_t vl);
vint8m1x2_t __riscv_vlseg2e8_v_i8m1x2 (const int8_t *base, size_t vl);
vint8m1x3_t __riscv_vlseg3e8_v_i8m1x3 (const int8_t *base, size_t vl);
vint8m1x4_t __riscv_vlseg4e8_v_i8m1x4 (const int8_t *base, size_t vl);
vint8m1x5_t __riscv_vlseg5e8_v_i8m1x5 (const int8_t *base, size_t vl);
vint8m1x6_t __riscv_vlseg6e8_v_i8m1x6 (const int8_t *base, size_t vl);
vint8m1x7_t __riscv_vlseg7e8_v_i8m1x7 (const int8_t *base, size_t vl);
vint8m1x8_t __riscv_vlseg8e8_v_i8m1x8 (const int8_t *base, size_t vl);
vint8m2x2_t __riscv_vlseg2e8_v_i8m2x2 (const int8_t *base, size_t vl);
vint8m2x3_t __riscv_vlseg3e8_v_i8m2x3 (const int8_t *base, size_t vl);
vint8m2x4_t __riscv_vlseg4e8_v_i8m2x4 (const int8_t *base, size_t vl);
vint8m4x2_t __riscv_vlseg2e8_v_i8m4x2 (const int8_t *base, size_t vl);
vuint8mf8x2_t __riscv_vlseg2e8_v_u8mf8x2 (const uint8_t *base, size_t vl);
vuint8mf8x3_t __riscv_vlseg3e8_v_u8mf8x3 (const uint8_t *base, size_t vl);
vuint8mf8x4_t __riscv_vlseg4e8_v_u8mf8x4 (const uint8_t *base, size_t vl);
vuint8mf8x5_t __riscv_vlseg5e8_v_u8mf8x5 (const uint8_t *base, size_t vl);
vuint8mf8x6_t __riscv_vlseg6e8_v_u8mf8x6 (const uint8_t *base, size_t vl);
vuint8mf8x7_t __riscv_vlseg7e8_v_u8mf8x7 (const uint8_t *base, size_t vl);
vuint8mf8x8_t __riscv_vlseg8e8_v_u8mf8x8 (const uint8_t *base, size_t vl);
vuint8mf4x2_t __riscv_vlseg2e8_v_u8mf4x2 (const uint8_t *base, size_t vl);
vuint8mf4x3_t __riscv_vlseg3e8_v_u8mf4x3 (const uint8_t *base, size_t vl);
vuint8mf4x4_t __riscv_vlseg4e8_v_u8mf4x4 (const uint8_t *base, size_t vl);
vuint8mf4x5_t __riscv_vlseg5e8_v_u8mf4x5 (const uint8_t *base, size_t vl);
vuint8mf4x6_t __riscv_vlseg6e8_v_u8mf4x6 (const uint8_t *base, size_t vl);
vuint8mf4x7_t __riscv_vlseg7e8_v_u8mf4x7 (const uint8_t *base, size_t vl);
vuint8mf4x8_t __riscv_vlseg8e8_v_u8mf4x8 (const uint8_t *base, size_t vl);
vuint8mf2x2_t __riscv_vlseg2e8_v_u8mf2x2 (const uint8_t *base, size_t vl);
vuint8mf2x3_t __riscv_vlseg3e8_v_u8mf2x3 (const uint8_t *base, size_t vl);
vuint8mf2x4_t __riscv_vlseg4e8_v_u8mf2x4 (const uint8_t *base, size_t vl);
vuint8mf2x5_t __riscv_vlseg5e8_v_u8mf2x5 (const uint8_t *base, size_t vl);
vuint8mf2x6_t __riscv_vlseg6e8_v_u8mf2x6 (const uint8_t *base, size_t vl);
vuint8mf2x7_t __riscv_vlseg7e8_v_u8mf2x7 (const uint8_t *base, size_t vl);
vuint8mf2x8_t __riscv_vlseg8e8_v_u8mf2x8 (const uint8_t *base, size_t vl);
vuint8m1x2_t __riscv_vlseg2e8_v_u8m1x2 (const uint8_t *base, size_t vl);
vuint8m1x3_t __riscv_vlseg3e8_v_u8m1x3 (const uint8_t *base, size_t vl);
vuint8m1x4_t __riscv_vlseg4e8_v_u8m1x4 (const uint8_t *base, size_t vl);
vuint8m1x5_t __riscv_vlseg5e8_v_u8m1x5 (const uint8_t *base, size_t vl);
vuint8m1x6_t __riscv_vlseg6e8_v_u8m1x6 (const uint8_t *base, size_t vl);
vuint8m1x7_t __riscv_vlseg7e8_v_u8m1x7 (const uint8_t *base, size_t vl);
vuint8m1x8_t __riscv_vlseg8e8_v_u8m1x8 (const uint8_t *base, size_t vl);
vuint8m2x2_t __riscv_vlseg2e8_v_u8m2x2 (const uint8_t *base, size_t vl);
vuint8m2x3_t __riscv_vlseg3e8_v_u8m2x3 (const uint8_t *base, size_t vl);
vuint8m2x4_t __riscv_vlseg4e8_v_u8m2x4 (const uint8_t *base, size_t vl);
vuint8m4x2_t __riscv_vlseg2e8_v_u8m4x2 (const uint8_t *base, size_t vl);
vint8mf8x2_t __riscv_vlseg2e8_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf8x3_t __riscv_vlseg3e8_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf8x4_t __riscv_vlseg4e8_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf8x5_t __riscv_vlseg5e8_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf8x6_t __riscv_vlseg6e8_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf8x7_t __riscv_vlseg7e8_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf8x8_t __riscv_vlseg8e8_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, size_t vl);
vint8mf4x2_t __riscv_vlseg2e8_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf4x3_t __riscv_vlseg3e8_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf4x4_t __riscv_vlseg4e8_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf4x5_t __riscv_vlseg5e8_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf4x6_t __riscv_vlseg6e8_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf4x7_t __riscv_vlseg7e8_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf4x8_t __riscv_vlseg8e8_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, size_t vl);
vint8mf2x2_t __riscv_vlseg2e8_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8mf2x3_t __riscv_vlseg3e8_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8mf2x4_t __riscv_vlseg4e8_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8mf2x5_t __riscv_vlseg5e8_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8mf2x6_t __riscv_vlseg6e8_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8mf2x7_t __riscv_vlseg7e8_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8mf2x8_t __riscv_vlseg8e8_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, size_t vl);
vint8m1x2_t __riscv_vlseg2e8_v_i8m1x2_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m1x3_t __riscv_vlseg3e8_v_i8m1x3_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m1x4_t __riscv_vlseg4e8_v_i8m1x4_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m1x5_t __riscv_vlseg5e8_v_i8m1x5_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m1x6_t __riscv_vlseg6e8_v_i8m1x6_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m1x7_t __riscv_vlseg7e8_v_i8m1x7_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m1x8_t __riscv_vlseg8e8_v_i8m1x8_m (vbool8_t mask, const int8_t *base, size_t vl);
vint8m2x2_t __riscv_vlseg2e8_v_i8m2x2_m (vbool4_t mask, const int8_t *base, size_t vl);
vint8m2x3_t __riscv_vlseg3e8_v_i8m2x3_m (vbool4_t mask, const int8_t *base, size_t vl);
vint8m2x4_t __riscv_vlseg4e8_v_i8m2x4_m (vbool4_t mask, const int8_t *base, size_t vl);
vint8m4x2_t __riscv_vlseg2e8_v_i8m4x2_m (vbool2_t mask, const int8_t *base, size_t vl);
vuint8mf8x2_t __riscv_vlseg2e8_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf8x3_t __riscv_vlseg3e8_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf8x4_t __riscv_vlseg4e8_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf8x5_t __riscv_vlseg5e8_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf8x6_t __riscv_vlseg6e8_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf8x7_t __riscv_vlseg7e8_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf8x8_t __riscv_vlseg8e8_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, size_t vl);
vuint8mf4x2_t __riscv_vlseg2e8_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf4x3_t __riscv_vlseg3e8_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf4x4_t __riscv_vlseg4e8_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf4x5_t __riscv_vlseg5e8_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf4x6_t __riscv_vlseg6e8_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf4x7_t __riscv_vlseg7e8_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf4x8_t __riscv_vlseg8e8_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, size_t vl);
vuint8mf2x2_t __riscv_vlseg2e8_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8mf2x3_t __riscv_vlseg3e8_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8mf2x4_t __riscv_vlseg4e8_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8mf2x5_t __riscv_vlseg5e8_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8mf2x6_t __riscv_vlseg6e8_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8mf2x7_t __riscv_vlseg7e8_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8mf2x8_t __riscv_vlseg8e8_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, size_t vl);
vuint8m1x2_t __riscv_vlseg2e8_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m1x3_t __riscv_vlseg3e8_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m1x4_t __riscv_vlseg4e8_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m1x5_t __riscv_vlseg5e8_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m1x6_t __riscv_vlseg6e8_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m1x7_t __riscv_vlseg7e8_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m1x8_t __riscv_vlseg8e8_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, size_t vl);
vuint8m2x2_t __riscv_vlseg2e8_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, size_t vl);
vuint8m2x3_t __riscv_vlseg3e8_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, size_t vl);
vuint8m2x4_t __riscv_vlseg4e8_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, size_t vl);
vuint8m4x2_t __riscv_vlseg2e8_v_u8m4x2_m (vbool2_t mask, const uint8_t *base, size_t vl);
6.2. vlseg<nf>e16.v
- Mnemonic
vlseg2e16.v vd, (rs1), vm # nf=2
vlseg3e16.v vd, (rs1), vm # nf=3
vlseg4e16.v vd, (rs1), vm # nf=4
vlseg5e16.v vd, (rs1), vm # nf=5
vlseg6e16.v vd, (rs1), vm # nf=6
vlseg7e16.v vd, (rs1), vm # nf=7
vlseg8e16.v vd, (rs1), vm # nf=8
- Encoding
- Description
-
16-bit unit-stride segment load (nf = number of fields per segment)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle16_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vlseg2e16_v_f16mf4x2 (const float16_t *base, size_t vl);
vfloat16mf4x3_t __riscv_vlseg3e16_v_f16mf4x3 (const float16_t *base, size_t vl);
vfloat16mf4x4_t __riscv_vlseg4e16_v_f16mf4x4 (const float16_t *base, size_t vl);
vfloat16mf4x5_t __riscv_vlseg5e16_v_f16mf4x5 (const float16_t *base, size_t vl);
vfloat16mf4x6_t __riscv_vlseg6e16_v_f16mf4x6 (const float16_t *base, size_t vl);
vfloat16mf4x7_t __riscv_vlseg7e16_v_f16mf4x7 (const float16_t *base, size_t vl);
vfloat16mf4x8_t __riscv_vlseg8e16_v_f16mf4x8 (const float16_t *base, size_t vl);
vfloat16mf2x2_t __riscv_vlseg2e16_v_f16mf2x2 (const float16_t *base, size_t vl);
vfloat16mf2x3_t __riscv_vlseg3e16_v_f16mf2x3 (const float16_t *base, size_t vl);
vfloat16mf2x4_t __riscv_vlseg4e16_v_f16mf2x4 (const float16_t *base, size_t vl);
vfloat16mf2x5_t __riscv_vlseg5e16_v_f16mf2x5 (const float16_t *base, size_t vl);
vfloat16mf2x6_t __riscv_vlseg6e16_v_f16mf2x6 (const float16_t *base, size_t vl);
vfloat16mf2x7_t __riscv_vlseg7e16_v_f16mf2x7 (const float16_t *base, size_t vl);
vfloat16mf2x8_t __riscv_vlseg8e16_v_f16mf2x8 (const float16_t *base, size_t vl);
vfloat16m1x2_t __riscv_vlseg2e16_v_f16m1x2 (const float16_t *base, size_t vl);
vfloat16m1x3_t __riscv_vlseg3e16_v_f16m1x3 (const float16_t *base, size_t vl);
vfloat16m1x4_t __riscv_vlseg4e16_v_f16m1x4 (const float16_t *base, size_t vl);
vfloat16m1x5_t __riscv_vlseg5e16_v_f16m1x5 (const float16_t *base, size_t vl);
vfloat16m1x6_t __riscv_vlseg6e16_v_f16m1x6 (const float16_t *base, size_t vl);
vfloat16m1x7_t __riscv_vlseg7e16_v_f16m1x7 (const float16_t *base, size_t vl);
vfloat16m1x8_t __riscv_vlseg8e16_v_f16m1x8 (const float16_t *base, size_t vl);
vfloat16m2x2_t __riscv_vlseg2e16_v_f16m2x2 (const float16_t *base, size_t vl);
vfloat16m2x3_t __riscv_vlseg3e16_v_f16m2x3 (const float16_t *base, size_t vl);
vfloat16m2x4_t __riscv_vlseg4e16_v_f16m2x4 (const float16_t *base, size_t vl);
vfloat16m4x2_t __riscv_vlseg2e16_v_f16m4x2 (const float16_t *base, size_t vl);
vint16mf4x2_t __riscv_vlseg2e16_v_i16mf4x2 (const int16_t *base, size_t vl);
vint16mf4x3_t __riscv_vlseg3e16_v_i16mf4x3 (const int16_t *base, size_t vl);
vint16mf4x4_t __riscv_vlseg4e16_v_i16mf4x4 (const int16_t *base, size_t vl);
vint16mf4x5_t __riscv_vlseg5e16_v_i16mf4x5 (const int16_t *base, size_t vl);
vint16mf4x6_t __riscv_vlseg6e16_v_i16mf4x6 (const int16_t *base, size_t vl);
vint16mf4x7_t __riscv_vlseg7e16_v_i16mf4x7 (const int16_t *base, size_t vl);
vint16mf4x8_t __riscv_vlseg8e16_v_i16mf4x8 (const int16_t *base, size_t vl);
vint16mf2x2_t __riscv_vlseg2e16_v_i16mf2x2 (const int16_t *base, size_t vl);
vint16mf2x3_t __riscv_vlseg3e16_v_i16mf2x3 (const int16_t *base, size_t vl);
vint16mf2x4_t __riscv_vlseg4e16_v_i16mf2x4 (const int16_t *base, size_t vl);
vint16mf2x5_t __riscv_vlseg5e16_v_i16mf2x5 (const int16_t *base, size_t vl);
vint16mf2x6_t __riscv_vlseg6e16_v_i16mf2x6 (const int16_t *base, size_t vl);
vint16mf2x7_t __riscv_vlseg7e16_v_i16mf2x7 (const int16_t *base, size_t vl);
vint16mf2x8_t __riscv_vlseg8e16_v_i16mf2x8 (const int16_t *base, size_t vl);
vint16m1x2_t __riscv_vlseg2e16_v_i16m1x2 (const int16_t *base, size_t vl);
vint16m1x3_t __riscv_vlseg3e16_v_i16m1x3 (const int16_t *base, size_t vl);
vint16m1x4_t __riscv_vlseg4e16_v_i16m1x4 (const int16_t *base, size_t vl);
vint16m1x5_t __riscv_vlseg5e16_v_i16m1x5 (const int16_t *base, size_t vl);
vint16m1x6_t __riscv_vlseg6e16_v_i16m1x6 (const int16_t *base, size_t vl);
vint16m1x7_t __riscv_vlseg7e16_v_i16m1x7 (const int16_t *base, size_t vl);
vint16m1x8_t __riscv_vlseg8e16_v_i16m1x8 (const int16_t *base, size_t vl);
vint16m2x2_t __riscv_vlseg2e16_v_i16m2x2 (const int16_t *base, size_t vl);
vint16m2x3_t __riscv_vlseg3e16_v_i16m2x3 (const int16_t *base, size_t vl);
vint16m2x4_t __riscv_vlseg4e16_v_i16m2x4 (const int16_t *base, size_t vl);
vint16m4x2_t __riscv_vlseg2e16_v_i16m4x2 (const int16_t *base, size_t vl);
vuint16mf4x2_t __riscv_vlseg2e16_v_u16mf4x2 (const uint16_t *base, size_t vl);
vuint16mf4x3_t __riscv_vlseg3e16_v_u16mf4x3 (const uint16_t *base, size_t vl);
vuint16mf4x4_t __riscv_vlseg4e16_v_u16mf4x4 (const uint16_t *base, size_t vl);
vuint16mf4x5_t __riscv_vlseg5e16_v_u16mf4x5 (const uint16_t *base, size_t vl);
vuint16mf4x6_t __riscv_vlseg6e16_v_u16mf4x6 (const uint16_t *base, size_t vl);
vuint16mf4x7_t __riscv_vlseg7e16_v_u16mf4x7 (const uint16_t *base, size_t vl);
vuint16mf4x8_t __riscv_vlseg8e16_v_u16mf4x8 (const uint16_t *base, size_t vl);
vuint16mf2x2_t __riscv_vlseg2e16_v_u16mf2x2 (const uint16_t *base, size_t vl);
vuint16mf2x3_t __riscv_vlseg3e16_v_u16mf2x3 (const uint16_t *base, size_t vl);
vuint16mf2x4_t __riscv_vlseg4e16_v_u16mf2x4 (const uint16_t *base, size_t vl);
vuint16mf2x5_t __riscv_vlseg5e16_v_u16mf2x5 (const uint16_t *base, size_t vl);
vuint16mf2x6_t __riscv_vlseg6e16_v_u16mf2x6 (const uint16_t *base, size_t vl);
vuint16mf2x7_t __riscv_vlseg7e16_v_u16mf2x7 (const uint16_t *base, size_t vl);
vuint16mf2x8_t __riscv_vlseg8e16_v_u16mf2x8 (const uint16_t *base, size_t vl);
vuint16m1x2_t __riscv_vlseg2e16_v_u16m1x2 (const uint16_t *base, size_t vl);
vuint16m1x3_t __riscv_vlseg3e16_v_u16m1x3 (const uint16_t *base, size_t vl);
vuint16m1x4_t __riscv_vlseg4e16_v_u16m1x4 (const uint16_t *base, size_t vl);
vuint16m1x5_t __riscv_vlseg5e16_v_u16m1x5 (const uint16_t *base, size_t vl);
vuint16m1x6_t __riscv_vlseg6e16_v_u16m1x6 (const uint16_t *base, size_t vl);
vuint16m1x7_t __riscv_vlseg7e16_v_u16m1x7 (const uint16_t *base, size_t vl);
vuint16m1x8_t __riscv_vlseg8e16_v_u16m1x8 (const uint16_t *base, size_t vl);
vuint16m2x2_t __riscv_vlseg2e16_v_u16m2x2 (const uint16_t *base, size_t vl);
vuint16m2x3_t __riscv_vlseg3e16_v_u16m2x3 (const uint16_t *base, size_t vl);
vuint16m2x4_t __riscv_vlseg4e16_v_u16m2x4 (const uint16_t *base, size_t vl);
vuint16m4x2_t __riscv_vlseg2e16_v_u16m4x2 (const uint16_t *base, size_t vl);
vfloat16mf4x2_t __riscv_vlseg2e16_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf4x3_t __riscv_vlseg3e16_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf4x4_t __riscv_vlseg4e16_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf4x5_t __riscv_vlseg5e16_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf4x6_t __riscv_vlseg6e16_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf4x7_t __riscv_vlseg7e16_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf4x8_t __riscv_vlseg8e16_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, size_t vl);
vfloat16mf2x2_t __riscv_vlseg2e16_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16mf2x3_t __riscv_vlseg3e16_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16mf2x4_t __riscv_vlseg4e16_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16mf2x5_t __riscv_vlseg5e16_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16mf2x6_t __riscv_vlseg6e16_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16mf2x7_t __riscv_vlseg7e16_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16mf2x8_t __riscv_vlseg8e16_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, size_t vl);
vfloat16m1x2_t __riscv_vlseg2e16_v_f16m1x2_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m1x3_t __riscv_vlseg3e16_v_f16m1x3_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m1x4_t __riscv_vlseg4e16_v_f16m1x4_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m1x5_t __riscv_vlseg5e16_v_f16m1x5_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m1x6_t __riscv_vlseg6e16_v_f16m1x6_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m1x7_t __riscv_vlseg7e16_v_f16m1x7_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m1x8_t __riscv_vlseg8e16_v_f16m1x8_m (vbool16_t mask, const float16_t *base, size_t vl);
vfloat16m2x2_t __riscv_vlseg2e16_v_f16m2x2_m (vbool8_t mask, const float16_t *base, size_t vl);
vfloat16m2x3_t __riscv_vlseg3e16_v_f16m2x3_m (vbool8_t mask, const float16_t *base, size_t vl);
vfloat16m2x4_t __riscv_vlseg4e16_v_f16m2x4_m (vbool8_t mask, const float16_t *base, size_t vl);
vfloat16m4x2_t __riscv_vlseg2e16_v_f16m4x2_m (vbool4_t mask, const float16_t *base, size_t vl);
vint16mf4x2_t __riscv_vlseg2e16_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf4x3_t __riscv_vlseg3e16_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf4x4_t __riscv_vlseg4e16_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf4x5_t __riscv_vlseg5e16_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf4x6_t __riscv_vlseg6e16_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf4x7_t __riscv_vlseg7e16_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf4x8_t __riscv_vlseg8e16_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, size_t vl);
vint16mf2x2_t __riscv_vlseg2e16_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16mf2x3_t __riscv_vlseg3e16_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16mf2x4_t __riscv_vlseg4e16_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16mf2x5_t __riscv_vlseg5e16_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16mf2x6_t __riscv_vlseg6e16_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16mf2x7_t __riscv_vlseg7e16_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16mf2x8_t __riscv_vlseg8e16_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, size_t vl);
vint16m1x2_t __riscv_vlseg2e16_v_i16m1x2_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m1x3_t __riscv_vlseg3e16_v_i16m1x3_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m1x4_t __riscv_vlseg4e16_v_i16m1x4_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m1x5_t __riscv_vlseg5e16_v_i16m1x5_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m1x6_t __riscv_vlseg6e16_v_i16m1x6_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m1x7_t __riscv_vlseg7e16_v_i16m1x7_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m1x8_t __riscv_vlseg8e16_v_i16m1x8_m (vbool16_t mask, const int16_t *base, size_t vl);
vint16m2x2_t __riscv_vlseg2e16_v_i16m2x2_m (vbool8_t mask, const int16_t *base, size_t vl);
vint16m2x3_t __riscv_vlseg3e16_v_i16m2x3_m (vbool8_t mask, const int16_t *base, size_t vl);
vint16m2x4_t __riscv_vlseg4e16_v_i16m2x4_m (vbool8_t mask, const int16_t *base, size_t vl);
vint16m4x2_t __riscv_vlseg2e16_v_i16m4x2_m (vbool4_t mask, const int16_t *base, size_t vl);
vuint16mf4x2_t __riscv_vlseg2e16_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf4x3_t __riscv_vlseg3e16_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf4x4_t __riscv_vlseg4e16_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf4x5_t __riscv_vlseg5e16_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf4x6_t __riscv_vlseg6e16_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf4x7_t __riscv_vlseg7e16_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf4x8_t __riscv_vlseg8e16_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, size_t vl);
vuint16mf2x2_t __riscv_vlseg2e16_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16mf2x3_t __riscv_vlseg3e16_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16mf2x4_t __riscv_vlseg4e16_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16mf2x5_t __riscv_vlseg5e16_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16mf2x6_t __riscv_vlseg6e16_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16mf2x7_t __riscv_vlseg7e16_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16mf2x8_t __riscv_vlseg8e16_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, size_t vl);
vuint16m1x2_t __riscv_vlseg2e16_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m1x3_t __riscv_vlseg3e16_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m1x4_t __riscv_vlseg4e16_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m1x5_t __riscv_vlseg5e16_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m1x6_t __riscv_vlseg6e16_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m1x7_t __riscv_vlseg7e16_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m1x8_t __riscv_vlseg8e16_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, size_t vl);
vuint16m2x2_t __riscv_vlseg2e16_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, size_t vl);
vuint16m2x3_t __riscv_vlseg3e16_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, size_t vl);
vuint16m2x4_t __riscv_vlseg4e16_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, size_t vl);
vuint16m4x2_t __riscv_vlseg2e16_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, size_t vl);
6.3. vlseg<nf>e32.v
- Mnemonic
vlseg2e32.v vd, (rs1), vm # nf=2
vlseg3e32.v vd, (rs1), vm # nf=3
vlseg4e32.v vd, (rs1), vm # nf=4
vlseg5e32.v vd, (rs1), vm # nf=5
vlseg6e32.v vd, (rs1), vm # nf=6
vlseg7e32.v vd, (rs1), vm # nf=7
vlseg8e32.v vd, (rs1), vm # nf=8
- Encoding
- Description
-
32-bit Unit-stride segment load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle32_v.h
- Intrinsic Functions
Details
vfloat32mf2x2_t __riscv_vlseg2e32_v_f32mf2x2 (const float32_t *base, size_t vl);
vfloat32mf2x3_t __riscv_vlseg3e32_v_f32mf2x3 (const float32_t *base, size_t vl);
vfloat32mf2x4_t __riscv_vlseg4e32_v_f32mf2x4 (const float32_t *base, size_t vl);
vfloat32mf2x5_t __riscv_vlseg5e32_v_f32mf2x5 (const float32_t *base, size_t vl);
vfloat32mf2x6_t __riscv_vlseg6e32_v_f32mf2x6 (const float32_t *base, size_t vl);
vfloat32mf2x7_t __riscv_vlseg7e32_v_f32mf2x7 (const float32_t *base, size_t vl);
vfloat32mf2x8_t __riscv_vlseg8e32_v_f32mf2x8 (const float32_t *base, size_t vl);
vfloat32m1x2_t __riscv_vlseg2e32_v_f32m1x2 (const float32_t *base, size_t vl);
vfloat32m1x3_t __riscv_vlseg3e32_v_f32m1x3 (const float32_t *base, size_t vl);
vfloat32m1x4_t __riscv_vlseg4e32_v_f32m1x4 (const float32_t *base, size_t vl);
vfloat32m1x5_t __riscv_vlseg5e32_v_f32m1x5 (const float32_t *base, size_t vl);
vfloat32m1x6_t __riscv_vlseg6e32_v_f32m1x6 (const float32_t *base, size_t vl);
vfloat32m1x7_t __riscv_vlseg7e32_v_f32m1x7 (const float32_t *base, size_t vl);
vfloat32m1x8_t __riscv_vlseg8e32_v_f32m1x8 (const float32_t *base, size_t vl);
vfloat32m2x2_t __riscv_vlseg2e32_v_f32m2x2 (const float32_t *base, size_t vl);
vfloat32m2x3_t __riscv_vlseg3e32_v_f32m2x3 (const float32_t *base, size_t vl);
vfloat32m2x4_t __riscv_vlseg4e32_v_f32m2x4 (const float32_t *base, size_t vl);
vfloat32m4x2_t __riscv_vlseg2e32_v_f32m4x2 (const float32_t *base, size_t vl);
vint32mf2x2_t __riscv_vlseg2e32_v_i32mf2x2 (const int32_t *base, size_t vl);
vint32mf2x3_t __riscv_vlseg3e32_v_i32mf2x3 (const int32_t *base, size_t vl);
vint32mf2x4_t __riscv_vlseg4e32_v_i32mf2x4 (const int32_t *base, size_t vl);
vint32mf2x5_t __riscv_vlseg5e32_v_i32mf2x5 (const int32_t *base, size_t vl);
vint32mf2x6_t __riscv_vlseg6e32_v_i32mf2x6 (const int32_t *base, size_t vl);
vint32mf2x7_t __riscv_vlseg7e32_v_i32mf2x7 (const int32_t *base, size_t vl);
vint32mf2x8_t __riscv_vlseg8e32_v_i32mf2x8 (const int32_t *base, size_t vl);
vint32m1x2_t __riscv_vlseg2e32_v_i32m1x2 (const int32_t *base, size_t vl);
vint32m1x3_t __riscv_vlseg3e32_v_i32m1x3 (const int32_t *base, size_t vl);
vint32m1x4_t __riscv_vlseg4e32_v_i32m1x4 (const int32_t *base, size_t vl);
vint32m1x5_t __riscv_vlseg5e32_v_i32m1x5 (const int32_t *base, size_t vl);
vint32m1x6_t __riscv_vlseg6e32_v_i32m1x6 (const int32_t *base, size_t vl);
vint32m1x7_t __riscv_vlseg7e32_v_i32m1x7 (const int32_t *base, size_t vl);
vint32m1x8_t __riscv_vlseg8e32_v_i32m1x8 (const int32_t *base, size_t vl);
vint32m2x2_t __riscv_vlseg2e32_v_i32m2x2 (const int32_t *base, size_t vl);
vint32m2x3_t __riscv_vlseg3e32_v_i32m2x3 (const int32_t *base, size_t vl);
vint32m2x4_t __riscv_vlseg4e32_v_i32m2x4 (const int32_t *base, size_t vl);
vint32m4x2_t __riscv_vlseg2e32_v_i32m4x2 (const int32_t *base, size_t vl);
vuint32mf2x2_t __riscv_vlseg2e32_v_u32mf2x2 (const uint32_t *base, size_t vl);
vuint32mf2x3_t __riscv_vlseg3e32_v_u32mf2x3 (const uint32_t *base, size_t vl);
vuint32mf2x4_t __riscv_vlseg4e32_v_u32mf2x4 (const uint32_t *base, size_t vl);
vuint32mf2x5_t __riscv_vlseg5e32_v_u32mf2x5 (const uint32_t *base, size_t vl);
vuint32mf2x6_t __riscv_vlseg6e32_v_u32mf2x6 (const uint32_t *base, size_t vl);
vuint32mf2x7_t __riscv_vlseg7e32_v_u32mf2x7 (const uint32_t *base, size_t vl);
vuint32mf2x8_t __riscv_vlseg8e32_v_u32mf2x8 (const uint32_t *base, size_t vl);
vuint32m1x2_t __riscv_vlseg2e32_v_u32m1x2 (const uint32_t *base, size_t vl);
vuint32m1x3_t __riscv_vlseg3e32_v_u32m1x3 (const uint32_t *base, size_t vl);
vuint32m1x4_t __riscv_vlseg4e32_v_u32m1x4 (const uint32_t *base, size_t vl);
vuint32m1x5_t __riscv_vlseg5e32_v_u32m1x5 (const uint32_t *base, size_t vl);
vuint32m1x6_t __riscv_vlseg6e32_v_u32m1x6 (const uint32_t *base, size_t vl);
vuint32m1x7_t __riscv_vlseg7e32_v_u32m1x7 (const uint32_t *base, size_t vl);
vuint32m1x8_t __riscv_vlseg8e32_v_u32m1x8 (const uint32_t *base, size_t vl);
vuint32m2x2_t __riscv_vlseg2e32_v_u32m2x2 (const uint32_t *base, size_t vl);
vuint32m2x3_t __riscv_vlseg3e32_v_u32m2x3 (const uint32_t *base, size_t vl);
vuint32m2x4_t __riscv_vlseg4e32_v_u32m2x4 (const uint32_t *base, size_t vl);
vuint32m4x2_t __riscv_vlseg2e32_v_u32m4x2 (const uint32_t *base, size_t vl);
vfloat32mf2x2_t __riscv_vlseg2e32_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32mf2x3_t __riscv_vlseg3e32_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32mf2x4_t __riscv_vlseg4e32_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32mf2x5_t __riscv_vlseg5e32_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32mf2x6_t __riscv_vlseg6e32_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32mf2x7_t __riscv_vlseg7e32_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32mf2x8_t __riscv_vlseg8e32_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, size_t vl);
vfloat32m1x2_t __riscv_vlseg2e32_v_f32m1x2_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m1x3_t __riscv_vlseg3e32_v_f32m1x3_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m1x4_t __riscv_vlseg4e32_v_f32m1x4_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m1x5_t __riscv_vlseg5e32_v_f32m1x5_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m1x6_t __riscv_vlseg6e32_v_f32m1x6_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m1x7_t __riscv_vlseg7e32_v_f32m1x7_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m1x8_t __riscv_vlseg8e32_v_f32m1x8_m (vbool32_t mask, const float32_t *base, size_t vl);
vfloat32m2x2_t __riscv_vlseg2e32_v_f32m2x2_m (vbool16_t mask, const float32_t *base, size_t vl);
vfloat32m2x3_t __riscv_vlseg3e32_v_f32m2x3_m (vbool16_t mask, const float32_t *base, size_t vl);
vfloat32m2x4_t __riscv_vlseg4e32_v_f32m2x4_m (vbool16_t mask, const float32_t *base, size_t vl);
vfloat32m4x2_t __riscv_vlseg2e32_v_f32m4x2_m (vbool8_t mask, const float32_t *base, size_t vl);
vint32mf2x2_t __riscv_vlseg2e32_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32mf2x3_t __riscv_vlseg3e32_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32mf2x4_t __riscv_vlseg4e32_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32mf2x5_t __riscv_vlseg5e32_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32mf2x6_t __riscv_vlseg6e32_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32mf2x7_t __riscv_vlseg7e32_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32mf2x8_t __riscv_vlseg8e32_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, size_t vl);
vint32m1x2_t __riscv_vlseg2e32_v_i32m1x2_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m1x3_t __riscv_vlseg3e32_v_i32m1x3_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m1x4_t __riscv_vlseg4e32_v_i32m1x4_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m1x5_t __riscv_vlseg5e32_v_i32m1x5_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m1x6_t __riscv_vlseg6e32_v_i32m1x6_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m1x7_t __riscv_vlseg7e32_v_i32m1x7_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m1x8_t __riscv_vlseg8e32_v_i32m1x8_m (vbool32_t mask, const int32_t *base, size_t vl);
vint32m2x2_t __riscv_vlseg2e32_v_i32m2x2_m (vbool16_t mask, const int32_t *base, size_t vl);
vint32m2x3_t __riscv_vlseg3e32_v_i32m2x3_m (vbool16_t mask, const int32_t *base, size_t vl);
vint32m2x4_t __riscv_vlseg4e32_v_i32m2x4_m (vbool16_t mask, const int32_t *base, size_t vl);
vint32m4x2_t __riscv_vlseg2e32_v_i32m4x2_m (vbool8_t mask, const int32_t *base, size_t vl);
vuint32mf2x2_t __riscv_vlseg2e32_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32mf2x3_t __riscv_vlseg3e32_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32mf2x4_t __riscv_vlseg4e32_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32mf2x5_t __riscv_vlseg5e32_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32mf2x6_t __riscv_vlseg6e32_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32mf2x7_t __riscv_vlseg7e32_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32mf2x8_t __riscv_vlseg8e32_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, size_t vl);
vuint32m1x2_t __riscv_vlseg2e32_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m1x3_t __riscv_vlseg3e32_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m1x4_t __riscv_vlseg4e32_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m1x5_t __riscv_vlseg5e32_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m1x6_t __riscv_vlseg6e32_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m1x7_t __riscv_vlseg7e32_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m1x8_t __riscv_vlseg8e32_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, size_t vl);
vuint32m2x2_t __riscv_vlseg2e32_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, size_t vl);
vuint32m2x3_t __riscv_vlseg3e32_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, size_t vl);
vuint32m2x4_t __riscv_vlseg4e32_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, size_t vl);
vuint32m4x2_t __riscv_vlseg2e32_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, size_t vl);
6.4. vlseg<nf>e64.v
- Mnemonic
vlseg2e64.v vd, (rs1), vm # nf=2
vlseg3e64.v vd, (rs1), vm # nf=3
vlseg4e64.v vd, (rs1), vm # nf=4
vlseg5e64.v vd, (rs1), vm # nf=5
vlseg6e64.v vd, (rs1), vm # nf=6
vlseg7e64.v vd, (rs1), vm # nf=7
vlseg8e64.v vd, (rs1), vm # nf=8
- Encoding
- Description
-
64-bit Unit-stride segment load
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vle64_v.h
- Intrinsic Functions
Details
vfloat64m1x2_t __riscv_vlseg2e64_v_f64m1x2 (const float64_t *base, size_t vl);
vfloat64m1x3_t __riscv_vlseg3e64_v_f64m1x3 (const float64_t *base, size_t vl);
vfloat64m1x4_t __riscv_vlseg4e64_v_f64m1x4 (const float64_t *base, size_t vl);
vfloat64m1x5_t __riscv_vlseg5e64_v_f64m1x5 (const float64_t *base, size_t vl);
vfloat64m1x6_t __riscv_vlseg6e64_v_f64m1x6 (const float64_t *base, size_t vl);
vfloat64m1x7_t __riscv_vlseg7e64_v_f64m1x7 (const float64_t *base, size_t vl);
vfloat64m1x8_t __riscv_vlseg8e64_v_f64m1x8 (const float64_t *base, size_t vl);
vfloat64m2x2_t __riscv_vlseg2e64_v_f64m2x2 (const float64_t *base, size_t vl);
vfloat64m2x3_t __riscv_vlseg3e64_v_f64m2x3 (const float64_t *base, size_t vl);
vfloat64m2x4_t __riscv_vlseg4e64_v_f64m2x4 (const float64_t *base, size_t vl);
vfloat64m4x2_t __riscv_vlseg2e64_v_f64m4x2 (const float64_t *base, size_t vl);
vint64m1x2_t __riscv_vlseg2e64_v_i64m1x2 (const int64_t *base, size_t vl);
vint64m1x3_t __riscv_vlseg3e64_v_i64m1x3 (const int64_t *base, size_t vl);
vint64m1x4_t __riscv_vlseg4e64_v_i64m1x4 (const int64_t *base, size_t vl);
vint64m1x5_t __riscv_vlseg5e64_v_i64m1x5 (const int64_t *base, size_t vl);
vint64m1x6_t __riscv_vlseg6e64_v_i64m1x6 (const int64_t *base, size_t vl);
vint64m1x7_t __riscv_vlseg7e64_v_i64m1x7 (const int64_t *base, size_t vl);
vint64m1x8_t __riscv_vlseg8e64_v_i64m1x8 (const int64_t *base, size_t vl);
vint64m2x2_t __riscv_vlseg2e64_v_i64m2x2 (const int64_t *base, size_t vl);
vint64m2x3_t __riscv_vlseg3e64_v_i64m2x3 (const int64_t *base, size_t vl);
vint64m2x4_t __riscv_vlseg4e64_v_i64m2x4 (const int64_t *base, size_t vl);
vint64m4x2_t __riscv_vlseg2e64_v_i64m4x2 (const int64_t *base, size_t vl);
vuint64m1x2_t __riscv_vlseg2e64_v_u64m1x2 (const uint64_t *base, size_t vl);
vuint64m1x3_t __riscv_vlseg3e64_v_u64m1x3 (const uint64_t *base, size_t vl);
vuint64m1x4_t __riscv_vlseg4e64_v_u64m1x4 (const uint64_t *base, size_t vl);
vuint64m1x5_t __riscv_vlseg5e64_v_u64m1x5 (const uint64_t *base, size_t vl);
vuint64m1x6_t __riscv_vlseg6e64_v_u64m1x6 (const uint64_t *base, size_t vl);
vuint64m1x7_t __riscv_vlseg7e64_v_u64m1x7 (const uint64_t *base, size_t vl);
vuint64m1x8_t __riscv_vlseg8e64_v_u64m1x8 (const uint64_t *base, size_t vl);
vuint64m2x2_t __riscv_vlseg2e64_v_u64m2x2 (const uint64_t *base, size_t vl);
vuint64m2x3_t __riscv_vlseg3e64_v_u64m2x3 (const uint64_t *base, size_t vl);
vuint64m2x4_t __riscv_vlseg4e64_v_u64m2x4 (const uint64_t *base, size_t vl);
vuint64m4x2_t __riscv_vlseg2e64_v_u64m4x2 (const uint64_t *base, size_t vl);
vfloat64m1x2_t __riscv_vlseg2e64_v_f64m1x2_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m1x3_t __riscv_vlseg3e64_v_f64m1x3_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m1x4_t __riscv_vlseg4e64_v_f64m1x4_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m1x5_t __riscv_vlseg5e64_v_f64m1x5_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m1x6_t __riscv_vlseg6e64_v_f64m1x6_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m1x7_t __riscv_vlseg7e64_v_f64m1x7_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m1x8_t __riscv_vlseg8e64_v_f64m1x8_m (vbool64_t mask, const float64_t *base, size_t vl);
vfloat64m2x2_t __riscv_vlseg2e64_v_f64m2x2_m (vbool32_t mask, const float64_t *base, size_t vl);
vfloat64m2x3_t __riscv_vlseg3e64_v_f64m2x3_m (vbool32_t mask, const float64_t *base, size_t vl);
vfloat64m2x4_t __riscv_vlseg4e64_v_f64m2x4_m (vbool32_t mask, const float64_t *base, size_t vl);
vfloat64m4x2_t __riscv_vlseg2e64_v_f64m4x2_m (vbool16_t mask, const float64_t *base, size_t vl);
vint64m1x2_t __riscv_vlseg2e64_v_i64m1x2_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m1x3_t __riscv_vlseg3e64_v_i64m1x3_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m1x4_t __riscv_vlseg4e64_v_i64m1x4_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m1x5_t __riscv_vlseg5e64_v_i64m1x5_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m1x6_t __riscv_vlseg6e64_v_i64m1x6_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m1x7_t __riscv_vlseg7e64_v_i64m1x7_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m1x8_t __riscv_vlseg8e64_v_i64m1x8_m (vbool64_t mask, const int64_t *base, size_t vl);
vint64m2x2_t __riscv_vlseg2e64_v_i64m2x2_m (vbool32_t mask, const int64_t *base, size_t vl);
vint64m2x3_t __riscv_vlseg3e64_v_i64m2x3_m (vbool32_t mask, const int64_t *base, size_t vl);
vint64m2x4_t __riscv_vlseg4e64_v_i64m2x4_m (vbool32_t mask, const int64_t *base, size_t vl);
vint64m4x2_t __riscv_vlseg2e64_v_i64m4x2_m (vbool16_t mask, const int64_t *base, size_t vl);
vuint64m1x2_t __riscv_vlseg2e64_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m1x3_t __riscv_vlseg3e64_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m1x4_t __riscv_vlseg4e64_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m1x5_t __riscv_vlseg5e64_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m1x6_t __riscv_vlseg6e64_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m1x7_t __riscv_vlseg7e64_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m1x8_t __riscv_vlseg8e64_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, size_t vl);
vuint64m2x2_t __riscv_vlseg2e64_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, size_t vl);
vuint64m2x3_t __riscv_vlseg3e64_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, size_t vl);
vuint64m2x4_t __riscv_vlseg4e64_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, size_t vl);
vuint64m4x2_t __riscv_vlseg2e64_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, size_t vl);
6.5. vsseg<nf>e8.v
- Mnemonic
vsseg2e8.v vs3, (rs1), vm # nf=2
vsseg3e8.v vs3, (rs1), vm # nf=3
vsseg4e8.v vs3, (rs1), vm # nf=4
vsseg5e8.v vs3, (rs1), vm # nf=5
vsseg6e8.v vs3, (rs1), vm # nf=6
vsseg7e8.v vs3, (rs1), vm # nf=7
vsseg8e8.v vs3, (rs1), vm # nf=8
- Encoding
- Description
-
8-bit Unit-stride segment store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse8_v.h
- Intrinsic Functions
Details
void __riscv_vsseg2e8_v_i8mf8x2 (int8_t *base, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8mf8x3 (int8_t *base, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8mf8x4 (int8_t *base, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8mf8x5 (int8_t *base, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8mf8x6 (int8_t *base, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8mf8x7 (int8_t *base, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8mf8x8 (int8_t *base, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8mf4x2 (int8_t *base, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8mf4x3 (int8_t *base, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8mf4x4 (int8_t *base, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8mf4x5 (int8_t *base, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8mf4x6 (int8_t *base, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8mf4x7 (int8_t *base, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8mf4x8 (int8_t *base, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8mf2x2 (int8_t *base, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8mf2x3 (int8_t *base, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8mf2x4 (int8_t *base, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8mf2x5 (int8_t *base, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8mf2x6 (int8_t *base, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8mf2x7 (int8_t *base, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8mf2x8 (int8_t *base, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8m1x2 (int8_t *base, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8m1x3 (int8_t *base, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8m1x4 (int8_t *base, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8m1x5 (int8_t *base, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8m1x6 (int8_t *base, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8m1x7 (int8_t *base, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8m1x8 (int8_t *base, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8m2x2 (int8_t *base, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8m2x3 (int8_t *base, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8m2x4 (int8_t *base, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8m4x2 (int8_t *base, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8mf8x2 (uint8_t *base, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8mf8x3 (uint8_t *base, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8mf8x4 (uint8_t *base, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8mf8x5 (uint8_t *base, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8mf8x6 (uint8_t *base, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8mf8x7 (uint8_t *base, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8mf8x8 (uint8_t *base, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8mf4x2 (uint8_t *base, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8mf4x3 (uint8_t *base, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8mf4x4 (uint8_t *base, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8mf4x5 (uint8_t *base, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8mf4x6 (uint8_t *base, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8mf4x7 (uint8_t *base, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8mf4x8 (uint8_t *base, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8mf2x2 (uint8_t *base, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8mf2x3 (uint8_t *base, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8mf2x4 (uint8_t *base, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8mf2x5 (uint8_t *base, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8mf2x6 (uint8_t *base, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8mf2x7 (uint8_t *base, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8mf2x8 (uint8_t *base, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8m1x2 (uint8_t *base, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8m1x3 (uint8_t *base, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8m1x4 (uint8_t *base, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8m1x5 (uint8_t *base, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8m1x6 (uint8_t *base, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8m1x7 (uint8_t *base, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8m1x8 (uint8_t *base, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8m2x2 (uint8_t *base, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8m2x3 (uint8_t *base, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8m2x4 (uint8_t *base, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8m4x2 (uint8_t *base, vuint8m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8m1x2_m (vbool8_t mask, int8_t *base, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8m1x3_m (vbool8_t mask, int8_t *base, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8m1x4_m (vbool8_t mask, int8_t *base, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_i8m1x5_m (vbool8_t mask, int8_t *base, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_i8m1x6_m (vbool8_t mask, int8_t *base, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_i8m1x7_m (vbool8_t mask, int8_t *base, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_i8m1x8_m (vbool8_t mask, int8_t *base, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8m2x2_m (vbool4_t mask, int8_t *base, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_i8m2x3_m (vbool4_t mask, int8_t *base, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_i8m2x4_m (vbool4_t mask, int8_t *base, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_i8m4x2_m (vbool2_t mask, int8_t *base, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e8_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e8_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e8_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e8_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8m2x2_m (vbool4_t mask, uint8_t *base, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e8_v_u8m2x3_m (vbool4_t mask, uint8_t *base, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e8_v_u8m2x4_m (vbool4_t mask, uint8_t *base, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e8_v_u8m4x2_m (vbool2_t mask, uint8_t *base, vuint8m4x2_t v_tuple, size_t vl);
6.6. vsseg<nf>e16.v
- Mnemonic
vsseg2e16.v vs3, (rs1), vm # nf=2
vsseg3e16.v vs3, (rs1), vm # nf=3
vsseg4e16.v vs3, (rs1), vm # nf=4
vsseg5e16.v vs3, (rs1), vm # nf=5
vsseg6e16.v vs3, (rs1), vm # nf=6
vsseg7e16.v vs3, (rs1), vm # nf=7
vsseg8e16.v vs3, (rs1), vm # nf=8
- Encoding
- Description
-
16-bit Unit-stride segment store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse16_v.h
- Intrinsic Functions
Details
void __riscv_vsseg2e16_v_f16mf4x2 (float16_t *base, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16mf4x3 (float16_t *base, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16mf4x4 (float16_t *base, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_f16mf4x5 (float16_t *base, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_f16mf4x6 (float16_t *base, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_f16mf4x7 (float16_t *base, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_f16mf4x8 (float16_t *base, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16mf2x2 (float16_t *base, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16mf2x3 (float16_t *base, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16mf2x4 (float16_t *base, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_f16mf2x5 (float16_t *base, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_f16mf2x6 (float16_t *base, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_f16mf2x7 (float16_t *base, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_f16mf2x8 (float16_t *base, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16m1x2 (float16_t *base, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16m1x3 (float16_t *base, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16m1x4 (float16_t *base, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_f16m1x5 (float16_t *base, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_f16m1x6 (float16_t *base, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_f16m1x7 (float16_t *base, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_f16m1x8 (float16_t *base, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16m2x2 (float16_t *base, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16m2x3 (float16_t *base, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16m2x4 (float16_t *base, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16m4x2 (float16_t *base, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16mf4x2 (int16_t *base, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16mf4x3 (int16_t *base, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16mf4x4 (int16_t *base, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_i16mf4x5 (int16_t *base, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_i16mf4x6 (int16_t *base, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_i16mf4x7 (int16_t *base, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_i16mf4x8 (int16_t *base, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16mf2x2 (int16_t *base, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16mf2x3 (int16_t *base, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16mf2x4 (int16_t *base, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_i16mf2x5 (int16_t *base, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_i16mf2x6 (int16_t *base, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_i16mf2x7 (int16_t *base, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_i16mf2x8 (int16_t *base, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16m1x2 (int16_t *base, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16m1x3 (int16_t *base, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16m1x4 (int16_t *base, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_i16m1x5 (int16_t *base, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_i16m1x6 (int16_t *base, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_i16m1x7 (int16_t *base, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_i16m1x8 (int16_t *base, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16m2x2 (int16_t *base, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16m2x3 (int16_t *base, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16m2x4 (int16_t *base, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16m4x2 (int16_t *base, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16mf4x2 (uint16_t *base, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16mf4x3 (uint16_t *base, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16mf4x4 (uint16_t *base, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_u16mf4x5 (uint16_t *base, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_u16mf4x6 (uint16_t *base, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_u16mf4x7 (uint16_t *base, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_u16mf4x8 (uint16_t *base, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16mf2x2 (uint16_t *base, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16mf2x3 (uint16_t *base, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16mf2x4 (uint16_t *base, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_u16mf2x5 (uint16_t *base, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_u16mf2x6 (uint16_t *base, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_u16mf2x7 (uint16_t *base, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_u16mf2x8 (uint16_t *base, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16m1x2 (uint16_t *base, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16m1x3 (uint16_t *base, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16m1x4 (uint16_t *base, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_u16m1x5 (uint16_t *base, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_u16m1x6 (uint16_t *base, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_u16m1x7 (uint16_t *base, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_u16m1x8 (uint16_t *base, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16m2x2 (uint16_t *base, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16m2x3 (uint16_t *base, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16m2x4 (uint16_t *base, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16m4x2 (uint16_t *base, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16m1x2_m (vbool16_t mask, float16_t *base, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16m1x3_m (vbool16_t mask, float16_t *base, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16m1x4_m (vbool16_t mask, float16_t *base, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_f16m1x5_m (vbool16_t mask, float16_t *base, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_f16m1x6_m (vbool16_t mask, float16_t *base, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_f16m1x7_m (vbool16_t mask, float16_t *base, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_f16m1x8_m (vbool16_t mask, float16_t *base, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16m2x2_m (vbool8_t mask, float16_t *base, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_f16m2x3_m (vbool8_t mask, float16_t *base, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_f16m2x4_m (vbool8_t mask, float16_t *base, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_f16m4x2_m (vbool4_t mask, float16_t *base, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16m1x2_m (vbool16_t mask, int16_t *base, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16m1x3_m (vbool16_t mask, int16_t *base, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16m1x4_m (vbool16_t mask, int16_t *base, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_i16m1x5_m (vbool16_t mask, int16_t *base, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_i16m1x6_m (vbool16_t mask, int16_t *base, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_i16m1x7_m (vbool16_t mask, int16_t *base, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_i16m1x8_m (vbool16_t mask, int16_t *base, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16m2x2_m (vbool8_t mask, int16_t *base, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_i16m2x3_m (vbool8_t mask, int16_t *base, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_i16m2x4_m (vbool8_t mask, int16_t *base, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_i16m4x2_m (vbool4_t mask, int16_t *base, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e16_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e16_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e16_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e16_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e16_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e16_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e16_v_u16m4x2_m (vbool4_t mask, uint16_t *base, vuint16m4x2_t v_tuple, size_t vl);
6.7. vsseg<nf>e32.v
- Mnemonic
vsseg2e32.v vs3, (rs1), vm # nf=2
vsseg3e32.v vs3, (rs1), vm # nf=3
vsseg4e32.v vs3, (rs1), vm # nf=4
vsseg5e32.v vs3, (rs1), vm # nf=5
vsseg6e32.v vs3, (rs1), vm # nf=6
vsseg7e32.v vs3, (rs1), vm # nf=7
vsseg8e32.v vs3, (rs1), vm # nf=8
- Encoding
- Description
-
32-bit Unit-stride segment store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse32_v.h
- Intrinsic Functions
Details
void __riscv_vsseg2e32_v_f32mf2x2 (float32_t *base, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_f32mf2x3 (float32_t *base, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_f32mf2x4 (float32_t *base, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_f32mf2x5 (float32_t *base, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_f32mf2x6 (float32_t *base, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_f32mf2x7 (float32_t *base, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_f32mf2x8 (float32_t *base, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_f32m1x2 (float32_t *base, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_f32m1x3 (float32_t *base, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_f32m1x4 (float32_t *base, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_f32m1x5 (float32_t *base, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_f32m1x6 (float32_t *base, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_f32m1x7 (float32_t *base, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_f32m1x8 (float32_t *base, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_f32m2x2 (float32_t *base, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_f32m2x3 (float32_t *base, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_f32m2x4 (float32_t *base, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_f32m4x2 (float32_t *base, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32mf2x2 (int32_t *base, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_i32mf2x3 (int32_t *base, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_i32mf2x4 (int32_t *base, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_i32mf2x5 (int32_t *base, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_i32mf2x6 (int32_t *base, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_i32mf2x7 (int32_t *base, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_i32mf2x8 (int32_t *base, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32m1x2 (int32_t *base, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_i32m1x3 (int32_t *base, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_i32m1x4 (int32_t *base, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_i32m1x5 (int32_t *base, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_i32m1x6 (int32_t *base, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_i32m1x7 (int32_t *base, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_i32m1x8 (int32_t *base, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32m2x2 (int32_t *base, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_i32m2x3 (int32_t *base, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_i32m2x4 (int32_t *base, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32m4x2 (int32_t *base, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32mf2x2 (uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_u32mf2x3 (uint32_t *base, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_u32mf2x4 (uint32_t *base, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_u32mf2x5 (uint32_t *base, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_u32mf2x6 (uint32_t *base, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_u32mf2x7 (uint32_t *base, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_u32mf2x8 (uint32_t *base, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32m1x2 (uint32_t *base, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_u32m1x3 (uint32_t *base, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_u32m1x4 (uint32_t *base, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_u32m1x5 (uint32_t *base, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_u32m1x6 (uint32_t *base, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_u32m1x7 (uint32_t *base, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_u32m1x8 (uint32_t *base, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32m2x2 (uint32_t *base, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_u32m2x3 (uint32_t *base, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_u32m2x4 (uint32_t *base, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32m4x2 (uint32_t *base, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_f32m1x2_m (vbool32_t mask, float32_t *base, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_f32m1x3_m (vbool32_t mask, float32_t *base, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_f32m1x4_m (vbool32_t mask, float32_t *base, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_f32m1x5_m (vbool32_t mask, float32_t *base, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_f32m1x6_m (vbool32_t mask, float32_t *base, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_f32m1x7_m (vbool32_t mask, float32_t *base, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_f32m1x8_m (vbool32_t mask, float32_t *base, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_f32m2x2_m (vbool16_t mask, float32_t *base, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_f32m2x3_m (vbool16_t mask, float32_t *base, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_f32m2x4_m (vbool16_t mask, float32_t *base, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_f32m4x2_m (vbool8_t mask, float32_t *base, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32m1x2_m (vbool32_t mask, int32_t *base, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_i32m1x3_m (vbool32_t mask, int32_t *base, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_i32m1x4_m (vbool32_t mask, int32_t *base, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_i32m1x5_m (vbool32_t mask, int32_t *base, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_i32m1x6_m (vbool32_t mask, int32_t *base, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_i32m1x7_m (vbool32_t mask, int32_t *base, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_i32m1x8_m (vbool32_t mask, int32_t *base, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32m2x2_m (vbool16_t mask, int32_t *base, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_i32m2x3_m (vbool16_t mask, int32_t *base, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_i32m2x4_m (vbool16_t mask, int32_t *base, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_i32m4x2_m (vbool8_t mask, int32_t *base, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e32_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e32_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e32_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e32_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e32_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e32_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e32_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint32m4x2_t v_tuple, size_t vl);
6.8. vsseg<nf>e64.v
- Mnemonic
vsseg2e64.v vs3, (rs1), vm # nf=2
vsseg3e64.v vs3, (rs1), vm # nf=3
vsseg4e64.v vs3, (rs1), vm # nf=4
vsseg5e64.v vs3, (rs1), vm # nf=5
vsseg6e64.v vs3, (rs1), vm # nf=6
vsseg7e64.v vs3, (rs1), vm # nf=7
vsseg8e64.v vs3, (rs1), vm # nf=8
- Encoding
- Description
-
64-bit Unit-stride segment store
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vse64_v.h
- Intrinsic Functions
Details
void __riscv_vsseg2e64_v_f64m1x2 (float64_t *base, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_f64m1x3 (float64_t *base, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_f64m1x4 (float64_t *base, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e64_v_f64m1x5 (float64_t *base, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e64_v_f64m1x6 (float64_t *base, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e64_v_f64m1x7 (float64_t *base, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e64_v_f64m1x8 (float64_t *base, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_f64m2x2 (float64_t *base, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_f64m2x3 (float64_t *base, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_f64m2x4 (float64_t *base, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_f64m4x2 (float64_t *base, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_i64m1x2 (int64_t *base, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_i64m1x3 (int64_t *base, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_i64m1x4 (int64_t *base, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e64_v_i64m1x5 (int64_t *base, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e64_v_i64m1x6 (int64_t *base, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e64_v_i64m1x7 (int64_t *base, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e64_v_i64m1x8 (int64_t *base, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_i64m2x2 (int64_t *base, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_i64m2x3 (int64_t *base, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_i64m2x4 (int64_t *base, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_i64m4x2 (int64_t *base, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_u64m1x2 (uint64_t *base, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_u64m1x3 (uint64_t *base, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_u64m1x4 (uint64_t *base, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e64_v_u64m1x5 (uint64_t *base, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e64_v_u64m1x6 (uint64_t *base, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e64_v_u64m1x7 (uint64_t *base, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e64_v_u64m1x8 (uint64_t *base, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_u64m2x2 (uint64_t *base, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_u64m2x3 (uint64_t *base, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_u64m2x4 (uint64_t *base, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_u64m4x2 (uint64_t *base, vuint64m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_f64m1x2_m (vbool64_t mask, float64_t *base, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_f64m1x3_m (vbool64_t mask, float64_t *base, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_f64m1x4_m (vbool64_t mask, float64_t *base, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e64_v_f64m1x5_m (vbool64_t mask, float64_t *base, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e64_v_f64m1x6_m (vbool64_t mask, float64_t *base, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e64_v_f64m1x7_m (vbool64_t mask, float64_t *base, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e64_v_f64m1x8_m (vbool64_t mask, float64_t *base, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_f64m2x2_m (vbool32_t mask, float64_t *base, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_f64m2x3_m (vbool32_t mask, float64_t *base, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_f64m2x4_m (vbool32_t mask, float64_t *base, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_f64m4x2_m (vbool16_t mask, float64_t *base, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_i64m1x2_m (vbool64_t mask, int64_t *base, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_i64m1x3_m (vbool64_t mask, int64_t *base, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_i64m1x4_m (vbool64_t mask, int64_t *base, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e64_v_i64m1x5_m (vbool64_t mask, int64_t *base, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e64_v_i64m1x6_m (vbool64_t mask, int64_t *base, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e64_v_i64m1x7_m (vbool64_t mask, int64_t *base, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e64_v_i64m1x8_m (vbool64_t mask, int64_t *base, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_i64m2x2_m (vbool32_t mask, int64_t *base, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_i64m2x3_m (vbool32_t mask, int64_t *base, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_i64m2x4_m (vbool32_t mask, int64_t *base, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_i64m4x2_m (vbool16_t mask, int64_t *base, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsseg5e64_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsseg6e64_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsseg7e64_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsseg8e64_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsseg3e64_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsseg4e64_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsseg2e64_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint64m4x2_t v_tuple, size_t vl);
7. Vector Strided Segment Loads and Stores
7.1. vlsseg<nf>e8.v
- Mnemonic
vlsseg2e8.v vd, (rs1), rs2, vm # nf=2
vlsseg3e8.v vd, (rs1), rs2, vm # nf=3
vlsseg4e8.v vd, (rs1), rs2, vm # nf=4
vlsseg5e8.v vd, (rs1), rs2, vm # nf=5
vlsseg6e8.v vd, (rs1), rs2, vm # nf=6
vlsseg7e8.v vd, (rs1), rs2, vm # nf=7
vlsseg8e8.v vd, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
8-bit Strided segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse8_v.h
- Intrinsic Functions
Details
vint8mf8x2_t __riscv_vlsseg2e8_v_i8mf8x2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x3_t __riscv_vlsseg3e8_v_i8mf8x3 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x4_t __riscv_vlsseg4e8_v_i8mf8x4 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x5_t __riscv_vlsseg5e8_v_i8mf8x5 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x6_t __riscv_vlsseg6e8_v_i8mf8x6 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x7_t __riscv_vlsseg7e8_v_i8mf8x7 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x8_t __riscv_vlsseg8e8_v_i8mf8x8 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x2_t __riscv_vlsseg2e8_v_i8mf4x2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x3_t __riscv_vlsseg3e8_v_i8mf4x3 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x4_t __riscv_vlsseg4e8_v_i8mf4x4 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x5_t __riscv_vlsseg5e8_v_i8mf4x5 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x6_t __riscv_vlsseg6e8_v_i8mf4x6 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x7_t __riscv_vlsseg7e8_v_i8mf4x7 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x8_t __riscv_vlsseg8e8_v_i8mf4x8 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x2_t __riscv_vlsseg2e8_v_i8mf2x2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x3_t __riscv_vlsseg3e8_v_i8mf2x3 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x4_t __riscv_vlsseg4e8_v_i8mf2x4 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x5_t __riscv_vlsseg5e8_v_i8mf2x5 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x6_t __riscv_vlsseg6e8_v_i8mf2x6 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x7_t __riscv_vlsseg7e8_v_i8mf2x7 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x8_t __riscv_vlsseg8e8_v_i8mf2x8 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x2_t __riscv_vlsseg2e8_v_i8m1x2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x3_t __riscv_vlsseg3e8_v_i8m1x3 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x4_t __riscv_vlsseg4e8_v_i8m1x4 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x5_t __riscv_vlsseg5e8_v_i8m1x5 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x6_t __riscv_vlsseg6e8_v_i8m1x6 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x7_t __riscv_vlsseg7e8_v_i8m1x7 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x8_t __riscv_vlsseg8e8_v_i8m1x8 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2x2_t __riscv_vlsseg2e8_v_i8m2x2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2x3_t __riscv_vlsseg3e8_v_i8m2x3 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2x4_t __riscv_vlsseg4e8_v_i8m2x4 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m4x2_t __riscv_vlsseg2e8_v_i8m4x2 (const int8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x2_t __riscv_vlsseg2e8_v_u8mf8x2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x3_t __riscv_vlsseg3e8_v_u8mf8x3 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x4_t __riscv_vlsseg4e8_v_u8mf8x4 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x5_t __riscv_vlsseg5e8_v_u8mf8x5 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x6_t __riscv_vlsseg6e8_v_u8mf8x6 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x7_t __riscv_vlsseg7e8_v_u8mf8x7 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x8_t __riscv_vlsseg8e8_v_u8mf8x8 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x2_t __riscv_vlsseg2e8_v_u8mf4x2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x3_t __riscv_vlsseg3e8_v_u8mf4x3 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x4_t __riscv_vlsseg4e8_v_u8mf4x4 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x5_t __riscv_vlsseg5e8_v_u8mf4x5 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x6_t __riscv_vlsseg6e8_v_u8mf4x6 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x7_t __riscv_vlsseg7e8_v_u8mf4x7 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x8_t __riscv_vlsseg8e8_v_u8mf4x8 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x2_t __riscv_vlsseg2e8_v_u8mf2x2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x3_t __riscv_vlsseg3e8_v_u8mf2x3 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x4_t __riscv_vlsseg4e8_v_u8mf2x4 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x5_t __riscv_vlsseg5e8_v_u8mf2x5 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x6_t __riscv_vlsseg6e8_v_u8mf2x6 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x7_t __riscv_vlsseg7e8_v_u8mf2x7 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x8_t __riscv_vlsseg8e8_v_u8mf2x8 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x2_t __riscv_vlsseg2e8_v_u8m1x2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x3_t __riscv_vlsseg3e8_v_u8m1x3 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x4_t __riscv_vlsseg4e8_v_u8m1x4 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x5_t __riscv_vlsseg5e8_v_u8m1x5 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x6_t __riscv_vlsseg6e8_v_u8m1x6 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x7_t __riscv_vlsseg7e8_v_u8m1x7 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x8_t __riscv_vlsseg8e8_v_u8m1x8 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2x2_t __riscv_vlsseg2e8_v_u8m2x2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2x3_t __riscv_vlsseg3e8_v_u8m2x3 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2x4_t __riscv_vlsseg4e8_v_u8m2x4 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m4x2_t __riscv_vlsseg2e8_v_u8m4x2 (const uint8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x2_t __riscv_vlsseg2e8_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x3_t __riscv_vlsseg3e8_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x4_t __riscv_vlsseg4e8_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x5_t __riscv_vlsseg5e8_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x6_t __riscv_vlsseg6e8_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x7_t __riscv_vlsseg7e8_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf8x8_t __riscv_vlsseg8e8_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x2_t __riscv_vlsseg2e8_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x3_t __riscv_vlsseg3e8_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x4_t __riscv_vlsseg4e8_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x5_t __riscv_vlsseg5e8_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x6_t __riscv_vlsseg6e8_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x7_t __riscv_vlsseg7e8_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf4x8_t __riscv_vlsseg8e8_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x2_t __riscv_vlsseg2e8_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x3_t __riscv_vlsseg3e8_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x4_t __riscv_vlsseg4e8_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x5_t __riscv_vlsseg5e8_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x6_t __riscv_vlsseg6e8_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x7_t __riscv_vlsseg7e8_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8mf2x8_t __riscv_vlsseg8e8_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x2_t __riscv_vlsseg2e8_v_i8m1x2_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x3_t __riscv_vlsseg3e8_v_i8m1x3_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x4_t __riscv_vlsseg4e8_v_i8m1x4_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x5_t __riscv_vlsseg5e8_v_i8m1x5_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x6_t __riscv_vlsseg6e8_v_i8m1x6_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x7_t __riscv_vlsseg7e8_v_i8m1x7_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m1x8_t __riscv_vlsseg8e8_v_i8m1x8_m (vbool8_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2x2_t __riscv_vlsseg2e8_v_i8m2x2_m (vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2x3_t __riscv_vlsseg3e8_v_i8m2x3_m (vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m2x4_t __riscv_vlsseg4e8_v_i8m2x4_m (vbool4_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vint8m4x2_t __riscv_vlsseg2e8_v_i8m4x2_m (vbool2_t mask, const int8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x2_t __riscv_vlsseg2e8_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x3_t __riscv_vlsseg3e8_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x4_t __riscv_vlsseg4e8_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x5_t __riscv_vlsseg5e8_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x6_t __riscv_vlsseg6e8_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x7_t __riscv_vlsseg7e8_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf8x8_t __riscv_vlsseg8e8_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x2_t __riscv_vlsseg2e8_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x3_t __riscv_vlsseg3e8_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x4_t __riscv_vlsseg4e8_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x5_t __riscv_vlsseg5e8_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x6_t __riscv_vlsseg6e8_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x7_t __riscv_vlsseg7e8_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf4x8_t __riscv_vlsseg8e8_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x2_t __riscv_vlsseg2e8_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x3_t __riscv_vlsseg3e8_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x4_t __riscv_vlsseg4e8_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x5_t __riscv_vlsseg5e8_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x6_t __riscv_vlsseg6e8_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x7_t __riscv_vlsseg7e8_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8mf2x8_t __riscv_vlsseg8e8_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x2_t __riscv_vlsseg2e8_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x3_t __riscv_vlsseg3e8_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x4_t __riscv_vlsseg4e8_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x5_t __riscv_vlsseg5e8_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x6_t __riscv_vlsseg6e8_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x7_t __riscv_vlsseg7e8_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m1x8_t __riscv_vlsseg8e8_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2x2_t __riscv_vlsseg2e8_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2x3_t __riscv_vlsseg3e8_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m2x4_t __riscv_vlsseg4e8_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
vuint8m4x2_t __riscv_vlsseg2e8_v_u8m4x2_m (vbool2_t mask, const uint8_t *base, ptrdiff_t bstride, size_t vl);
7.2. vlsseg<nf>e16.v
- Mnemonic
vlsseg2e16.v vd, (rs1), rs2, vm # nf=2
vlsseg3e16.v vd, (rs1), rs2, vm # nf=3
vlsseg4e16.v vd, (rs1), rs2, vm # nf=4
vlsseg5e16.v vd, (rs1), rs2, vm # nf=5
vlsseg6e16.v vd, (rs1), rs2, vm # nf=6
vlsseg7e16.v vd, (rs1), rs2, vm # nf=7
vlsseg8e16.v vd, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
16-bit Strided segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse16_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vlsseg2e16_v_f16mf4x2 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x3_t __riscv_vlsseg3e16_v_f16mf4x3 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x4_t __riscv_vlsseg4e16_v_f16mf4x4 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x5_t __riscv_vlsseg5e16_v_f16mf4x5 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x6_t __riscv_vlsseg6e16_v_f16mf4x6 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x7_t __riscv_vlsseg7e16_v_f16mf4x7 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x8_t __riscv_vlsseg8e16_v_f16mf4x8 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x2_t __riscv_vlsseg2e16_v_f16mf2x2 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x3_t __riscv_vlsseg3e16_v_f16mf2x3 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x4_t __riscv_vlsseg4e16_v_f16mf2x4 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x5_t __riscv_vlsseg5e16_v_f16mf2x5 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x6_t __riscv_vlsseg6e16_v_f16mf2x6 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x7_t __riscv_vlsseg7e16_v_f16mf2x7 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x8_t __riscv_vlsseg8e16_v_f16mf2x8 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x2_t __riscv_vlsseg2e16_v_f16m1x2 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x3_t __riscv_vlsseg3e16_v_f16m1x3 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x4_t __riscv_vlsseg4e16_v_f16m1x4 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x5_t __riscv_vlsseg5e16_v_f16m1x5 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x6_t __riscv_vlsseg6e16_v_f16m1x6 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x7_t __riscv_vlsseg7e16_v_f16m1x7 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x8_t __riscv_vlsseg8e16_v_f16m1x8 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2x2_t __riscv_vlsseg2e16_v_f16m2x2 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2x3_t __riscv_vlsseg3e16_v_f16m2x3 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2x4_t __riscv_vlsseg4e16_v_f16m2x4 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m4x2_t __riscv_vlsseg2e16_v_f16m4x2 (const float16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x2_t __riscv_vlsseg2e16_v_i16mf4x2 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x3_t __riscv_vlsseg3e16_v_i16mf4x3 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x4_t __riscv_vlsseg4e16_v_i16mf4x4 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x5_t __riscv_vlsseg5e16_v_i16mf4x5 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x6_t __riscv_vlsseg6e16_v_i16mf4x6 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x7_t __riscv_vlsseg7e16_v_i16mf4x7 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x8_t __riscv_vlsseg8e16_v_i16mf4x8 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x2_t __riscv_vlsseg2e16_v_i16mf2x2 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x3_t __riscv_vlsseg3e16_v_i16mf2x3 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x4_t __riscv_vlsseg4e16_v_i16mf2x4 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x5_t __riscv_vlsseg5e16_v_i16mf2x5 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x6_t __riscv_vlsseg6e16_v_i16mf2x6 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x7_t __riscv_vlsseg7e16_v_i16mf2x7 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x8_t __riscv_vlsseg8e16_v_i16mf2x8 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x2_t __riscv_vlsseg2e16_v_i16m1x2 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x3_t __riscv_vlsseg3e16_v_i16m1x3 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x4_t __riscv_vlsseg4e16_v_i16m1x4 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x5_t __riscv_vlsseg5e16_v_i16m1x5 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x6_t __riscv_vlsseg6e16_v_i16m1x6 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x7_t __riscv_vlsseg7e16_v_i16m1x7 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x8_t __riscv_vlsseg8e16_v_i16m1x8 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2x2_t __riscv_vlsseg2e16_v_i16m2x2 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2x3_t __riscv_vlsseg3e16_v_i16m2x3 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2x4_t __riscv_vlsseg4e16_v_i16m2x4 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m4x2_t __riscv_vlsseg2e16_v_i16m4x2 (const int16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x2_t __riscv_vlsseg2e16_v_u16mf4x2 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x3_t __riscv_vlsseg3e16_v_u16mf4x3 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x4_t __riscv_vlsseg4e16_v_u16mf4x4 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x5_t __riscv_vlsseg5e16_v_u16mf4x5 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x6_t __riscv_vlsseg6e16_v_u16mf4x6 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x7_t __riscv_vlsseg7e16_v_u16mf4x7 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x8_t __riscv_vlsseg8e16_v_u16mf4x8 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x2_t __riscv_vlsseg2e16_v_u16mf2x2 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x3_t __riscv_vlsseg3e16_v_u16mf2x3 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x4_t __riscv_vlsseg4e16_v_u16mf2x4 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x5_t __riscv_vlsseg5e16_v_u16mf2x5 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x6_t __riscv_vlsseg6e16_v_u16mf2x6 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x7_t __riscv_vlsseg7e16_v_u16mf2x7 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x8_t __riscv_vlsseg8e16_v_u16mf2x8 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x2_t __riscv_vlsseg2e16_v_u16m1x2 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x3_t __riscv_vlsseg3e16_v_u16m1x3 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x4_t __riscv_vlsseg4e16_v_u16m1x4 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x5_t __riscv_vlsseg5e16_v_u16m1x5 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x6_t __riscv_vlsseg6e16_v_u16m1x6 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x7_t __riscv_vlsseg7e16_v_u16m1x7 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x8_t __riscv_vlsseg8e16_v_u16m1x8 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2x2_t __riscv_vlsseg2e16_v_u16m2x2 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2x3_t __riscv_vlsseg3e16_v_u16m2x3 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2x4_t __riscv_vlsseg4e16_v_u16m2x4 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m4x2_t __riscv_vlsseg2e16_v_u16m4x2 (const uint16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x2_t __riscv_vlsseg2e16_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x3_t __riscv_vlsseg3e16_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x4_t __riscv_vlsseg4e16_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x5_t __riscv_vlsseg5e16_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x6_t __riscv_vlsseg6e16_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x7_t __riscv_vlsseg7e16_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf4x8_t __riscv_vlsseg8e16_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x2_t __riscv_vlsseg2e16_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x3_t __riscv_vlsseg3e16_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x4_t __riscv_vlsseg4e16_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x5_t __riscv_vlsseg5e16_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x6_t __riscv_vlsseg6e16_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x7_t __riscv_vlsseg7e16_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16mf2x8_t __riscv_vlsseg8e16_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x2_t __riscv_vlsseg2e16_v_f16m1x2_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x3_t __riscv_vlsseg3e16_v_f16m1x3_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x4_t __riscv_vlsseg4e16_v_f16m1x4_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x5_t __riscv_vlsseg5e16_v_f16m1x5_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x6_t __riscv_vlsseg6e16_v_f16m1x6_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x7_t __riscv_vlsseg7e16_v_f16m1x7_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m1x8_t __riscv_vlsseg8e16_v_f16m1x8_m (vbool16_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2x2_t __riscv_vlsseg2e16_v_f16m2x2_m (vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2x3_t __riscv_vlsseg3e16_v_f16m2x3_m (vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m2x4_t __riscv_vlsseg4e16_v_f16m2x4_m (vbool8_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vfloat16m4x2_t __riscv_vlsseg2e16_v_f16m4x2_m (vbool4_t mask, const float16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x2_t __riscv_vlsseg2e16_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x3_t __riscv_vlsseg3e16_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x4_t __riscv_vlsseg4e16_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x5_t __riscv_vlsseg5e16_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x6_t __riscv_vlsseg6e16_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x7_t __riscv_vlsseg7e16_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf4x8_t __riscv_vlsseg8e16_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x2_t __riscv_vlsseg2e16_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x3_t __riscv_vlsseg3e16_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x4_t __riscv_vlsseg4e16_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x5_t __riscv_vlsseg5e16_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x6_t __riscv_vlsseg6e16_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x7_t __riscv_vlsseg7e16_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16mf2x8_t __riscv_vlsseg8e16_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x2_t __riscv_vlsseg2e16_v_i16m1x2_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x3_t __riscv_vlsseg3e16_v_i16m1x3_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x4_t __riscv_vlsseg4e16_v_i16m1x4_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x5_t __riscv_vlsseg5e16_v_i16m1x5_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x6_t __riscv_vlsseg6e16_v_i16m1x6_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x7_t __riscv_vlsseg7e16_v_i16m1x7_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m1x8_t __riscv_vlsseg8e16_v_i16m1x8_m (vbool16_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2x2_t __riscv_vlsseg2e16_v_i16m2x2_m (vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2x3_t __riscv_vlsseg3e16_v_i16m2x3_m (vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m2x4_t __riscv_vlsseg4e16_v_i16m2x4_m (vbool8_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vint16m4x2_t __riscv_vlsseg2e16_v_i16m4x2_m (vbool4_t mask, const int16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x2_t __riscv_vlsseg2e16_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x3_t __riscv_vlsseg3e16_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x4_t __riscv_vlsseg4e16_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x5_t __riscv_vlsseg5e16_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x6_t __riscv_vlsseg6e16_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x7_t __riscv_vlsseg7e16_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf4x8_t __riscv_vlsseg8e16_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x2_t __riscv_vlsseg2e16_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x3_t __riscv_vlsseg3e16_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x4_t __riscv_vlsseg4e16_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x5_t __riscv_vlsseg5e16_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x6_t __riscv_vlsseg6e16_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x7_t __riscv_vlsseg7e16_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16mf2x8_t __riscv_vlsseg8e16_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x2_t __riscv_vlsseg2e16_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x3_t __riscv_vlsseg3e16_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x4_t __riscv_vlsseg4e16_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x5_t __riscv_vlsseg5e16_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x6_t __riscv_vlsseg6e16_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x7_t __riscv_vlsseg7e16_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m1x8_t __riscv_vlsseg8e16_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2x2_t __riscv_vlsseg2e16_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2x3_t __riscv_vlsseg3e16_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m2x4_t __riscv_vlsseg4e16_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
vuint16m4x2_t __riscv_vlsseg2e16_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, ptrdiff_t bstride, size_t vl);
7.3. vlsseg<nf>e32.v
- Mnemonic
vlsseg2e32.v vd, (rs1), rs2, vm # nf=2
vlsseg3e32.v vd, (rs1), rs2, vm # nf=3
vlsseg4e32.v vd, (rs1), rs2, vm # nf=4
vlsseg5e32.v vd, (rs1), rs2, vm # nf=5
vlsseg6e32.v vd, (rs1), rs2, vm # nf=6
vlsseg7e32.v vd, (rs1), rs2, vm # nf=7
vlsseg8e32.v vd, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
32-bit strided segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse32_v.h
- Intrinsic Functions
Details
vfloat32mf2x2_t __riscv_vlsseg2e32_v_f32mf2x2 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x3_t __riscv_vlsseg3e32_v_f32mf2x3 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x4_t __riscv_vlsseg4e32_v_f32mf2x4 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x5_t __riscv_vlsseg5e32_v_f32mf2x5 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x6_t __riscv_vlsseg6e32_v_f32mf2x6 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x7_t __riscv_vlsseg7e32_v_f32mf2x7 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x8_t __riscv_vlsseg8e32_v_f32mf2x8 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x2_t __riscv_vlsseg2e32_v_f32m1x2 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x3_t __riscv_vlsseg3e32_v_f32m1x3 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x4_t __riscv_vlsseg4e32_v_f32m1x4 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x5_t __riscv_vlsseg5e32_v_f32m1x5 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x6_t __riscv_vlsseg6e32_v_f32m1x6 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x7_t __riscv_vlsseg7e32_v_f32m1x7 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x8_t __riscv_vlsseg8e32_v_f32m1x8 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2x2_t __riscv_vlsseg2e32_v_f32m2x2 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2x3_t __riscv_vlsseg3e32_v_f32m2x3 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2x4_t __riscv_vlsseg4e32_v_f32m2x4 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m4x2_t __riscv_vlsseg2e32_v_f32m4x2 (const float32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x2_t __riscv_vlsseg2e32_v_i32mf2x2 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x3_t __riscv_vlsseg3e32_v_i32mf2x3 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x4_t __riscv_vlsseg4e32_v_i32mf2x4 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x5_t __riscv_vlsseg5e32_v_i32mf2x5 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x6_t __riscv_vlsseg6e32_v_i32mf2x6 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x7_t __riscv_vlsseg7e32_v_i32mf2x7 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x8_t __riscv_vlsseg8e32_v_i32mf2x8 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x2_t __riscv_vlsseg2e32_v_i32m1x2 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x3_t __riscv_vlsseg3e32_v_i32m1x3 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x4_t __riscv_vlsseg4e32_v_i32m1x4 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x5_t __riscv_vlsseg5e32_v_i32m1x5 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x6_t __riscv_vlsseg6e32_v_i32m1x6 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x7_t __riscv_vlsseg7e32_v_i32m1x7 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x8_t __riscv_vlsseg8e32_v_i32m1x8 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2x2_t __riscv_vlsseg2e32_v_i32m2x2 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2x3_t __riscv_vlsseg3e32_v_i32m2x3 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2x4_t __riscv_vlsseg4e32_v_i32m2x4 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m4x2_t __riscv_vlsseg2e32_v_i32m4x2 (const int32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x2_t __riscv_vlsseg2e32_v_u32mf2x2 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x3_t __riscv_vlsseg3e32_v_u32mf2x3 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x4_t __riscv_vlsseg4e32_v_u32mf2x4 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x5_t __riscv_vlsseg5e32_v_u32mf2x5 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x6_t __riscv_vlsseg6e32_v_u32mf2x6 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x7_t __riscv_vlsseg7e32_v_u32mf2x7 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x8_t __riscv_vlsseg8e32_v_u32mf2x8 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x2_t __riscv_vlsseg2e32_v_u32m1x2 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x3_t __riscv_vlsseg3e32_v_u32m1x3 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x4_t __riscv_vlsseg4e32_v_u32m1x4 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x5_t __riscv_vlsseg5e32_v_u32m1x5 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x6_t __riscv_vlsseg6e32_v_u32m1x6 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x7_t __riscv_vlsseg7e32_v_u32m1x7 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x8_t __riscv_vlsseg8e32_v_u32m1x8 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2x2_t __riscv_vlsseg2e32_v_u32m2x2 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2x3_t __riscv_vlsseg3e32_v_u32m2x3 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2x4_t __riscv_vlsseg4e32_v_u32m2x4 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m4x2_t __riscv_vlsseg2e32_v_u32m4x2 (const uint32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x2_t __riscv_vlsseg2e32_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x3_t __riscv_vlsseg3e32_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x4_t __riscv_vlsseg4e32_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x5_t __riscv_vlsseg5e32_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x6_t __riscv_vlsseg6e32_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x7_t __riscv_vlsseg7e32_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32mf2x8_t __riscv_vlsseg8e32_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x2_t __riscv_vlsseg2e32_v_f32m1x2_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x3_t __riscv_vlsseg3e32_v_f32m1x3_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x4_t __riscv_vlsseg4e32_v_f32m1x4_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x5_t __riscv_vlsseg5e32_v_f32m1x5_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x6_t __riscv_vlsseg6e32_v_f32m1x6_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x7_t __riscv_vlsseg7e32_v_f32m1x7_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m1x8_t __riscv_vlsseg8e32_v_f32m1x8_m (vbool32_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2x2_t __riscv_vlsseg2e32_v_f32m2x2_m (vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2x3_t __riscv_vlsseg3e32_v_f32m2x3_m (vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m2x4_t __riscv_vlsseg4e32_v_f32m2x4_m (vbool16_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vfloat32m4x2_t __riscv_vlsseg2e32_v_f32m4x2_m (vbool8_t mask, const float32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x2_t __riscv_vlsseg2e32_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x3_t __riscv_vlsseg3e32_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x4_t __riscv_vlsseg4e32_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x5_t __riscv_vlsseg5e32_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x6_t __riscv_vlsseg6e32_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x7_t __riscv_vlsseg7e32_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32mf2x8_t __riscv_vlsseg8e32_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x2_t __riscv_vlsseg2e32_v_i32m1x2_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x3_t __riscv_vlsseg3e32_v_i32m1x3_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x4_t __riscv_vlsseg4e32_v_i32m1x4_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x5_t __riscv_vlsseg5e32_v_i32m1x5_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x6_t __riscv_vlsseg6e32_v_i32m1x6_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x7_t __riscv_vlsseg7e32_v_i32m1x7_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m1x8_t __riscv_vlsseg8e32_v_i32m1x8_m (vbool32_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2x2_t __riscv_vlsseg2e32_v_i32m2x2_m (vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2x3_t __riscv_vlsseg3e32_v_i32m2x3_m (vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m2x4_t __riscv_vlsseg4e32_v_i32m2x4_m (vbool16_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vint32m4x2_t __riscv_vlsseg2e32_v_i32m4x2_m (vbool8_t mask, const int32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x2_t __riscv_vlsseg2e32_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x3_t __riscv_vlsseg3e32_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x4_t __riscv_vlsseg4e32_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x5_t __riscv_vlsseg5e32_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x6_t __riscv_vlsseg6e32_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x7_t __riscv_vlsseg7e32_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32mf2x8_t __riscv_vlsseg8e32_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x2_t __riscv_vlsseg2e32_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x3_t __riscv_vlsseg3e32_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x4_t __riscv_vlsseg4e32_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x5_t __riscv_vlsseg5e32_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x6_t __riscv_vlsseg6e32_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x7_t __riscv_vlsseg7e32_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m1x8_t __riscv_vlsseg8e32_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2x2_t __riscv_vlsseg2e32_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2x3_t __riscv_vlsseg3e32_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m2x4_t __riscv_vlsseg4e32_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
vuint32m4x2_t __riscv_vlsseg2e32_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl);
7.4. vlsseg<nf>e64.v
- Mnemonic
vlsseg2e64.v vd, (rs1), rs2, vm # nf=2
vlsseg3e64.v vd, (rs1), rs2, vm # nf=3
vlsseg4e64.v vd, (rs1), rs2, vm # nf=4
vlsseg5e64.v vd, (rs1), rs2, vm # nf=5
vlsseg6e64.v vd, (rs1), rs2, vm # nf=6
vlsseg7e64.v vd, (rs1), rs2, vm # nf=7
vlsseg8e64.v vd, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
64-bit strided segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vlse64_v.h
- Intrinsic Functions
Details
vfloat64m1x2_t __riscv_vlsseg2e64_v_f64m1x2 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x3_t __riscv_vlsseg3e64_v_f64m1x3 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x4_t __riscv_vlsseg4e64_v_f64m1x4 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x5_t __riscv_vlsseg5e64_v_f64m1x5 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x6_t __riscv_vlsseg6e64_v_f64m1x6 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x7_t __riscv_vlsseg7e64_v_f64m1x7 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x8_t __riscv_vlsseg8e64_v_f64m1x8 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2x2_t __riscv_vlsseg2e64_v_f64m2x2 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2x3_t __riscv_vlsseg3e64_v_f64m2x3 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2x4_t __riscv_vlsseg4e64_v_f64m2x4 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m4x2_t __riscv_vlsseg2e64_v_f64m4x2 (const float64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x2_t __riscv_vlsseg2e64_v_i64m1x2 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x3_t __riscv_vlsseg3e64_v_i64m1x3 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x4_t __riscv_vlsseg4e64_v_i64m1x4 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x5_t __riscv_vlsseg5e64_v_i64m1x5 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x6_t __riscv_vlsseg6e64_v_i64m1x6 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x7_t __riscv_vlsseg7e64_v_i64m1x7 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x8_t __riscv_vlsseg8e64_v_i64m1x8 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2x2_t __riscv_vlsseg2e64_v_i64m2x2 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2x3_t __riscv_vlsseg3e64_v_i64m2x3 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2x4_t __riscv_vlsseg4e64_v_i64m2x4 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m4x2_t __riscv_vlsseg2e64_v_i64m4x2 (const int64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x2_t __riscv_vlsseg2e64_v_u64m1x2 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x3_t __riscv_vlsseg3e64_v_u64m1x3 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x4_t __riscv_vlsseg4e64_v_u64m1x4 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x5_t __riscv_vlsseg5e64_v_u64m1x5 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x6_t __riscv_vlsseg6e64_v_u64m1x6 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x7_t __riscv_vlsseg7e64_v_u64m1x7 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x8_t __riscv_vlsseg8e64_v_u64m1x8 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2x2_t __riscv_vlsseg2e64_v_u64m2x2 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2x3_t __riscv_vlsseg3e64_v_u64m2x3 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2x4_t __riscv_vlsseg4e64_v_u64m2x4 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m4x2_t __riscv_vlsseg2e64_v_u64m4x2 (const uint64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x2_t __riscv_vlsseg2e64_v_f64m1x2_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x3_t __riscv_vlsseg3e64_v_f64m1x3_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x4_t __riscv_vlsseg4e64_v_f64m1x4_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x5_t __riscv_vlsseg5e64_v_f64m1x5_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x6_t __riscv_vlsseg6e64_v_f64m1x6_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x7_t __riscv_vlsseg7e64_v_f64m1x7_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m1x8_t __riscv_vlsseg8e64_v_f64m1x8_m (vbool64_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2x2_t __riscv_vlsseg2e64_v_f64m2x2_m (vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2x3_t __riscv_vlsseg3e64_v_f64m2x3_m (vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m2x4_t __riscv_vlsseg4e64_v_f64m2x4_m (vbool32_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vfloat64m4x2_t __riscv_vlsseg2e64_v_f64m4x2_m (vbool16_t mask, const float64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x2_t __riscv_vlsseg2e64_v_i64m1x2_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x3_t __riscv_vlsseg3e64_v_i64m1x3_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x4_t __riscv_vlsseg4e64_v_i64m1x4_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x5_t __riscv_vlsseg5e64_v_i64m1x5_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x6_t __riscv_vlsseg6e64_v_i64m1x6_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x7_t __riscv_vlsseg7e64_v_i64m1x7_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m1x8_t __riscv_vlsseg8e64_v_i64m1x8_m (vbool64_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2x2_t __riscv_vlsseg2e64_v_i64m2x2_m (vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2x3_t __riscv_vlsseg3e64_v_i64m2x3_m (vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m2x4_t __riscv_vlsseg4e64_v_i64m2x4_m (vbool32_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vint64m4x2_t __riscv_vlsseg2e64_v_i64m4x2_m (vbool16_t mask, const int64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x2_t __riscv_vlsseg2e64_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x3_t __riscv_vlsseg3e64_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x4_t __riscv_vlsseg4e64_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x5_t __riscv_vlsseg5e64_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x6_t __riscv_vlsseg6e64_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x7_t __riscv_vlsseg7e64_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m1x8_t __riscv_vlsseg8e64_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2x2_t __riscv_vlsseg2e64_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2x3_t __riscv_vlsseg3e64_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m2x4_t __riscv_vlsseg4e64_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
vuint64m4x2_t __riscv_vlsseg2e64_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, ptrdiff_t bstride, size_t vl);
7.5. vssseg<nf>e8.v
- Mnemonic
vssseg2e8.v vs3, (rs1), rs2, vm # nf=2
vssseg3e8.v vs3, (rs1), rs2, vm # nf=3
vssseg4e8.v vs3, (rs1), rs2, vm # nf=4
vssseg5e8.v vs3, (rs1), rs2, vm # nf=5
vssseg6e8.v vs3, (rs1), rs2, vm # nf=6
vssseg7e8.v vs3, (rs1), rs2, vm # nf=7
vssseg8e8.v vs3, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
8-bit strided segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse8_v.h
- Intrinsic Functions
Details
void __riscv_vssseg2e8_v_i8mf8x2 (int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8mf8x3 (int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8mf8x4 (int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8mf8x5 (int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8mf8x6 (int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8mf8x7 (int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8mf8x8 (int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8mf4x2 (int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8mf4x3 (int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8mf4x4 (int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8mf4x5 (int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8mf4x6 (int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8mf4x7 (int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8mf4x8 (int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8mf2x2 (int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8mf2x3 (int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8mf2x4 (int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8mf2x5 (int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8mf2x6 (int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8mf2x7 (int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8mf2x8 (int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8m1x2 (int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8m1x3 (int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8m1x4 (int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8m1x5 (int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8m1x6 (int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8m1x7 (int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8m1x8 (int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8m2x2 (int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8m2x3 (int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8m2x4 (int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8m4x2 (int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8mf8x2 (uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8mf8x3 (uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8mf8x4 (uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8mf8x5 (uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8mf8x6 (uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8mf8x7 (uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8mf8x8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8mf4x2 (uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8mf4x3 (uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8mf4x4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8mf4x5 (uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8mf4x6 (uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8mf4x7 (uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8mf4x8 (uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8mf2x2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8mf2x3 (uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8mf2x4 (uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8mf2x5 (uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8mf2x6 (uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8mf2x7 (uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8mf2x8 (uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8m1x2 (uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8m1x3 (uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8m1x4 (uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8m1x5 (uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8m1x6 (uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8m1x7 (uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8m1x8 (uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8m2x2 (uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8m2x3 (uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8m2x4 (uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8m4x2 (uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8mf8x2_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8mf8x3_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8mf8x4_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8mf8x5_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8mf8x6_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8mf8x7_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8mf8x8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8mf4x2_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8mf4x3_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8mf4x4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8mf4x5_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8mf4x6_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8mf4x7_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8mf4x8_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8mf2x2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8mf2x3_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8mf2x4_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8mf2x5_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8mf2x6_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8mf2x7_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8mf2x8_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8m1x2_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8m1x3_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8m1x4_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_i8m1x5_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_i8m1x6_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_i8m1x7_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_i8m1x8_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8m2x2_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_i8m2x3_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_i8m2x4_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_i8m4x2_m (vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8m1x2_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8m1x3_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8m1x4_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e8_v_u8m1x5_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e8_v_u8m1x6_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e8_v_u8m1x7_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e8_v_u8m1x8_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8m2x2_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e8_v_u8m2x3_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e8_v_u8m2x4_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e8_v_u8m4x2_m (vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4x2_t v_tuple, size_t vl);
7.6. vssseg2e16.v
- Mnemonic
vssseg2e16.v vs3, (rs1), rs2, vm # nf=2
vssseg3e16.v vs3, (rs1), rs2, vm # nf=3
vssseg4e16.v vs3, (rs1), rs2, vm # nf=4
vssseg5e16.v vs3, (rs1), rs2, vm # nf=5
vssseg6e16.v vs3, (rs1), rs2, vm # nf=6
vssseg7e16.v vs3, (rs1), rs2, vm # nf=7
vssseg8e16.v vs3, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
16-bit strided segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse16_v.h
- Intrinsic Functions
Details
void __riscv_vssseg2e16_v_f16mf4x2 (float16_t *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16mf4x3 (float16_t *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16mf4x4 (float16_t *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_f16mf4x5 (float16_t *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_f16mf4x6 (float16_t *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_f16mf4x7 (float16_t *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_f16mf4x8 (float16_t *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16mf2x2 (float16_t *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16mf2x3 (float16_t *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16mf2x4 (float16_t *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_f16mf2x5 (float16_t *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_f16mf2x6 (float16_t *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_f16mf2x7 (float16_t *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_f16mf2x8 (float16_t *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16m1x2 (float16_t *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16m1x3 (float16_t *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16m1x4 (float16_t *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_f16m1x5 (float16_t *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_f16m1x6 (float16_t *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_f16m1x7 (float16_t *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_f16m1x8 (float16_t *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16m2x2 (float16_t *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16m2x3 (float16_t *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16m2x4 (float16_t *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16m4x2 (float16_t *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16mf4x2 (int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16mf4x3 (int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16mf4x4 (int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_i16mf4x5 (int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_i16mf4x6 (int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_i16mf4x7 (int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_i16mf4x8 (int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16mf2x2 (int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16mf2x3 (int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16mf2x4 (int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_i16mf2x5 (int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_i16mf2x6 (int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_i16mf2x7 (int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_i16mf2x8 (int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16m1x2 (int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16m1x3 (int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16m1x4 (int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_i16m1x5 (int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_i16m1x6 (int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_i16m1x7 (int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_i16m1x8 (int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16m2x2 (int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16m2x3 (int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16m2x4 (int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16m4x2 (int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16mf4x2 (uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16mf4x3 (uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16mf4x4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_u16mf4x5 (uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_u16mf4x6 (uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_u16mf4x7 (uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_u16mf4x8 (uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16mf2x2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16mf2x3 (uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16mf2x4 (uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_u16mf2x5 (uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_u16mf2x6 (uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_u16mf2x7 (uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_u16mf2x8 (uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16m1x2 (uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16m1x3 (uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16m1x4 (uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_u16m1x5 (uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_u16m1x6 (uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_u16m1x7 (uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_u16m1x8 (uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16m2x2 (uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16m2x3 (uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16m2x4 (uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16m4x2 (uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16mf4x2_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16mf4x3_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16mf4x4_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_f16mf4x5_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_f16mf4x6_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_f16mf4x7_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_f16mf4x8_m (vbool64_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16mf2x2_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16mf2x3_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16mf2x4_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_f16mf2x5_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_f16mf2x6_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_f16mf2x7_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_f16mf2x8_m (vbool32_t mask, float16_t *base, ptrdiff_t bstride, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16m1x2_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16m1x3_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16m1x4_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_f16m1x5_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_f16m1x6_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_f16m1x7_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_f16m1x8_m (vbool16_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16m2x2_m (vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_f16m2x3_m (vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_f16m2x4_m (vbool8_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_f16m4x2_m (vbool4_t mask, float16_t *base, ptrdiff_t bstride, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16mf4x2_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16mf4x3_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16mf4x4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_i16mf4x5_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_i16mf4x6_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_i16mf4x7_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_i16mf4x8_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16mf2x2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16mf2x3_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16mf2x4_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_i16mf2x5_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_i16mf2x6_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_i16mf2x7_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_i16mf2x8_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16m1x2_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16m1x3_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16m1x4_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_i16m1x5_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_i16m1x6_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_i16m1x7_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_i16m1x8_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16m2x2_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_i16m2x3_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_i16m2x4_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_i16m4x2_m (vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16m1x2_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16m1x3_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16m1x4_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e16_v_u16m1x5_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e16_v_u16m1x6_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e16_v_u16m1x7_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e16_v_u16m1x8_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16m2x2_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e16_v_u16m2x3_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e16_v_u16m2x4_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e16_v_u16m4x2_m (vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4x2_t v_tuple, size_t vl);
7.7. vssseg2e32.v
- Mnemonic
vssseg2e32.v vs3, (rs1), rs2, vm # nf=2
vssseg3e32.v vs3, (rs1), rs2, vm # nf=3
vssseg4e32.v vs3, (rs1), rs2, vm # nf=4
vssseg5e32.v vs3, (rs1), rs2, vm # nf=5
vssseg6e32.v vs3, (rs1), rs2, vm # nf=6
vssseg7e32.v vs3, (rs1), rs2, vm # nf=7
vssseg8e32.v vs3, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
32-bit strided segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse32_v.h
- Intrinsic Functions
Details
void __riscv_vssseg2e32_v_f32mf2x2 (float32_t *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_f32mf2x3 (float32_t *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_f32mf2x4 (float32_t *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_f32mf2x5 (float32_t *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_f32mf2x6 (float32_t *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_f32mf2x7 (float32_t *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_f32mf2x8 (float32_t *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_f32m1x2 (float32_t *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_f32m1x3 (float32_t *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_f32m1x4 (float32_t *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_f32m1x5 (float32_t *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_f32m1x6 (float32_t *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_f32m1x7 (float32_t *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_f32m1x8 (float32_t *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_f32m2x2 (float32_t *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_f32m2x3 (float32_t *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_f32m2x4 (float32_t *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_f32m4x2 (float32_t *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32mf2x2 (int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_i32mf2x3 (int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_i32mf2x4 (int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_i32mf2x5 (int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_i32mf2x6 (int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_i32mf2x7 (int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_i32mf2x8 (int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32m1x2 (int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_i32m1x3 (int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_i32m1x4 (int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_i32m1x5 (int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_i32m1x6 (int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_i32m1x7 (int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_i32m1x8 (int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32m2x2 (int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_i32m2x3 (int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_i32m2x4 (int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32m4x2 (int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32mf2x2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_u32mf2x3 (uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_u32mf2x4 (uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_u32mf2x5 (uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_u32mf2x6 (uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_u32mf2x7 (uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_u32mf2x8 (uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32m1x2 (uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_u32m1x3 (uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_u32m1x4 (uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_u32m1x5 (uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_u32m1x6 (uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_u32m1x7 (uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_u32m1x8 (uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32m2x2 (uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_u32m2x3 (uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_u32m2x4 (uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32m4x2 (uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_f32mf2x2_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_f32mf2x3_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_f32mf2x4_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_f32mf2x5_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_f32mf2x6_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_f32mf2x7_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_f32mf2x8_m (vbool64_t mask, float32_t *base, ptrdiff_t bstride, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_f32m1x2_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_f32m1x3_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_f32m1x4_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_f32m1x5_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_f32m1x6_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_f32m1x7_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_f32m1x8_m (vbool32_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_f32m2x2_m (vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_f32m2x3_m (vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_f32m2x4_m (vbool16_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_f32m4x2_m (vbool8_t mask, float32_t *base, ptrdiff_t bstride, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32mf2x2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_i32mf2x3_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_i32mf2x4_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_i32mf2x5_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_i32mf2x6_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_i32mf2x7_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_i32mf2x8_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32m1x2_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_i32m1x3_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_i32m1x4_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_i32m1x5_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_i32m1x6_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_i32m1x7_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_i32m1x8_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32m2x2_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_i32m2x3_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_i32m2x4_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_i32m4x2_m (vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32m1x2_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_u32m1x3_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_u32m1x4_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e32_v_u32m1x5_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e32_v_u32m1x6_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e32_v_u32m1x7_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e32_v_u32m1x8_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32m2x2_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e32_v_u32m2x3_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e32_v_u32m2x4_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e32_v_u32m4x2_m (vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4x2_t v_tuple, size_t vl);
7.8. vssseg2e64.v
- Mnemonic
vssseg2e64.v vs3, (rs1), rs2, vm # nf=2
vssseg3e64.v vs3, (rs1), rs2, vm # nf=3
vssseg4e64.v vs3, (rs1), rs2, vm # nf=4
vssseg5e64.v vs3, (rs1), rs2, vm # nf=5
vssseg6e64.v vs3, (rs1), rs2, vm # nf=6
vssseg7e64.v vs3, (rs1), rs2, vm # nf=7
vssseg8e64.v vs3, (rs1), rs2, vm # nf=8
- Encoding
- Description
-
64-bit strided segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsse64_v.h
- Intrinsic Functions
Details
void __riscv_vssseg2e64_v_f64m1x2 (float64_t *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_f64m1x3 (float64_t *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_f64m1x4 (float64_t *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e64_v_f64m1x5 (float64_t *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e64_v_f64m1x6 (float64_t *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e64_v_f64m1x7 (float64_t *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e64_v_f64m1x8 (float64_t *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_f64m2x2 (float64_t *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_f64m2x3 (float64_t *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_f64m2x4 (float64_t *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_f64m4x2 (float64_t *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_i64m1x2 (int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_i64m1x3 (int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_i64m1x4 (int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e64_v_i64m1x5 (int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e64_v_i64m1x6 (int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e64_v_i64m1x7 (int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e64_v_i64m1x8 (int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_i64m2x2 (int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_i64m2x3 (int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_i64m2x4 (int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_i64m4x2 (int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_u64m1x2 (uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_u64m1x3 (uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_u64m1x4 (uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e64_v_u64m1x5 (uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e64_v_u64m1x6 (uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e64_v_u64m1x7 (uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e64_v_u64m1x8 (uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_u64m2x2 (uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_u64m2x3 (uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_u64m2x4 (uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_u64m4x2 (uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_f64m1x2_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_f64m1x3_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_f64m1x4_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e64_v_f64m1x5_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e64_v_f64m1x6_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e64_v_f64m1x7_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e64_v_f64m1x8_m (vbool64_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_f64m2x2_m (vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_f64m2x3_m (vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_f64m2x4_m (vbool32_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_f64m4x2_m (vbool16_t mask, float64_t *base, ptrdiff_t bstride, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_i64m1x2_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_i64m1x3_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_i64m1x4_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e64_v_i64m1x5_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e64_v_i64m1x6_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e64_v_i64m1x7_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e64_v_i64m1x8_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_i64m2x2_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_i64m2x3_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_i64m2x4_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_i64m4x2_m (vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_u64m1x2_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_u64m1x3_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_u64m1x4_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vssseg5e64_v_u64m1x5_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vssseg6e64_v_u64m1x6_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vssseg7e64_v_u64m1x7_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vssseg8e64_v_u64m1x8_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_u64m2x2_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vssseg3e64_v_u64m2x3_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vssseg4e64_v_u64m2x4_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vssseg2e64_v_u64m4x2_m (vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4x2_t v_tuple, size_t vl);
8. Vector Indexed Segment Loads and Stores
- Mnemonic
vluxseg2ei8.v vd, (rs1), vs2, vm # nf=2
vluxseg3ei8.v vd, (rs1), vs2, vm # nf=3
vluxseg4ei8.v vd, (rs1), vs2, vm # nf=4
vluxseg5ei8.v vd, (rs1), vs2, vm # nf=5
vluxseg6ei8.v vd, (rs1), vs2, vm # nf=6
vluxseg7ei8.v vd, (rs1), vs2, vm # nf=7
vluxseg8ei8.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment loads (vluxseg): each segment is loaded from an address computed from the base register plus an 8-bit index element, with no ordering guarantee among the element accesses
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei8_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vluxseg2ei8_v_f16mf4x2 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei8_v_f16mf4x3 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei8_v_f16mf4x4 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei8_v_f16mf4x5 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei8_v_f16mf4x6 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei8_v_f16mf4x7 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei8_v_f16mf4x8 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei8_v_f16mf2x2 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei8_v_f16mf2x3 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei8_v_f16mf2x4 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei8_v_f16mf2x5 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei8_v_f16mf2x6 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei8_v_f16mf2x7 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei8_v_f16mf2x8 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei8_v_f16m1x2 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei8_v_f16m1x3 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei8_v_f16m1x4 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei8_v_f16m1x5 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei8_v_f16m1x6 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei8_v_f16m1x7 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei8_v_f16m1x8 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei8_v_f16m2x2 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei8_v_f16m2x3 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei8_v_f16m2x4 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vluxseg2ei8_v_f16m4x2 (const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vluxseg2ei8_v_f32mf2x2 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei8_v_f32mf2x3 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei8_v_f32mf2x4 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei8_v_f32mf2x5 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei8_v_f32mf2x6 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei8_v_f32mf2x7 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei8_v_f32mf2x8 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei8_v_f32m1x2 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei8_v_f32m1x3 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei8_v_f32m1x4 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei8_v_f32m1x5 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei8_v_f32m1x6 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei8_v_f32m1x7 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei8_v_f32m1x8 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei8_v_f32m2x2 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei8_v_f32m2x3 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei8_v_f32m2x4 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei8_v_f32m4x2 (const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vluxseg2ei8_v_f64m1x2 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei8_v_f64m1x3 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei8_v_f64m1x4 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei8_v_f64m1x5 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei8_v_f64m1x6 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei8_v_f64m1x7 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei8_v_f64m1x8 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei8_v_f64m2x2 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei8_v_f64m2x3 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei8_v_f64m2x4 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei8_v_f64m4x2 (const float64_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf8x2_t __riscv_vluxseg2ei8_v_i8mf8x2 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei8_v_i8mf8x3 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei8_v_i8mf8x4 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei8_v_i8mf8x5 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei8_v_i8mf8x6 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei8_v_i8mf8x7 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei8_v_i8mf8x8 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei8_v_i8mf4x2 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei8_v_i8mf4x3 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei8_v_i8mf4x4 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei8_v_i8mf4x5 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei8_v_i8mf4x6 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei8_v_i8mf4x7 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei8_v_i8mf4x8 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei8_v_i8mf2x2 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei8_v_i8mf2x3 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei8_v_i8mf2x4 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei8_v_i8mf2x5 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei8_v_i8mf2x6 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei8_v_i8mf2x7 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei8_v_i8mf2x8 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei8_v_i8m1x2 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei8_v_i8m1x3 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei8_v_i8m1x4 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei8_v_i8m1x5 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei8_v_i8m1x6 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei8_v_i8m1x7 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei8_v_i8m1x8 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2x2_t __riscv_vluxseg2ei8_v_i8m2x2 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x3_t __riscv_vluxseg3ei8_v_i8m2x3 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x4_t __riscv_vluxseg4ei8_v_i8m2x4 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4x2_t __riscv_vluxseg2ei8_v_i8m4x2 (const int8_t *base, vuint8m4_t bindex, size_t vl);
vint16mf4x2_t __riscv_vluxseg2ei8_v_i16mf4x2 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei8_v_i16mf4x3 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei8_v_i16mf4x4 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei8_v_i16mf4x5 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei8_v_i16mf4x6 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei8_v_i16mf4x7 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei8_v_i16mf4x8 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei8_v_i16mf2x2 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei8_v_i16mf2x3 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei8_v_i16mf2x4 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei8_v_i16mf2x5 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei8_v_i16mf2x6 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei8_v_i16mf2x7 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei8_v_i16mf2x8 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei8_v_i16m1x2 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei8_v_i16m1x3 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei8_v_i16m1x4 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei8_v_i16m1x5 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei8_v_i16m1x6 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei8_v_i16m1x7 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei8_v_i16m1x8 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei8_v_i16m2x2 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei8_v_i16m2x3 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei8_v_i16m2x4 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4x2_t __riscv_vluxseg2ei8_v_i16m4x2 (const int16_t *base, vuint8m2_t bindex, size_t vl);
vint32mf2x2_t __riscv_vluxseg2ei8_v_i32mf2x2 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei8_v_i32mf2x3 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei8_v_i32mf2x4 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei8_v_i32mf2x5 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei8_v_i32mf2x6 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei8_v_i32mf2x7 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei8_v_i32mf2x8 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei8_v_i32m1x2 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei8_v_i32m1x3 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei8_v_i32m1x4 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei8_v_i32m1x5 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei8_v_i32m1x6 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei8_v_i32m1x7 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei8_v_i32m1x8 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei8_v_i32m2x2 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei8_v_i32m2x3 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei8_v_i32m2x4 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei8_v_i32m4x2 (const int32_t *base, vuint8m1_t bindex, size_t vl);
vint64m1x2_t __riscv_vluxseg2ei8_v_i64m1x2 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei8_v_i64m1x3 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei8_v_i64m1x4 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei8_v_i64m1x5 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei8_v_i64m1x6 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei8_v_i64m1x7 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei8_v_i64m1x8 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei8_v_i64m2x2 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei8_v_i64m2x3 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei8_v_i64m2x4 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei8_v_i64m4x2 (const int64_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vluxseg2ei8_v_u8mf8x2 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei8_v_u8mf8x3 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei8_v_u8mf8x4 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei8_v_u8mf8x5 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei8_v_u8mf8x6 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei8_v_u8mf8x7 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei8_v_u8mf8x8 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei8_v_u8mf4x2 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei8_v_u8mf4x3 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei8_v_u8mf4x4 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei8_v_u8mf4x5 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei8_v_u8mf4x6 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei8_v_u8mf4x7 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei8_v_u8mf4x8 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei8_v_u8mf2x2 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei8_v_u8mf2x3 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei8_v_u8mf2x4 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei8_v_u8mf2x5 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei8_v_u8mf2x6 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei8_v_u8mf2x7 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei8_v_u8mf2x8 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei8_v_u8m1x2 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei8_v_u8m1x3 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei8_v_u8m1x4 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei8_v_u8m1x5 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei8_v_u8m1x6 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei8_v_u8m1x7 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei8_v_u8m1x8 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2x2_t __riscv_vluxseg2ei8_v_u8m2x2 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x3_t __riscv_vluxseg3ei8_v_u8m2x3 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x4_t __riscv_vluxseg4ei8_v_u8m2x4 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4x2_t __riscv_vluxseg2ei8_v_u8m4x2 (const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vluxseg2ei8_v_u16mf4x2 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei8_v_u16mf4x3 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei8_v_u16mf4x4 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei8_v_u16mf4x5 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei8_v_u16mf4x6 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei8_v_u16mf4x7 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei8_v_u16mf4x8 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei8_v_u16mf2x2 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei8_v_u16mf2x3 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei8_v_u16mf2x4 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei8_v_u16mf2x5 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei8_v_u16mf2x6 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei8_v_u16mf2x7 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei8_v_u16mf2x8 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei8_v_u16m1x2 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei8_v_u16m1x3 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei8_v_u16m1x4 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei8_v_u16m1x5 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei8_v_u16m1x6 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei8_v_u16m1x7 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei8_v_u16m1x8 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei8_v_u16m2x2 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei8_v_u16m2x3 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei8_v_u16m2x4 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4x2_t __riscv_vluxseg2ei8_v_u16m4x2 (const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vluxseg2ei8_v_u32mf2x2 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei8_v_u32mf2x3 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei8_v_u32mf2x4 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei8_v_u32mf2x5 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei8_v_u32mf2x6 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei8_v_u32mf2x7 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei8_v_u32mf2x8 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei8_v_u32m1x2 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei8_v_u32m1x3 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei8_v_u32m1x4 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei8_v_u32m1x5 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei8_v_u32m1x6 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei8_v_u32m1x7 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei8_v_u32m1x8 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei8_v_u32m2x2 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei8_v_u32m2x3 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei8_v_u32m2x4 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei8_v_u32m4x2 (const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint64m1x2_t __riscv_vluxseg2ei8_v_u64m1x2 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei8_v_u64m1x3 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei8_v_u64m1x4 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei8_v_u64m1x5 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei8_v_u64m1x6 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei8_v_u64m1x7 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei8_v_u64m1x8 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei8_v_u64m2x2 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei8_v_u64m2x3 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei8_v_u64m2x4 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei8_v_u64m4x2 (const uint64_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16mf4x2_t __riscv_vluxseg2ei8_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei8_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei8_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei8_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei8_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei8_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei8_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei8_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei8_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei8_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei8_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei8_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei8_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei8_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei8_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei8_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei8_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei8_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei8_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei8_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei8_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei8_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei8_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei8_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vluxseg2ei8_v_f16m4x2_m (vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vluxseg2ei8_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei8_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei8_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei8_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei8_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei8_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei8_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei8_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei8_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei8_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei8_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei8_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei8_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei8_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei8_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei8_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei8_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei8_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vluxseg2ei8_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei8_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei8_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei8_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei8_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei8_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei8_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei8_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei8_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei8_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei8_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf8x2_t __riscv_vluxseg2ei8_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei8_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei8_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei8_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei8_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei8_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei8_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei8_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei8_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei8_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei8_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei8_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei8_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei8_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei8_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei8_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei8_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei8_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei8_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei8_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei8_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei8_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei8_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei8_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei8_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei8_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei8_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei8_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2x2_t __riscv_vluxseg2ei8_v_i8m2x2_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x3_t __riscv_vluxseg3ei8_v_i8m2x3_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x4_t __riscv_vluxseg4ei8_v_i8m2x4_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4x2_t __riscv_vluxseg2ei8_v_i8m4x2_m (vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl);
vint16mf4x2_t __riscv_vluxseg2ei8_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei8_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei8_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei8_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei8_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei8_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei8_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei8_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei8_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei8_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei8_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei8_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei8_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei8_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei8_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei8_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei8_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei8_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei8_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei8_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei8_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei8_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei8_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei8_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4x2_t __riscv_vluxseg2ei8_v_i16m4x2_m (vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl);
vint32mf2x2_t __riscv_vluxseg2ei8_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei8_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei8_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei8_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei8_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei8_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei8_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei8_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei8_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei8_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei8_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei8_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei8_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei8_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei8_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei8_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei8_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei8_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl);
vint64m1x2_t __riscv_vluxseg2ei8_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei8_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei8_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei8_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei8_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei8_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei8_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei8_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei8_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei8_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei8_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vluxseg2ei8_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei8_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei8_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei8_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei8_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei8_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei8_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei8_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei8_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei8_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei8_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei8_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei8_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei8_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei8_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei8_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei8_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei8_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei8_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei8_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei8_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei8_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei8_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei8_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei8_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei8_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei8_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei8_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2x2_t __riscv_vluxseg2ei8_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x3_t __riscv_vluxseg3ei8_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x4_t __riscv_vluxseg4ei8_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4x2_t __riscv_vluxseg2ei8_v_u8m4x2_m (vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vluxseg2ei8_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei8_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei8_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei8_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei8_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei8_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei8_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei8_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei8_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei8_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei8_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei8_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei8_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei8_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei8_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei8_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei8_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei8_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei8_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei8_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei8_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei8_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei8_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei8_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4x2_t __riscv_vluxseg2ei8_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vluxseg2ei8_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei8_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei8_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei8_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei8_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei8_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei8_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei8_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei8_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei8_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei8_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei8_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei8_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei8_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei8_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei8_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei8_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei8_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint64m1x2_t __riscv_vluxseg2ei8_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei8_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei8_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei8_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei8_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei8_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei8_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei8_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei8_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei8_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei8_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl);
- Mnemonic
vluxseg2ei16.v vd, (rs1), vs2, vm # nf=2
vluxseg3ei16.v vd, (rs1), vs2, vm # nf=3
vluxseg4ei16.v vd, (rs1), vs2, vm # nf=4
vluxseg5ei16.v vd, (rs1), vs2, vm # nf=5
vluxseg6ei16.v vd, (rs1), vs2, vm # nf=6
vluxseg7ei16.v vd, (rs1), vs2, vm # nf=7
vluxseg8ei16.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei16_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vluxseg2ei16_v_f16mf4x2 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei16_v_f16mf4x3 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei16_v_f16mf4x4 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei16_v_f16mf4x5 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei16_v_f16mf4x6 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei16_v_f16mf4x7 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei16_v_f16mf4x8 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei16_v_f16mf2x2 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei16_v_f16mf2x3 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei16_v_f16mf2x4 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei16_v_f16mf2x5 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei16_v_f16mf2x6 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei16_v_f16mf2x7 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei16_v_f16mf2x8 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei16_v_f16m1x2 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei16_v_f16m1x3 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei16_v_f16m1x4 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei16_v_f16m1x5 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei16_v_f16m1x6 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei16_v_f16m1x7 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei16_v_f16m1x8 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei16_v_f16m2x2 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei16_v_f16m2x3 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei16_v_f16m2x4 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vluxseg2ei16_v_f16m4x2 (const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vluxseg2ei16_v_f32mf2x2 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei16_v_f32mf2x3 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei16_v_f32mf2x4 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei16_v_f32mf2x5 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei16_v_f32mf2x6 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei16_v_f32mf2x7 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei16_v_f32mf2x8 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei16_v_f32m1x2 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei16_v_f32m1x3 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei16_v_f32m1x4 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei16_v_f32m1x5 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei16_v_f32m1x6 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei16_v_f32m1x7 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei16_v_f32m1x8 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei16_v_f32m2x2 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei16_v_f32m2x3 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei16_v_f32m2x4 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei16_v_f32m4x2 (const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vluxseg2ei16_v_f64m1x2 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei16_v_f64m1x3 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei16_v_f64m1x4 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei16_v_f64m1x5 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei16_v_f64m1x6 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei16_v_f64m1x7 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei16_v_f64m1x8 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei16_v_f64m2x2 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei16_v_f64m2x3 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei16_v_f64m2x4 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei16_v_f64m4x2 (const float64_t *base, vuint16m1_t bindex, size_t vl);
vint8mf8x2_t __riscv_vluxseg2ei16_v_i8mf8x2 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei16_v_i8mf8x3 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei16_v_i8mf8x4 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei16_v_i8mf8x5 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei16_v_i8mf8x6 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei16_v_i8mf8x7 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei16_v_i8mf8x8 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei16_v_i8mf4x2 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei16_v_i8mf4x3 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei16_v_i8mf4x4 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei16_v_i8mf4x5 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei16_v_i8mf4x6 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei16_v_i8mf4x7 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei16_v_i8mf4x8 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei16_v_i8mf2x2 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei16_v_i8mf2x3 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei16_v_i8mf2x4 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei16_v_i8mf2x5 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei16_v_i8mf2x6 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei16_v_i8mf2x7 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei16_v_i8mf2x8 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei16_v_i8m1x2 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei16_v_i8m1x3 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei16_v_i8m1x4 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei16_v_i8m1x5 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei16_v_i8m1x6 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei16_v_i8m1x7 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei16_v_i8m1x8 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2x2_t __riscv_vluxseg2ei16_v_i8m2x2 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x3_t __riscv_vluxseg3ei16_v_i8m2x3 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x4_t __riscv_vluxseg4ei16_v_i8m2x4 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4x2_t __riscv_vluxseg2ei16_v_i8m4x2 (const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vluxseg2ei16_v_i16mf4x2 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei16_v_i16mf4x3 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei16_v_i16mf4x4 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei16_v_i16mf4x5 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei16_v_i16mf4x6 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei16_v_i16mf4x7 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei16_v_i16mf4x8 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei16_v_i16mf2x2 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei16_v_i16mf2x3 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei16_v_i16mf2x4 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei16_v_i16mf2x5 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei16_v_i16mf2x6 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei16_v_i16mf2x7 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei16_v_i16mf2x8 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei16_v_i16m1x2 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei16_v_i16m1x3 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei16_v_i16m1x4 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei16_v_i16m1x5 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei16_v_i16m1x6 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei16_v_i16m1x7 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei16_v_i16m1x8 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei16_v_i16m2x2 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei16_v_i16m2x3 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei16_v_i16m2x4 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4x2_t __riscv_vluxseg2ei16_v_i16m4x2 (const int16_t *base, vuint16m4_t bindex, size_t vl);
vint32mf2x2_t __riscv_vluxseg2ei16_v_i32mf2x2 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei16_v_i32mf2x3 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei16_v_i32mf2x4 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei16_v_i32mf2x5 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei16_v_i32mf2x6 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei16_v_i32mf2x7 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei16_v_i32mf2x8 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei16_v_i32m1x2 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei16_v_i32m1x3 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei16_v_i32m1x4 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei16_v_i32m1x5 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei16_v_i32m1x6 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei16_v_i32m1x7 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei16_v_i32m1x8 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei16_v_i32m2x2 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei16_v_i32m2x3 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei16_v_i32m2x4 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei16_v_i32m4x2 (const int32_t *base, vuint16m2_t bindex, size_t vl);
vint64m1x2_t __riscv_vluxseg2ei16_v_i64m1x2 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei16_v_i64m1x3 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei16_v_i64m1x4 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei16_v_i64m1x5 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei16_v_i64m1x6 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei16_v_i64m1x7 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei16_v_i64m1x8 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei16_v_i64m2x2 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei16_v_i64m2x3 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei16_v_i64m2x4 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei16_v_i64m4x2 (const int64_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vluxseg2ei16_v_u8mf8x2 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei16_v_u8mf8x3 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei16_v_u8mf8x4 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei16_v_u8mf8x5 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei16_v_u8mf8x6 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei16_v_u8mf8x7 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei16_v_u8mf8x8 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei16_v_u8mf4x2 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei16_v_u8mf4x3 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei16_v_u8mf4x4 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei16_v_u8mf4x5 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei16_v_u8mf4x6 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei16_v_u8mf4x7 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei16_v_u8mf4x8 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei16_v_u8mf2x2 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei16_v_u8mf2x3 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei16_v_u8mf2x4 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei16_v_u8mf2x5 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei16_v_u8mf2x6 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei16_v_u8mf2x7 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei16_v_u8mf2x8 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei16_v_u8m1x2 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei16_v_u8m1x3 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei16_v_u8m1x4 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei16_v_u8m1x5 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei16_v_u8m1x6 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei16_v_u8m1x7 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei16_v_u8m1x8 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2x2_t __riscv_vluxseg2ei16_v_u8m2x2 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x3_t __riscv_vluxseg3ei16_v_u8m2x3 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x4_t __riscv_vluxseg4ei16_v_u8m2x4 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4x2_t __riscv_vluxseg2ei16_v_u8m4x2 (const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vluxseg2ei16_v_u16mf4x2 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei16_v_u16mf4x3 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei16_v_u16mf4x4 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei16_v_u16mf4x5 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei16_v_u16mf4x6 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei16_v_u16mf4x7 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei16_v_u16mf4x8 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei16_v_u16mf2x2 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei16_v_u16mf2x3 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei16_v_u16mf2x4 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei16_v_u16mf2x5 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei16_v_u16mf2x6 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei16_v_u16mf2x7 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei16_v_u16mf2x8 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei16_v_u16m1x2 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei16_v_u16m1x3 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei16_v_u16m1x4 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei16_v_u16m1x5 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei16_v_u16m1x6 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei16_v_u16m1x7 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei16_v_u16m1x8 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei16_v_u16m2x2 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei16_v_u16m2x3 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei16_v_u16m2x4 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4x2_t __riscv_vluxseg2ei16_v_u16m4x2 (const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vluxseg2ei16_v_u32mf2x2 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei16_v_u32mf2x3 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei16_v_u32mf2x4 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei16_v_u32mf2x5 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei16_v_u32mf2x6 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei16_v_u32mf2x7 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei16_v_u32mf2x8 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei16_v_u32m1x2 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei16_v_u32m1x3 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei16_v_u32m1x4 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei16_v_u32m1x5 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei16_v_u32m1x6 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei16_v_u32m1x7 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei16_v_u32m1x8 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei16_v_u32m2x2 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei16_v_u32m2x3 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei16_v_u32m2x4 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei16_v_u32m4x2 (const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint64m1x2_t __riscv_vluxseg2ei16_v_u64m1x2 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei16_v_u64m1x3 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei16_v_u64m1x4 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei16_v_u64m1x5 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei16_v_u64m1x6 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei16_v_u64m1x7 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei16_v_u64m1x8 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei16_v_u64m2x2 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei16_v_u64m2x3 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei16_v_u64m2x4 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei16_v_u64m4x2 (const uint64_t *base, vuint16m1_t bindex, size_t vl);
vfloat16mf4x2_t __riscv_vluxseg2ei16_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei16_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei16_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei16_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei16_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei16_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei16_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei16_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei16_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei16_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei16_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei16_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei16_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei16_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei16_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei16_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei16_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei16_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei16_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei16_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei16_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei16_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei16_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei16_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vluxseg2ei16_v_f16m4x2_m (vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vluxseg2ei16_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei16_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei16_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei16_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei16_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei16_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei16_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei16_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei16_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei16_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei16_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei16_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei16_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei16_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei16_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei16_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei16_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei16_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vluxseg2ei16_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei16_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei16_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei16_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei16_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei16_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei16_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei16_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei16_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei16_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei16_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl);
vint8mf8x2_t __riscv_vluxseg2ei16_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei16_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei16_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei16_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei16_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei16_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei16_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei16_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei16_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei16_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei16_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei16_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei16_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei16_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei16_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei16_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei16_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei16_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei16_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei16_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei16_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei16_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei16_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei16_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei16_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei16_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei16_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei16_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2x2_t __riscv_vluxseg2ei16_v_i8m2x2_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x3_t __riscv_vluxseg3ei16_v_i8m2x3_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x4_t __riscv_vluxseg4ei16_v_i8m2x4_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4x2_t __riscv_vluxseg2ei16_v_i8m4x2_m (vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vluxseg2ei16_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei16_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei16_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei16_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei16_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei16_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei16_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei16_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei16_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei16_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei16_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei16_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei16_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei16_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei16_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei16_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei16_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei16_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei16_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei16_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei16_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei16_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei16_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei16_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4x2_t __riscv_vluxseg2ei16_v_i16m4x2_m (vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl);
vint32mf2x2_t __riscv_vluxseg2ei16_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei16_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei16_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei16_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei16_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei16_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei16_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei16_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei16_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei16_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei16_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei16_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei16_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei16_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei16_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei16_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei16_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei16_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl);
vint64m1x2_t __riscv_vluxseg2ei16_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei16_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei16_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei16_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei16_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei16_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei16_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei16_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei16_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei16_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei16_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vluxseg2ei16_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei16_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei16_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei16_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei16_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei16_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei16_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei16_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei16_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei16_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei16_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei16_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei16_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei16_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei16_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei16_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei16_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei16_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei16_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei16_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei16_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei16_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei16_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei16_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei16_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei16_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei16_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei16_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2x2_t __riscv_vluxseg2ei16_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x3_t __riscv_vluxseg3ei16_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x4_t __riscv_vluxseg4ei16_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4x2_t __riscv_vluxseg2ei16_v_u8m4x2_m (vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vluxseg2ei16_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei16_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei16_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei16_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei16_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei16_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei16_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei16_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei16_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei16_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei16_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei16_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei16_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei16_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei16_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei16_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei16_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei16_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei16_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei16_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei16_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei16_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei16_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei16_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4x2_t __riscv_vluxseg2ei16_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vluxseg2ei16_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei16_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei16_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei16_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei16_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei16_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei16_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei16_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei16_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei16_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei16_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei16_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei16_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei16_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei16_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei16_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei16_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei16_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint64m1x2_t __riscv_vluxseg2ei16_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei16_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei16_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei16_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei16_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei16_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei16_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei16_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei16_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei16_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei16_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl);
- Mnemonic
vluxseg2ei32.v vd, (rs1), vs2, vm # nf=2
vluxseg3ei32.v vd, (rs1), vs2, vm # nf=3
vluxseg4ei32.v vd, (rs1), vs2, vm # nf=4
vluxseg5ei32.v vd, (rs1), vs2, vm # nf=5
vluxseg6ei32.v vd, (rs1), vs2, vm # nf=6
vluxseg7ei32.v vd, (rs1), vs2, vm # nf=7
vluxseg8ei32.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei32_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vluxseg2ei32_v_f16mf4x2 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei32_v_f16mf4x3 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei32_v_f16mf4x4 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei32_v_f16mf4x5 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei32_v_f16mf4x6 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei32_v_f16mf4x7 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei32_v_f16mf4x8 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei32_v_f16mf2x2 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei32_v_f16mf2x3 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei32_v_f16mf2x4 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei32_v_f16mf2x5 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei32_v_f16mf2x6 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei32_v_f16mf2x7 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei32_v_f16mf2x8 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei32_v_f16m1x2 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei32_v_f16m1x3 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei32_v_f16m1x4 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei32_v_f16m1x5 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei32_v_f16m1x6 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei32_v_f16m1x7 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei32_v_f16m1x8 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei32_v_f16m2x2 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei32_v_f16m2x3 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei32_v_f16m2x4 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vluxseg2ei32_v_f16m4x2 (const float16_t *base, vuint32m8_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vluxseg2ei32_v_f32mf2x2 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei32_v_f32mf2x3 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei32_v_f32mf2x4 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei32_v_f32mf2x5 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei32_v_f32mf2x6 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei32_v_f32mf2x7 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei32_v_f32mf2x8 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei32_v_f32m1x2 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei32_v_f32m1x3 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei32_v_f32m1x4 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei32_v_f32m1x5 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei32_v_f32m1x6 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei32_v_f32m1x7 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei32_v_f32m1x8 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei32_v_f32m2x2 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei32_v_f32m2x3 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei32_v_f32m2x4 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei32_v_f32m4x2 (const float32_t *base, vuint32m4_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vluxseg2ei32_v_f64m1x2 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei32_v_f64m1x3 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei32_v_f64m1x4 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei32_v_f64m1x5 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei32_v_f64m1x6 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei32_v_f64m1x7 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei32_v_f64m1x8 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei32_v_f64m2x2 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei32_v_f64m2x3 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei32_v_f64m2x4 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei32_v_f64m4x2 (const float64_t *base, vuint32m2_t bindex, size_t vl);
vint8mf8x2_t __riscv_vluxseg2ei32_v_i8mf8x2 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei32_v_i8mf8x3 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei32_v_i8mf8x4 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei32_v_i8mf8x5 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei32_v_i8mf8x6 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei32_v_i8mf8x7 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei32_v_i8mf8x8 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei32_v_i8mf4x2 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei32_v_i8mf4x3 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei32_v_i8mf4x4 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei32_v_i8mf4x5 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei32_v_i8mf4x6 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei32_v_i8mf4x7 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei32_v_i8mf4x8 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei32_v_i8mf2x2 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei32_v_i8mf2x3 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei32_v_i8mf2x4 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei32_v_i8mf2x5 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei32_v_i8mf2x6 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei32_v_i8mf2x7 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei32_v_i8mf2x8 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei32_v_i8m1x2 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei32_v_i8m1x3 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei32_v_i8m1x4 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei32_v_i8m1x5 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei32_v_i8m1x6 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei32_v_i8m1x7 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei32_v_i8m1x8 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2x2_t __riscv_vluxseg2ei32_v_i8m2x2 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x3_t __riscv_vluxseg3ei32_v_i8m2x3 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x4_t __riscv_vluxseg4ei32_v_i8m2x4 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vluxseg2ei32_v_i16mf4x2 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei32_v_i16mf4x3 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei32_v_i16mf4x4 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei32_v_i16mf4x5 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei32_v_i16mf4x6 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei32_v_i16mf4x7 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei32_v_i16mf4x8 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei32_v_i16mf2x2 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei32_v_i16mf2x3 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei32_v_i16mf2x4 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei32_v_i16mf2x5 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei32_v_i16mf2x6 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei32_v_i16mf2x7 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei32_v_i16mf2x8 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei32_v_i16m1x2 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei32_v_i16m1x3 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei32_v_i16m1x4 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei32_v_i16m1x5 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei32_v_i16m1x6 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei32_v_i16m1x7 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei32_v_i16m1x8 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei32_v_i16m2x2 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei32_v_i16m2x3 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei32_v_i16m2x4 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4x2_t __riscv_vluxseg2ei32_v_i16m4x2 (const int16_t *base, vuint32m8_t bindex, size_t vl);
vint32mf2x2_t __riscv_vluxseg2ei32_v_i32mf2x2 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei32_v_i32mf2x3 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei32_v_i32mf2x4 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei32_v_i32mf2x5 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei32_v_i32mf2x6 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei32_v_i32mf2x7 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei32_v_i32mf2x8 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei32_v_i32m1x2 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei32_v_i32m1x3 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei32_v_i32m1x4 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei32_v_i32m1x5 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei32_v_i32m1x6 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei32_v_i32m1x7 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei32_v_i32m1x8 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei32_v_i32m2x2 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei32_v_i32m2x3 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei32_v_i32m2x4 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei32_v_i32m4x2 (const int32_t *base, vuint32m4_t bindex, size_t vl);
vint64m1x2_t __riscv_vluxseg2ei32_v_i64m1x2 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei32_v_i64m1x3 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei32_v_i64m1x4 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei32_v_i64m1x5 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei32_v_i64m1x6 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei32_v_i64m1x7 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei32_v_i64m1x8 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei32_v_i64m2x2 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei32_v_i64m2x3 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei32_v_i64m2x4 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei32_v_i64m4x2 (const int64_t *base, vuint32m2_t bindex, size_t vl);
/*
 * Unmasked unordered indexed segment loads, unsigned element types.
 * Same scheme as the signed group: bindex carries 32-bit unsigned byte
 * offsets; index LMUL = data LMUL * (32 / SEW); tuple counts are limited
 * by NFIELDS * LMUL <= 8.
 */
/* uint8: 32-bit indices need 4x the data LMUL (mf8 -> mf2, ..., m2 -> m8) */
vuint8mf8x2_t __riscv_vluxseg2ei32_v_u8mf8x2 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei32_v_u8mf8x3 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei32_v_u8mf8x4 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei32_v_u8mf8x5 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei32_v_u8mf8x6 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei32_v_u8mf8x7 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei32_v_u8mf8x8 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei32_v_u8mf4x2 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei32_v_u8mf4x3 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei32_v_u8mf4x4 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei32_v_u8mf4x5 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei32_v_u8mf4x6 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei32_v_u8mf4x7 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei32_v_u8mf4x8 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei32_v_u8mf2x2 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei32_v_u8mf2x3 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei32_v_u8mf2x4 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei32_v_u8mf2x5 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei32_v_u8mf2x6 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei32_v_u8mf2x7 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei32_v_u8mf2x8 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei32_v_u8m1x2 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei32_v_u8m1x3 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei32_v_u8m1x4 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei32_v_u8m1x5 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei32_v_u8m1x6 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei32_v_u8m1x7 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei32_v_u8m1x8 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2x2_t __riscv_vluxseg2ei32_v_u8m2x2 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x3_t __riscv_vluxseg3ei32_v_u8m2x3 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x4_t __riscv_vluxseg4ei32_v_u8m2x4 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
/* uint16: index LMUL is 2x the data LMUL */
vuint16mf4x2_t __riscv_vluxseg2ei32_v_u16mf4x2 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei32_v_u16mf4x3 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei32_v_u16mf4x4 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei32_v_u16mf4x5 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei32_v_u16mf4x6 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei32_v_u16mf4x7 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei32_v_u16mf4x8 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei32_v_u16mf2x2 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei32_v_u16mf2x3 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei32_v_u16mf2x4 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei32_v_u16mf2x5 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei32_v_u16mf2x6 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei32_v_u16mf2x7 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei32_v_u16mf2x8 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei32_v_u16m1x2 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei32_v_u16m1x3 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei32_v_u16m1x4 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei32_v_u16m1x5 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei32_v_u16m1x6 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei32_v_u16m1x7 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei32_v_u16m1x8 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei32_v_u16m2x2 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei32_v_u16m2x3 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei32_v_u16m2x4 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4x2_t __riscv_vluxseg2ei32_v_u16m4x2 (const uint16_t *base, vuint32m8_t bindex, size_t vl);
/* uint32: index LMUL matches the data LMUL (SEW == index EEW) */
vuint32mf2x2_t __riscv_vluxseg2ei32_v_u32mf2x2 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei32_v_u32mf2x3 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei32_v_u32mf2x4 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei32_v_u32mf2x5 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei32_v_u32mf2x6 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei32_v_u32mf2x7 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei32_v_u32mf2x8 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei32_v_u32m1x2 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei32_v_u32m1x3 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei32_v_u32m1x4 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei32_v_u32m1x5 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei32_v_u32m1x6 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei32_v_u32m1x7 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei32_v_u32m1x8 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei32_v_u32m2x2 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei32_v_u32m2x3 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei32_v_u32m2x4 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei32_v_u32m4x2 (const uint32_t *base, vuint32m4_t bindex, size_t vl);
/* uint64: index LMUL is half the data LMUL */
vuint64m1x2_t __riscv_vluxseg2ei32_v_u64m1x2 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei32_v_u64m1x3 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei32_v_u64m1x4 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei32_v_u64m1x5 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei32_v_u64m1x6 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei32_v_u64m1x7 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei32_v_u64m1x8 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei32_v_u64m2x2 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei32_v_u64m2x3 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei32_v_u64m2x4 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei32_v_u64m4x2 (const uint64_t *base, vuint32m2_t bindex, size_t vl);
/*
 * Masked (`_m` suffix) unordered indexed segment loads, floating-point
 * element types. The leading mask operand is vbool<SEW/LMUL>_t (e.g.
 * f16mf4 -> vbool64_t, f64m4 -> vbool16_t). Only active elements are
 * loaded; the treatment of masked-off elements follows the intrinsic
 * spec's mask policy for the plain `_m` variant (no explicit policy
 * operand here).
 * NOTE(review): float16_t presumably requires the Zvfh/Zfh half-precision
 * extensions — confirm against the surrounding document.
 */
/* float16 */
vfloat16mf4x2_t __riscv_vluxseg2ei32_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei32_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei32_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei32_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei32_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei32_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei32_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei32_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei32_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei32_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei32_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei32_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei32_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei32_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei32_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei32_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei32_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei32_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei32_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei32_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei32_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei32_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei32_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei32_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vluxseg2ei32_v_f16m4x2_m (vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl);
/* float32 */
vfloat32mf2x2_t __riscv_vluxseg2ei32_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei32_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei32_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei32_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei32_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei32_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei32_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei32_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei32_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei32_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei32_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei32_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei32_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei32_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei32_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei32_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei32_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei32_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl);
/* float64 */
vfloat64m1x2_t __riscv_vluxseg2ei32_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei32_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei32_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei32_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei32_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei32_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei32_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei32_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei32_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei32_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei32_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl);
/*
 * Masked (`_m`) unordered indexed segment loads, signed integer element
 * types. mask is vbool<SEW/LMUL>_t; bindex LMUL = data LMUL * (32 / SEW);
 * tuple counts limited by NFIELDS * LMUL <= 8, as in the unmasked group.
 */
/* int8 */
vint8mf8x2_t __riscv_vluxseg2ei32_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei32_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei32_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei32_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei32_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei32_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei32_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei32_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei32_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei32_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei32_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei32_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei32_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei32_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei32_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei32_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei32_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei32_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei32_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei32_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei32_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei32_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei32_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei32_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei32_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei32_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei32_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei32_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2x2_t __riscv_vluxseg2ei32_v_i8m2x2_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x3_t __riscv_vluxseg3ei32_v_i8m2x3_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x4_t __riscv_vluxseg4ei32_v_i8m2x4_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
/* int16 */
vint16mf4x2_t __riscv_vluxseg2ei32_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei32_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei32_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei32_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei32_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei32_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei32_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei32_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei32_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei32_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei32_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei32_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei32_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei32_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei32_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei32_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei32_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei32_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei32_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei32_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei32_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei32_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei32_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei32_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4x2_t __riscv_vluxseg2ei32_v_i16m4x2_m (vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl);
/* int32 */
vint32mf2x2_t __riscv_vluxseg2ei32_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei32_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei32_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei32_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei32_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei32_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei32_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei32_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei32_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei32_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei32_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei32_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei32_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei32_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei32_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei32_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei32_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei32_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl);
/* int64 */
vint64m1x2_t __riscv_vluxseg2ei32_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei32_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei32_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei32_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei32_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei32_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei32_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei32_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei32_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei32_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei32_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl);
/*
 * Masked (`_m`) unordered indexed segment loads, unsigned integer element
 * types. Same type rules as above: mask is vbool<SEW/LMUL>_t, bindex LMUL
 * = data LMUL * (32 / SEW). The u64 group continues beyond this excerpt.
 */
/* uint8 */
vuint8mf8x2_t __riscv_vluxseg2ei32_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei32_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei32_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei32_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei32_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei32_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei32_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei32_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei32_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei32_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei32_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei32_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei32_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei32_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei32_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei32_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei32_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei32_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei32_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei32_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei32_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei32_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei32_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei32_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei32_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei32_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei32_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei32_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2x2_t __riscv_vluxseg2ei32_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x3_t __riscv_vluxseg3ei32_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x4_t __riscv_vluxseg4ei32_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
/* uint16 */
vuint16mf4x2_t __riscv_vluxseg2ei32_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei32_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei32_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei32_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei32_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei32_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei32_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei32_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei32_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei32_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei32_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei32_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei32_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei32_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei32_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei32_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei32_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei32_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei32_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei32_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei32_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei32_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei32_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei32_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4x2_t __riscv_vluxseg2ei32_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl);
/* uint32 */
vuint32mf2x2_t __riscv_vluxseg2ei32_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei32_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei32_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei32_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei32_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei32_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei32_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei32_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei32_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei32_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei32_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei32_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei32_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei32_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei32_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei32_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei32_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei32_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl);
/* uint64 (group continues past this excerpt) */
vuint64m1x2_t __riscv_vluxseg2ei32_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei32_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei32_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei32_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei32_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei32_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei32_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei32_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei32_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei32_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei32_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl);
- Mnemonic
vluxseg2ei64.v vd, (rs1), vs2, vm # nf=2
vluxseg3ei64.v vd, (rs1), vs2, vm # nf=3
vluxseg4ei64.v vd, (rs1), vs2, vm # nf=4
vluxseg5ei64.v vd, (rs1), vs2, vm # nf=5
vluxseg6ei64.v vd, (rs1), vs2, vm # nf=6
vluxseg7ei64.v vd, (rs1), vs2, vm # nf=7
vluxseg8ei64.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vluxei64_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vluxseg2ei64_v_f16mf4x2 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei64_v_f16mf4x3 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei64_v_f16mf4x4 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei64_v_f16mf4x5 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei64_v_f16mf4x6 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei64_v_f16mf4x7 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei64_v_f16mf4x8 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei64_v_f16mf2x2 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei64_v_f16mf2x3 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei64_v_f16mf2x4 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei64_v_f16mf2x5 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei64_v_f16mf2x6 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei64_v_f16mf2x7 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei64_v_f16mf2x8 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei64_v_f16m1x2 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei64_v_f16m1x3 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei64_v_f16m1x4 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei64_v_f16m1x5 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei64_v_f16m1x6 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei64_v_f16m1x7 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei64_v_f16m1x8 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei64_v_f16m2x2 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei64_v_f16m2x3 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei64_v_f16m2x4 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vluxseg2ei64_v_f32mf2x2 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei64_v_f32mf2x3 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei64_v_f32mf2x4 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei64_v_f32mf2x5 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei64_v_f32mf2x6 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei64_v_f32mf2x7 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei64_v_f32mf2x8 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei64_v_f32m1x2 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei64_v_f32m1x3 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei64_v_f32m1x4 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei64_v_f32m1x5 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei64_v_f32m1x6 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei64_v_f32m1x7 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei64_v_f32m1x8 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei64_v_f32m2x2 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei64_v_f32m2x3 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei64_v_f32m2x4 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei64_v_f32m4x2 (const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vluxseg2ei64_v_f64m1x2 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei64_v_f64m1x3 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei64_v_f64m1x4 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei64_v_f64m1x5 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei64_v_f64m1x6 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei64_v_f64m1x7 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei64_v_f64m1x8 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei64_v_f64m2x2 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei64_v_f64m2x3 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei64_v_f64m2x4 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei64_v_f64m4x2 (const float64_t *base, vuint64m4_t bindex, size_t vl);
vint8mf8x2_t __riscv_vluxseg2ei64_v_i8mf8x2 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei64_v_i8mf8x3 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei64_v_i8mf8x4 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei64_v_i8mf8x5 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei64_v_i8mf8x6 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei64_v_i8mf8x7 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei64_v_i8mf8x8 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei64_v_i8mf4x2 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei64_v_i8mf4x3 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei64_v_i8mf4x4 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei64_v_i8mf4x5 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei64_v_i8mf4x6 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei64_v_i8mf4x7 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei64_v_i8mf4x8 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei64_v_i8mf2x2 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei64_v_i8mf2x3 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei64_v_i8mf2x4 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei64_v_i8mf2x5 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei64_v_i8mf2x6 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei64_v_i8mf2x7 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei64_v_i8mf2x8 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei64_v_i8m1x2 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei64_v_i8m1x3 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei64_v_i8m1x4 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei64_v_i8m1x5 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei64_v_i8m1x6 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei64_v_i8m1x7 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei64_v_i8m1x8 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vluxseg2ei64_v_i16mf4x2 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei64_v_i16mf4x3 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei64_v_i16mf4x4 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei64_v_i16mf4x5 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei64_v_i16mf4x6 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei64_v_i16mf4x7 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei64_v_i16mf4x8 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei64_v_i16mf2x2 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei64_v_i16mf2x3 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei64_v_i16mf2x4 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei64_v_i16mf2x5 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei64_v_i16mf2x6 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei64_v_i16mf2x7 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei64_v_i16mf2x8 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei64_v_i16m1x2 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei64_v_i16m1x3 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei64_v_i16m1x4 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei64_v_i16m1x5 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei64_v_i16m1x6 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei64_v_i16m1x7 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei64_v_i16m1x8 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei64_v_i16m2x2 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei64_v_i16m2x3 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei64_v_i16m2x4 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2x2_t __riscv_vluxseg2ei64_v_i32mf2x2 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei64_v_i32mf2x3 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei64_v_i32mf2x4 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei64_v_i32mf2x5 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei64_v_i32mf2x6 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei64_v_i32mf2x7 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei64_v_i32mf2x8 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei64_v_i32m1x2 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei64_v_i32m1x3 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei64_v_i32m1x4 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei64_v_i32m1x5 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei64_v_i32m1x6 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei64_v_i32m1x7 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei64_v_i32m1x8 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei64_v_i32m2x2 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei64_v_i32m2x3 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei64_v_i32m2x4 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei64_v_i32m4x2 (const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1x2_t __riscv_vluxseg2ei64_v_i64m1x2 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei64_v_i64m1x3 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei64_v_i64m1x4 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei64_v_i64m1x5 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei64_v_i64m1x6 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei64_v_i64m1x7 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei64_v_i64m1x8 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei64_v_i64m2x2 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei64_v_i64m2x3 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei64_v_i64m2x4 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei64_v_i64m4x2 (const int64_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vluxseg2ei64_v_u8mf8x2 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei64_v_u8mf8x3 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei64_v_u8mf8x4 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei64_v_u8mf8x5 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei64_v_u8mf8x6 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei64_v_u8mf8x7 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei64_v_u8mf8x8 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei64_v_u8mf4x2 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei64_v_u8mf4x3 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei64_v_u8mf4x4 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei64_v_u8mf4x5 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei64_v_u8mf4x6 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei64_v_u8mf4x7 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei64_v_u8mf4x8 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei64_v_u8mf2x2 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei64_v_u8mf2x3 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei64_v_u8mf2x4 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei64_v_u8mf2x5 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei64_v_u8mf2x6 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei64_v_u8mf2x7 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei64_v_u8mf2x8 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei64_v_u8m1x2 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei64_v_u8m1x3 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei64_v_u8m1x4 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei64_v_u8m1x5 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei64_v_u8m1x6 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei64_v_u8m1x7 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei64_v_u8m1x8 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vluxseg2ei64_v_u16mf4x2 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei64_v_u16mf4x3 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei64_v_u16mf4x4 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei64_v_u16mf4x5 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei64_v_u16mf4x6 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei64_v_u16mf4x7 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei64_v_u16mf4x8 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei64_v_u16mf2x2 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei64_v_u16mf2x3 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei64_v_u16mf2x4 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei64_v_u16mf2x5 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei64_v_u16mf2x6 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei64_v_u16mf2x7 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei64_v_u16mf2x8 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei64_v_u16m1x2 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei64_v_u16m1x3 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei64_v_u16m1x4 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei64_v_u16m1x5 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei64_v_u16m1x6 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei64_v_u16m1x7 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei64_v_u16m1x8 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei64_v_u16m2x2 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei64_v_u16m2x3 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei64_v_u16m2x4 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vluxseg2ei64_v_u32mf2x2 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei64_v_u32mf2x3 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei64_v_u32mf2x4 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei64_v_u32mf2x5 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei64_v_u32mf2x6 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei64_v_u32mf2x7 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei64_v_u32mf2x8 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei64_v_u32m1x2 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei64_v_u32m1x3 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei64_v_u32m1x4 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei64_v_u32m1x5 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei64_v_u32m1x6 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei64_v_u32m1x7 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei64_v_u32m1x8 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei64_v_u32m2x2 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei64_v_u32m2x3 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei64_v_u32m2x4 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei64_v_u32m4x2 (const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1x2_t __riscv_vluxseg2ei64_v_u64m1x2 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei64_v_u64m1x3 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei64_v_u64m1x4 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei64_v_u64m1x5 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei64_v_u64m1x6 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei64_v_u64m1x7 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei64_v_u64m1x8 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei64_v_u64m2x2 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei64_v_u64m2x3 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei64_v_u64m2x4 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei64_v_u64m4x2 (const uint64_t *base, vuint64m4_t bindex, size_t vl);
vfloat16mf4x2_t __riscv_vluxseg2ei64_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vluxseg3ei64_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vluxseg4ei64_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vluxseg5ei64_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vluxseg6ei64_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vluxseg7ei64_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vluxseg8ei64_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vluxseg2ei64_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vluxseg3ei64_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vluxseg4ei64_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vluxseg5ei64_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vluxseg6ei64_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vluxseg7ei64_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vluxseg8ei64_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vluxseg2ei64_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vluxseg3ei64_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vluxseg4ei64_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vluxseg5ei64_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vluxseg6ei64_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vluxseg7ei64_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vluxseg8ei64_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vluxseg2ei64_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vluxseg3ei64_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vluxseg4ei64_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vluxseg2ei64_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vluxseg3ei64_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vluxseg4ei64_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vluxseg5ei64_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vluxseg6ei64_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vluxseg7ei64_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vluxseg8ei64_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vluxseg2ei64_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vluxseg3ei64_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vluxseg4ei64_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vluxseg5ei64_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vluxseg6ei64_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vluxseg7ei64_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vluxseg8ei64_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vluxseg2ei64_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vluxseg3ei64_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vluxseg4ei64_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vluxseg2ei64_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vluxseg2ei64_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vluxseg3ei64_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vluxseg4ei64_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vluxseg5ei64_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vluxseg6ei64_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vluxseg7ei64_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vluxseg8ei64_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vluxseg2ei64_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vluxseg3ei64_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vluxseg4ei64_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vluxseg2ei64_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl);
vint8mf8x2_t __riscv_vluxseg2ei64_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x3_t __riscv_vluxseg3ei64_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x4_t __riscv_vluxseg4ei64_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x5_t __riscv_vluxseg5ei64_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x6_t __riscv_vluxseg6ei64_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x7_t __riscv_vluxseg7ei64_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x8_t __riscv_vluxseg8ei64_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4x2_t __riscv_vluxseg2ei64_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vluxseg3ei64_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vluxseg4ei64_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vluxseg5ei64_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vluxseg6ei64_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vluxseg7ei64_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vluxseg8ei64_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vluxseg2ei64_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x3_t __riscv_vluxseg3ei64_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x4_t __riscv_vluxseg4ei64_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x5_t __riscv_vluxseg5ei64_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x6_t __riscv_vluxseg6ei64_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x7_t __riscv_vluxseg7ei64_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x8_t __riscv_vluxseg8ei64_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1x2_t __riscv_vluxseg2ei64_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x3_t __riscv_vluxseg3ei64_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x4_t __riscv_vluxseg4ei64_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x5_t __riscv_vluxseg5ei64_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x6_t __riscv_vluxseg6ei64_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x7_t __riscv_vluxseg7ei64_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x8_t __riscv_vluxseg8ei64_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vluxseg2ei64_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x3_t __riscv_vluxseg3ei64_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x4_t __riscv_vluxseg4ei64_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x5_t __riscv_vluxseg5ei64_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x6_t __riscv_vluxseg6ei64_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x7_t __riscv_vluxseg7ei64_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x8_t __riscv_vluxseg8ei64_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2x2_t __riscv_vluxseg2ei64_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vluxseg3ei64_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vluxseg4ei64_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vluxseg5ei64_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vluxseg6ei64_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vluxseg7ei64_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vluxseg8ei64_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1x2_t __riscv_vluxseg2ei64_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x3_t __riscv_vluxseg3ei64_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x4_t __riscv_vluxseg4ei64_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x5_t __riscv_vluxseg5ei64_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x6_t __riscv_vluxseg6ei64_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x7_t __riscv_vluxseg7ei64_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x8_t __riscv_vluxseg8ei64_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2x2_t __riscv_vluxseg2ei64_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x3_t __riscv_vluxseg3ei64_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x4_t __riscv_vluxseg4ei64_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2x2_t __riscv_vluxseg2ei64_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x3_t __riscv_vluxseg3ei64_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x4_t __riscv_vluxseg4ei64_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x5_t __riscv_vluxseg5ei64_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x6_t __riscv_vluxseg6ei64_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x7_t __riscv_vluxseg7ei64_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x8_t __riscv_vluxseg8ei64_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1x2_t __riscv_vluxseg2ei64_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x3_t __riscv_vluxseg3ei64_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x4_t __riscv_vluxseg4ei64_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x5_t __riscv_vluxseg5ei64_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x6_t __riscv_vluxseg6ei64_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x7_t __riscv_vluxseg7ei64_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x8_t __riscv_vluxseg8ei64_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2x2_t __riscv_vluxseg2ei64_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x3_t __riscv_vluxseg3ei64_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x4_t __riscv_vluxseg4ei64_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4x2_t __riscv_vluxseg2ei64_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1x2_t __riscv_vluxseg2ei64_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x3_t __riscv_vluxseg3ei64_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x4_t __riscv_vluxseg4ei64_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x5_t __riscv_vluxseg5ei64_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x6_t __riscv_vluxseg6ei64_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x7_t __riscv_vluxseg7ei64_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x8_t __riscv_vluxseg8ei64_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2x2_t __riscv_vluxseg2ei64_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x3_t __riscv_vluxseg3ei64_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x4_t __riscv_vluxseg4ei64_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4x2_t __riscv_vluxseg2ei64_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vluxseg2ei64_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vluxseg3ei64_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vluxseg4ei64_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vluxseg5ei64_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vluxseg6ei64_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vluxseg7ei64_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vluxseg8ei64_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vluxseg2ei64_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vluxseg3ei64_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vluxseg4ei64_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vluxseg5ei64_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vluxseg6ei64_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vluxseg7ei64_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vluxseg8ei64_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vluxseg2ei64_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vluxseg3ei64_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vluxseg4ei64_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vluxseg5ei64_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vluxseg6ei64_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vluxseg7ei64_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vluxseg8ei64_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1x2_t __riscv_vluxseg2ei64_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x3_t __riscv_vluxseg3ei64_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x4_t __riscv_vluxseg4ei64_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x5_t __riscv_vluxseg5ei64_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x6_t __riscv_vluxseg6ei64_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x7_t __riscv_vluxseg7ei64_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x8_t __riscv_vluxseg8ei64_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vluxseg2ei64_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vluxseg3ei64_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vluxseg4ei64_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vluxseg5ei64_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vluxseg6ei64_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vluxseg7ei64_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vluxseg8ei64_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vluxseg2ei64_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vluxseg3ei64_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vluxseg4ei64_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vluxseg5ei64_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vluxseg6ei64_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vluxseg7ei64_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vluxseg8ei64_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vluxseg2ei64_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x3_t __riscv_vluxseg3ei64_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x4_t __riscv_vluxseg4ei64_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x5_t __riscv_vluxseg5ei64_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x6_t __riscv_vluxseg6ei64_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x7_t __riscv_vluxseg7ei64_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x8_t __riscv_vluxseg8ei64_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2x2_t __riscv_vluxseg2ei64_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x3_t __riscv_vluxseg3ei64_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x4_t __riscv_vluxseg4ei64_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vluxseg2ei64_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vluxseg3ei64_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vluxseg4ei64_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vluxseg5ei64_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vluxseg6ei64_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vluxseg7ei64_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vluxseg8ei64_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1x2_t __riscv_vluxseg2ei64_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vluxseg3ei64_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vluxseg4ei64_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vluxseg5ei64_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vluxseg6ei64_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vluxseg7ei64_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vluxseg8ei64_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vluxseg2ei64_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x3_t __riscv_vluxseg3ei64_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x4_t __riscv_vluxseg4ei64_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4x2_t __riscv_vluxseg2ei64_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1x2_t __riscv_vluxseg2ei64_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x3_t __riscv_vluxseg3ei64_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x4_t __riscv_vluxseg4ei64_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x5_t __riscv_vluxseg5ei64_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x6_t __riscv_vluxseg6ei64_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x7_t __riscv_vluxseg7ei64_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x8_t __riscv_vluxseg8ei64_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2x2_t __riscv_vluxseg2ei64_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vluxseg3ei64_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vluxseg4ei64_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vluxseg2ei64_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl);
8.1. vloxseg<nf>ei8.v
- Mnemonic
vloxseg2ei8.v vd, (rs1), vs2, vm # nf=2
vloxseg3ei8.v vd, (rs1), vs2, vm # nf=3
vloxseg4ei8.v vd, (rs1), vs2, vm # nf=4
vloxseg5ei8.v vd, (rs1), vs2, vm # nf=5
vloxseg6ei8.v vd, (rs1), vs2, vm # nf=6
vloxseg7ei8.v vd, (rs1), vs2, vm # nf=7
vloxseg8ei8.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei8_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vloxseg2ei8_v_f16mf4x2 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei8_v_f16mf4x3 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei8_v_f16mf4x4 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei8_v_f16mf4x5 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei8_v_f16mf4x6 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei8_v_f16mf4x7 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei8_v_f16mf4x8 (const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei8_v_f16mf2x2 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei8_v_f16mf2x3 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei8_v_f16mf2x4 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei8_v_f16mf2x5 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei8_v_f16mf2x6 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei8_v_f16mf2x7 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei8_v_f16mf2x8 (const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei8_v_f16m1x2 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei8_v_f16m1x3 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei8_v_f16m1x4 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei8_v_f16m1x5 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei8_v_f16m1x6 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei8_v_f16m1x7 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei8_v_f16m1x8 (const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei8_v_f16m2x2 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei8_v_f16m2x3 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei8_v_f16m2x4 (const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vloxseg2ei8_v_f16m4x2 (const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei8_v_f32mf2x2 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei8_v_f32mf2x3 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei8_v_f32mf2x4 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei8_v_f32mf2x5 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei8_v_f32mf2x6 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei8_v_f32mf2x7 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei8_v_f32mf2x8 (const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei8_v_f32m1x2 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei8_v_f32m1x3 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei8_v_f32m1x4 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei8_v_f32m1x5 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei8_v_f32m1x6 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei8_v_f32m1x7 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei8_v_f32m1x8 (const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei8_v_f32m2x2 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei8_v_f32m2x3 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei8_v_f32m2x4 (const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei8_v_f32m4x2 (const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei8_v_f64m1x2 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei8_v_f64m1x3 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei8_v_f64m1x4 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei8_v_f64m1x5 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei8_v_f64m1x6 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei8_v_f64m1x7 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei8_v_f64m1x8 (const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei8_v_f64m2x2 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei8_v_f64m2x3 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei8_v_f64m2x4 (const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei8_v_f64m4x2 (const float64_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei8_v_i8mf8x2 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei8_v_i8mf8x3 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei8_v_i8mf8x4 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei8_v_i8mf8x5 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei8_v_i8mf8x6 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei8_v_i8mf8x7 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei8_v_i8mf8x8 (const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei8_v_i8mf4x2 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei8_v_i8mf4x3 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei8_v_i8mf4x4 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei8_v_i8mf4x5 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei8_v_i8mf4x6 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei8_v_i8mf4x7 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei8_v_i8mf4x8 (const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei8_v_i8mf2x2 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei8_v_i8mf2x3 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei8_v_i8mf2x4 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei8_v_i8mf2x5 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei8_v_i8mf2x6 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei8_v_i8mf2x7 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei8_v_i8mf2x8 (const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei8_v_i8m1x2 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei8_v_i8m1x3 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei8_v_i8m1x4 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei8_v_i8m1x5 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei8_v_i8m1x6 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei8_v_i8m1x7 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei8_v_i8m1x8 (const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2x2_t __riscv_vloxseg2ei8_v_i8m2x2 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x3_t __riscv_vloxseg3ei8_v_i8m2x3 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x4_t __riscv_vloxseg4ei8_v_i8m2x4 (const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4x2_t __riscv_vloxseg2ei8_v_i8m4x2 (const int8_t *base, vuint8m4_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei8_v_i16mf4x2 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei8_v_i16mf4x3 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x4_t __riscv_vloxseg4ei8_v_i16mf4x4 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei8_v_i16mf4x5 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei8_v_i16mf4x6 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei8_v_i16mf4x7 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei8_v_i16mf4x8 (const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei8_v_i16mf2x2 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei8_v_i16mf2x3 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei8_v_i16mf2x4 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei8_v_i16mf2x5 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei8_v_i16mf2x6 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei8_v_i16mf2x7 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei8_v_i16mf2x8 (const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei8_v_i16m1x2 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei8_v_i16m1x3 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei8_v_i16m1x4 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei8_v_i16m1x5 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei8_v_i16m1x6 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei8_v_i16m1x7 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei8_v_i16m1x8 (const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei8_v_i16m2x2 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei8_v_i16m2x3 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei8_v_i16m2x4 (const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4x2_t __riscv_vloxseg2ei8_v_i16m4x2 (const int16_t *base, vuint8m2_t bindex, size_t vl);
vint32mf2x2_t __riscv_vloxseg2ei8_v_i32mf2x2 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei8_v_i32mf2x3 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei8_v_i32mf2x4 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei8_v_i32mf2x5 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei8_v_i32mf2x6 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei8_v_i32mf2x7 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei8_v_i32mf2x8 (const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei8_v_i32m1x2 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei8_v_i32m1x3 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei8_v_i32m1x4 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei8_v_i32m1x5 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei8_v_i32m1x6 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei8_v_i32m1x7 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei8_v_i32m1x8 (const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei8_v_i32m2x2 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei8_v_i32m2x3 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei8_v_i32m2x4 (const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei8_v_i32m4x2 (const int32_t *base, vuint8m1_t bindex, size_t vl);
vint64m1x2_t __riscv_vloxseg2ei8_v_i64m1x2 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei8_v_i64m1x3 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei8_v_i64m1x4 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei8_v_i64m1x5 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei8_v_i64m1x6 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei8_v_i64m1x7 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei8_v_i64m1x8 (const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei8_v_i64m2x2 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei8_v_i64m2x3 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei8_v_i64m2x4 (const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei8_v_i64m4x2 (const int64_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vloxseg2ei8_v_u8mf8x2 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei8_v_u8mf8x3 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei8_v_u8mf8x4 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei8_v_u8mf8x5 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei8_v_u8mf8x6 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei8_v_u8mf8x7 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei8_v_u8mf8x8 (const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei8_v_u8mf4x2 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei8_v_u8mf4x3 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei8_v_u8mf4x4 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei8_v_u8mf4x5 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei8_v_u8mf4x6 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei8_v_u8mf4x7 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei8_v_u8mf4x8 (const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei8_v_u8mf2x2 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei8_v_u8mf2x3 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei8_v_u8mf2x4 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei8_v_u8mf2x5 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei8_v_u8mf2x6 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei8_v_u8mf2x7 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei8_v_u8mf2x8 (const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei8_v_u8m1x2 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei8_v_u8m1x3 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei8_v_u8m1x4 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei8_v_u8m1x5 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei8_v_u8m1x6 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei8_v_u8m1x7 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei8_v_u8m1x8 (const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2x2_t __riscv_vloxseg2ei8_v_u8m2x2 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x3_t __riscv_vloxseg3ei8_v_u8m2x3 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x4_t __riscv_vloxseg4ei8_v_u8m2x4 (const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4x2_t __riscv_vloxseg2ei8_v_u8m4x2 (const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei8_v_u16mf4x2 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei8_v_u16mf4x3 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei8_v_u16mf4x4 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei8_v_u16mf4x5 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei8_v_u16mf4x6 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei8_v_u16mf4x7 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei8_v_u16mf4x8 (const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei8_v_u16mf2x2 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei8_v_u16mf2x3 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei8_v_u16mf2x4 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei8_v_u16mf2x5 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei8_v_u16mf2x6 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei8_v_u16mf2x7 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei8_v_u16mf2x8 (const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei8_v_u16m1x2 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei8_v_u16m1x3 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei8_v_u16m1x4 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei8_v_u16m1x5 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei8_v_u16m1x6 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei8_v_u16m1x7 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei8_v_u16m1x8 (const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei8_v_u16m2x2 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei8_v_u16m2x3 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei8_v_u16m2x4 (const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4x2_t __riscv_vloxseg2ei8_v_u16m4x2 (const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei8_v_u32mf2x2 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei8_v_u32mf2x3 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei8_v_u32mf2x4 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei8_v_u32mf2x5 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei8_v_u32mf2x6 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei8_v_u32mf2x7 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei8_v_u32mf2x8 (const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei8_v_u32m1x2 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei8_v_u32m1x3 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei8_v_u32m1x4 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei8_v_u32m1x5 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei8_v_u32m1x6 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei8_v_u32m1x7 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei8_v_u32m1x8 (const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei8_v_u32m2x2 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei8_v_u32m2x3 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei8_v_u32m2x4 (const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei8_v_u32m4x2 (const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei8_v_u64m1x2 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei8_v_u64m1x3 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei8_v_u64m1x4 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei8_v_u64m1x5 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei8_v_u64m1x6 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei8_v_u64m1x7 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei8_v_u64m1x8 (const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei8_v_u64m2x2 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei8_v_u64m2x3 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei8_v_u64m2x4 (const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei8_v_u64m4x2 (const uint64_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16mf4x2_t __riscv_vloxseg2ei8_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei8_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei8_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei8_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei8_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei8_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei8_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint8mf8_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei8_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei8_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei8_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei8_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei8_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei8_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei8_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint8mf4_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei8_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei8_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei8_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei8_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei8_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei8_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei8_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint8mf2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei8_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei8_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei8_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint8m1_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vloxseg2ei8_v_f16m4x2_m (vbool4_t mask, const float16_t *base, vuint8m2_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei8_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei8_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei8_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei8_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei8_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei8_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei8_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint8mf8_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei8_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei8_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei8_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei8_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei8_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei8_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei8_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint8mf4_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei8_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei8_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei8_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint8mf2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei8_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint8m1_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei8_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei8_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei8_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei8_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei8_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei8_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei8_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint8mf8_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei8_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei8_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei8_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint8mf4_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei8_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei8_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei8_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei8_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei8_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei8_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei8_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei8_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint8mf8_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei8_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei8_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei8_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei8_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei8_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei8_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei8_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint8mf4_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei8_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei8_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei8_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei8_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei8_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei8_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei8_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint8mf2_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei8_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei8_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei8_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei8_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei8_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei8_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei8_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint8m1_t bindex, size_t vl);
vint8m2x2_t __riscv_vloxseg2ei8_v_i8m2x2_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x3_t __riscv_vloxseg3ei8_v_i8m2x3_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m2x4_t __riscv_vloxseg4ei8_v_i8m2x4_m (vbool4_t mask, const int8_t *base, vuint8m2_t bindex, size_t vl);
vint8m4x2_t __riscv_vloxseg2ei8_v_i8m4x2_m (vbool2_t mask, const int8_t *base, vuint8m4_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei8_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei8_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x4_t __riscv_vloxseg4ei8_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei8_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei8_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei8_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei8_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint8mf8_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei8_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei8_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei8_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei8_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei8_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei8_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei8_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint8mf4_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei8_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei8_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei8_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei8_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei8_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei8_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei8_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint8mf2_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei8_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei8_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei8_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint8m1_t bindex, size_t vl);
vint16m4x2_t __riscv_vloxseg2ei8_v_i16m4x2_m (vbool4_t mask, const int16_t *base, vuint8m2_t bindex, size_t vl);
vint32mf2x2_t __riscv_vloxseg2ei8_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei8_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei8_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei8_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei8_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei8_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei8_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint8mf8_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei8_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei8_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei8_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei8_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei8_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei8_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei8_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint8mf4_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei8_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei8_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei8_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint8mf2_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei8_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint8m1_t bindex, size_t vl);
vint64m1x2_t __riscv_vloxseg2ei8_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei8_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei8_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei8_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei8_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei8_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei8_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint8mf8_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei8_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei8_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei8_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint8mf4_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei8_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vloxseg2ei8_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei8_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei8_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei8_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei8_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei8_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei8_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint8mf8_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei8_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei8_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei8_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei8_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei8_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei8_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei8_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint8mf4_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei8_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei8_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei8_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei8_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei8_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei8_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei8_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint8mf2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei8_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei8_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei8_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei8_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei8_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei8_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei8_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint8m1_t bindex, size_t vl);
vuint8m2x2_t __riscv_vloxseg2ei8_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x3_t __riscv_vloxseg3ei8_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m2x4_t __riscv_vloxseg4ei8_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, vuint8m2_t bindex, size_t vl);
vuint8m4x2_t __riscv_vloxseg2ei8_v_u8m4x2_m (vbool2_t mask, const uint8_t *base, vuint8m4_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei8_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei8_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei8_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei8_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei8_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei8_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei8_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint8mf8_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei8_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei8_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei8_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei8_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei8_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei8_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei8_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint8mf4_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei8_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei8_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei8_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei8_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei8_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei8_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei8_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint8mf2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei8_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei8_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei8_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint8m1_t bindex, size_t vl);
vuint16m4x2_t __riscv_vloxseg2ei8_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, vuint8m2_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei8_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei8_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei8_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei8_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei8_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei8_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei8_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint8mf8_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei8_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei8_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei8_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei8_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei8_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei8_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei8_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint8mf4_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei8_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei8_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei8_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint8mf2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei8_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint8m1_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei8_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei8_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei8_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei8_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei8_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei8_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei8_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint8mf8_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei8_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei8_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei8_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint8mf4_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei8_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint8mf2_t bindex, size_t vl);
8.2. vloxseg<nf>ei16.v
- Mnemonic
vloxseg2ei16.v vd, (rs1), vs2, vm # nf=2
vloxseg3ei16.v vd, (rs1), vs2, vm # nf=3
vloxseg4ei16.v vd, (rs1), vs2, vm # nf=4
vloxseg5ei16.v vd, (rs1), vs2, vm # nf=5
vloxseg6ei16.v vd, (rs1), vs2, vm # nf=6
vloxseg7ei16.v vd, (rs1), vs2, vm # nf=7
vloxseg8ei16.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment loads using 16-bit index elements (EEW=16 for the index operand vs2)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei16_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vloxseg2ei16_v_f16mf4x2 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei16_v_f16mf4x3 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei16_v_f16mf4x4 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei16_v_f16mf4x5 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei16_v_f16mf4x6 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei16_v_f16mf4x7 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei16_v_f16mf4x8 (const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei16_v_f16mf2x2 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei16_v_f16mf2x3 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei16_v_f16mf2x4 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei16_v_f16mf2x5 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei16_v_f16mf2x6 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei16_v_f16mf2x7 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei16_v_f16mf2x8 (const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei16_v_f16m1x2 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei16_v_f16m1x3 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei16_v_f16m1x4 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei16_v_f16m1x5 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei16_v_f16m1x6 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei16_v_f16m1x7 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei16_v_f16m1x8 (const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei16_v_f16m2x2 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei16_v_f16m2x3 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei16_v_f16m2x4 (const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vloxseg2ei16_v_f16m4x2 (const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei16_v_f32mf2x2 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei16_v_f32mf2x3 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei16_v_f32mf2x4 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei16_v_f32mf2x5 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei16_v_f32mf2x6 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei16_v_f32mf2x7 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei16_v_f32mf2x8 (const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei16_v_f32m1x2 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei16_v_f32m1x3 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei16_v_f32m1x4 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei16_v_f32m1x5 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei16_v_f32m1x6 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei16_v_f32m1x7 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei16_v_f32m1x8 (const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei16_v_f32m2x2 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei16_v_f32m2x3 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei16_v_f32m2x4 (const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei16_v_f32m4x2 (const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei16_v_f64m1x2 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei16_v_f64m1x3 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei16_v_f64m1x4 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei16_v_f64m1x5 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei16_v_f64m1x6 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei16_v_f64m1x7 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei16_v_f64m1x8 (const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei16_v_f64m2x2 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei16_v_f64m2x3 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei16_v_f64m2x4 (const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei16_v_f64m4x2 (const float64_t *base, vuint16m1_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei16_v_i8mf8x2 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei16_v_i8mf8x3 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei16_v_i8mf8x4 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei16_v_i8mf8x5 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei16_v_i8mf8x6 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei16_v_i8mf8x7 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei16_v_i8mf8x8 (const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei16_v_i8mf4x2 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei16_v_i8mf4x3 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei16_v_i8mf4x4 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei16_v_i8mf4x5 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei16_v_i8mf4x6 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei16_v_i8mf4x7 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei16_v_i8mf4x8 (const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei16_v_i8mf2x2 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei16_v_i8mf2x3 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei16_v_i8mf2x4 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei16_v_i8mf2x5 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei16_v_i8mf2x6 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei16_v_i8mf2x7 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei16_v_i8mf2x8 (const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei16_v_i8m1x2 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei16_v_i8m1x3 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei16_v_i8m1x4 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei16_v_i8m1x5 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei16_v_i8m1x6 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei16_v_i8m1x7 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei16_v_i8m1x8 (const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2x2_t __riscv_vloxseg2ei16_v_i8m2x2 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x3_t __riscv_vloxseg3ei16_v_i8m2x3 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x4_t __riscv_vloxseg4ei16_v_i8m2x4 (const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4x2_t __riscv_vloxseg2ei16_v_i8m4x2 (const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei16_v_i16mf4x2 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei16_v_i16mf4x3 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x4_t __riscv_vloxseg4ei16_v_i16mf4x4 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei16_v_i16mf4x5 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei16_v_i16mf4x6 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei16_v_i16mf4x7 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei16_v_i16mf4x8 (const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei16_v_i16mf2x2 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei16_v_i16mf2x3 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei16_v_i16mf2x4 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei16_v_i16mf2x5 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei16_v_i16mf2x6 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei16_v_i16mf2x7 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei16_v_i16mf2x8 (const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei16_v_i16m1x2 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei16_v_i16m1x3 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei16_v_i16m1x4 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei16_v_i16m1x5 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei16_v_i16m1x6 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei16_v_i16m1x7 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei16_v_i16m1x8 (const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei16_v_i16m2x2 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei16_v_i16m2x3 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei16_v_i16m2x4 (const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4x2_t __riscv_vloxseg2ei16_v_i16m4x2 (const int16_t *base, vuint16m4_t bindex, size_t vl);
vint32mf2x2_t __riscv_vloxseg2ei16_v_i32mf2x2 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei16_v_i32mf2x3 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei16_v_i32mf2x4 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei16_v_i32mf2x5 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei16_v_i32mf2x6 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei16_v_i32mf2x7 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei16_v_i32mf2x8 (const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei16_v_i32m1x2 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei16_v_i32m1x3 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei16_v_i32m1x4 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei16_v_i32m1x5 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei16_v_i32m1x6 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei16_v_i32m1x7 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei16_v_i32m1x8 (const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei16_v_i32m2x2 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei16_v_i32m2x3 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei16_v_i32m2x4 (const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei16_v_i32m4x2 (const int32_t *base, vuint16m2_t bindex, size_t vl);
vint64m1x2_t __riscv_vloxseg2ei16_v_i64m1x2 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei16_v_i64m1x3 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei16_v_i64m1x4 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei16_v_i64m1x5 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei16_v_i64m1x6 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei16_v_i64m1x7 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei16_v_i64m1x8 (const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei16_v_i64m2x2 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei16_v_i64m2x3 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei16_v_i64m2x4 (const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei16_v_i64m4x2 (const int64_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vloxseg2ei16_v_u8mf8x2 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei16_v_u8mf8x3 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei16_v_u8mf8x4 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei16_v_u8mf8x5 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei16_v_u8mf8x6 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei16_v_u8mf8x7 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei16_v_u8mf8x8 (const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei16_v_u8mf4x2 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei16_v_u8mf4x3 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei16_v_u8mf4x4 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei16_v_u8mf4x5 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei16_v_u8mf4x6 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei16_v_u8mf4x7 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei16_v_u8mf4x8 (const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei16_v_u8mf2x2 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei16_v_u8mf2x3 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei16_v_u8mf2x4 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei16_v_u8mf2x5 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei16_v_u8mf2x6 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei16_v_u8mf2x7 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei16_v_u8mf2x8 (const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei16_v_u8m1x2 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei16_v_u8m1x3 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei16_v_u8m1x4 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei16_v_u8m1x5 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei16_v_u8m1x6 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei16_v_u8m1x7 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei16_v_u8m1x8 (const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2x2_t __riscv_vloxseg2ei16_v_u8m2x2 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x3_t __riscv_vloxseg3ei16_v_u8m2x3 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x4_t __riscv_vloxseg4ei16_v_u8m2x4 (const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4x2_t __riscv_vloxseg2ei16_v_u8m4x2 (const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei16_v_u16mf4x2 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei16_v_u16mf4x3 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei16_v_u16mf4x4 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei16_v_u16mf4x5 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei16_v_u16mf4x6 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei16_v_u16mf4x7 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei16_v_u16mf4x8 (const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei16_v_u16mf2x2 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei16_v_u16mf2x3 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei16_v_u16mf2x4 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei16_v_u16mf2x5 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei16_v_u16mf2x6 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei16_v_u16mf2x7 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei16_v_u16mf2x8 (const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei16_v_u16m1x2 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei16_v_u16m1x3 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei16_v_u16m1x4 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei16_v_u16m1x5 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei16_v_u16m1x6 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei16_v_u16m1x7 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei16_v_u16m1x8 (const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei16_v_u16m2x2 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei16_v_u16m2x3 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei16_v_u16m2x4 (const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4x2_t __riscv_vloxseg2ei16_v_u16m4x2 (const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei16_v_u32mf2x2 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei16_v_u32mf2x3 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei16_v_u32mf2x4 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei16_v_u32mf2x5 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei16_v_u32mf2x6 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei16_v_u32mf2x7 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei16_v_u32mf2x8 (const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei16_v_u32m1x2 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei16_v_u32m1x3 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei16_v_u32m1x4 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei16_v_u32m1x5 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei16_v_u32m1x6 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei16_v_u32m1x7 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei16_v_u32m1x8 (const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei16_v_u32m2x2 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei16_v_u32m2x3 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei16_v_u32m2x4 (const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei16_v_u32m4x2 (const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei16_v_u64m1x2 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei16_v_u64m1x3 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei16_v_u64m1x4 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei16_v_u64m1x5 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei16_v_u64m1x6 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei16_v_u64m1x7 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei16_v_u64m1x8 (const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei16_v_u64m2x2 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei16_v_u64m2x3 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei16_v_u64m2x4 (const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei16_v_u64m4x2 (const uint64_t *base, vuint16m1_t bindex, size_t vl);
vfloat16mf4x2_t __riscv_vloxseg2ei16_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei16_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei16_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei16_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei16_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei16_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei16_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint16mf4_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei16_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei16_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei16_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei16_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei16_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei16_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei16_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint16mf2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei16_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei16_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei16_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei16_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei16_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei16_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei16_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint16m1_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei16_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei16_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei16_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint16m2_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vloxseg2ei16_v_f16m4x2_m (vbool4_t mask, const float16_t *base, vuint16m4_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei16_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei16_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei16_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei16_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei16_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei16_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei16_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint16mf4_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei16_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei16_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei16_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei16_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei16_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei16_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei16_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint16mf2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei16_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei16_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei16_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint16m1_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei16_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint16m2_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei16_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei16_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei16_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei16_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei16_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei16_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei16_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint16mf4_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei16_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei16_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei16_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint16mf2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei16_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint16m1_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei16_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei16_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei16_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei16_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei16_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei16_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei16_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint16mf4_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei16_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei16_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei16_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei16_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei16_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei16_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei16_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint16mf2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei16_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei16_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei16_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei16_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei16_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei16_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei16_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint16m1_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei16_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei16_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei16_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei16_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei16_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei16_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei16_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint16m2_t bindex, size_t vl);
vint8m2x2_t __riscv_vloxseg2ei16_v_i8m2x2_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x3_t __riscv_vloxseg3ei16_v_i8m2x3_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m2x4_t __riscv_vloxseg4ei16_v_i8m2x4_m (vbool4_t mask, const int8_t *base, vuint16m4_t bindex, size_t vl);
vint8m4x2_t __riscv_vloxseg2ei16_v_i8m4x2_m (vbool2_t mask, const int8_t *base, vuint16m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei16_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei16_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x4_t __riscv_vloxseg4ei16_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei16_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei16_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei16_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei16_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint16mf4_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei16_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei16_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei16_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei16_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei16_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei16_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei16_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint16mf2_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei16_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei16_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei16_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei16_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei16_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei16_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei16_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint16m1_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei16_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei16_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei16_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint16m2_t bindex, size_t vl);
vint16m4x2_t __riscv_vloxseg2ei16_v_i16m4x2_m (vbool4_t mask, const int16_t *base, vuint16m4_t bindex, size_t vl);
vint32mf2x2_t __riscv_vloxseg2ei16_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei16_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei16_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei16_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei16_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei16_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei16_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint16mf4_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei16_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei16_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei16_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei16_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei16_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei16_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei16_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint16mf2_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei16_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei16_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei16_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint16m1_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei16_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint16m2_t bindex, size_t vl);
vint64m1x2_t __riscv_vloxseg2ei16_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei16_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei16_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei16_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei16_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei16_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei16_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint16mf4_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei16_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei16_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei16_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint16mf2_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei16_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vloxseg2ei16_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei16_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei16_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei16_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei16_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei16_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei16_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint16mf4_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei16_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei16_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei16_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei16_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei16_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei16_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei16_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint16mf2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei16_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei16_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei16_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei16_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei16_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei16_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei16_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint16m1_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei16_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei16_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei16_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei16_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei16_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei16_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei16_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint16m2_t bindex, size_t vl);
vuint8m2x2_t __riscv_vloxseg2ei16_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x3_t __riscv_vloxseg3ei16_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m2x4_t __riscv_vloxseg4ei16_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, vuint16m4_t bindex, size_t vl);
vuint8m4x2_t __riscv_vloxseg2ei16_v_u8m4x2_m (vbool2_t mask, const uint8_t *base, vuint16m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei16_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei16_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei16_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei16_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei16_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei16_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei16_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint16mf4_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei16_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei16_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei16_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei16_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei16_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei16_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei16_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint16mf2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei16_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei16_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei16_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei16_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei16_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei16_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei16_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint16m1_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei16_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei16_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei16_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint16m2_t bindex, size_t vl);
vuint16m4x2_t __riscv_vloxseg2ei16_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, vuint16m4_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei16_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei16_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei16_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei16_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei16_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei16_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei16_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint16mf4_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei16_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei16_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei16_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei16_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei16_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei16_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei16_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint16mf2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei16_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei16_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei16_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint16m1_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei16_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint16m2_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei16_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei16_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei16_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei16_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei16_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei16_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei16_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint16mf4_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei16_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei16_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei16_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint16mf2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei16_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint16m1_t bindex, size_t vl);
8.3. vloxseg<nf>ei32.v
- Mnemonic
vloxseg2ei32.v vd, (rs1), vs2, vm # nf=2
vloxseg3ei32.v vd, (rs1), vs2, vm # nf=3
vloxseg4ei32.v vd, (rs1), vs2, vm # nf=4
vloxseg5ei32.v vd, (rs1), vs2, vm # nf=5
vloxseg6ei32.v vd, (rs1), vs2, vm # nf=6
vloxseg7ei32.v vd, (rs1), vs2, vm # nf=7
vloxseg8ei32.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment loads: each segment of nf fields is loaded from the byte offset given by the corresponding 32-bit index element in vs2, relative to the base address in rs1, with element accesses performed in index order.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei32_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vloxseg2ei32_v_f16mf4x2 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei32_v_f16mf4x3 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei32_v_f16mf4x4 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei32_v_f16mf4x5 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei32_v_f16mf4x6 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei32_v_f16mf4x7 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei32_v_f16mf4x8 (const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei32_v_f16mf2x2 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei32_v_f16mf2x3 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei32_v_f16mf2x4 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei32_v_f16mf2x5 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei32_v_f16mf2x6 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei32_v_f16mf2x7 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei32_v_f16mf2x8 (const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei32_v_f16m1x2 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei32_v_f16m1x3 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei32_v_f16m1x4 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei32_v_f16m1x5 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei32_v_f16m1x6 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei32_v_f16m1x7 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei32_v_f16m1x8 (const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei32_v_f16m2x2 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei32_v_f16m2x3 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei32_v_f16m2x4 (const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vloxseg2ei32_v_f16m4x2 (const float16_t *base, vuint32m8_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei32_v_f32mf2x2 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei32_v_f32mf2x3 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei32_v_f32mf2x4 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei32_v_f32mf2x5 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei32_v_f32mf2x6 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei32_v_f32mf2x7 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei32_v_f32mf2x8 (const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei32_v_f32m1x2 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei32_v_f32m1x3 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei32_v_f32m1x4 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei32_v_f32m1x5 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei32_v_f32m1x6 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei32_v_f32m1x7 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei32_v_f32m1x8 (const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei32_v_f32m2x2 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei32_v_f32m2x3 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei32_v_f32m2x4 (const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei32_v_f32m4x2 (const float32_t *base, vuint32m4_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei32_v_f64m1x2 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei32_v_f64m1x3 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei32_v_f64m1x4 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei32_v_f64m1x5 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei32_v_f64m1x6 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei32_v_f64m1x7 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei32_v_f64m1x8 (const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei32_v_f64m2x2 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei32_v_f64m2x3 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei32_v_f64m2x4 (const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei32_v_f64m4x2 (const float64_t *base, vuint32m2_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei32_v_i8mf8x2 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei32_v_i8mf8x3 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei32_v_i8mf8x4 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei32_v_i8mf8x5 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei32_v_i8mf8x6 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei32_v_i8mf8x7 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei32_v_i8mf8x8 (const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei32_v_i8mf4x2 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei32_v_i8mf4x3 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei32_v_i8mf4x4 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei32_v_i8mf4x5 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei32_v_i8mf4x6 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei32_v_i8mf4x7 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei32_v_i8mf4x8 (const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei32_v_i8mf2x2 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei32_v_i8mf2x3 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei32_v_i8mf2x4 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei32_v_i8mf2x5 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei32_v_i8mf2x6 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei32_v_i8mf2x7 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei32_v_i8mf2x8 (const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei32_v_i8m1x2 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei32_v_i8m1x3 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei32_v_i8m1x4 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei32_v_i8m1x5 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei32_v_i8m1x6 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei32_v_i8m1x7 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei32_v_i8m1x8 (const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2x2_t __riscv_vloxseg2ei32_v_i8m2x2 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x3_t __riscv_vloxseg3ei32_v_i8m2x3 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x4_t __riscv_vloxseg4ei32_v_i8m2x4 (const int8_t *base, vuint32m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei32_v_i16mf4x2 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei32_v_i16mf4x3 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
/*
 * Unmasked vloxseg<NF>ei32 intrinsics (continued): indexed-ORDERED segment
 * loads of NF fields per element, using unsigned 32-bit byte-offset indices.
 *
 *   base   - scalar base address of the segments
 *   bindex - vector of 32-bit unsigned byte offsets (index EEW = 32, so the
 *            index operand's LMUL is data-LMUL * 32/SEW; e.g. i16mf4 pairs
 *            with vuint32mf2_t, u8m2 with vuint32m8_t, i64m1 with vuint32mf2_t)
 *   vl     - number of elements (segments) to load
 *
 * Return type v<T><LMUL>x<NF>_t is a tuple of NF vector registers, one per
 * field. NF * LMUL must not exceed 8, which is why high-LMUL data types only
 * offer the smaller NF variants (e.g. m4 data has only the x2 form).
 */
vint16mf4x4_t __riscv_vloxseg4ei32_v_i16mf4x4 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei32_v_i16mf4x5 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei32_v_i16mf4x6 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei32_v_i16mf4x7 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei32_v_i16mf4x8 (const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei32_v_i16mf2x2 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei32_v_i16mf2x3 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei32_v_i16mf2x4 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei32_v_i16mf2x5 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei32_v_i16mf2x6 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei32_v_i16mf2x7 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei32_v_i16mf2x8 (const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei32_v_i16m1x2 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei32_v_i16m1x3 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei32_v_i16m1x4 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei32_v_i16m1x5 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei32_v_i16m1x6 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei32_v_i16m1x7 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei32_v_i16m1x8 (const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei32_v_i16m2x2 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei32_v_i16m2x3 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei32_v_i16m2x4 (const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4x2_t __riscv_vloxseg2ei32_v_i16m4x2 (const int16_t *base, vuint32m8_t bindex, size_t vl);
/* int32 data: SEW matches index EEW, so index LMUL equals data LMUL. */
vint32mf2x2_t __riscv_vloxseg2ei32_v_i32mf2x2 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei32_v_i32mf2x3 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei32_v_i32mf2x4 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei32_v_i32mf2x5 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei32_v_i32mf2x6 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei32_v_i32mf2x7 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei32_v_i32mf2x8 (const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei32_v_i32m1x2 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei32_v_i32m1x3 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei32_v_i32m1x4 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei32_v_i32m1x5 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei32_v_i32m1x6 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei32_v_i32m1x7 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei32_v_i32m1x8 (const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei32_v_i32m2x2 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei32_v_i32m2x3 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei32_v_i32m2x4 (const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei32_v_i32m4x2 (const int32_t *base, vuint32m4_t bindex, size_t vl);
/* int64 data: index EEW (32) is half of SEW, so index LMUL is half the data LMUL. */
vint64m1x2_t __riscv_vloxseg2ei32_v_i64m1x2 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei32_v_i64m1x3 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei32_v_i64m1x4 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei32_v_i64m1x5 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei32_v_i64m1x6 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei32_v_i64m1x7 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei32_v_i64m1x8 (const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei32_v_i64m2x2 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei32_v_i64m2x3 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei32_v_i64m2x4 (const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei32_v_i64m4x2 (const int64_t *base, vuint32m2_t bindex, size_t vl);
/* Unsigned-element variants: identical shapes, unsigned data types. */
vuint8mf8x2_t __riscv_vloxseg2ei32_v_u8mf8x2 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei32_v_u8mf8x3 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei32_v_u8mf8x4 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei32_v_u8mf8x5 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei32_v_u8mf8x6 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei32_v_u8mf8x7 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei32_v_u8mf8x8 (const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei32_v_u8mf4x2 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei32_v_u8mf4x3 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei32_v_u8mf4x4 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei32_v_u8mf4x5 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei32_v_u8mf4x6 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei32_v_u8mf4x7 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei32_v_u8mf4x8 (const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei32_v_u8mf2x2 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei32_v_u8mf2x3 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei32_v_u8mf2x4 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei32_v_u8mf2x5 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei32_v_u8mf2x6 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei32_v_u8mf2x7 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei32_v_u8mf2x8 (const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei32_v_u8m1x2 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei32_v_u8m1x3 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei32_v_u8m1x4 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei32_v_u8m1x5 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei32_v_u8m1x6 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei32_v_u8m1x7 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei32_v_u8m1x8 (const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2x2_t __riscv_vloxseg2ei32_v_u8m2x2 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x3_t __riscv_vloxseg3ei32_v_u8m2x3 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x4_t __riscv_vloxseg4ei32_v_u8m2x4 (const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei32_v_u16mf4x2 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei32_v_u16mf4x3 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei32_v_u16mf4x4 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei32_v_u16mf4x5 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei32_v_u16mf4x6 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei32_v_u16mf4x7 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei32_v_u16mf4x8 (const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei32_v_u16mf2x2 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei32_v_u16mf2x3 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei32_v_u16mf2x4 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei32_v_u16mf2x5 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei32_v_u16mf2x6 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei32_v_u16mf2x7 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei32_v_u16mf2x8 (const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei32_v_u16m1x2 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei32_v_u16m1x3 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei32_v_u16m1x4 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei32_v_u16m1x5 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei32_v_u16m1x6 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei32_v_u16m1x7 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei32_v_u16m1x8 (const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei32_v_u16m2x2 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei32_v_u16m2x3 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei32_v_u16m2x4 (const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4x2_t __riscv_vloxseg2ei32_v_u16m4x2 (const uint16_t *base, vuint32m8_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei32_v_u32mf2x2 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei32_v_u32mf2x3 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei32_v_u32mf2x4 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei32_v_u32mf2x5 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei32_v_u32mf2x6 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei32_v_u32mf2x7 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei32_v_u32mf2x8 (const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei32_v_u32m1x2 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei32_v_u32m1x3 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei32_v_u32m1x4 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei32_v_u32m1x5 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei32_v_u32m1x6 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei32_v_u32m1x7 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei32_v_u32m1x8 (const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei32_v_u32m2x2 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei32_v_u32m2x3 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei32_v_u32m2x4 (const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei32_v_u32m4x2 (const uint32_t *base, vuint32m4_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei32_v_u64m1x2 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei32_v_u64m1x3 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei32_v_u64m1x4 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei32_v_u64m1x5 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei32_v_u64m1x6 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei32_v_u64m1x7 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei32_v_u64m1x8 (const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei32_v_u64m2x2 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei32_v_u64m2x3 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei32_v_u64m2x4 (const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei32_v_u64m4x2 (const uint64_t *base, vuint32m2_t bindex, size_t vl);
/*
 * Masked (_m suffix) vloxseg<NF>ei32 intrinsics: same indexed-ordered segment
 * loads as above, with a leading mask operand.
 *
 *   mask - vbool<N>_t where N = SEW / LMUL of the data type (e.g. f16m1 ->
 *          vbool16_t, i64m1 -> vbool64_t, u8m2 -> vbool4_t); only elements
 *          whose mask bit is set are loaded.
 *
 * NOTE(review): these are the policy-free forms, so the handling of
 * masked-off destination elements follows the intrinsic spec's default
 * (agnostic) behavior — not visible from the declarations alone.
 * The float16_t variants additionally require the zvfh/half-precision
 * extension support implied elsewhere in this document.
 */
vfloat16mf4x2_t __riscv_vloxseg2ei32_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei32_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei32_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei32_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei32_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei32_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei32_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint32mf2_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei32_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei32_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei32_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei32_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei32_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei32_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei32_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint32m1_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei32_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei32_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei32_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei32_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei32_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei32_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei32_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint32m2_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei32_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei32_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei32_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint32m4_t bindex, size_t vl);
vfloat16m4x2_t __riscv_vloxseg2ei32_v_f16m4x2_m (vbool4_t mask, const float16_t *base, vuint32m8_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei32_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei32_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei32_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei32_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei32_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei32_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei32_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint32mf2_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei32_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei32_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei32_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei32_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei32_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei32_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei32_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint32m1_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei32_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei32_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei32_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint32m2_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei32_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint32m4_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei32_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei32_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei32_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei32_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei32_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei32_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei32_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint32mf2_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei32_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei32_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei32_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint32m1_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei32_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint32m2_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei32_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei32_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei32_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei32_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei32_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei32_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei32_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint32mf2_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei32_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei32_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei32_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei32_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei32_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei32_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei32_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint32m1_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei32_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei32_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei32_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei32_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei32_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei32_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei32_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint32m2_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei32_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei32_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei32_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei32_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei32_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei32_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei32_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint32m4_t bindex, size_t vl);
vint8m2x2_t __riscv_vloxseg2ei32_v_i8m2x2_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x3_t __riscv_vloxseg3ei32_v_i8m2x3_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
vint8m2x4_t __riscv_vloxseg4ei32_v_i8m2x4_m (vbool4_t mask, const int8_t *base, vuint32m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei32_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei32_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x4_t __riscv_vloxseg4ei32_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei32_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei32_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei32_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei32_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint32mf2_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei32_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei32_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei32_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei32_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei32_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei32_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei32_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint32m1_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei32_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei32_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei32_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei32_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei32_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei32_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei32_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint32m2_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei32_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei32_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei32_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint32m4_t bindex, size_t vl);
vint16m4x2_t __riscv_vloxseg2ei32_v_i16m4x2_m (vbool4_t mask, const int16_t *base, vuint32m8_t bindex, size_t vl);
vint32mf2x2_t __riscv_vloxseg2ei32_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei32_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei32_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei32_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei32_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei32_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei32_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei32_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei32_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei32_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei32_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei32_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei32_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei32_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint32m1_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei32_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei32_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei32_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint32m2_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei32_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint32m4_t bindex, size_t vl);
vint64m1x2_t __riscv_vloxseg2ei32_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei32_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei32_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei32_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei32_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei32_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei32_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint32mf2_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei32_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei32_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei32_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint32m1_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei32_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vloxseg2ei32_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei32_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei32_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei32_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei32_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei32_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei32_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint32mf2_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei32_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei32_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei32_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei32_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei32_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei32_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei32_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint32m1_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei32_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei32_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei32_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei32_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei32_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei32_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei32_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint32m2_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei32_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei32_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei32_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei32_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei32_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei32_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei32_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint32m4_t bindex, size_t vl);
vuint8m2x2_t __riscv_vloxseg2ei32_v_u8m2x2_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x3_t __riscv_vloxseg3ei32_v_u8m2x3_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint8m2x4_t __riscv_vloxseg4ei32_v_u8m2x4_m (vbool4_t mask, const uint8_t *base, vuint32m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei32_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei32_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei32_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei32_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei32_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei32_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei32_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint32mf2_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei32_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei32_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei32_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei32_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei32_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei32_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei32_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint32m1_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei32_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei32_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei32_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei32_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei32_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei32_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei32_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint32m2_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei32_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei32_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei32_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint32m4_t bindex, size_t vl);
vuint16m4x2_t __riscv_vloxseg2ei32_v_u16m4x2_m (vbool4_t mask, const uint16_t *base, vuint32m8_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei32_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei32_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei32_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei32_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei32_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei32_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei32_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint32mf2_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei32_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei32_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei32_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei32_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei32_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei32_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei32_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint32m1_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei32_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei32_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei32_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint32m2_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei32_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint32m4_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei32_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei32_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei32_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei32_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei32_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei32_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei32_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint32mf2_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei32_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei32_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei32_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint32m1_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei32_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint32m2_t bindex, size_t vl);
8.4. vloxseg<nf>ei64.v
- Mnemonic
vloxseg2ei64.v vd, (rs1), vs2, vm # nf=2
vloxseg3ei64.v vd, (rs1), vs2, vm # nf=3
vloxseg4ei64.v vd, (rs1), vs2, vm # nf=4
vloxseg5ei64.v vd, (rs1), vs2, vm # nf=5
vloxseg6ei64.v vd, (rs1), vs2, vm # nf=6
vloxseg7ei64.v vd, (rs1), vs2, vm # nf=7
vloxseg8ei64.v vd, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment loads
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vloxei64_v.h
- Intrinsic Functions
Details
vfloat16mf4x2_t __riscv_vloxseg2ei64_v_f16mf4x2 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei64_v_f16mf4x3 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei64_v_f16mf4x4 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei64_v_f16mf4x5 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei64_v_f16mf4x6 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei64_v_f16mf4x7 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei64_v_f16mf4x8 (const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei64_v_f16mf2x2 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei64_v_f16mf2x3 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei64_v_f16mf2x4 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei64_v_f16mf2x5 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei64_v_f16mf2x6 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei64_v_f16mf2x7 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei64_v_f16mf2x8 (const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei64_v_f16m1x2 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei64_v_f16m1x3 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei64_v_f16m1x4 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei64_v_f16m1x5 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei64_v_f16m1x6 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei64_v_f16m1x7 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei64_v_f16m1x8 (const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei64_v_f16m2x2 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei64_v_f16m2x3 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei64_v_f16m2x4 (const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei64_v_f32mf2x2 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei64_v_f32mf2x3 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei64_v_f32mf2x4 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei64_v_f32mf2x5 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei64_v_f32mf2x6 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei64_v_f32mf2x7 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei64_v_f32mf2x8 (const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei64_v_f32m1x2 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei64_v_f32m1x3 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei64_v_f32m1x4 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei64_v_f32m1x5 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei64_v_f32m1x6 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei64_v_f32m1x7 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei64_v_f32m1x8 (const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei64_v_f32m2x2 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei64_v_f32m2x3 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei64_v_f32m2x4 (const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei64_v_f32m4x2 (const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei64_v_f64m1x2 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei64_v_f64m1x3 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei64_v_f64m1x4 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei64_v_f64m1x5 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei64_v_f64m1x6 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei64_v_f64m1x7 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei64_v_f64m1x8 (const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei64_v_f64m2x2 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei64_v_f64m2x3 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei64_v_f64m2x4 (const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei64_v_f64m4x2 (const float64_t *base, vuint64m4_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei64_v_i8mf8x2 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei64_v_i8mf8x3 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei64_v_i8mf8x4 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei64_v_i8mf8x5 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei64_v_i8mf8x6 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei64_v_i8mf8x7 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei64_v_i8mf8x8 (const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei64_v_i8mf4x2 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei64_v_i8mf4x3 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei64_v_i8mf4x4 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei64_v_i8mf4x5 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei64_v_i8mf4x6 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei64_v_i8mf4x7 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei64_v_i8mf4x8 (const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei64_v_i8mf2x2 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei64_v_i8mf2x3 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei64_v_i8mf2x4 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei64_v_i8mf2x5 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei64_v_i8mf2x6 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei64_v_i8mf2x7 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei64_v_i8mf2x8 (const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei64_v_i8m1x2 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei64_v_i8m1x3 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei64_v_i8m1x4 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei64_v_i8m1x5 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei64_v_i8m1x6 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei64_v_i8m1x7 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei64_v_i8m1x8 (const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei64_v_i16mf4x2 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei64_v_i16mf4x3 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x4_t __riscv_vloxseg4ei64_v_i16mf4x4 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei64_v_i16mf4x5 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei64_v_i16mf4x6 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei64_v_i16mf4x7 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei64_v_i16mf4x8 (const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei64_v_i16mf2x2 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei64_v_i16mf2x3 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei64_v_i16mf2x4 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei64_v_i16mf2x5 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei64_v_i16mf2x6 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei64_v_i16mf2x7 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei64_v_i16mf2x8 (const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei64_v_i16m1x2 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei64_v_i16m1x3 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei64_v_i16m1x4 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei64_v_i16m1x5 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei64_v_i16m1x6 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei64_v_i16m1x7 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei64_v_i16m1x8 (const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei64_v_i16m2x2 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei64_v_i16m2x3 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei64_v_i16m2x4 (const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2x2_t __riscv_vloxseg2ei64_v_i32mf2x2 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei64_v_i32mf2x3 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei64_v_i32mf2x4 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei64_v_i32mf2x5 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei64_v_i32mf2x6 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei64_v_i32mf2x7 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei64_v_i32mf2x8 (const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei64_v_i32m1x2 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei64_v_i32m1x3 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei64_v_i32m1x4 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei64_v_i32m1x5 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei64_v_i32m1x6 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei64_v_i32m1x7 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei64_v_i32m1x8 (const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei64_v_i32m2x2 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei64_v_i32m2x3 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei64_v_i32m2x4 (const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei64_v_i32m4x2 (const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1x2_t __riscv_vloxseg2ei64_v_i64m1x2 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei64_v_i64m1x3 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei64_v_i64m1x4 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei64_v_i64m1x5 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei64_v_i64m1x6 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei64_v_i64m1x7 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei64_v_i64m1x8 (const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei64_v_i64m2x2 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei64_v_i64m2x3 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei64_v_i64m2x4 (const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei64_v_i64m4x2 (const int64_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vloxseg2ei64_v_u8mf8x2 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei64_v_u8mf8x3 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei64_v_u8mf8x4 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei64_v_u8mf8x5 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei64_v_u8mf8x6 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei64_v_u8mf8x7 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei64_v_u8mf8x8 (const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei64_v_u8mf4x2 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei64_v_u8mf4x3 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei64_v_u8mf4x4 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei64_v_u8mf4x5 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei64_v_u8mf4x6 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei64_v_u8mf4x7 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei64_v_u8mf4x8 (const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei64_v_u8mf2x2 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei64_v_u8mf2x3 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei64_v_u8mf2x4 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei64_v_u8mf2x5 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei64_v_u8mf2x6 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei64_v_u8mf2x7 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei64_v_u8mf2x8 (const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei64_v_u8m1x2 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei64_v_u8m1x3 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei64_v_u8m1x4 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei64_v_u8m1x5 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei64_v_u8m1x6 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei64_v_u8m1x7 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei64_v_u8m1x8 (const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei64_v_u16mf4x2 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei64_v_u16mf4x3 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei64_v_u16mf4x4 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei64_v_u16mf4x5 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei64_v_u16mf4x6 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei64_v_u16mf4x7 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei64_v_u16mf4x8 (const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei64_v_u16mf2x2 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei64_v_u16mf2x3 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei64_v_u16mf2x4 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei64_v_u16mf2x5 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei64_v_u16mf2x6 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei64_v_u16mf2x7 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei64_v_u16mf2x8 (const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei64_v_u16m1x2 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei64_v_u16m1x3 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei64_v_u16m1x4 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei64_v_u16m1x5 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei64_v_u16m1x6 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei64_v_u16m1x7 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei64_v_u16m1x8 (const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei64_v_u16m2x2 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei64_v_u16m2x3 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei64_v_u16m2x4 (const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei64_v_u32mf2x2 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei64_v_u32mf2x3 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei64_v_u32mf2x4 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei64_v_u32mf2x5 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei64_v_u32mf2x6 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei64_v_u32mf2x7 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei64_v_u32mf2x8 (const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei64_v_u32m1x2 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei64_v_u32m1x3 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei64_v_u32m1x4 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei64_v_u32m1x5 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei64_v_u32m1x6 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei64_v_u32m1x7 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei64_v_u32m1x8 (const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei64_v_u32m2x2 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei64_v_u32m2x3 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei64_v_u32m2x4 (const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei64_v_u32m4x2 (const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei64_v_u64m1x2 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei64_v_u64m1x3 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei64_v_u64m1x4 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei64_v_u64m1x5 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei64_v_u64m1x6 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei64_v_u64m1x7 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei64_v_u64m1x8 (const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei64_v_u64m2x2 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei64_v_u64m2x3 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei64_v_u64m2x4 (const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei64_v_u64m4x2 (const uint64_t *base, vuint64m4_t bindex, size_t vl);
vfloat16mf4x2_t __riscv_vloxseg2ei64_v_f16mf4x2_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x3_t __riscv_vloxseg3ei64_v_f16mf4x3_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x4_t __riscv_vloxseg4ei64_v_f16mf4x4_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x5_t __riscv_vloxseg5ei64_v_f16mf4x5_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x6_t __riscv_vloxseg6ei64_v_f16mf4x6_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x7_t __riscv_vloxseg7ei64_v_f16mf4x7_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf4x8_t __riscv_vloxseg8ei64_v_f16mf4x8_m (vbool64_t mask, const float16_t *base, vuint64m1_t bindex, size_t vl);
vfloat16mf2x2_t __riscv_vloxseg2ei64_v_f16mf2x2_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x3_t __riscv_vloxseg3ei64_v_f16mf2x3_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x4_t __riscv_vloxseg4ei64_v_f16mf2x4_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x5_t __riscv_vloxseg5ei64_v_f16mf2x5_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x6_t __riscv_vloxseg6ei64_v_f16mf2x6_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x7_t __riscv_vloxseg7ei64_v_f16mf2x7_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16mf2x8_t __riscv_vloxseg8ei64_v_f16mf2x8_m (vbool32_t mask, const float16_t *base, vuint64m2_t bindex, size_t vl);
vfloat16m1x2_t __riscv_vloxseg2ei64_v_f16m1x2_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x3_t __riscv_vloxseg3ei64_v_f16m1x3_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x4_t __riscv_vloxseg4ei64_v_f16m1x4_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x5_t __riscv_vloxseg5ei64_v_f16m1x5_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x6_t __riscv_vloxseg6ei64_v_f16m1x6_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x7_t __riscv_vloxseg7ei64_v_f16m1x7_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m1x8_t __riscv_vloxseg8ei64_v_f16m1x8_m (vbool16_t mask, const float16_t *base, vuint64m4_t bindex, size_t vl);
vfloat16m2x2_t __riscv_vloxseg2ei64_v_f16m2x2_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x3_t __riscv_vloxseg3ei64_v_f16m2x3_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat16m2x4_t __riscv_vloxseg4ei64_v_f16m2x4_m (vbool8_t mask, const float16_t *base, vuint64m8_t bindex, size_t vl);
vfloat32mf2x2_t __riscv_vloxseg2ei64_v_f32mf2x2_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x3_t __riscv_vloxseg3ei64_v_f32mf2x3_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x4_t __riscv_vloxseg4ei64_v_f32mf2x4_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x5_t __riscv_vloxseg5ei64_v_f32mf2x5_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x6_t __riscv_vloxseg6ei64_v_f32mf2x6_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x7_t __riscv_vloxseg7ei64_v_f32mf2x7_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32mf2x8_t __riscv_vloxseg8ei64_v_f32mf2x8_m (vbool64_t mask, const float32_t *base, vuint64m1_t bindex, size_t vl);
vfloat32m1x2_t __riscv_vloxseg2ei64_v_f32m1x2_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x3_t __riscv_vloxseg3ei64_v_f32m1x3_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x4_t __riscv_vloxseg4ei64_v_f32m1x4_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x5_t __riscv_vloxseg5ei64_v_f32m1x5_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x6_t __riscv_vloxseg6ei64_v_f32m1x6_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x7_t __riscv_vloxseg7ei64_v_f32m1x7_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m1x8_t __riscv_vloxseg8ei64_v_f32m1x8_m (vbool32_t mask, const float32_t *base, vuint64m2_t bindex, size_t vl);
vfloat32m2x2_t __riscv_vloxseg2ei64_v_f32m2x2_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x3_t __riscv_vloxseg3ei64_v_f32m2x3_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m2x4_t __riscv_vloxseg4ei64_v_f32m2x4_m (vbool16_t mask, const float32_t *base, vuint64m4_t bindex, size_t vl);
vfloat32m4x2_t __riscv_vloxseg2ei64_v_f32m4x2_m (vbool8_t mask, const float32_t *base, vuint64m8_t bindex, size_t vl);
vfloat64m1x2_t __riscv_vloxseg2ei64_v_f64m1x2_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x3_t __riscv_vloxseg3ei64_v_f64m1x3_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x4_t __riscv_vloxseg4ei64_v_f64m1x4_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x5_t __riscv_vloxseg5ei64_v_f64m1x5_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x6_t __riscv_vloxseg6ei64_v_f64m1x6_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x7_t __riscv_vloxseg7ei64_v_f64m1x7_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m1x8_t __riscv_vloxseg8ei64_v_f64m1x8_m (vbool64_t mask, const float64_t *base, vuint64m1_t bindex, size_t vl);
vfloat64m2x2_t __riscv_vloxseg2ei64_v_f64m2x2_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x3_t __riscv_vloxseg3ei64_v_f64m2x3_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m2x4_t __riscv_vloxseg4ei64_v_f64m2x4_m (vbool32_t mask, const float64_t *base, vuint64m2_t bindex, size_t vl);
vfloat64m4x2_t __riscv_vloxseg2ei64_v_f64m4x2_m (vbool16_t mask, const float64_t *base, vuint64m4_t bindex, size_t vl);
vint8mf8x2_t __riscv_vloxseg2ei64_v_i8mf8x2_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x3_t __riscv_vloxseg3ei64_v_i8mf8x3_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x4_t __riscv_vloxseg4ei64_v_i8mf8x4_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x5_t __riscv_vloxseg5ei64_v_i8mf8x5_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x6_t __riscv_vloxseg6ei64_v_i8mf8x6_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x7_t __riscv_vloxseg7ei64_v_i8mf8x7_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf8x8_t __riscv_vloxseg8ei64_v_i8mf8x8_m (vbool64_t mask, const int8_t *base, vuint64m1_t bindex, size_t vl);
vint8mf4x2_t __riscv_vloxseg2ei64_v_i8mf4x2_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x3_t __riscv_vloxseg3ei64_v_i8mf4x3_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x4_t __riscv_vloxseg4ei64_v_i8mf4x4_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x5_t __riscv_vloxseg5ei64_v_i8mf4x5_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x6_t __riscv_vloxseg6ei64_v_i8mf4x6_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x7_t __riscv_vloxseg7ei64_v_i8mf4x7_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf4x8_t __riscv_vloxseg8ei64_v_i8mf4x8_m (vbool32_t mask, const int8_t *base, vuint64m2_t bindex, size_t vl);
vint8mf2x2_t __riscv_vloxseg2ei64_v_i8mf2x2_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x3_t __riscv_vloxseg3ei64_v_i8mf2x3_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x4_t __riscv_vloxseg4ei64_v_i8mf2x4_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x5_t __riscv_vloxseg5ei64_v_i8mf2x5_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x6_t __riscv_vloxseg6ei64_v_i8mf2x6_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x7_t __riscv_vloxseg7ei64_v_i8mf2x7_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8mf2x8_t __riscv_vloxseg8ei64_v_i8mf2x8_m (vbool16_t mask, const int8_t *base, vuint64m4_t bindex, size_t vl);
vint8m1x2_t __riscv_vloxseg2ei64_v_i8m1x2_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x3_t __riscv_vloxseg3ei64_v_i8m1x3_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x4_t __riscv_vloxseg4ei64_v_i8m1x4_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x5_t __riscv_vloxseg5ei64_v_i8m1x5_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x6_t __riscv_vloxseg6ei64_v_i8m1x6_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x7_t __riscv_vloxseg7ei64_v_i8m1x7_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint8m1x8_t __riscv_vloxseg8ei64_v_i8m1x8_m (vbool8_t mask, const int8_t *base, vuint64m8_t bindex, size_t vl);
vint16mf4x2_t __riscv_vloxseg2ei64_v_i16mf4x2_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x3_t __riscv_vloxseg3ei64_v_i16mf4x3_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x4_t __riscv_vloxseg4ei64_v_i16mf4x4_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x5_t __riscv_vloxseg5ei64_v_i16mf4x5_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x6_t __riscv_vloxseg6ei64_v_i16mf4x6_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x7_t __riscv_vloxseg7ei64_v_i16mf4x7_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf4x8_t __riscv_vloxseg8ei64_v_i16mf4x8_m (vbool64_t mask, const int16_t *base, vuint64m1_t bindex, size_t vl);
vint16mf2x2_t __riscv_vloxseg2ei64_v_i16mf2x2_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x3_t __riscv_vloxseg3ei64_v_i16mf2x3_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x4_t __riscv_vloxseg4ei64_v_i16mf2x4_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x5_t __riscv_vloxseg5ei64_v_i16mf2x5_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x6_t __riscv_vloxseg6ei64_v_i16mf2x6_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x7_t __riscv_vloxseg7ei64_v_i16mf2x7_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16mf2x8_t __riscv_vloxseg8ei64_v_i16mf2x8_m (vbool32_t mask, const int16_t *base, vuint64m2_t bindex, size_t vl);
vint16m1x2_t __riscv_vloxseg2ei64_v_i16m1x2_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x3_t __riscv_vloxseg3ei64_v_i16m1x3_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x4_t __riscv_vloxseg4ei64_v_i16m1x4_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x5_t __riscv_vloxseg5ei64_v_i16m1x5_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x6_t __riscv_vloxseg6ei64_v_i16m1x6_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x7_t __riscv_vloxseg7ei64_v_i16m1x7_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m1x8_t __riscv_vloxseg8ei64_v_i16m1x8_m (vbool16_t mask, const int16_t *base, vuint64m4_t bindex, size_t vl);
vint16m2x2_t __riscv_vloxseg2ei64_v_i16m2x2_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x3_t __riscv_vloxseg3ei64_v_i16m2x3_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint16m2x4_t __riscv_vloxseg4ei64_v_i16m2x4_m (vbool8_t mask, const int16_t *base, vuint64m8_t bindex, size_t vl);
vint32mf2x2_t __riscv_vloxseg2ei64_v_i32mf2x2_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x3_t __riscv_vloxseg3ei64_v_i32mf2x3_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x4_t __riscv_vloxseg4ei64_v_i32mf2x4_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x5_t __riscv_vloxseg5ei64_v_i32mf2x5_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x6_t __riscv_vloxseg6ei64_v_i32mf2x6_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x7_t __riscv_vloxseg7ei64_v_i32mf2x7_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32mf2x8_t __riscv_vloxseg8ei64_v_i32mf2x8_m (vbool64_t mask, const int32_t *base, vuint64m1_t bindex, size_t vl);
vint32m1x2_t __riscv_vloxseg2ei64_v_i32m1x2_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x3_t __riscv_vloxseg3ei64_v_i32m1x3_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x4_t __riscv_vloxseg4ei64_v_i32m1x4_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x5_t __riscv_vloxseg5ei64_v_i32m1x5_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x6_t __riscv_vloxseg6ei64_v_i32m1x6_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x7_t __riscv_vloxseg7ei64_v_i32m1x7_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m1x8_t __riscv_vloxseg8ei64_v_i32m1x8_m (vbool32_t mask, const int32_t *base, vuint64m2_t bindex, size_t vl);
vint32m2x2_t __riscv_vloxseg2ei64_v_i32m2x2_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x3_t __riscv_vloxseg3ei64_v_i32m2x3_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m2x4_t __riscv_vloxseg4ei64_v_i32m2x4_m (vbool16_t mask, const int32_t *base, vuint64m4_t bindex, size_t vl);
vint32m4x2_t __riscv_vloxseg2ei64_v_i32m4x2_m (vbool8_t mask, const int32_t *base, vuint64m8_t bindex, size_t vl);
vint64m1x2_t __riscv_vloxseg2ei64_v_i64m1x2_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x3_t __riscv_vloxseg3ei64_v_i64m1x3_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x4_t __riscv_vloxseg4ei64_v_i64m1x4_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x5_t __riscv_vloxseg5ei64_v_i64m1x5_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x6_t __riscv_vloxseg6ei64_v_i64m1x6_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x7_t __riscv_vloxseg7ei64_v_i64m1x7_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m1x8_t __riscv_vloxseg8ei64_v_i64m1x8_m (vbool64_t mask, const int64_t *base, vuint64m1_t bindex, size_t vl);
vint64m2x2_t __riscv_vloxseg2ei64_v_i64m2x2_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x3_t __riscv_vloxseg3ei64_v_i64m2x3_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m2x4_t __riscv_vloxseg4ei64_v_i64m2x4_m (vbool32_t mask, const int64_t *base, vuint64m2_t bindex, size_t vl);
vint64m4x2_t __riscv_vloxseg2ei64_v_i64m4x2_m (vbool16_t mask, const int64_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf8x2_t __riscv_vloxseg2ei64_v_u8mf8x2_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x3_t __riscv_vloxseg3ei64_v_u8mf8x3_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x4_t __riscv_vloxseg4ei64_v_u8mf8x4_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x5_t __riscv_vloxseg5ei64_v_u8mf8x5_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x6_t __riscv_vloxseg6ei64_v_u8mf8x6_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x7_t __riscv_vloxseg7ei64_v_u8mf8x7_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf8x8_t __riscv_vloxseg8ei64_v_u8mf8x8_m (vbool64_t mask, const uint8_t *base, vuint64m1_t bindex, size_t vl);
vuint8mf4x2_t __riscv_vloxseg2ei64_v_u8mf4x2_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x3_t __riscv_vloxseg3ei64_v_u8mf4x3_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x4_t __riscv_vloxseg4ei64_v_u8mf4x4_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x5_t __riscv_vloxseg5ei64_v_u8mf4x5_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x6_t __riscv_vloxseg6ei64_v_u8mf4x6_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x7_t __riscv_vloxseg7ei64_v_u8mf4x7_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf4x8_t __riscv_vloxseg8ei64_v_u8mf4x8_m (vbool32_t mask, const uint8_t *base, vuint64m2_t bindex, size_t vl);
vuint8mf2x2_t __riscv_vloxseg2ei64_v_u8mf2x2_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x3_t __riscv_vloxseg3ei64_v_u8mf2x3_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x4_t __riscv_vloxseg4ei64_v_u8mf2x4_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x5_t __riscv_vloxseg5ei64_v_u8mf2x5_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x6_t __riscv_vloxseg6ei64_v_u8mf2x6_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x7_t __riscv_vloxseg7ei64_v_u8mf2x7_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8mf2x8_t __riscv_vloxseg8ei64_v_u8mf2x8_m (vbool16_t mask, const uint8_t *base, vuint64m4_t bindex, size_t vl);
vuint8m1x2_t __riscv_vloxseg2ei64_v_u8m1x2_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x3_t __riscv_vloxseg3ei64_v_u8m1x3_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x4_t __riscv_vloxseg4ei64_v_u8m1x4_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x5_t __riscv_vloxseg5ei64_v_u8m1x5_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x6_t __riscv_vloxseg6ei64_v_u8m1x6_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x7_t __riscv_vloxseg7ei64_v_u8m1x7_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint8m1x8_t __riscv_vloxseg8ei64_v_u8m1x8_m (vbool8_t mask, const uint8_t *base, vuint64m8_t bindex, size_t vl);
vuint16mf4x2_t __riscv_vloxseg2ei64_v_u16mf4x2_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x3_t __riscv_vloxseg3ei64_v_u16mf4x3_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x4_t __riscv_vloxseg4ei64_v_u16mf4x4_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x5_t __riscv_vloxseg5ei64_v_u16mf4x5_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x6_t __riscv_vloxseg6ei64_v_u16mf4x6_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x7_t __riscv_vloxseg7ei64_v_u16mf4x7_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf4x8_t __riscv_vloxseg8ei64_v_u16mf4x8_m (vbool64_t mask, const uint16_t *base, vuint64m1_t bindex, size_t vl);
vuint16mf2x2_t __riscv_vloxseg2ei64_v_u16mf2x2_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x3_t __riscv_vloxseg3ei64_v_u16mf2x3_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x4_t __riscv_vloxseg4ei64_v_u16mf2x4_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x5_t __riscv_vloxseg5ei64_v_u16mf2x5_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x6_t __riscv_vloxseg6ei64_v_u16mf2x6_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x7_t __riscv_vloxseg7ei64_v_u16mf2x7_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16mf2x8_t __riscv_vloxseg8ei64_v_u16mf2x8_m (vbool32_t mask, const uint16_t *base, vuint64m2_t bindex, size_t vl);
vuint16m1x2_t __riscv_vloxseg2ei64_v_u16m1x2_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x3_t __riscv_vloxseg3ei64_v_u16m1x3_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x4_t __riscv_vloxseg4ei64_v_u16m1x4_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x5_t __riscv_vloxseg5ei64_v_u16m1x5_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x6_t __riscv_vloxseg6ei64_v_u16m1x6_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x7_t __riscv_vloxseg7ei64_v_u16m1x7_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m1x8_t __riscv_vloxseg8ei64_v_u16m1x8_m (vbool16_t mask, const uint16_t *base, vuint64m4_t bindex, size_t vl);
vuint16m2x2_t __riscv_vloxseg2ei64_v_u16m2x2_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x3_t __riscv_vloxseg3ei64_v_u16m2x3_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint16m2x4_t __riscv_vloxseg4ei64_v_u16m2x4_m (vbool8_t mask, const uint16_t *base, vuint64m8_t bindex, size_t vl);
vuint32mf2x2_t __riscv_vloxseg2ei64_v_u32mf2x2_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x3_t __riscv_vloxseg3ei64_v_u32mf2x3_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x4_t __riscv_vloxseg4ei64_v_u32mf2x4_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x5_t __riscv_vloxseg5ei64_v_u32mf2x5_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x6_t __riscv_vloxseg6ei64_v_u32mf2x6_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x7_t __riscv_vloxseg7ei64_v_u32mf2x7_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32mf2x8_t __riscv_vloxseg8ei64_v_u32mf2x8_m (vbool64_t mask, const uint32_t *base, vuint64m1_t bindex, size_t vl);
vuint32m1x2_t __riscv_vloxseg2ei64_v_u32m1x2_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x3_t __riscv_vloxseg3ei64_v_u32m1x3_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x4_t __riscv_vloxseg4ei64_v_u32m1x4_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x5_t __riscv_vloxseg5ei64_v_u32m1x5_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x6_t __riscv_vloxseg6ei64_v_u32m1x6_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x7_t __riscv_vloxseg7ei64_v_u32m1x7_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m1x8_t __riscv_vloxseg8ei64_v_u32m1x8_m (vbool32_t mask, const uint32_t *base, vuint64m2_t bindex, size_t vl);
vuint32m2x2_t __riscv_vloxseg2ei64_v_u32m2x2_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x3_t __riscv_vloxseg3ei64_v_u32m2x3_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m2x4_t __riscv_vloxseg4ei64_v_u32m2x4_m (vbool16_t mask, const uint32_t *base, vuint64m4_t bindex, size_t vl);
vuint32m4x2_t __riscv_vloxseg2ei64_v_u32m4x2_m (vbool8_t mask, const uint32_t *base, vuint64m8_t bindex, size_t vl);
vuint64m1x2_t __riscv_vloxseg2ei64_v_u64m1x2_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x3_t __riscv_vloxseg3ei64_v_u64m1x3_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x4_t __riscv_vloxseg4ei64_v_u64m1x4_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x5_t __riscv_vloxseg5ei64_v_u64m1x5_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x6_t __riscv_vloxseg6ei64_v_u64m1x6_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x7_t __riscv_vloxseg7ei64_v_u64m1x7_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m1x8_t __riscv_vloxseg8ei64_v_u64m1x8_m (vbool64_t mask, const uint64_t *base, vuint64m1_t bindex, size_t vl);
vuint64m2x2_t __riscv_vloxseg2ei64_v_u64m2x2_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x3_t __riscv_vloxseg3ei64_v_u64m2x3_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m2x4_t __riscv_vloxseg4ei64_v_u64m2x4_m (vbool32_t mask, const uint64_t *base, vuint64m2_t bindex, size_t vl);
vuint64m4x2_t __riscv_vloxseg2ei64_v_u64m4x2_m (vbool16_t mask, const uint64_t *base, vuint64m4_t bindex, size_t vl);
8.5. vsuxseg<nf>ei64.v
- Mnemonic
vsuxseg2ei64.v vs3, (rs1), vs2, vm # nf=2
vsuxseg3ei64.v vs3, (rs1), vs2, vm # nf=3
vsuxseg4ei64.v vs3, (rs1), vs2, vm # nf=4
vsuxseg5ei64.v vs3, (rs1), vs2, vm # nf=5
vsuxseg6ei64.v vs3, (rs1), vs2, vm # nf=6
vsuxseg7ei64.v vs3, (rs1), vs2, vm # nf=7
vsuxseg8ei64.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment stores, 64-bit indices
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei64_v.h
- Intrinsic Functions
Details
void __riscv_vsuxseg2ei64_v_f16mf4x2 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf4x3 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf4x4 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf4x5 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf4x6 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf4x7 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf4x8 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16mf2x2 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf2x3 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf2x4 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf2x5 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf2x6 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf2x7 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf2x8 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m1x2 (float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m1x3 (float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m1x4 (float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16m1x5 (float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16m1x6 (float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16m1x7 (float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16m1x8 (float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m2x2 (float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m2x3 (float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m2x4 (float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32mf2x2 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32mf2x3 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32mf2x4 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32mf2x5 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32mf2x6 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32mf2x7 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32mf2x8 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m1x2 (float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m1x3 (float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m1x4 (float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32m1x5 (float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32m1x6 (float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32m1x7 (float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32m1x8 (float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m2x2 (float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m2x3 (float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m2x4 (float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m4x2 (float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m1x2 (float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m1x3 (float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m1x4 (float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f64m1x5 (float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f64m1x6 (float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f64m1x7 (float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f64m1x8 (float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m2x2 (float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m2x3 (float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m2x4 (float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m4x2 (float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf8x2 (int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf8x3 (int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf8x4 (int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf8x5 (int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf8x6 (int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf8x7 (int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf8x8 (int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf4x2 (int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf4x3 (int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf4x4 (int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf4x5 (int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf4x6 (int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf4x7 (int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf4x8 (int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf2x2 (int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf2x3 (int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf2x4 (int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf2x5 (int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf2x6 (int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf2x7 (int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf2x8 (int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8m1x2 (int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8m1x3 (int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl);
/*
 * vsuxseg<N>ei64 — unordered indexed segment store, 64-bit index EEW,
 * unmasked, signed-integer element types.
 *
 * Each call stores N fields per segment (N = 2..8, encoded in both the
 * intrinsic name and the xN tuple type of v_tuple) to memory at
 * base + bindex[i] for each of the first vl elements.
 *
 * The index vector's LMUL scales with (64 / data SEW) x data LMUL, which
 * is visible in the signatures below: e.g. i8m1 data pairs with a
 * vuint64m8_t index, i16mf4 with vuint64m1_t, i64m1 with vuint64m1_t.
 * Combinations whose index LMUL would exceed m8 (e.g. i8m2 with ei64)
 * do not exist, which is why each SEW group stops early.
 */
void __riscv_vsuxseg4ei64_v_i8m1x4 (int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8m1x5 (int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8m1x6 (int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8m1x7 (int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8m1x8 (int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl);
/* int16_t element stores: index LMUL = 4 x data LMUL. */
void __riscv_vsuxseg2ei64_v_i16mf4x2 (int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf4x3 (int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf4x4 (int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf4x5 (int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf4x6 (int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf4x7 (int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf4x8 (int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16mf2x2 (int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf2x3 (int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf2x4 (int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf2x5 (int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf2x6 (int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf2x7 (int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf2x8 (int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16m1x2 (int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m1x3 (int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m1x4 (int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16m1x5 (int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16m1x6 (int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16m1x7 (int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16m1x8 (int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16m2x2 (int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m2x3 (int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m2x4 (int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl);
/* int32_t element stores: index LMUL = 2 x data LMUL. */
void __riscv_vsuxseg2ei64_v_i32mf2x2 (int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32mf2x3 (int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32mf2x4 (int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32mf2x5 (int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32mf2x6 (int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32mf2x7 (int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32mf2x8 (int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m1x2 (int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m1x3 (int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m1x4 (int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32m1x5 (int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32m1x6 (int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32m1x7 (int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32m1x8 (int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m2x2 (int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m2x3 (int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m2x4 (int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m4x2 (int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl);
/* int64_t element stores: index LMUL = data LMUL. */
void __riscv_vsuxseg2ei64_v_i64m1x2 (int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m1x3 (int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m1x4 (int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i64m1x5 (int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i64m1x6 (int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i64m1x7 (int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i64m1x8 (int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m2x2 (int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m2x3 (int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m2x4 (int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m4x2 (int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl);
/*
 * vsuxseg<N>ei64 — unordered indexed segment store, 64-bit index EEW,
 * unmasked, unsigned-integer element types.
 *
 * Signatures mirror the signed-integer group exactly, substituting
 * uintN_t pointers and vuintNxM tuple types. The same index-LMUL
 * scaling is visible: u8mf8 pairs with vuint64m1_t, u8m1 with
 * vuint64m8_t, u64m1 with vuint64m1_t, etc.
 */
void __riscv_vsuxseg2ei64_v_u8mf8x2 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf8x3 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf8x4 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf8x5 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf8x6 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf8x7 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf8x8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf4x2 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf4x3 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf4x4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf4x5 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf4x6 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf4x7 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf4x8 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf2x2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf2x3 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf2x4 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf2x5 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf2x6 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf2x7 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf2x8 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8m1x2 (uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8m1x3 (uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8m1x4 (uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8m1x5 (uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8m1x6 (uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8m1x7 (uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8m1x8 (uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16mf4x2 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf4x3 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf4x4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf4x5 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf4x6 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf4x7 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf4x8 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16mf2x2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf2x3 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf2x4 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf2x5 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf2x6 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf2x7 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf2x8 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m1x2 (uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m1x3 (uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m1x4 (uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16m1x5 (uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16m1x6 (uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16m1x7 (uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16m1x8 (uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m2x2 (uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m2x3 (uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m2x4 (uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32mf2x2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32mf2x3 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32mf2x4 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32mf2x5 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32mf2x6 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32mf2x7 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32mf2x8 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m1x2 (uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m1x3 (uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m1x4 (uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32m1x5 (uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32m1x6 (uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32m1x7 (uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32m1x8 (uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m2x2 (uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m2x3 (uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m2x4 (uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m4x2 (uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m1x2 (uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m1x3 (uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m1x4 (uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u64m1x5 (uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u64m1x6 (uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u64m1x7 (uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u64m1x8 (uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m2x2 (uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m2x3 (uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m2x4 (uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m4x2 (uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl);
/*
 * vsuxseg<N>ei64_..._m — masked variants, floating-point element types.
 *
 * Identical to the unmasked forms except for a leading vboolN_t mask
 * operand; only elements whose mask bit is set are stored. The mask
 * ratio N matches SEW/LMUL of the data type, as the signatures show
 * (f16mf4 -> vbool64_t, f16m1 -> vbool16_t, f32m4 -> vbool8_t, ...).
 * NOTE(review): float16_t/float32_t/float64_t are the element typedefs
 * used throughout this listing; in compilable code these correspond to
 * _Float16/float/double — confirm against the toolchain's headers.
 */
void __riscv_vsuxseg2ei64_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
/*
 * vsuxseg<N>ei64_..._m — masked variants, signed-integer element types.
 *
 * Same operand layout as the masked float group: vboolN_t mask first,
 * then base pointer, 64-bit index vector, data tuple, and vl. The mask
 * ratio tracks the data type's SEW/LMUL (i8mf8 -> vbool64_t,
 * i8m1 -> vbool8_t, i64m4 -> vbool16_t, ...).
 */
void __riscv_vsuxseg2ei64_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl);
/*
 * vsuxseg<N>ei64_..._m — masked variants, unsigned-integer element types.
 *
 * Operand layout matches the masked signed-integer group: vboolN_t mask,
 * base pointer, 64-bit index vector, data tuple, vl. (This listing
 * continues beyond u16mf4 in the surrounding document.)
 */
void __riscv_vsuxseg2ei64_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl);
8.6. vsuxseg<nf>ei16.v
- Mnemonic
vsuxseg2ei16.v vs3, (rs1), vs2, vm # nf=2
vsuxseg3ei16.v vs3, (rs1), vs2, vm # nf=3
vsuxseg4ei16.v vs3, (rs1), vs2, vm # nf=4
vsuxseg5ei16.v vs3, (rs1), vs2, vm # nf=5
vsuxseg6ei16.v vs3, (rs1), vs2, vm # nf=6
vsuxseg7ei16.v vs3, (rs1), vs2, vm # nf=7
vsuxseg8ei16.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment stores, 16-bit indexed: each segment of nf fields is stored at a byte offset taken from the corresponding element of the 16-bit index vector vs2.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei16_v.h
- Intrinsic Functions
Details
void __riscv_vsuxseg2ei16_v_f16mf4x2 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16mf4x3 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16mf4x4 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f16mf4x5 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f16mf4x6 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f16mf4x7 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f16mf4x8 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16mf2x2 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16mf2x3 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16mf2x4 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f16mf2x5 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f16mf2x6 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f16mf2x7 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f16mf2x8 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16m1x2 (float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16m1x3 (float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16m1x4 (float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f16m1x5 (float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f16m1x6 (float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f16m1x7 (float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f16m1x8 (float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16m2x2 (float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16m2x3 (float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16m2x4 (float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16m4x2 (float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32mf2x2 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f32mf2x3 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f32mf2x4 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f32mf2x5 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f32mf2x6 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f32mf2x7 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f32mf2x8 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32m1x2 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f32m1x3 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f32m1x4 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f32m1x5 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f32m1x6 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f32m1x7 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f32m1x8 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32m2x2 (float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f32m2x3 (float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f32m2x4 (float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32m4x2 (float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f64m1x2 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f64m1x3 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f64m1x4 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f64m1x5 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f64m1x6 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f64m1x7 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f64m1x8 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f64m2x2 (float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f64m2x3 (float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f64m2x4 (float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f64m4x2 (float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8mf8x2 (int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8mf8x3 (int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8mf8x4 (int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8mf8x5 (int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8mf8x6 (int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8mf8x7 (int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8mf8x8 (int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8mf4x2 (int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8mf4x3 (int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8mf4x4 (int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8mf4x5 (int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8mf4x6 (int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8mf4x7 (int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8mf4x8 (int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8mf2x2 (int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8mf2x3 (int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8mf2x4 (int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8mf2x5 (int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8mf2x6 (int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8mf2x7 (int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8mf2x8 (int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8m1x2 (int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8m1x3 (int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8m1x4 (int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8m1x5 (int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8m1x6 (int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8m1x7 (int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8m1x8 (int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8m2x2 (int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8m2x3 (int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8m2x4 (int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8m4x2 (int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16mf4x2 (int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16mf4x3 (int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16mf4x4 (int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i16mf4x5 (int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i16mf4x6 (int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i16mf4x7 (int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i16mf4x8 (int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16mf2x2 (int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16mf2x3 (int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16mf2x4 (int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i16mf2x5 (int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i16mf2x6 (int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i16mf2x7 (int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i16mf2x8 (int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16m1x2 (int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16m1x3 (int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16m1x4 (int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i16m1x5 (int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i16m1x6 (int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i16m1x7 (int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i16m1x8 (int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16m2x2 (int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16m2x3 (int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16m2x4 (int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16m4x2 (int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32mf2x2 (int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i32mf2x3 (int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i32mf2x4 (int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i32mf2x5 (int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i32mf2x6 (int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i32mf2x7 (int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i32mf2x8 (int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32m1x2 (int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i32m1x3 (int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i32m1x4 (int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i32m1x5 (int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i32m1x6 (int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i32m1x7 (int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i32m1x8 (int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32m2x2 (int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i32m2x3 (int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i32m2x4 (int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32m4x2 (int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i64m1x2 (int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i64m1x3 (int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i64m1x4 (int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i64m1x5 (int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i64m1x6 (int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i64m1x7 (int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i64m1x8 (int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i64m2x2 (int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i64m2x3 (int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i64m2x4 (int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i64m4x2 (int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8mf8x2 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8mf8x3 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8mf8x4 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8mf8x5 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8mf8x6 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8mf8x7 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8mf8x8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8mf4x2 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8mf4x3 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8mf4x4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8mf4x5 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8mf4x6 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8mf4x7 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8mf4x8 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8mf2x2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8mf2x3 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8mf2x4 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8mf2x5 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8mf2x6 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8mf2x7 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8mf2x8 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8m1x2 (uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8m1x3 (uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8m1x4 (uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8m1x5 (uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8m1x6 (uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8m1x7 (uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8m1x8 (uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8m2x2 (uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8m2x3 (uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8m2x4 (uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8m4x2 (uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16mf4x2 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16mf4x3 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16mf4x4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u16mf4x5 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u16mf4x6 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u16mf4x7 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u16mf4x8 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16mf2x2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16mf2x3 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16mf2x4 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u16mf2x5 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u16mf2x6 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u16mf2x7 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u16mf2x8 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16m1x2 (uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16m1x3 (uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16m1x4 (uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u16m1x5 (uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u16m1x6 (uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u16m1x7 (uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u16m1x8 (uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16m2x2 (uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16m2x3 (uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16m2x4 (uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16m4x2 (uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32mf2x2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u32mf2x3 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u32mf2x4 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u32mf2x5 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u32mf2x6 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u32mf2x7 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u32mf2x8 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32m1x2 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u32m1x3 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u32m1x4 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u32m1x5 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u32m1x6 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u32m1x7 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u32m1x8 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32m2x2 (uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u32m2x3 (uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u32m2x4 (uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32m4x2 (uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u64m1x2 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u64m1x3 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u64m1x4 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u64m1x5 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u64m1x6 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u64m1x7 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u64m1x8 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u64m2x2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u64m2x3 (uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u64m2x4 (uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u64m4x2 (uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f16m4x2_m (vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8m2x2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i8m2x3_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i8m2x4_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i8m4x2_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i16m4x2_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8m2x2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u8m2x3_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u8m2x4_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u8m4x2_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u16m4x2_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei16_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei16_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei16_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei16_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei16_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei16_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei16_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl);
8.7. vsuxseg<nf>ei32.v
- Mnemonic
vsuxseg2ei32.v vs3, (rs1), vs2, vm # nf=2
vsuxseg3ei32.v vs3, (rs1), vs2, vm # nf=3
vsuxseg4ei32.v vs3, (rs1), vs2, vm # nf=4
vsuxseg5ei32.v vs3, (rs1), vs2, vm # nf=5
vsuxseg6ei32.v vs3, (rs1), vs2, vm # nf=6
vsuxseg7ei32.v vs3, (rs1), vs2, vm # nf=7
vsuxseg8ei32.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei32_v.h
- Intrinsic Functions
Details
// Unordered indexed segment stores with 32-bit index elements
// (vsuxseg<nf>ei32, unmasked variants).
//
// Each intrinsic stores an <nf>-field register-group tuple (v_tuple, tuple
// types named ...x2_t through ...x8_t) to memory starting at base, with each
// segment scattered by the corresponding byte offset taken from bindex, for
// vl elements.
//
// Naming/type pattern visible throughout this listing:
//   - the suffix after _v_ encodes the data element type and LMUL
//     (e.g. f16mf4, i8m1, u64m2) plus the field count xN;
//   - the index operand always has EEW=32, so its LMUL is the data LMUL
//     scaled by 32/SEW of the data type (f16mf4 pairs with vuint32mf2,
//     i8m1 pairs with vuint32m4, f64m1 pairs with vuint32mf2);
//   - larger LMULs offer fewer field counts because nf * LMUL may not
//     exceed 8 register groups (e.g. m2 data stops at x4, m4 data at x2).

// float16 data (SEW=16): index LMUL = 2x data LMUL.
void __riscv_vsuxseg2ei32_v_f16mf4x2 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16mf4x3 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16mf4x4 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f16mf4x5 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f16mf4x6 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f16mf4x7 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f16mf4x8 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16mf2x2 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16mf2x3 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16mf2x4 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f16mf2x5 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f16mf2x6 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f16mf2x7 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f16mf2x8 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16m1x2 (float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16m1x3 (float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16m1x4 (float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f16m1x5 (float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f16m1x6 (float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f16m1x7 (float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f16m1x8 (float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16m2x2 (float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16m2x3 (float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16m2x4 (float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16m4x2 (float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl);

// float32 data (SEW=32): index LMUL = data LMUL.
void __riscv_vsuxseg2ei32_v_f32mf2x2 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f32mf2x3 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f32mf2x4 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f32mf2x5 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f32mf2x6 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f32mf2x7 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f32mf2x8 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f32m1x2 (float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f32m1x3 (float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f32m1x4 (float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f32m1x5 (float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f32m1x6 (float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f32m1x7 (float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f32m1x8 (float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f32m2x2 (float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f32m2x3 (float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f32m2x4 (float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f32m4x2 (float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl);

// float64 data (SEW=64): index LMUL = data LMUL / 2.
void __riscv_vsuxseg2ei32_v_f64m1x2 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f64m1x3 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f64m1x4 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f64m1x5 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f64m1x6 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f64m1x7 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f64m1x8 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f64m2x2 (float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f64m2x3 (float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f64m2x4 (float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f64m4x2 (float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl);

// int8 data (SEW=8): index LMUL = 4x data LMUL.
void __riscv_vsuxseg2ei32_v_i8mf8x2 (int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8mf8x3 (int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8mf8x4 (int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8mf8x5 (int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8mf8x6 (int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8mf8x7 (int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8mf8x8 (int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8mf4x2 (int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8mf4x3 (int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8mf4x4 (int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8mf4x5 (int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8mf4x6 (int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8mf4x7 (int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8mf4x8 (int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8mf2x2 (int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8mf2x3 (int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8mf2x4 (int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8mf2x5 (int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8mf2x6 (int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8mf2x7 (int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8mf2x8 (int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8m1x2 (int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8m1x3 (int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8m1x4 (int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8m1x5 (int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8m1x6 (int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8m1x7 (int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8m1x8 (int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8m2x2 (int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8m2x3 (int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8m2x4 (int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl);

// int16 data (SEW=16): index LMUL = 2x data LMUL.
void __riscv_vsuxseg2ei32_v_i16mf4x2 (int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16mf4x3 (int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16mf4x4 (int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i16mf4x5 (int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i16mf4x6 (int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i16mf4x7 (int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i16mf4x8 (int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16mf2x2 (int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16mf2x3 (int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16mf2x4 (int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i16mf2x5 (int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i16mf2x6 (int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i16mf2x7 (int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i16mf2x8 (int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16m1x2 (int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16m1x3 (int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16m1x4 (int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i16m1x5 (int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i16m1x6 (int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i16m1x7 (int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i16m1x8 (int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16m2x2 (int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16m2x3 (int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16m2x4 (int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16m4x2 (int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl);

// int32 data (SEW=32): index LMUL = data LMUL.
void __riscv_vsuxseg2ei32_v_i32mf2x2 (int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i32mf2x3 (int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i32mf2x4 (int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i32mf2x5 (int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i32mf2x6 (int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i32mf2x7 (int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i32mf2x8 (int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i32m1x2 (int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i32m1x3 (int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i32m1x4 (int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i32m1x5 (int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i32m1x6 (int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i32m1x7 (int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i32m1x8 (int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i32m2x2 (int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i32m2x3 (int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i32m2x4 (int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i32m4x2 (int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl);

// int64 data (SEW=64): index LMUL = data LMUL / 2.
void __riscv_vsuxseg2ei32_v_i64m1x2 (int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i64m1x3 (int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i64m1x4 (int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i64m1x5 (int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i64m1x6 (int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i64m1x7 (int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i64m1x8 (int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i64m2x2 (int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i64m2x3 (int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i64m2x4 (int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i64m4x2 (int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl);

// uint8 data (SEW=8): index LMUL = 4x data LMUL.
void __riscv_vsuxseg2ei32_v_u8mf8x2 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8mf8x3 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8mf8x4 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8mf8x5 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8mf8x6 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8mf8x7 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8mf8x8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8mf4x2 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8mf4x3 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8mf4x4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8mf4x5 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8mf4x6 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8mf4x7 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8mf4x8 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8mf2x2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8mf2x3 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8mf2x4 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8mf2x5 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8mf2x6 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8mf2x7 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8mf2x8 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8m1x2 (uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8m1x3 (uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8m1x4 (uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8m1x5 (uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8m1x6 (uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8m1x7 (uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8m1x8 (uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8m2x2 (uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8m2x3 (uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8m2x4 (uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl);

// uint16 data (SEW=16): index LMUL = 2x data LMUL.
void __riscv_vsuxseg2ei32_v_u16mf4x2 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16mf4x3 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16mf4x4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u16mf4x5 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u16mf4x6 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u16mf4x7 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u16mf4x8 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16mf2x2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16mf2x3 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16mf2x4 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u16mf2x5 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u16mf2x6 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u16mf2x7 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u16mf2x8 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16m1x2 (uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16m1x3 (uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16m1x4 (uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u16m1x5 (uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u16m1x6 (uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u16m1x7 (uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u16m1x8 (uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16m2x2 (uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16m2x3 (uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16m2x4 (uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16m4x2 (uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl);

// uint32 data (SEW=32): index LMUL = data LMUL.
void __riscv_vsuxseg2ei32_v_u32mf2x2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u32mf2x3 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u32mf2x4 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u32mf2x5 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u32mf2x6 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u32mf2x7 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u32mf2x8 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u32m1x2 (uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u32m1x3 (uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u32m1x4 (uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u32m1x5 (uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u32m1x6 (uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u32m1x7 (uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u32m1x8 (uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u32m2x2 (uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u32m2x3 (uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u32m2x4 (uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u32m4x2 (uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl);

// uint64 data (SEW=64): index LMUL = data LMUL / 2.
void __riscv_vsuxseg2ei32_v_u64m1x2 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u64m1x3 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u64m1x4 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u64m1x5 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u64m1x6 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u64m1x7 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u64m1x8 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u64m2x2 (uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u64m2x3 (uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u64m2x4 (uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u64m4x2 (uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl);
// Masked (_m) variants of the vsuxseg<nf>ei32 unordered indexed segment
// stores. These mirror the unmasked forms above, with one additional
// leading mask operand; per the RVV intrinsic convention, only elements
// whose mask bit is set are stored.
//
// Mask type pattern visible in this listing: vbool<n>_t where
// n = data SEW / data LMUL (e.g. f16mf4 -> vbool64_t, i8m2 -> vbool4_t).

// float16 data, masked.
void __riscv_vsuxseg2ei32_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f16m4x2_m (vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl);

// float32 data, masked.
void __riscv_vsuxseg2ei32_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl);

// float64 data, masked.
void __riscv_vsuxseg2ei32_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl);

// int8 data, masked.
void __riscv_vsuxseg2ei32_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i8m2x2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i8m2x3_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i8m2x4_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl);

// int16 data, masked (group continues beyond this chunk).
void __riscv_vsuxseg2ei32_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i16m4x2_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u8m2x2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u8m2x3_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u8m2x4_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u16m4x2_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei32_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei32_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei32_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei32_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei32_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei32_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei32_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl);
8.8. vsuxseg<nf>ei64.v
- Mnemonic
vsuxseg2ei64.v vs3, (rs1), vs2, vm # nf=2
vsuxseg3ei64.v vs3, (rs1), vs2, vm # nf=3
vsuxseg4ei64.v vs3, (rs1), vs2, vm # nf=4
vsuxseg5ei64.v vs3, (rs1), vs2, vm # nf=5
vsuxseg6ei64.v vs3, (rs1), vs2, vm # nf=6
vsuxseg7ei64.v vs3, (rs1), vs2, vm # nf=7
vsuxseg8ei64.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-unordered segment stores with 64-bit index elements (EEW=64 for the index operand vs2)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsuxei64_v.h
- Intrinsic Functions
Details
void __riscv_vsuxseg2ei64_v_f16mf4x2 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf4x3 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf4x4 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf4x5 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf4x6 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf4x7 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf4x8 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16mf2x2 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf2x3 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf2x4 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf2x5 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf2x6 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf2x7 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf2x8 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m1x2 (float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m1x3 (float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m1x4 (float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16m1x5 (float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16m1x6 (float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16m1x7 (float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16m1x8 (float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m2x2 (float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m2x3 (float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m2x4 (float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32mf2x2 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32mf2x3 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32mf2x4 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32mf2x5 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32mf2x6 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32mf2x7 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32mf2x8 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m1x2 (float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m1x3 (float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m1x4 (float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32m1x5 (float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32m1x6 (float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32m1x7 (float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32m1x8 (float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m2x2 (float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m2x3 (float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m2x4 (float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m4x2 (float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m1x2 (float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m1x3 (float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m1x4 (float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f64m1x5 (float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f64m1x6 (float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f64m1x7 (float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f64m1x8 (float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m2x2 (float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m2x3 (float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m2x4 (float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m4x2 (float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf8x2 (int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf8x3 (int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf8x4 (int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf8x5 (int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf8x6 (int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf8x7 (int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf8x8 (int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf4x2 (int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf4x3 (int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf4x4 (int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf4x5 (int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf4x6 (int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf4x7 (int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf4x8 (int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf2x2 (int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf2x3 (int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf2x4 (int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf2x5 (int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf2x6 (int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf2x7 (int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf2x8 (int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8m1x2 (int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8m1x3 (int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8m1x4 (int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8m1x5 (int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8m1x6 (int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8m1x7 (int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8m1x8 (int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16mf4x2 (int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf4x3 (int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf4x4 (int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf4x5 (int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf4x6 (int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf4x7 (int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf4x8 (int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16mf2x2 (int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf2x3 (int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf2x4 (int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf2x5 (int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf2x6 (int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf2x7 (int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf2x8 (int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16m1x2 (int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m1x3 (int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m1x4 (int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16m1x5 (int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16m1x6 (int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16m1x7 (int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16m1x8 (int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl);
/*
 * Unmasked vsuxseg<nf>ei64 intrinsics (unordered indexed segment stores,
 * 64-bit indices), per the RVV intrinsics naming scheme.
 *
 * Each call stores <nf> fields per element from the tuple `v_tuple`
 * (v<type><lmul>x<nf>_t) to memory at `base`, with each segment's byte
 * offset taken from the corresponding element of `bindex`.
 *
 * Parameters (common to all declarations below):
 *   base    - scalar base address of the store region
 *   bindex  - vector of 64-bit unsigned byte offsets; its LMUL is
 *             dataLMUL * (64 / SEW), as visible in the listing
 *   v_tuple - tuple of <nf> data vector registers to store
 *   vl      - active vector length
 *
 * NOTE(review): "unordered indexed segment store" follows the standard
 * vsuxseg mnemonic expansion — confirm against the RVV spec if exact
 * memory-ordering semantics matter.
 */
/* int16, LMUL=2 (continued from previous page): index type vuint64m8_t */
void __riscv_vsuxseg2ei64_v_i16m2x2 (int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m2x3 (int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m2x4 (int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl);
/* int32 data: index LMUL = 2x data LMUL (mf2->m1, m1->m2, m2->m4, m4->m8) */
void __riscv_vsuxseg2ei64_v_i32mf2x2 (int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32mf2x3 (int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32mf2x4 (int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32mf2x5 (int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32mf2x6 (int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32mf2x7 (int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32mf2x8 (int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m1x2 (int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m1x3 (int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m1x4 (int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32m1x5 (int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32m1x6 (int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32m1x7 (int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32m1x8 (int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m2x2 (int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m2x3 (int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m2x4 (int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m4x2 (int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl);
/* int64 data: index LMUL matches data LMUL; max data LMUL is m4 (nf*LMUL <= 8) */
void __riscv_vsuxseg2ei64_v_i64m1x2 (int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m1x3 (int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m1x4 (int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i64m1x5 (int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i64m1x6 (int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i64m1x7 (int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i64m1x8 (int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m2x2 (int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m2x3 (int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m2x4 (int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m4x2 (int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl);
/* uint8 data: index LMUL = 8x data LMUL (mf8->m1 ... m1->m8) */
void __riscv_vsuxseg2ei64_v_u8mf8x2 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf8x3 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf8x4 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf8x5 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf8x6 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf8x7 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf8x8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf4x2 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf4x3 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf4x4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf4x5 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf4x6 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf4x7 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf4x8 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf2x2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf2x3 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf2x4 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf2x5 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf2x6 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf2x7 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf2x8 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8m1x2 (uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8m1x3 (uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8m1x4 (uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8m1x5 (uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8m1x6 (uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8m1x7 (uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8m1x8 (uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl);
/* uint16 data: index LMUL = 4x data LMUL (mf4->m1 ... m2->m8) */
void __riscv_vsuxseg2ei64_v_u16mf4x2 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf4x3 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf4x4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf4x5 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf4x6 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf4x7 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf4x8 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16mf2x2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf2x3 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf2x4 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf2x5 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf2x6 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf2x7 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf2x8 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m1x2 (uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m1x3 (uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m1x4 (uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16m1x5 (uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16m1x6 (uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16m1x7 (uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16m1x8 (uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m2x2 (uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m2x3 (uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m2x4 (uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl);
/* uint32 data: index LMUL = 2x data LMUL */
void __riscv_vsuxseg2ei64_v_u32mf2x2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32mf2x3 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32mf2x4 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32mf2x5 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32mf2x6 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32mf2x7 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32mf2x8 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m1x2 (uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m1x3 (uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m1x4 (uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32m1x5 (uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32m1x6 (uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32m1x7 (uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32m1x8 (uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m2x2 (uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m2x3 (uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m2x4 (uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m4x2 (uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl);
/* uint64 data: index LMUL matches data LMUL */
void __riscv_vsuxseg2ei64_v_u64m1x2 (uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m1x3 (uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m1x4 (uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u64m1x5 (uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u64m1x6 (uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u64m1x7 (uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u64m1x8 (uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m2x2 (uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m2x3 (uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m2x4 (uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m4x2 (uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl);
/*
 * Masked (_m suffix) vsuxseg<nf>ei64 intrinsics. Identical to the unmasked
 * forms but with a leading `mask` parameter: a vbool<n>_t whose ratio n
 * equals SEW/LMUL of the data type (e.g. f16mf4 -> 16/(1/4) = vbool64_t),
 * as visible throughout this listing. Per the intrinsics naming convention,
 * only segments whose mask bit is set are stored — confirm exact masking
 * semantics against the RVV specification.
 *
 * Parameters:
 *   mask    - per-element store enable
 *   base    - scalar base address of the store region
 *   bindex  - vector of 64-bit unsigned byte offsets
 *   v_tuple - tuple of <nf> data vector registers to store
 *   vl      - active vector length
 *
 * NOTE(review): float16_t here presumably maps to _Float16/__fp16 under the
 * Zvfh extension — verify against the toolchain's type definitions.
 */
/* float16 data */
void __riscv_vsuxseg2ei64_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
/* float32 data */
void __riscv_vsuxseg2ei64_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
/* float64 data */
void __riscv_vsuxseg2ei64_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
/* int8 data */
void __riscv_vsuxseg2ei64_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl);
/* int16 data */
void __riscv_vsuxseg2ei64_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl);
/* int32 data */
void __riscv_vsuxseg2ei64_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl);
/* int64 data */
void __riscv_vsuxseg2ei64_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl);
/* uint8 data */
void __riscv_vsuxseg2ei64_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl);
/* uint16 data */
void __riscv_vsuxseg2ei64_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl);
/* uint32 data (group continues on the next page) */
void __riscv_vsuxseg2ei64_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsuxseg5ei64_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsuxseg6ei64_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsuxseg7ei64_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsuxseg8ei64_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsuxseg3ei64_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsuxseg4ei64_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsuxseg2ei64_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl);
8.9. vsoxseg<nf>ei8.v
- Mnemonic
vsoxseg2ei8.v vs3, (rs1), vs2, vm # nf=2
vsoxseg3ei8.v vs3, (rs1), vs2, vm # nf=3
vsoxseg4ei8.v vs3, (rs1), vs2, vm # nf=4
vsoxseg5ei8.v vs3, (rs1), vs2, vm # nf=5
vsoxseg6ei8.v vs3, (rs1), vs2, vm # nf=6
vsoxseg7ei8.v vs3, (rs1), vs2, vm # nf=7
vsoxseg8ei8.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei8_v.h
- Intrinsic Functions
Details
void __riscv_vsoxseg2ei8_v_f16mf4x2 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16mf4x3 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16mf4x4 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f16mf4x5 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f16mf4x6 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f16mf4x7 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f16mf4x8 (float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16mf2x2 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16mf2x3 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16mf2x4 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f16mf2x5 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f16mf2x6 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f16mf2x7 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f16mf2x8 (float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16m1x2 (float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16m1x3 (float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16m1x4 (float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f16m1x5 (float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f16m1x6 (float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f16m1x7 (float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f16m1x8 (float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16m2x2 (float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16m2x3 (float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16m2x4 (float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16m4x2 (float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32mf2x2 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f32mf2x3 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f32mf2x4 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f32mf2x5 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f32mf2x6 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f32mf2x7 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f32mf2x8 (float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32m1x2 (float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f32m1x3 (float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f32m1x4 (float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f32m1x5 (float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f32m1x6 (float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f32m1x7 (float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f32m1x8 (float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32m2x2 (float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f32m2x3 (float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f32m2x4 (float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32m4x2 (float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f64m1x2 (float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f64m1x3 (float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f64m1x4 (float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f64m1x5 (float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f64m1x6 (float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f64m1x7 (float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f64m1x8 (float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f64m2x2 (float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f64m2x3 (float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f64m2x4 (float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f64m4x2 (float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8mf8x2 (int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8mf8x3 (int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8mf8x4 (int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8mf8x5 (int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8mf8x6 (int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8mf8x7 (int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8mf8x8 (int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8mf4x2 (int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8mf4x3 (int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8mf4x4 (int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8mf4x5 (int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8mf4x6 (int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8mf4x7 (int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8mf4x8 (int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8mf2x2 (int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8mf2x3 (int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8mf2x4 (int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8mf2x5 (int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8mf2x6 (int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8mf2x7 (int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8mf2x8 (int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8m1x2 (int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8m1x3 (int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8m1x4 (int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8m1x5 (int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8m1x6 (int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8m1x7 (int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8m1x8 (int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8m2x2 (int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8m2x3 (int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8m2x4 (int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8m4x2 (int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16mf4x2 (int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16mf4x3 (int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16mf4x4 (int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i16mf4x5 (int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i16mf4x6 (int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i16mf4x7 (int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i16mf4x8 (int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16mf2x2 (int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16mf2x3 (int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16mf2x4 (int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i16mf2x5 (int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i16mf2x6 (int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i16mf2x7 (int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i16mf2x8 (int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16m1x2 (int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16m1x3 (int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16m1x4 (int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i16m1x5 (int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i16m1x6 (int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i16m1x7 (int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i16m1x8 (int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16m2x2 (int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16m2x3 (int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16m2x4 (int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16m4x2 (int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32mf2x2 (int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i32mf2x3 (int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i32mf2x4 (int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i32mf2x5 (int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i32mf2x6 (int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i32mf2x7 (int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i32mf2x8 (int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32m1x2 (int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i32m1x3 (int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i32m1x4 (int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i32m1x5 (int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i32m1x6 (int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i32m1x7 (int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i32m1x8 (int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32m2x2 (int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i32m2x3 (int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i32m2x4 (int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32m4x2 (int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i64m1x2 (int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i64m1x3 (int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i64m1x4 (int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i64m1x5 (int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i64m1x6 (int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i64m1x7 (int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i64m1x8 (int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i64m2x2 (int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i64m2x3 (int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i64m2x4 (int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i64m4x2 (int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8mf8x2 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8mf8x3 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8mf8x4 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8mf8x5 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8mf8x6 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8mf8x7 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8mf8x8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8mf4x2 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8mf4x3 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8mf4x4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8mf4x5 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8mf4x6 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8mf4x7 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8mf4x8 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8mf2x2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8mf2x3 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8mf2x4 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8mf2x5 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8mf2x6 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8mf2x7 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8mf2x8 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8m1x2 (uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8m1x3 (uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8m1x4 (uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8m1x5 (uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8m1x6 (uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8m1x7 (uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8m1x8 (uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8m2x2 (uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8m2x3 (uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8m2x4 (uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8m4x2 (uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16mf4x2 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16mf4x3 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16mf4x4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u16mf4x5 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u16mf4x6 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u16mf4x7 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u16mf4x8 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16mf2x2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16mf2x3 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16mf2x4 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u16mf2x5 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u16mf2x6 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u16mf2x7 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u16mf2x8 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16m1x2 (uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16m1x3 (uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16m1x4 (uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u16m1x5 (uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u16m1x6 (uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u16m1x7 (uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u16m1x8 (uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16m2x2 (uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16m2x3 (uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16m2x4 (uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16m4x2 (uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32mf2x2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u32mf2x3 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u32mf2x4 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u32mf2x5 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u32mf2x6 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u32mf2x7 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u32mf2x8 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32m1x2 (uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u32m1x3 (uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u32m1x4 (uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u32m1x5 (uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u32m1x6 (uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u32m1x7 (uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u32m1x8 (uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32m2x2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u32m2x3 (uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u32m2x4 (uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32m4x2 (uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u64m1x2 (uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u64m1x3 (uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u64m1x4 (uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u64m1x5 (uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u64m1x6 (uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u64m1x7 (uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u64m1x8 (uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u64m2x2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u64m2x3 (uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u64m2x4 (uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u64m4x2 (uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f16m4x2_m (vbool4_t mask, float16_t *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8m2x2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i8m2x3_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i8m2x4_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i8m4x2_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i16m4x2_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8m2x2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u8m2x3_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u8m2x4_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u8m4x2_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u16m4x2_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei8_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei8_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei8_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei8_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei8_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei8_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei8_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl);
8.10. vsoxseg<nf>ei16.v
- Mnemonic
vsoxseg2ei16.v vs3, (rs1), vs2, vm # nf=2
vsoxseg3ei16.v vs3, (rs1), vs2, vm # nf=3
vsoxseg4ei16.v vs3, (rs1), vs2, vm # nf=4
vsoxseg5ei16.v vs3, (rs1), vs2, vm # nf=5
vsoxseg6ei16.v vs3, (rs1), vs2, vm # nf=6
vsoxseg7ei16.v vs3, (rs1), vs2, vm # nf=7
vsoxseg8ei16.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei16_v.h
- Intrinsic Functions
Details
void __riscv_vsoxseg2ei16_v_f16mf4x2 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16mf4x3 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16mf4x4 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f16mf4x5 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f16mf4x6 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f16mf4x7 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f16mf4x8 (float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16mf2x2 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16mf2x3 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16mf2x4 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f16mf2x5 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f16mf2x6 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f16mf2x7 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f16mf2x8 (float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16m1x2 (float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16m1x3 (float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16m1x4 (float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f16m1x5 (float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f16m1x6 (float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f16m1x7 (float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f16m1x8 (float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16m2x2 (float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16m2x3 (float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16m2x4 (float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16m4x2 (float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f32mf2x2 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f32mf2x3 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f32mf2x4 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f32mf2x5 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f32mf2x6 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f32mf2x7 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f32mf2x8 (float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f32m1x2 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
/*
 * vsoxseg<N>ei16 (unmasked, floating-point data): indexed-ordered segment
 * stores with 16-bit (EEW=16) unsigned indices.  Each call stores an N-field
 * tuple `v_tuple` of vectors to memory at `base` offset by `bindex`
 * (per the RVV spec, bindex holds unsigned byte offsets), preserving element
 * order, for elements 0..vl-1.
 *
 * Index-vector LMUL follows the ratio rule idx_LMUL = data_LMUL * 16/SEW:
 * e.g. f32m1 data (SEW=32, LMUL=1) pairs with vuint16mf2_t indices, and
 * f64m1 data (SEW=64) with vuint16mf4_t.
 */
/* float32 data (SEW=32): index type is vuint16 at half the data LMUL. */
void __riscv_vsoxseg3ei16_v_f32m1x3 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f32m1x4 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f32m1x5 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f32m1x6 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f32m1x7 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f32m1x8 (float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f32m2x2 (float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f32m2x3 (float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f32m2x4 (float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f32m4x2 (float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
/* float64 data (SEW=64): index type is vuint16 at a quarter of the data LMUL. */
void __riscv_vsoxseg2ei16_v_f64m1x2 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f64m1x3 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f64m1x4 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f64m1x5 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f64m1x6 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f64m1x7 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f64m1x8 (float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f64m2x2 (float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f64m2x3 (float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f64m2x4 (float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f64m4x2 (float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
/*
 * vsoxseg<N>ei16 (unmasked, signed-integer data): indexed-ordered segment
 * stores with 16-bit unsigned indices.  Stores the N-field tuple `v_tuple`
 * to `base` offset by `bindex` (byte offsets per the RVV spec) for elements
 * 0..vl-1, in element order.
 *
 * Index LMUL = data_LMUL * 16/SEW; the segment count N (2..8) caps the data
 * LMUL so that N * LMUL <= 8 registers — hence m2 stops at x4 and m4 at x2.
 */
/* int8 data (SEW=8): index type is vuint16 at twice the data LMUL. */
void __riscv_vsoxseg2ei16_v_i8mf8x2 (int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8mf8x3 (int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8mf8x4 (int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8mf8x5 (int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8mf8x6 (int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8mf8x7 (int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8mf8x8 (int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8mf4x2 (int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8mf4x3 (int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8mf4x4 (int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8mf4x5 (int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8mf4x6 (int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8mf4x7 (int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8mf4x8 (int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8mf2x2 (int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8mf2x3 (int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8mf2x4 (int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8mf2x5 (int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8mf2x6 (int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8mf2x7 (int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8mf2x8 (int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8m1x2 (int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8m1x3 (int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8m1x4 (int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8m1x5 (int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8m1x6 (int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8m1x7 (int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8m1x8 (int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8m2x2 (int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8m2x3 (int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8m2x4 (int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8m4x2 (int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl);
/* int16 data (SEW=16): index LMUL equals data LMUL. */
void __riscv_vsoxseg2ei16_v_i16mf4x2 (int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16mf4x3 (int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16mf4x4 (int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i16mf4x5 (int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i16mf4x6 (int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i16mf4x7 (int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i16mf4x8 (int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16mf2x2 (int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16mf2x3 (int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16mf2x4 (int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i16mf2x5 (int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i16mf2x6 (int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i16mf2x7 (int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i16mf2x8 (int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16m1x2 (int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16m1x3 (int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16m1x4 (int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i16m1x5 (int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i16m1x6 (int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i16m1x7 (int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i16m1x8 (int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16m2x2 (int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16m2x3 (int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16m2x4 (int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16m4x2 (int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl);
/* int32 data (SEW=32): index type is vuint16 at half the data LMUL. */
void __riscv_vsoxseg2ei16_v_i32mf2x2 (int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i32mf2x3 (int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i32mf2x4 (int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i32mf2x5 (int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i32mf2x6 (int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i32mf2x7 (int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i32mf2x8 (int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i32m1x2 (int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i32m1x3 (int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i32m1x4 (int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i32m1x5 (int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i32m1x6 (int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i32m1x7 (int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i32m1x8 (int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i32m2x2 (int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i32m2x3 (int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i32m2x4 (int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i32m4x2 (int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl);
/* int64 data (SEW=64): index type is vuint16 at a quarter of the data LMUL. */
void __riscv_vsoxseg2ei16_v_i64m1x2 (int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i64m1x3 (int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i64m1x4 (int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i64m1x5 (int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i64m1x6 (int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i64m1x7 (int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i64m1x8 (int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i64m2x2 (int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i64m2x3 (int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i64m2x4 (int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i64m4x2 (int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl);
/*
 * vsoxseg<N>ei16 (unmasked, unsigned-integer data): mirrors the signed-int
 * group above, with identical index-type pairing (idx_LMUL = data_LMUL *
 * 16/SEW) — only the data element type differs.
 */
/* uint8 data (SEW=8). */
void __riscv_vsoxseg2ei16_v_u8mf8x2 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8mf8x3 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8mf8x4 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8mf8x5 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8mf8x6 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8mf8x7 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8mf8x8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8mf4x2 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8mf4x3 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8mf4x4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8mf4x5 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8mf4x6 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8mf4x7 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8mf4x8 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8mf2x2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8mf2x3 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8mf2x4 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8mf2x5 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8mf2x6 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8mf2x7 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8mf2x8 (uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8m1x2 (uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8m1x3 (uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8m1x4 (uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8m1x5 (uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8m1x6 (uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8m1x7 (uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8m1x8 (uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8m2x2 (uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8m2x3 (uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8m2x4 (uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8m4x2 (uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl);
/* uint16 data (SEW=16). */
void __riscv_vsoxseg2ei16_v_u16mf4x2 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16mf4x3 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16mf4x4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u16mf4x5 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u16mf4x6 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u16mf4x7 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u16mf4x8 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16mf2x2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16mf2x3 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16mf2x4 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u16mf2x5 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u16mf2x6 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u16mf2x7 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u16mf2x8 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16m1x2 (uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16m1x3 (uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16m1x4 (uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u16m1x5 (uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u16m1x6 (uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u16m1x7 (uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u16m1x8 (uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16m2x2 (uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16m2x3 (uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16m2x4 (uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16m4x2 (uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl);
/* uint32 data (SEW=32). */
void __riscv_vsoxseg2ei16_v_u32mf2x2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u32mf2x3 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u32mf2x4 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u32mf2x5 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u32mf2x6 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u32mf2x7 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u32mf2x8 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u32m1x2 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u32m1x3 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u32m1x4 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u32m1x5 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u32m1x6 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u32m1x7 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u32m1x8 (uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u32m2x2 (uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u32m2x3 (uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u32m2x4 (uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u32m4x2 (uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl);
/* uint64 data (SEW=64). */
void __riscv_vsoxseg2ei16_v_u64m1x2 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u64m1x3 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u64m1x4 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u64m1x5 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u64m1x6 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u64m1x7 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u64m1x8 (uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u64m2x2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u64m2x3 (uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u64m2x4 (uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u64m4x2 (uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl);
/*
 * vsoxseg<N>ei16_m (masked, floating-point data): same operation as the
 * unmasked forms, but a leading `mask` argument selects which elements are
 * stored — elements whose mask bit is 0 leave memory untouched.
 *
 * Mask type follows vboolN_t with N = SEW/LMUL of the data type,
 * e.g. f16mf4 -> vbool64_t, f16m4 -> vbool4_t.
 */
/* float16 data (SEW=16). */
void __riscv_vsoxseg2ei16_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f16m4x2_m (vbool4_t mask, float16_t *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
/* float32 data (SEW=32). */
void __riscv_vsoxseg2ei16_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
/* float64 data (SEW=64). */
void __riscv_vsoxseg2ei16_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
/*
 * vsoxseg<N>ei16_m (masked, signed-integer data): masked counterparts of the
 * unmasked int group — the leading `mask` (vboolN_t, N = SEW/LMUL of the
 * data type) gates which elements are written; inactive elements leave
 * memory unchanged.  (This group continues beyond i32m1 further down the
 * file.)
 */
/* int8 data (SEW=8). */
void __riscv_vsoxseg2ei16_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8m2x2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i8m2x3_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i8m2x4_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i8m4x2_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl);
/* int16 data (SEW=16). */
void __riscv_vsoxseg2ei16_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i16m4x2_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl);
/* int32 data (SEW=32). */
void __riscv_vsoxseg2ei16_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8m2x2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u8m2x3_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u8m2x4_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u8m4x2_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u16m4x2_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei16_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei16_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei16_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei16_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei16_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei16_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei16_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl);
8.11. vsoxseg<nf>ei32.v
- Mnemonic
vsoxseg2ei32.v vs3, (rs1), vs2, vm # nf=2
vsoxseg3ei32.v vs3, (rs1), vs2, vm # nf=3
vsoxseg4ei32.v vs3, (rs1), vs2, vm # nf=4
vsoxseg5ei32.v vs3, (rs1), vs2, vm # nf=5
vsoxseg6ei32.v vs3, (rs1), vs2, vm # nf=6
vsoxseg7ei32.v vs3, (rs1), vs2, vm # nf=7
vsoxseg8ei32.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei32_v.h
- Intrinsic Functions
Details
void __riscv_vsoxseg2ei32_v_f16mf4x2 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16mf4x3 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16mf4x4 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f16mf4x5 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f16mf4x6 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f16mf4x7 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f16mf4x8 (float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16mf2x2 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16mf2x3 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16mf2x4 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f16mf2x5 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f16mf2x6 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f16mf2x7 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f16mf2x8 (float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16m1x2 (float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16m1x3 (float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16m1x4 (float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f16m1x5 (float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f16m1x6 (float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f16m1x7 (float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f16m1x8 (float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16m2x2 (float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16m2x3 (float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16m2x4 (float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16m4x2 (float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32mf2x2 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f32mf2x3 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f32mf2x4 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f32mf2x5 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f32mf2x6 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f32mf2x7 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f32mf2x8 (float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32m1x2 (float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f32m1x3 (float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f32m1x4 (float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f32m1x5 (float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f32m1x6 (float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f32m1x7 (float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f32m1x8 (float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32m2x2 (float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f32m2x3 (float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f32m2x4 (float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32m4x2 (float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f64m1x2 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f64m1x3 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f64m1x4 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f64m1x5 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f64m1x6 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f64m1x7 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f64m1x8 (float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f64m2x2 (float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f64m2x3 (float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f64m2x4 (float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f64m4x2 (float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8mf8x2 (int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8mf8x3 (int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8mf8x4 (int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8mf8x5 (int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8mf8x6 (int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8mf8x7 (int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8mf8x8 (int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8mf4x2 (int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8mf4x3 (int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8mf4x4 (int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8mf4x5 (int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8mf4x6 (int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8mf4x7 (int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8mf4x8 (int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8mf2x2 (int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8mf2x3 (int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8mf2x4 (int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8mf2x5 (int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8mf2x6 (int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8mf2x7 (int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8mf2x8 (int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8m1x2 (int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8m1x3 (int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8m1x4 (int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8m1x5 (int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8m1x6 (int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8m1x7 (int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8m1x8 (int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8m2x2 (int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8m2x3 (int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8m2x4 (int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16mf4x2 (int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16mf4x3 (int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16mf4x4 (int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i16mf4x5 (int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i16mf4x6 (int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i16mf4x7 (int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i16mf4x8 (int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16mf2x2 (int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16mf2x3 (int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16mf2x4 (int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i16mf2x5 (int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i16mf2x6 (int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i16mf2x7 (int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i16mf2x8 (int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16m1x2 (int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16m1x3 (int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16m1x4 (int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i16m1x5 (int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i16m1x6 (int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i16m1x7 (int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i16m1x8 (int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16m2x2 (int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16m2x3 (int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16m2x4 (int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16m4x2 (int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32mf2x2 (int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i32mf2x3 (int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i32mf2x4 (int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i32mf2x5 (int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i32mf2x6 (int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i32mf2x7 (int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i32mf2x8 (int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32m1x2 (int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i32m1x3 (int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i32m1x4 (int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i32m1x5 (int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i32m1x6 (int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i32m1x7 (int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i32m1x8 (int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32m2x2 (int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i32m2x3 (int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i32m2x4 (int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32m4x2 (int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i64m1x2 (int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i64m1x3 (int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i64m1x4 (int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i64m1x5 (int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i64m1x6 (int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i64m1x7 (int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i64m1x8 (int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i64m2x2 (int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i64m2x3 (int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i64m2x4 (int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i64m4x2 (int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8mf8x2 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8mf8x3 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8mf8x4 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8mf8x5 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8mf8x6 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8mf8x7 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8mf8x8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8mf4x2 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8mf4x3 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8mf4x4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8mf4x5 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8mf4x6 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8mf4x7 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8mf4x8 (uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8mf2x2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8mf2x3 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8mf2x4 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8mf2x5 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8mf2x6 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8mf2x7 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8mf2x8 (uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8m1x2 (uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8m1x3 (uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8m1x4 (uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8m1x5 (uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8m1x6 (uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8m1x7 (uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8m1x8 (uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8m2x2 (uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8m2x3 (uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8m2x4 (uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl);
/* Unmasked indexed-ordered segment stores, unsigned 16-bit data (EEW=16) with
 * 32-bit indices: index EMUL = data EMUL * 2 (u16mf4 -> vuint32mf2, ...). */
void __riscv_vsoxseg2ei32_v_u16mf4x2 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16mf4x3 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16mf4x4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u16mf4x5 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u16mf4x6 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u16mf4x7 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u16mf4x8 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16mf2x2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16mf2x3 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16mf2x4 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u16mf2x5 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u16mf2x6 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u16mf2x7 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u16mf2x8 (uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16m1x2 (uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16m1x3 (uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16m1x4 (uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u16m1x5 (uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u16m1x6 (uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u16m1x7 (uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u16m1x8 (uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16m2x2 (uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16m2x3 (uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16m2x4 (uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16m4x2 (uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl);
/* Unmasked indexed-ordered segment stores, unsigned 32-bit data (EEW=32) with
 * 32-bit indices: index EMUL equals data EMUL (same vuint32 register group). */
void __riscv_vsoxseg2ei32_v_u32mf2x2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u32mf2x3 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u32mf2x4 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u32mf2x5 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u32mf2x6 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u32mf2x7 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u32mf2x8 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u32m1x2 (uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u32m1x3 (uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u32m1x4 (uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u32m1x5 (uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u32m1x6 (uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u32m1x7 (uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u32m1x8 (uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u32m2x2 (uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u32m2x3 (uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u32m2x4 (uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u32m4x2 (uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl);
/* Unmasked indexed-ordered segment stores, unsigned 64-bit data (EEW=64) with
 * 32-bit indices: index EMUL = data EMUL / 2 (u64m1 -> vuint32mf2, ...). */
void __riscv_vsoxseg2ei32_v_u64m1x2 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u64m1x3 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u64m1x4 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u64m1x5 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u64m1x6 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u64m1x7 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u64m1x8 (uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u64m2x2 (uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u64m2x3 (uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u64m2x4 (uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u64m4x2 (uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl);
/* Masked (_m) variants begin here: the leading vbool<SEW/LMUL>_t mask selects
 * which elements are stored (mask-off elements leave memory untouched). The
 * mask ratio follows the data type, e.g. f16mf4 -> 16/(1/4) = 64 -> vbool64_t. */
void __riscv_vsoxseg2ei32_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f16m4x2_m (vbool4_t mask, float16_t *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i8m2x2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i8m2x3_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i8m2x4_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i16m4x2_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u8m2x2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u8m2x3_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u8m2x4_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u16m4x2_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei32_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei32_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei32_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei32_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei32_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei32_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei32_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl);
8.12. vsoxseg<nf>ei64.v
- Mnemonic
vsoxseg2ei64.v vs3, (rs1), vs2, vm # nf=2
vsoxseg3ei64.v vs3, (rs1), vs2, vm # nf=3
vsoxseg4ei64.v vs3, (rs1), vs2, vm # nf=4
vsoxseg5ei64.v vs3, (rs1), vs2, vm # nf=5
vsoxseg6ei64.v vs3, (rs1), vs2, vm # nf=6
vsoxseg7ei64.v vs3, (rs1), vs2, vm # nf=7
vsoxseg8ei64.v vs3, (rs1), vs2, vm # nf=8
- Encoding
- Description
-
Indexed-ordered segment stores
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsoxei64_v.h
- Intrinsic Functions
Details
void __riscv_vsoxseg2ei64_v_f16mf4x2 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16mf4x3 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16mf4x4 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f16mf4x5 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f16mf4x6 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f16mf4x7 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f16mf4x8 (float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f16mf2x2 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16mf2x3 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16mf2x4 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f16mf2x5 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f16mf2x6 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f16mf2x7 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f16mf2x8 (float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f16m1x2 (float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16m1x3 (float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16m1x4 (float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f16m1x5 (float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f16m1x6 (float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f16m1x7 (float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f16m1x8 (float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f16m2x2 (float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16m2x3 (float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16m2x4 (float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32mf2x2 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f32mf2x3 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f32mf2x4 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f32mf2x5 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f32mf2x6 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f32mf2x7 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f32mf2x8 (float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32m1x2 (float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f32m1x3 (float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f32m1x4 (float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f32m1x5 (float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f32m1x6 (float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f32m1x7 (float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f32m1x8 (float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32m2x2 (float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f32m2x3 (float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f32m2x4 (float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32m4x2 (float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f64m1x2 (float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f64m1x3 (float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f64m1x4 (float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f64m1x5 (float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f64m1x6 (float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f64m1x7 (float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f64m1x8 (float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f64m2x2 (float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f64m2x3 (float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f64m2x4 (float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f64m4x2 (float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8mf8x2 (int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8mf8x3 (int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8mf8x4 (int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8mf8x5 (int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8mf8x6 (int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8mf8x7 (int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8mf8x8 (int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8mf4x2 (int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8mf4x3 (int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8mf4x4 (int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8mf4x5 (int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8mf4x6 (int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8mf4x7 (int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8mf4x8 (int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8mf2x2 (int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8mf2x3 (int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8mf2x4 (int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8mf2x5 (int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8mf2x6 (int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8mf2x7 (int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8mf2x8 (int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8m1x2 (int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8m1x3 (int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8m1x4 (int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8m1x5 (int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8m1x6 (int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8m1x7 (int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8m1x8 (int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16mf4x2 (int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16mf4x3 (int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16mf4x4 (int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i16mf4x5 (int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i16mf4x6 (int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i16mf4x7 (int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i16mf4x8 (int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16mf2x2 (int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16mf2x3 (int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16mf2x4 (int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i16mf2x5 (int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i16mf2x6 (int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i16mf2x7 (int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i16mf2x8 (int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16m1x2 (int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16m1x3 (int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16m1x4 (int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i16m1x5 (int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i16m1x6 (int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i16m1x7 (int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i16m1x8 (int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16m2x2 (int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16m2x3 (int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16m2x4 (int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32mf2x2 (int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i32mf2x3 (int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i32mf2x4 (int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i32mf2x5 (int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i32mf2x6 (int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i32mf2x7 (int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i32mf2x8 (int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32m1x2 (int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i32m1x3 (int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i32m1x4 (int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i32m1x5 (int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i32m1x6 (int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i32m1x7 (int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i32m1x8 (int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32m2x2 (int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i32m2x3 (int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i32m2x4 (int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32m4x2 (int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i64m1x2 (int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i64m1x3 (int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i64m1x4 (int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i64m1x5 (int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i64m1x6 (int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i64m1x7 (int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i64m1x8 (int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i64m2x2 (int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i64m2x3 (int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i64m2x4 (int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i64m4x2 (int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8mf8x2 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8mf8x3 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8mf8x4 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8mf8x5 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8mf8x6 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8mf8x7 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8mf8x8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8mf4x2 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8mf4x3 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8mf4x4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8mf4x5 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8mf4x6 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8mf4x7 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8mf4x8 (uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8mf2x2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8mf2x3 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8mf2x4 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8mf2x5 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8mf2x6 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8mf2x7 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8mf2x8 (uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8m1x2 (uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8m1x3 (uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8m1x4 (uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8m1x5 (uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8m1x6 (uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8m1x7 (uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8m1x8 (uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16mf4x2 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16mf4x3 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16mf4x4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u16mf4x5 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u16mf4x6 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u16mf4x7 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u16mf4x8 (uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16mf2x2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16mf2x3 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16mf2x4 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u16mf2x5 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u16mf2x6 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u16mf2x7 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u16mf2x8 (uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16m1x2 (uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16m1x3 (uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16m1x4 (uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u16m1x5 (uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u16m1x6 (uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u16m1x7 (uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u16m1x8 (uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16m2x2 (uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16m2x3 (uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16m2x4 (uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32mf2x2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u32mf2x3 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u32mf2x4 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u32mf2x5 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u32mf2x6 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u32mf2x7 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u32mf2x8 (uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32m1x2 (uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u32m1x3 (uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u32m1x4 (uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u32m1x5 (uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u32m1x6 (uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u32m1x7 (uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u32m1x8 (uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32m2x2 (uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u32m2x3 (uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u32m2x4 (uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32m4x2 (uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u64m1x2 (uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u64m1x3 (uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u64m1x4 (uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u64m1x5 (uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u64m1x6 (uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u64m1x7 (uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u64m1x8 (uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u64m2x2 (uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u64m2x3 (uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u64m2x4 (uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u64m4x2 (uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f16mf4x2_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16mf4x3_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16mf4x4_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f16mf4x5_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f16mf4x6_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f16mf4x7_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f16mf4x8_m (vbool64_t mask, float16_t *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f16mf2x2_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16mf2x3_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16mf2x4_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f16mf2x5_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f16mf2x6_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f16mf2x7_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f16mf2x8_m (vbool32_t mask, float16_t *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f16m1x2_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16m1x3_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16m1x4_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f16m1x5_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f16m1x6_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f16m1x7_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f16m1x8_m (vbool16_t mask, float16_t *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f16m2x2_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f16m2x3_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f16m2x4_m (vbool8_t mask, float16_t *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32mf2x2_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f32mf2x3_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f32mf2x4_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f32mf2x5_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f32mf2x6_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f32mf2x7_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f32mf2x8_m (vbool64_t mask, float32_t *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32m1x2_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f32m1x3_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f32m1x4_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f32m1x5_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f32m1x6_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f32m1x7_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f32m1x8_m (vbool32_t mask, float32_t *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32m2x2_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f32m2x3_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f32m2x4_m (vbool16_t mask, float32_t *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f32m4x2_m (vbool8_t mask, float32_t *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f64m1x2_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f64m1x3_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f64m1x4_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_f64m1x5_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_f64m1x6_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_f64m1x7_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_f64m1x8_m (vbool64_t mask, float64_t *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f64m2x2_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_f64m2x3_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_f64m2x4_m (vbool32_t mask, float64_t *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_f64m4x2_m (vbool16_t mask, float64_t *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8mf8x2_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8mf8x3_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8mf8x4_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8mf8x5_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8mf8x6_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8mf8x7_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8mf8x8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8mf4x2_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8mf4x3_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8mf4x4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8mf4x5_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8mf4x6_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8mf4x7_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8mf4x8_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8mf2x2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8mf2x3_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8mf2x4_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8mf2x5_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8mf2x6_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8mf2x7_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8mf2x8_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i8m1x2_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i8m1x3_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i8m1x4_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i8m1x5_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i8m1x6_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i8m1x7_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i8m1x8_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16mf4x2_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16mf4x3_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16mf4x4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i16mf4x5_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i16mf4x6_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i16mf4x7_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i16mf4x8_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16mf2x2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16mf2x3_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16mf2x4_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i16mf2x5_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i16mf2x6_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i16mf2x7_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i16mf2x8_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16m1x2_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16m1x3_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16m1x4_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i16m1x5_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i16m1x6_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i16m1x7_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i16m1x8_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i16m2x2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i16m2x3_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i16m2x4_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32mf2x2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i32mf2x3_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i32mf2x4_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i32mf2x5_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i32mf2x6_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i32mf2x7_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i32mf2x8_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32m1x2_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i32m1x3_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i32m1x4_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i32m1x5_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i32m1x6_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i32m1x7_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i32m1x8_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32m2x2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i32m2x3_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i32m2x4_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i32m4x2_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i64m1x2_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i64m1x3_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i64m1x4_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_i64m1x5_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_i64m1x6_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_i64m1x7_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_i64m1x8_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i64m2x2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_i64m2x3_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_i64m2x4_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_i64m4x2_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8mf8x2_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8mf8x3_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8mf8x4_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8mf8x5_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8mf8x6_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8mf8x7_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8mf8x8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8mf4x2_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8mf4x3_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8mf4x4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8mf4x5_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8mf4x6_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8mf4x7_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8mf4x8_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8mf2x2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8mf2x3_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8mf2x4_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8mf2x5_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8mf2x6_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8mf2x7_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8mf2x8_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u8m1x2_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u8m1x3_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u8m1x4_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u8m1x5_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u8m1x6_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u8m1x7_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u8m1x8_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16mf4x2_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16mf4x3_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16mf4x4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u16mf4x5_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u16mf4x6_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u16mf4x7_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u16mf4x8_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16mf2x2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16mf2x3_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16mf2x4_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u16mf2x5_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u16mf2x6_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u16mf2x7_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u16mf2x8_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16m1x2_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16m1x3_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16m1x4_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u16m1x5_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u16m1x6_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u16m1x7_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u16m1x8_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u16m2x2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u16m2x3_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u16m2x4_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32mf2x2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u32mf2x3_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u32mf2x4_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u32mf2x5_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u32mf2x6_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u32mf2x7_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u32mf2x8_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32m1x2_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u32m1x3_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u32m1x4_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u32m1x5_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u32m1x6_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u32m1x7_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u32m1x8_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32m2x2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u32m2x3_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u32m2x4_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u32m4x2_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u64m1x2_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u64m1x3_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u64m1x4_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl);
void __riscv_vsoxseg5ei64_v_u64m1x5_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl);
void __riscv_vsoxseg6ei64_v_u64m1x6_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl);
void __riscv_vsoxseg7ei64_v_u64m1x7_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl);
void __riscv_vsoxseg8ei64_v_u64m1x8_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u64m2x2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl);
void __riscv_vsoxseg3ei64_v_u64m2x3_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl);
void __riscv_vsoxseg4ei64_v_u64m2x4_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl);
void __riscv_vsoxseg2ei64_v_u64m4x2_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl);
9. Vector Load/Store Whole Register Instructions
9.1. vl1re8.v
- Mnemonic
vl1re8.v vd, (rs1)
- Encoding
- Description
-
Load vd with VLEN/8 bytes held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl1re8_v.h
- Intrinsic Functions
Details
9.2. vl1re16.v
- Mnemonic
vl1re16.v vd, (rs1)
- Encoding
- Description
-
Load vd with VLEN/16 halfwords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl1re16_v.h
- Intrinsic Functions
Details
9.3. vl1re32.v
- Mnemonic
vl1re32.v vd, (rs1)
- Encoding
- Description
-
Load vd with VLEN/32 words held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl1re32_v.h
- Intrinsic Functions
Details
9.4. vl1re64.v
- Mnemonic
vl1re64.v vd, (rs1)
- Encoding
- Description
-
Load vd with VLEN/64 doublewords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl1re64_v.h
- Intrinsic Functions
Details
9.5. vl2re8.v
- Mnemonic
vl2re8.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+1 with VLEN/8 bytes held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl2re8_v.h
- Intrinsic Functions
Details
9.6. vl2re16.v
- Mnemonic
vl2re16.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+1 with VLEN/16 halfwords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl2re16_v.h
- Intrinsic Functions
Details
9.7. vl2re32.v
- Mnemonic
vl2re32.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+1 with VLEN/32 words held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl2re32_v.h
- Intrinsic Functions
Details
9.8. vl2re64.v
- Mnemonic
vl2re64.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+1 with VLEN/64 doublewords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl2re64_v.h
- Intrinsic Functions
Details
9.9. vl4re8.v
- Mnemonic
vl4re8.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+3 with VLEN/8 bytes held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl4re8_v.h
- Intrinsic Functions
Details
9.10. vl4re16.v
- Mnemonic
vl4re16.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+3 with VLEN/16 halfwords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl4re16_v.h
- Intrinsic Functions
Details
9.11. vl4re32.v
- Mnemonic
vl4re32.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+3 with VLEN/32 words held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl4re32_v.h
- Intrinsic Functions
Details
9.12. vl4re64.v
- Mnemonic
vl4re64.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+3 with VLEN/64 doublewords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl4re64_v.h
- Intrinsic Functions
Details
9.13. vl8re8.v
- Mnemonic
vl8re8.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+7 with VLEN/8 bytes held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl8re8_v.h
- Intrinsic Functions
Details
9.14. vl8re16.v
- Mnemonic
vl8re16.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+7 with VLEN/16 halfwords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl8re16_v.h
- Intrinsic Functions
Details
9.15. vl8re32.v
- Mnemonic
vl8re32.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+7 with VLEN/32 words held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl8re32_v.h
- Intrinsic Functions
Details
9.16. vl8re64.v
- Mnemonic
vl8re64.v vd, (rs1)
- Encoding
- Description
-
Load vd-vd+7 with VLEN/64 doublewords held at address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vl8re64_v.h
- Intrinsic Functions
Details
9.17. vs1r.v
- Mnemonic
vs1r.v vs3, (rs1)
- Encoding
- Description
-
Store vs3 to address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vs1r_v.h
- Intrinsic Functions
Details
9.18. vs2r.v
- Mnemonic
vs2r.v vs3, (rs1)
- Encoding
- Description
-
Store vs3-vs3+1 to address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vs2r_v.h
- Intrinsic Functions
Details
9.19. vs4r.v
- Mnemonic
vs4r.v vs3, (rs1)
- Encoding
- Description
-
Store vs3-vs3+3 to address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vs4r_v.h
- Intrinsic Functions
Details
9.20. vs8r.v
- Mnemonic
vs8r.v vs3, (rs1)
- Encoding
- Description
-
Store vs3-vs3+7 to address in rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vs8r_v.h
- Intrinsic Functions
Details
10. Vector Single-Width Integer Add and Subtract
10.1. vadd.vv
- Mnemonic
vadd.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Integer addition, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vadd_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vadd_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vadd_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vadd_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vadd_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vadd_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vadd_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vadd_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vadd_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vadd_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vadd_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vadd_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vadd_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vadd_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vadd_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vadd_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vadd_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vadd_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vadd_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vadd_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vadd_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vadd_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vadd_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vadd_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vadd_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vadd_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vadd_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vadd_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vadd_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vadd_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vadd_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vadd_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vadd_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vadd_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vadd_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vadd_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vadd_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vadd_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vadd_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vadd_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vadd_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vadd_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vadd_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vadd_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vadd_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vadd_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vadd_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vadd_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vadd_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vadd_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vadd_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vadd_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vadd_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vadd_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vadd_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vadd_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vadd_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vadd_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vadd_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vadd_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vadd_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vadd_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vadd_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vadd_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vadd_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vadd_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vadd_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vadd_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vadd_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vadd_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vadd_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vadd_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vadd_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vadd_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vadd_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vadd_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vadd_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vadd_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vadd_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vadd_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vadd_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vadd_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vadd_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vadd_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vadd_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vadd_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
10.2. vadd.vx
- Mnemonic
vadd.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Integer addition, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vadd_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vadd_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vadd_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vadd_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vadd_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vadd_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vadd_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vadd_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vadd_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vadd_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vadd_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vadd_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vadd_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vadd_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vadd_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vadd_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vadd_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vadd_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vadd_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vadd_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vadd_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vadd_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vadd_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vadd_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vadd_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vadd_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vadd_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vadd_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vadd_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vadd_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vadd_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vadd_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vadd_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vadd_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vadd_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vadd_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vadd_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vadd_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vadd_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vadd_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vadd_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vadd_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vadd_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vadd_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vadd_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vadd_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vadd_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vadd_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vadd_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vadd_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vadd_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vadd_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vadd_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vadd_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vadd_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vadd_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vadd_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vadd_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vadd_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vadd_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vadd_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vadd_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vadd_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vadd_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vadd_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vadd_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vadd_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vadd_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vadd_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vadd_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vadd_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vadd_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vadd_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vadd_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vadd_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vadd_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vadd_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vadd_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vadd_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vadd_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vadd_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vadd_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vadd_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vadd_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vadd_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vadd_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
10.3. vadd.vi
- Mnemonic
vadd.vi vd, vs2, imm, vm
- Encoding
- Description
-
Integer addition, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vadd_vi.h
- Intrinsic Functions
Details
10.4. vsub.vv
- Mnemonic
vsub.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Integer subtraction, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsub_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsub_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vsub_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vsub_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vsub_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vsub_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vsub_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vsub_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vsub_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vsub_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vsub_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vsub_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vsub_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vsub_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vsub_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vsub_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vsub_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vsub_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vsub_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vsub_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vsub_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vsub_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vsub_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vsub_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vsub_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vsub_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vsub_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vsub_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vsub_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vsub_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vsub_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vsub_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vsub_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vsub_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vsub_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vsub_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vsub_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vsub_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vsub_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vsub_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vsub_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vsub_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vsub_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vsub_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vsub_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vsub_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vsub_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vsub_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vsub_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vsub_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vsub_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vsub_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vsub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vsub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vsub_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vsub_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vsub_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vsub_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vsub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vsub_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vsub_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vsub_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vsub_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vsub_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vsub_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vsub_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vsub_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vsub_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vsub_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vsub_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vsub_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vsub_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vsub_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vsub_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vsub_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vsub_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vsub_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vsub_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vsub_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vsub_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vsub_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vsub_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vsub_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vsub_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vsub_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vsub_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vsub_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vsub_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vsub_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
10.5. vsub.vx
- Mnemonic
vsub.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Integer subtraction, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsub_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsub_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vsub_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vsub_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vsub_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vsub_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vsub_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vsub_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vsub_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vsub_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vsub_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vsub_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vsub_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vsub_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vsub_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vsub_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vsub_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vsub_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vsub_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vsub_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vsub_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vsub_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vsub_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vsub_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vsub_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vsub_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vsub_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vsub_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vsub_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vsub_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vsub_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vsub_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vsub_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vsub_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vsub_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vsub_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vsub_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vsub_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vsub_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vsub_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vsub_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vsub_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vsub_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vsub_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vsub_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vsub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vsub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vsub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vsub_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vsub_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vsub_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vsub_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vsub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vsub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vsub_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vsub_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vsub_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vsub_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vsub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vsub_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vsub_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vsub_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vsub_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vsub_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vsub_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vsub_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vsub_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vsub_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vsub_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vsub_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vsub_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vsub_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vsub_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vsub_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vsub_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vsub_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vsub_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vsub_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vsub_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vsub_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vsub_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vsub_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vsub_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vsub_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vsub_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vsub_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vsub_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vsub_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vsub_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
10.6. vrsub.vx
- Mnemonic
vrsub.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Integer reverse subtract, vd[i] = x[rs1] - vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrsub_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vrsub_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vrsub_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vrsub_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vrsub_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vrsub_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vrsub_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vrsub_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vrsub_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vrsub_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vrsub_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vrsub_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vrsub_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vrsub_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vrsub_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vrsub_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vrsub_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vrsub_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vrsub_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vrsub_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vrsub_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vrsub_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vrsub_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vrsub_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vrsub_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vrsub_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vrsub_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vrsub_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vrsub_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vrsub_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vrsub_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vrsub_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vrsub_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vrsub_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vrsub_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vrsub_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vrsub_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vrsub_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vrsub_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vrsub_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vrsub_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vrsub_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vrsub_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vrsub_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vrsub_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vrsub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vrsub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vrsub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vrsub_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vrsub_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vrsub_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vrsub_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vrsub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vrsub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vrsub_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vrsub_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vrsub_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vrsub_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vrsub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vrsub_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vrsub_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vrsub_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vrsub_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vrsub_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vrsub_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vrsub_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vrsub_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vrsub_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vrsub_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vrsub_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vrsub_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vrsub_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vrsub_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vrsub_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vrsub_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vrsub_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vrsub_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vrsub_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vrsub_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vrsub_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vrsub_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vrsub_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vrsub_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vrsub_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vrsub_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vrsub_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vrsub_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vrsub_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vrsub_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
10.7. vrsub.vi
- Mnemonic
vrsub.vi vd, vs2, imm, vm
- Encoding
- Description
-
Integer reverse subtract, vd[i] = imm - vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrsub_vi.h
- Intrinsic Functions
Details
11. Vector Widening Integer Add/Subtract
The widening add/subtract instructions are provided in both signed and unsigned variants, depending on whether the narrower source operands are first sign- or zero-extended before forming the double-width sum.
11.1. vwaddu.vv
- Mnemonic
vwaddu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening unsigned integer add, 2*SEW = SEW + SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwaddu_vv.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwaddu_vv_u16mf4 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_vv_u16mf2 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_vv_u16m1 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_vv_u16m2 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_vv_u16m4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_vv_u16m8 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_vv_u32mf2 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_vv_u32m1 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_vv_u32m2 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_vv_u32m4 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_vv_u32m8 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_vv_u64m1 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_vv_u64m2 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_vv_u64m4 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_vv_u64m8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint16mf4_t __riscv_vwaddu_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_vv_u16m1_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_vv_u16m2_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_vv_u16m4_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_vv_u16m8_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_vv_u32m1_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_vv_u32m2_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_vv_u32m4_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_vv_u32m8_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_vv_u64m1_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_vv_u64m2_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_vv_u64m4_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_vv_u64m8_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
11.2. vwaddu.vx
- Mnemonic
vwaddu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening unsigned integer add, 2*SEW = SEW + SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwaddu_vx.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwaddu_vx_u16mf4 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_vx_u16mf2 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_vx_u16m1 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_vx_u16m2 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_vx_u16m4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_vx_u16m8 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_vx_u32mf2 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_vx_u32m1 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_vx_u32m2 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_vx_u32m4 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_vx_u32m8 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_vx_u64m1 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_vx_u64m2 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_vx_u64m4 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_vx_u64m8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint16mf4_t __riscv_vwaddu_vx_u16mf4_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_vx_u16mf2_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_vx_u16m1_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_vx_u16m2_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_vx_u16m4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_vx_u16m8_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_vx_u32mf2_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_vx_u32m1_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_vx_u32m2_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_vx_u32m4_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_vx_u32m8_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_vx_u64m1_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_vx_u64m2_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_vx_u64m4_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_vx_u64m8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
11.3. vwsubu.vv
- Mnemonic
vwsubu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening unsigned integer subtract, 2*SEW = SEW - SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsubu_vv.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwsubu_vv_u16mf4 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_vv_u16mf2 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_vv_u16m1 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_vv_u16m2 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_vv_u16m4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_vv_u16m8 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_vv_u32mf2 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_vv_u32m1 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_vv_u32m2 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_vv_u32m4 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_vv_u32m8 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_vv_u64m1 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_vv_u64m2 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_vv_u64m4 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_vv_u64m8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint16mf4_t __riscv_vwsubu_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_vv_u16m1_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_vv_u16m2_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_vv_u16m4_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_vv_u16m8_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_vv_u32m1_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_vv_u32m2_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_vv_u32m4_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_vv_u32m8_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_vv_u64m1_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_vv_u64m2_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_vv_u64m4_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_vv_u64m8_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
11.4. vwsubu.vx
- Mnemonic
vwsubu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening unsigned integer subtract, 2*SEW = SEW - SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsubu_vx.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwsubu_vx_u16mf4 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_vx_u16mf2 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_vx_u16m1 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_vx_u16m2 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_vx_u16m4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_vx_u16m8 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_vx_u32mf2 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_vx_u32m1 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_vx_u32m2 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_vx_u32m4 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_vx_u32m8 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_vx_u64m1 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_vx_u64m2 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_vx_u64m4 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_vx_u64m8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint16mf4_t __riscv_vwsubu_vx_u16mf4_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_vx_u16mf2_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_vx_u16m1_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_vx_u16m2_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_vx_u16m4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_vx_u16m8_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_vx_u32mf2_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_vx_u32m1_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_vx_u32m2_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_vx_u32m4_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_vx_u32m8_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_vx_u64m1_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_vx_u64m2_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_vx_u64m4_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_vx_u64m8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
11.5. vwadd.vv
- Mnemonic
vwadd.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening signed integer add, 2*SEW = SEW + SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwadd_vv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwadd_vv_i16mf4 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_vv_i16mf2 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwadd_vv_i16m1 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwadd_vv_i16m2 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwadd_vv_i16m4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwadd_vv_i16m8 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_vv_i32mf2 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwadd_vv_i32m1 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwadd_vv_i32m2 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwadd_vv_i32m4 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwadd_vv_i32m8 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwadd_vv_i64m1 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwadd_vv_i64m2 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwadd_vv_i64m4 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwadd_vv_i64m8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint16mf4_t __riscv_vwadd_vv_i16mf4_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_vv_i16mf2_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwadd_vv_i16m1_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwadd_vv_i16m2_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwadd_vv_i16m4_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwadd_vv_i16m8_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_vv_i32mf2_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwadd_vv_i32m1_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwadd_vv_i32m2_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwadd_vv_i32m4_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwadd_vv_i32m8_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwadd_vv_i64m1_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwadd_vv_i64m2_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwadd_vv_i64m4_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwadd_vv_i64m8_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
11.6. vwadd.vx
- Mnemonic
vwadd.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening signed integer add, 2*SEW = SEW + SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwadd_vx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwadd_vx_i16mf4 (vint8mf8_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_vx_i16mf2 (vint8mf4_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwadd_vx_i16m1 (vint8mf2_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwadd_vx_i16m2 (vint8m1_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwadd_vx_i16m4 (vint8m2_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwadd_vx_i16m8 (vint8m4_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_vx_i32mf2 (vint16mf4_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwadd_vx_i32m1 (vint16mf2_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwadd_vx_i32m2 (vint16m1_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwadd_vx_i32m4 (vint16m2_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwadd_vx_i32m8 (vint16m4_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwadd_vx_i64m1 (vint32mf2_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwadd_vx_i64m2 (vint32m1_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwadd_vx_i64m4 (vint32m2_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwadd_vx_i64m8 (vint32m4_t op1, int32_t op2, size_t vl);
vint16mf4_t __riscv_vwadd_vx_i16mf4_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_vx_i16mf2_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwadd_vx_i16m1_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwadd_vx_i16m2_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwadd_vx_i16m4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwadd_vx_i16m8_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_vx_i32mf2_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwadd_vx_i32m1_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwadd_vx_i32m2_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwadd_vx_i32m4_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwadd_vx_i32m8_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwadd_vx_i64m1_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwadd_vx_i64m2_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwadd_vx_i64m4_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwadd_vx_i64m8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
11.7. vwsub.vv
- Mnemonic
vwsub.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening signed integer subtract, 2*SEW = SEW - SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsub_vv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwsub_vv_i16mf4 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_vv_i16mf2 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwsub_vv_i16m1 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwsub_vv_i16m2 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwsub_vv_i16m4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwsub_vv_i16m8 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_vv_i32mf2 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwsub_vv_i32m1 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwsub_vv_i32m2 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwsub_vv_i32m4 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwsub_vv_i32m8 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwsub_vv_i64m1 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwsub_vv_i64m2 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwsub_vv_i64m4 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwsub_vv_i64m8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint16mf4_t __riscv_vwsub_vv_i16mf4_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_vv_i16mf2_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwsub_vv_i16m1_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwsub_vv_i16m2_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwsub_vv_i16m4_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwsub_vv_i16m8_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_vv_i32mf2_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwsub_vv_i32m1_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwsub_vv_i32m2_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwsub_vv_i32m4_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwsub_vv_i32m8_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwsub_vv_i64m1_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwsub_vv_i64m2_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwsub_vv_i64m4_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwsub_vv_i64m8_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
11.8. vwsub.vx
- Mnemonic
vwsub.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening signed integer subtract, 2*SEW = SEW - SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsub_vx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwsub_vx_i16mf4 (vint8mf8_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_vx_i16mf2 (vint8mf4_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwsub_vx_i16m1 (vint8mf2_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwsub_vx_i16m2 (vint8m1_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwsub_vx_i16m4 (vint8m2_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwsub_vx_i16m8 (vint8m4_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_vx_i32mf2 (vint16mf4_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwsub_vx_i32m1 (vint16mf2_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwsub_vx_i32m2 (vint16m1_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwsub_vx_i32m4 (vint16m2_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwsub_vx_i32m8 (vint16m4_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwsub_vx_i64m1 (vint32mf2_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwsub_vx_i64m2 (vint32m1_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwsub_vx_i64m4 (vint32m2_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwsub_vx_i64m8 (vint32m4_t op1, int32_t op2, size_t vl);
vint16mf4_t __riscv_vwsub_vx_i16mf4_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_vx_i16mf2_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwsub_vx_i16m1_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwsub_vx_i16m2_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwsub_vx_i16m4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwsub_vx_i16m8_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_vx_i32mf2_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwsub_vx_i32m1_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwsub_vx_i32m2_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwsub_vx_i32m4_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwsub_vx_i32m8_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwsub_vx_i64m1_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwsub_vx_i64m2_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwsub_vx_i64m4_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwsub_vx_i64m8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
11.9. vwaddu.wv
- Mnemonic
vwaddu.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening unsigned integer add, 2*SEW = 2*SEW + SEW, vector-vector (first operand is already double-width)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwaddu_wv.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwaddu_wv_u16mf4 (vuint16mf4_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_wv_u16mf2 (vuint16mf2_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_wv_u16m1 (vuint16m1_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_wv_u16m2 (vuint16m2_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_wv_u16m4 (vuint16m4_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_wv_u16m8 (vuint16m8_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_wv_u32mf2 (vuint32mf2_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_wv_u32m1 (vuint32m1_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_wv_u32m2 (vuint32m2_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_wv_u32m4 (vuint32m4_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_wv_u32m8 (vuint32m8_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_wv_u64m1 (vuint64m1_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_wv_u64m2 (vuint64m2_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_wv_u64m4 (vuint64m4_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_wv_u64m8 (vuint64m8_t op1, vuint32m4_t op2, size_t vl);
vuint16mf4_t __riscv_vwaddu_wv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_wv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_wv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_wv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_wv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_wv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_wv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_wv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_wv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_wv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_wv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_wv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_wv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_wv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_wv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl);
11.10. vwaddu.wx
- Mnemonic
vwaddu.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening unsigned integer add, 2*SEW = 2*SEW + SEW, vector-scalar (vector operand is already double-width)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwaddu_wx.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwaddu_wx_u16mf4 (vuint16mf4_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_wx_u16mf2 (vuint16mf2_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_wx_u16m1 (vuint16m1_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_wx_u16m2 (vuint16m2_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_wx_u16m4 (vuint16m4_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_wx_u16m8 (vuint16m8_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_wx_u32mf2 (vuint32mf2_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_wx_u32m1 (vuint32m1_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_wx_u32m2 (vuint32m2_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_wx_u32m4 (vuint32m4_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_wx_u32m8 (vuint32m8_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_wx_u64m1 (vuint64m1_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_wx_u64m2 (vuint64m2_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_wx_u64m4 (vuint64m4_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_wx_u64m8 (vuint64m8_t op1, uint32_t op2, size_t vl);
vuint16mf4_t __riscv_vwaddu_wx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwaddu_wx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwaddu_wx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwaddu_wx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwaddu_wx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwaddu_wx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwaddu_wx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwaddu_wx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwaddu_wx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwaddu_wx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwaddu_wx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwaddu_wx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwaddu_wx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwaddu_wx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwaddu_wx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl);
11.11. vwsubu.wv
- Mnemonic
vwsubu.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening unsigned integer subtract, 2*SEW = 2*SEW - SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsubu_wv.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwsubu_wv_u16mf4 (vuint16mf4_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_wv_u16mf2 (vuint16mf2_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_wv_u16m1 (vuint16m1_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_wv_u16m2 (vuint16m2_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_wv_u16m4 (vuint16m4_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_wv_u16m8 (vuint16m8_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_wv_u32mf2 (vuint32mf2_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_wv_u32m1 (vuint32m1_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_wv_u32m2 (vuint32m2_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_wv_u32m4 (vuint32m4_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_wv_u32m8 (vuint32m8_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_wv_u64m1 (vuint64m1_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_wv_u64m2 (vuint64m2_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_wv_u64m4 (vuint64m4_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_wv_u64m8 (vuint64m8_t op1, vuint32m4_t op2, size_t vl);
vuint16mf4_t __riscv_vwsubu_wv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_wv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_wv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_wv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_wv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_wv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_wv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_wv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_wv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_wv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_wv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_wv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_wv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_wv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_wv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint32m4_t op2, size_t vl);
11.12. vwsubu.wx
- Mnemonic
vwsubu.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening unsigned integer subtract, 2*SEW = 2*SEW - SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsubu_wx.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwsubu_wx_u16mf4 (vuint16mf4_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_wx_u16mf2 (vuint16mf2_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_wx_u16m1 (vuint16m1_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_wx_u16m2 (vuint16m2_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_wx_u16m4 (vuint16m4_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_wx_u16m8 (vuint16m8_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_wx_u32mf2 (vuint32mf2_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_wx_u32m1 (vuint32m1_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_wx_u32m2 (vuint32m2_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_wx_u32m4 (vuint32m4_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_wx_u32m8 (vuint32m8_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_wx_u64m1 (vuint64m1_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_wx_u64m2 (vuint64m2_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_wx_u64m4 (vuint64m4_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_wx_u64m8 (vuint64m8_t op1, uint32_t op2, size_t vl);
vuint16mf4_t __riscv_vwsubu_wx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwsubu_wx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwsubu_wx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwsubu_wx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwsubu_wx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwsubu_wx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwsubu_wx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwsubu_wx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwsubu_wx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwsubu_wx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwsubu_wx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwsubu_wx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwsubu_wx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwsubu_wx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwsubu_wx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint32_t op2, size_t vl);
11.13. vwadd.wv
- Mnemonic
vwadd.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening signed integer add, 2*SEW = 2*SEW + SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwadd_wv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwadd_wv_i16mf4 (vint16mf4_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_wv_i16mf2 (vint16mf2_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwadd_wv_i16m1 (vint16m1_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwadd_wv_i16m2 (vint16m2_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwadd_wv_i16m4 (vint16m4_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwadd_wv_i16m8 (vint16m8_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_wv_i32mf2 (vint32mf2_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwadd_wv_i32m1 (vint32m1_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwadd_wv_i32m2 (vint32m2_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwadd_wv_i32m4 (vint32m4_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwadd_wv_i32m8 (vint32m8_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwadd_wv_i64m1 (vint64m1_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwadd_wv_i64m2 (vint64m2_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwadd_wv_i64m4 (vint64m4_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwadd_wv_i64m8 (vint64m8_t op1, vint32m4_t op2, size_t vl);
vint16mf4_t __riscv_vwadd_wv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_wv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwadd_wv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwadd_wv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwadd_wv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwadd_wv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_wv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwadd_wv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwadd_wv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwadd_wv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwadd_wv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwadd_wv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwadd_wv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwadd_wv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwadd_wv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl);
11.14. vwadd.wx
- Mnemonic
vwadd.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening signed integer add, 2*SEW = 2*SEW + SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwadd_wx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwadd_wx_i16mf4 (vint16mf4_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_wx_i16mf2 (vint16mf2_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwadd_wx_i16m1 (vint16m1_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwadd_wx_i16m2 (vint16m2_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwadd_wx_i16m4 (vint16m4_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwadd_wx_i16m8 (vint16m8_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_wx_i32mf2 (vint32mf2_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwadd_wx_i32m1 (vint32m1_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwadd_wx_i32m2 (vint32m2_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwadd_wx_i32m4 (vint32m4_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwadd_wx_i32m8 (vint32m8_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwadd_wx_i64m1 (vint64m1_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwadd_wx_i64m2 (vint64m2_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwadd_wx_i64m4 (vint64m4_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwadd_wx_i64m8 (vint64m8_t op1, int32_t op2, size_t vl);
vint16mf4_t __riscv_vwadd_wx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwadd_wx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwadd_wx_i16m1_m (vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwadd_wx_i16m2_m (vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwadd_wx_i16m4_m (vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwadd_wx_i16m8_m (vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwadd_wx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwadd_wx_i32m1_m (vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwadd_wx_i32m2_m (vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwadd_wx_i32m4_m (vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwadd_wx_i32m8_m (vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwadd_wx_i64m1_m (vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwadd_wx_i64m2_m (vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwadd_wx_i64m4_m (vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwadd_wx_i64m8_m (vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl);
11.15. vwsub.wv
- Mnemonic
vwsub.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening signed integer subtract, 2*SEW = 2*SEW - SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsub_wv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwsub_wv_i16mf4 (vint16mf4_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_wv_i16mf2 (vint16mf2_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwsub_wv_i16m1 (vint16m1_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwsub_wv_i16m2 (vint16m2_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwsub_wv_i16m4 (vint16m4_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwsub_wv_i16m8 (vint16m8_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_wv_i32mf2 (vint32mf2_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwsub_wv_i32m1 (vint32m1_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwsub_wv_i32m2 (vint32m2_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwsub_wv_i32m4 (vint32m4_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwsub_wv_i32m8 (vint32m8_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwsub_wv_i64m1 (vint64m1_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwsub_wv_i64m2 (vint64m2_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwsub_wv_i64m4 (vint64m4_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwsub_wv_i64m8 (vint64m8_t op1, vint32m4_t op2, size_t vl);
vint16mf4_t __riscv_vwsub_wv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_wv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwsub_wv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwsub_wv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwsub_wv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwsub_wv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_wv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwsub_wv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwsub_wv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwsub_wv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwsub_wv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwsub_wv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwsub_wv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwsub_wv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwsub_wv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint32m4_t op2, size_t vl);
11.16. vwsub.wx
- Mnemonic
vwsub.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening signed integer subtract, 2*SEW = 2*SEW - SEW, vector-scalar
12. Vector Integer Extension
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwsub_wx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwsub_wx_i16mf4 (vint16mf4_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_wx_i16mf2 (vint16mf2_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwsub_wx_i16m1 (vint16m1_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwsub_wx_i16m2 (vint16m2_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwsub_wx_i16m4 (vint16m4_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwsub_wx_i16m8 (vint16m8_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_wx_i32mf2 (vint32mf2_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwsub_wx_i32m1 (vint32m1_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwsub_wx_i32m2 (vint32m2_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwsub_wx_i32m4 (vint32m4_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwsub_wx_i32m8 (vint32m8_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwsub_wx_i64m1 (vint64m1_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwsub_wx_i64m2 (vint64m2_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwsub_wx_i64m4 (vint64m4_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwsub_wx_i64m8 (vint64m8_t op1, int32_t op2, size_t vl);
vint16mf4_t __riscv_vwsub_wx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwsub_wx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwsub_wx_i16m1_m (vbool16_t mask, vint16m1_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwsub_wx_i16m2_m (vbool8_t mask, vint16m2_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwsub_wx_i16m4_m (vbool4_t mask, vint16m4_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwsub_wx_i16m8_m (vbool2_t mask, vint16m8_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwsub_wx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwsub_wx_i32m1_m (vbool32_t mask, vint32m1_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwsub_wx_i32m2_m (vbool16_t mask, vint32m2_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwsub_wx_i32m4_m (vbool8_t mask, vint32m4_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwsub_wx_i32m8_m (vbool4_t mask, vint32m8_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwsub_wx_i64m1_m (vbool64_t mask, vint64m1_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwsub_wx_i64m2_m (vbool32_t mask, vint64m2_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwsub_wx_i64m4_m (vbool16_t mask, vint64m4_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwsub_wx_i64m8_m (vbool8_t mask, vint64m8_t op1, int32_t op2, size_t vl);
12.1. vzext.vf2
- Mnemonic
vzext.vf2 vd, vs2, vm
- Encoding
- Description
-
Zero-extend SEW/2 source to SEW destination
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vzext_vf2.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vzext_vf2_u16mf4 (vuint8mf8_t op1, size_t vl);
vuint16mf2_t __riscv_vzext_vf2_u16mf2 (vuint8mf4_t op1, size_t vl);
vuint16m1_t __riscv_vzext_vf2_u16m1 (vuint8mf2_t op1, size_t vl);
vuint16m2_t __riscv_vzext_vf2_u16m2 (vuint8m1_t op1, size_t vl);
vuint16m4_t __riscv_vzext_vf2_u16m4 (vuint8m2_t op1, size_t vl);
vuint16m8_t __riscv_vzext_vf2_u16m8 (vuint8m4_t op1, size_t vl);
vuint32mf2_t __riscv_vzext_vf2_u32mf2 (vuint16mf4_t op1, size_t vl);
vuint32m1_t __riscv_vzext_vf2_u32m1 (vuint16mf2_t op1, size_t vl);
vuint32m2_t __riscv_vzext_vf2_u32m2 (vuint16m1_t op1, size_t vl);
vuint32m4_t __riscv_vzext_vf2_u32m4 (vuint16m2_t op1, size_t vl);
vuint32m8_t __riscv_vzext_vf2_u32m8 (vuint16m4_t op1, size_t vl);
vuint64m1_t __riscv_vzext_vf2_u64m1 (vuint32mf2_t op1, size_t vl);
vuint64m2_t __riscv_vzext_vf2_u64m2 (vuint32m1_t op1, size_t vl);
vuint64m4_t __riscv_vzext_vf2_u64m4 (vuint32m2_t op1, size_t vl);
vuint64m8_t __riscv_vzext_vf2_u64m8 (vuint32m4_t op1, size_t vl);
vuint16mf4_t __riscv_vzext_vf2_u16mf4_m (vbool64_t mask, vuint8mf8_t op1, size_t vl);
vuint16mf2_t __riscv_vzext_vf2_u16mf2_m (vbool32_t mask, vuint8mf4_t op1, size_t vl);
vuint16m1_t __riscv_vzext_vf2_u16m1_m (vbool16_t mask, vuint8mf2_t op1, size_t vl);
vuint16m2_t __riscv_vzext_vf2_u16m2_m (vbool8_t mask, vuint8m1_t op1, size_t vl);
vuint16m4_t __riscv_vzext_vf2_u16m4_m (vbool4_t mask, vuint8m2_t op1, size_t vl);
vuint16m8_t __riscv_vzext_vf2_u16m8_m (vbool2_t mask, vuint8m4_t op1, size_t vl);
vuint32mf2_t __riscv_vzext_vf2_u32mf2_m (vbool64_t mask, vuint16mf4_t op1, size_t vl);
vuint32m1_t __riscv_vzext_vf2_u32m1_m (vbool32_t mask, vuint16mf2_t op1, size_t vl);
vuint32m2_t __riscv_vzext_vf2_u32m2_m (vbool16_t mask, vuint16m1_t op1, size_t vl);
vuint32m4_t __riscv_vzext_vf2_u32m4_m (vbool8_t mask, vuint16m2_t op1, size_t vl);
vuint32m8_t __riscv_vzext_vf2_u32m8_m (vbool4_t mask, vuint16m4_t op1, size_t vl);
vuint64m1_t __riscv_vzext_vf2_u64m1_m (vbool64_t mask, vuint32mf2_t op1, size_t vl);
vuint64m2_t __riscv_vzext_vf2_u64m2_m (vbool32_t mask, vuint32m1_t op1, size_t vl);
vuint64m4_t __riscv_vzext_vf2_u64m4_m (vbool16_t mask, vuint32m2_t op1, size_t vl);
vuint64m8_t __riscv_vzext_vf2_u64m8_m (vbool8_t mask, vuint32m4_t op1, size_t vl);
12.2. vsext.vf2
- Mnemonic
vsext.vf2 vd, vs2, vm
- Encoding
- Description
-
Sign-extend SEW/2 source to SEW destination
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsext_vf2.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vsext_vf2_i16mf4 (vint8mf8_t op1, size_t vl);
vint16mf2_t __riscv_vsext_vf2_i16mf2 (vint8mf4_t op1, size_t vl);
vint16m1_t __riscv_vsext_vf2_i16m1 (vint8mf2_t op1, size_t vl);
vint16m2_t __riscv_vsext_vf2_i16m2 (vint8m1_t op1, size_t vl);
vint16m4_t __riscv_vsext_vf2_i16m4 (vint8m2_t op1, size_t vl);
vint16m8_t __riscv_vsext_vf2_i16m8 (vint8m4_t op1, size_t vl);
vint32mf2_t __riscv_vsext_vf2_i32mf2 (vint16mf4_t op1, size_t vl);
vint32m1_t __riscv_vsext_vf2_i32m1 (vint16mf2_t op1, size_t vl);
vint32m2_t __riscv_vsext_vf2_i32m2 (vint16m1_t op1, size_t vl);
vint32m4_t __riscv_vsext_vf2_i32m4 (vint16m2_t op1, size_t vl);
vint32m8_t __riscv_vsext_vf2_i32m8 (vint16m4_t op1, size_t vl);
vint64m1_t __riscv_vsext_vf2_i64m1 (vint32mf2_t op1, size_t vl);
vint64m2_t __riscv_vsext_vf2_i64m2 (vint32m1_t op1, size_t vl);
vint64m4_t __riscv_vsext_vf2_i64m4 (vint32m2_t op1, size_t vl);
vint64m8_t __riscv_vsext_vf2_i64m8 (vint32m4_t op1, size_t vl);
vint16mf4_t __riscv_vsext_vf2_i16mf4_m (vbool64_t mask, vint8mf8_t op1, size_t vl);
vint16mf2_t __riscv_vsext_vf2_i16mf2_m (vbool32_t mask, vint8mf4_t op1, size_t vl);
vint16m1_t __riscv_vsext_vf2_i16m1_m (vbool16_t mask, vint8mf2_t op1, size_t vl);
vint16m2_t __riscv_vsext_vf2_i16m2_m (vbool8_t mask, vint8m1_t op1, size_t vl);
vint16m4_t __riscv_vsext_vf2_i16m4_m (vbool4_t mask, vint8m2_t op1, size_t vl);
vint16m8_t __riscv_vsext_vf2_i16m8_m (vbool2_t mask, vint8m4_t op1, size_t vl);
vint32mf2_t __riscv_vsext_vf2_i32mf2_m (vbool64_t mask, vint16mf4_t op1, size_t vl);
vint32m1_t __riscv_vsext_vf2_i32m1_m (vbool32_t mask, vint16mf2_t op1, size_t vl);
vint32m2_t __riscv_vsext_vf2_i32m2_m (vbool16_t mask, vint16m1_t op1, size_t vl);
vint32m4_t __riscv_vsext_vf2_i32m4_m (vbool8_t mask, vint16m2_t op1, size_t vl);
vint32m8_t __riscv_vsext_vf2_i32m8_m (vbool4_t mask, vint16m4_t op1, size_t vl);
vint64m1_t __riscv_vsext_vf2_i64m1_m (vbool64_t mask, vint32mf2_t op1, size_t vl);
vint64m2_t __riscv_vsext_vf2_i64m2_m (vbool32_t mask, vint32m1_t op1, size_t vl);
vint64m4_t __riscv_vsext_vf2_i64m4_m (vbool16_t mask, vint32m2_t op1, size_t vl);
vint64m8_t __riscv_vsext_vf2_i64m8_m (vbool8_t mask, vint32m4_t op1, size_t vl);
12.3. vzext.vf4
- Mnemonic
vzext.vf4 vd, vs2, vm
- Encoding
- Description
-
Zero-extend SEW/4 source to SEW destination
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vzext_vf4.h
- Intrinsic Functions
Details
vuint32mf2_t __riscv_vzext_vf4_u32mf2 (vuint8mf8_t op1, size_t vl);
vuint32m1_t __riscv_vzext_vf4_u32m1 (vuint8mf4_t op1, size_t vl);
vuint32m2_t __riscv_vzext_vf4_u32m2 (vuint8mf2_t op1, size_t vl);
vuint32m4_t __riscv_vzext_vf4_u32m4 (vuint8m1_t op1, size_t vl);
vuint32m8_t __riscv_vzext_vf4_u32m8 (vuint8m2_t op1, size_t vl);
vuint64m1_t __riscv_vzext_vf4_u64m1 (vuint16mf4_t op1, size_t vl);
vuint64m2_t __riscv_vzext_vf4_u64m2 (vuint16mf2_t op1, size_t vl);
vuint64m4_t __riscv_vzext_vf4_u64m4 (vuint16m1_t op1, size_t vl);
vuint64m8_t __riscv_vzext_vf4_u64m8 (vuint16m2_t op1, size_t vl);
vuint32mf2_t __riscv_vzext_vf4_u32mf2_m (vbool64_t mask, vuint8mf8_t op1, size_t vl);
vuint32m1_t __riscv_vzext_vf4_u32m1_m (vbool32_t mask, vuint8mf4_t op1, size_t vl);
vuint32m2_t __riscv_vzext_vf4_u32m2_m (vbool16_t mask, vuint8mf2_t op1, size_t vl);
vuint32m4_t __riscv_vzext_vf4_u32m4_m (vbool8_t mask, vuint8m1_t op1, size_t vl);
vuint32m8_t __riscv_vzext_vf4_u32m8_m (vbool4_t mask, vuint8m2_t op1, size_t vl);
vuint64m1_t __riscv_vzext_vf4_u64m1_m (vbool64_t mask, vuint16mf4_t op1, size_t vl);
vuint64m2_t __riscv_vzext_vf4_u64m2_m (vbool32_t mask, vuint16mf2_t op1, size_t vl);
vuint64m4_t __riscv_vzext_vf4_u64m4_m (vbool16_t mask, vuint16m1_t op1, size_t vl);
vuint64m8_t __riscv_vzext_vf4_u64m8_m (vbool8_t mask, vuint16m2_t op1, size_t vl);
12.4. vsext.vf4
- Mnemonic
vsext.vf4 vd, vs2, vm
- Encoding
- Description
-
Sign-extend SEW/4 source to SEW destination
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsext_vf4.h
- Intrinsic Functions
Details
vint32mf2_t __riscv_vsext_vf4_i32mf2 (vint8mf8_t op1, size_t vl);
vint32m1_t __riscv_vsext_vf4_i32m1 (vint8mf4_t op1, size_t vl);
vint32m2_t __riscv_vsext_vf4_i32m2 (vint8mf2_t op1, size_t vl);
vint32m4_t __riscv_vsext_vf4_i32m4 (vint8m1_t op1, size_t vl);
vint32m8_t __riscv_vsext_vf4_i32m8 (vint8m2_t op1, size_t vl);
vint64m1_t __riscv_vsext_vf4_i64m1 (vint16mf4_t op1, size_t vl);
vint64m2_t __riscv_vsext_vf4_i64m2 (vint16mf2_t op1, size_t vl);
vint64m4_t __riscv_vsext_vf4_i64m4 (vint16m1_t op1, size_t vl);
vint64m8_t __riscv_vsext_vf4_i64m8 (vint16m2_t op1, size_t vl);
vint32mf2_t __riscv_vsext_vf4_i32mf2_m (vbool64_t mask, vint8mf8_t op1, size_t vl);
vint32m1_t __riscv_vsext_vf4_i32m1_m (vbool32_t mask, vint8mf4_t op1, size_t vl);
vint32m2_t __riscv_vsext_vf4_i32m2_m (vbool16_t mask, vint8mf2_t op1, size_t vl);
vint32m4_t __riscv_vsext_vf4_i32m4_m (vbool8_t mask, vint8m1_t op1, size_t vl);
vint32m8_t __riscv_vsext_vf4_i32m8_m (vbool4_t mask, vint8m2_t op1, size_t vl);
vint64m1_t __riscv_vsext_vf4_i64m1_m (vbool64_t mask, vint16mf4_t op1, size_t vl);
vint64m2_t __riscv_vsext_vf4_i64m2_m (vbool32_t mask, vint16mf2_t op1, size_t vl);
vint64m4_t __riscv_vsext_vf4_i64m4_m (vbool16_t mask, vint16m1_t op1, size_t vl);
vint64m8_t __riscv_vsext_vf4_i64m8_m (vbool8_t mask, vint16m2_t op1, size_t vl);
12.5. vzext.vf8
- Mnemonic
vzext.vf8 vd, vs2, vm
- Encoding
- Description
-
Zero-extend SEW/8 source to SEW destination
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vzext_vf8.h
- Intrinsic Functions
Details
vuint64m1_t __riscv_vzext_vf8_u64m1 (vuint8mf8_t op1, size_t vl);
vuint64m2_t __riscv_vzext_vf8_u64m2 (vuint8mf4_t op1, size_t vl);
vuint64m4_t __riscv_vzext_vf8_u64m4 (vuint8mf2_t op1, size_t vl);
vuint64m8_t __riscv_vzext_vf8_u64m8 (vuint8m1_t op1, size_t vl);
vuint64m1_t __riscv_vzext_vf8_u64m1_m (vbool64_t mask, vuint8mf8_t op1, size_t vl);
vuint64m2_t __riscv_vzext_vf8_u64m2_m (vbool32_t mask, vuint8mf4_t op1, size_t vl);
vuint64m4_t __riscv_vzext_vf8_u64m4_m (vbool16_t mask, vuint8mf2_t op1, size_t vl);
vuint64m8_t __riscv_vzext_vf8_u64m8_m (vbool8_t mask, vuint8m1_t op1, size_t vl);
12.6. vsext.vf8
- Mnemonic
vsext.vf8 vd, vs2, vm
- Encoding
- Description
-
Sign-extend SEW/8 source to SEW destination
13. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsext_vf8.h
- Intrinsic Functions
Details
vint64m1_t __riscv_vsext_vf8_i64m1 (vint8mf8_t op1, size_t vl);
vint64m2_t __riscv_vsext_vf8_i64m2 (vint8mf4_t op1, size_t vl);
vint64m4_t __riscv_vsext_vf8_i64m4 (vint8mf2_t op1, size_t vl);
vint64m8_t __riscv_vsext_vf8_i64m8 (vint8m1_t op1, size_t vl);
vint64m1_t __riscv_vsext_vf8_i64m1_m (vbool64_t mask, vint8mf8_t op1, size_t vl);
vint64m2_t __riscv_vsext_vf8_i64m2_m (vbool32_t mask, vint8mf4_t op1, size_t vl);
vint64m4_t __riscv_vsext_vf8_i64m4_m (vbool16_t mask, vint8mf2_t op1, size_t vl);
vint64m8_t __riscv_vsext_vf8_i64m8_m (vbool8_t mask, vint8m1_t op1, size_t vl);
13.1. vadc.vvm
- Mnemonic
vadc.vvm vd, vs2, vs1, v0
- Encoding
- Description
-
Produce sum with carry. vd[i] = vs2[i] + vs1[i] + v0.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vadc_vvm.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vadc_vvm_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl);
vint8mf4_t __riscv_vadc_vvm_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl);
vint8mf2_t __riscv_vadc_vvm_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl);
vint8m1_t __riscv_vadc_vvm_i8m1 (vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl);
vint8m2_t __riscv_vadc_vvm_i8m2 (vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl);
vint8m4_t __riscv_vadc_vvm_i8m4 (vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl);
vint8m8_t __riscv_vadc_vvm_i8m8 (vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl);
vint16mf4_t __riscv_vadc_vvm_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl);
vint16mf2_t __riscv_vadc_vvm_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl);
vint16m1_t __riscv_vadc_vvm_i16m1 (vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl);
vint16m2_t __riscv_vadc_vvm_i16m2 (vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl);
vint16m4_t __riscv_vadc_vvm_i16m4 (vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl);
vint16m8_t __riscv_vadc_vvm_i16m8 (vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl);
vint32mf2_t __riscv_vadc_vvm_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl);
vint32m1_t __riscv_vadc_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl);
vint32m2_t __riscv_vadc_vvm_i32m2 (vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl);
vint32m4_t __riscv_vadc_vvm_i32m4 (vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl);
vint32m8_t __riscv_vadc_vvm_i32m8 (vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl);
vint64m1_t __riscv_vadc_vvm_i64m1 (vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl);
vint64m2_t __riscv_vadc_vvm_i64m2 (vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl);
vint64m4_t __riscv_vadc_vvm_i64m4 (vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl);
vint64m8_t __riscv_vadc_vvm_i64m8 (vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl);
vuint8mf8_t __riscv_vadc_vvm_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl);
vuint8mf4_t __riscv_vadc_vvm_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl);
vuint8mf2_t __riscv_vadc_vvm_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl);
vuint8m1_t __riscv_vadc_vvm_u8m1 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl);
vuint8m2_t __riscv_vadc_vvm_u8m2 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl);
vuint8m4_t __riscv_vadc_vvm_u8m4 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl);
vuint8m8_t __riscv_vadc_vvm_u8m8 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl);
vuint16mf4_t __riscv_vadc_vvm_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl);
vuint16mf2_t __riscv_vadc_vvm_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl);
vuint16m1_t __riscv_vadc_vvm_u16m1 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl);
vuint16m2_t __riscv_vadc_vvm_u16m2 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl);
vuint16m4_t __riscv_vadc_vvm_u16m4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl);
vuint16m8_t __riscv_vadc_vvm_u16m8 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl);
vuint32mf2_t __riscv_vadc_vvm_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl);
vuint32m1_t __riscv_vadc_vvm_u32m1 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl);
vuint32m2_t __riscv_vadc_vvm_u32m2 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl);
vuint32m4_t __riscv_vadc_vvm_u32m4 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl);
vuint32m8_t __riscv_vadc_vvm_u32m8 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl);
vuint64m1_t __riscv_vadc_vvm_u64m1 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl);
vuint64m2_t __riscv_vadc_vvm_u64m2 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl);
vuint64m4_t __riscv_vadc_vvm_u64m4 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl);
vuint64m8_t __riscv_vadc_vvm_u64m8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl);
13.2. vadc.vxm
- Mnemonic
vadc.vxm vd, vs2, rs1, v0
- Encoding
- Description
-
Produce sum with carry. vd[i] = vs2[i] + x[rs1] + v0.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vadc_vxm.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vadc_vxm_i8mf8 (vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl);
vint8mf4_t __riscv_vadc_vxm_i8mf4 (vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl);
vint8mf2_t __riscv_vadc_vxm_i8mf2 (vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl);
vint8m1_t __riscv_vadc_vxm_i8m1 (vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl);
vint8m2_t __riscv_vadc_vxm_i8m2 (vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl);
vint8m4_t __riscv_vadc_vxm_i8m4 (vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl);
vint8m8_t __riscv_vadc_vxm_i8m8 (vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl);
vint16mf4_t __riscv_vadc_vxm_i16mf4 (vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl);
vint16mf2_t __riscv_vadc_vxm_i16mf2 (vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl);
vint16m1_t __riscv_vadc_vxm_i16m1 (vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl);
vint16m2_t __riscv_vadc_vxm_i16m2 (vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl);
vint16m4_t __riscv_vadc_vxm_i16m4 (vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl);
vint16m8_t __riscv_vadc_vxm_i16m8 (vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl);
vint32mf2_t __riscv_vadc_vxm_i32mf2 (vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl);
vint32m1_t __riscv_vadc_vxm_i32m1 (vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl);
vint32m2_t __riscv_vadc_vxm_i32m2 (vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl);
vint32m4_t __riscv_vadc_vxm_i32m4 (vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl);
vint32m8_t __riscv_vadc_vxm_i32m8 (vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl);
vint64m1_t __riscv_vadc_vxm_i64m1 (vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl);
vint64m2_t __riscv_vadc_vxm_i64m2 (vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl);
vint64m4_t __riscv_vadc_vxm_i64m4 (vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl);
vint64m8_t __riscv_vadc_vxm_i64m8 (vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl);
vuint8mf8_t __riscv_vadc_vxm_u8mf8 (vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl);
vuint8mf4_t __riscv_vadc_vxm_u8mf4 (vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl);
vuint8mf2_t __riscv_vadc_vxm_u8mf2 (vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl);
vuint8m1_t __riscv_vadc_vxm_u8m1 (vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl);
vuint8m2_t __riscv_vadc_vxm_u8m2 (vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl);
vuint8m4_t __riscv_vadc_vxm_u8m4 (vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl);
vuint8m8_t __riscv_vadc_vxm_u8m8 (vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl);
vuint16mf4_t __riscv_vadc_vxm_u16mf4 (vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl);
vuint16mf2_t __riscv_vadc_vxm_u16mf2 (vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl);
vuint16m1_t __riscv_vadc_vxm_u16m1 (vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl);
vuint16m2_t __riscv_vadc_vxm_u16m2 (vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl);
vuint16m4_t __riscv_vadc_vxm_u16m4 (vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl);
vuint16m8_t __riscv_vadc_vxm_u16m8 (vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl);
vuint32mf2_t __riscv_vadc_vxm_u32mf2 (vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl);
vuint32m1_t __riscv_vadc_vxm_u32m1 (vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl);
vuint32m2_t __riscv_vadc_vxm_u32m2 (vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl);
vuint32m4_t __riscv_vadc_vxm_u32m4 (vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl);
vuint32m8_t __riscv_vadc_vxm_u32m8 (vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl);
vuint64m1_t __riscv_vadc_vxm_u64m1 (vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl);
vuint64m2_t __riscv_vadc_vxm_u64m2 (vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl);
vuint64m4_t __riscv_vadc_vxm_u64m4 (vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl);
vuint64m8_t __riscv_vadc_vxm_u64m8 (vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl);
13.3. vadc.vim
- Mnemonic
vadc.vim vd, vs2, imm, v0
- Encoding
- Description
-
Produce sum with carry. vd[i] = vs2[i] + imm + v0.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vadc_vim.h
- Intrinsic Functions
Details
(No dedicated vadc.vim intrinsics are defined. Use the corresponding __riscv_vadc_vxm intrinsic; the compiler emits the vadc.vim instruction form when the scalar operand is a constant that fits the 5-bit signed immediate.)
13.4. vmadc.vvm
- Mnemonic
vmadc.vvm vd, vs2, vs1, v0
- Encoding
- Description
-
Produce carry out in mask register format. vd.mask[i] = carry_out(vs2[i] + vs1[i] + v0.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadc_vvm.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmadc_vvm_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vvm_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vvm_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vvm_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vvm_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vvm_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vvm_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vvm_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vvm_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vvm_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vvm_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vvm_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vvm_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vvm_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vvm_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vvm_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vvm_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vvm_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vvm_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vvm_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vvm_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vvm_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vvm_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl);
13.5. vmadc.vxm
- Mnemonic
vmadc.vxm vd, vs2, rs1, v0
- Encoding
- Description
-
Produce carry out in mask register format. vd.mask[i] = carry_out(vs2[i] + x[rs1] + v0.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadc_vxm.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmadc_vxm_i8mf8_b64 (vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_i8mf4_b32 (vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_i8mf2_b16 (vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_i8m1_b8 (vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vxm_i8m2_b4 (vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vxm_i8m4_b2 (vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vxm_i8m8_b1 (vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vxm_i16mf4_b64 (vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_i16mf2_b32 (vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_i16m1_b16 (vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_i16m2_b8 (vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vxm_i16m4_b4 (vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vxm_i16m8_b2 (vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vxm_i32mf2_b64 (vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_i32m1_b32 (vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_i32m2_b16 (vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_i32m4_b8 (vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vxm_i32m8_b4 (vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vxm_i64m1_b64 (vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_i64m2_b32 (vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_i64m4_b16 (vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_i64m8_b8 (vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vxm_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_u8m1_b8 (vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vxm_u8m2_b4 (vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vxm_u8m4_b2 (vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vxm_u8m8_b1 (vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vxm_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_u16m1_b16 (vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_u16m2_b8 (vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vxm_u16m4_b4 (vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vxm_u16m8_b2 (vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vxm_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_u32m1_b32 (vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_u32m2_b16 (vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_u32m4_b8 (vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vxm_u32m8_b4 (vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vxm_u64m1_b64 (vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vxm_u64m2_b32 (vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vxm_u64m4_b16 (vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vxm_u64m8_b8 (vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl);
13.6. vmadc.vim
- Mnemonic
vmadc.vim vd, vs2, imm, v0
- Encoding
- Description
-
Produce carry out in mask register format. vd.mask[i] = carry_out(vs2[i] + imm + v0.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadc_vim.h
- Intrinsic Functions
Details
(No dedicated vmadc.vim intrinsics are defined. Use the corresponding __riscv_vmadc_vxm intrinsic; the compiler emits the vmadc.vim instruction form when the scalar operand is a constant that fits the 5-bit signed immediate.)
13.7. vmadc.vv
- Mnemonic
vmadc.vv vd, vs2, vs1
- Encoding
- Description
-
Produce carry out in mask register format. vd.mask[i] = carry_out(vs2[i] + vs1[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadc_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmadc_vvm_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmadc_vvm_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmadc_vvm_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmadc_vvm_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vvm_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmadc_vvm_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmadc_vvm_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vvm_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmadc_vvm_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vvm_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vvm_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmadc_vvm_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmadc_vvm_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmadc_vvm_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vvm_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmadc_vvm_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmadc_vvm_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vvm_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmadc_vvm_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vvm_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmadc_vvm_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmadc_vvm_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmadc_vvm_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
13.8. vmadc.vx
- Mnemonic
vmadc.vx vd, vs2, rs1
- Encoding
- Description
-
Produce carry out in mask register format. vd.mask[i] = carry_out(vs2[i] + x[rs1])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadc_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmadc_vxm_i8mf8_b64 (vint8mf8_t op1, int8_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_i8mf4_b32 (vint8mf4_t op1, int8_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_i8mf2_b16 (vint8mf2_t op1, int8_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_i8m1_b8 (vint8m1_t op1, int8_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmadc_vxm_i8m2_b4 (vint8m2_t op1, int8_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmadc_vxm_i8m4_b2 (vint8m4_t op1, int8_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmadc_vxm_i8m8_b1 (vint8m8_t op1, int8_t op2, vbool1_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vxm_i16mf4_b64 (vint16mf4_t op1, int16_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_i16mf2_b32 (vint16mf2_t op1, int16_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_i16m1_b16 (vint16m1_t op1, int16_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_i16m2_b8 (vint16m2_t op1, int16_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmadc_vxm_i16m4_b4 (vint16m4_t op1, int16_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmadc_vxm_i16m8_b2 (vint16m8_t op1, int16_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmadc_vxm_i32mf2_b64 (vint32mf2_t op1, int32_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_i32m1_b32 (vint32m1_t op1, int32_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_i32m2_b16 (vint32m2_t op1, int32_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_i32m4_b8 (vint32m4_t op1, int32_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmadc_vxm_i32m8_b4 (vint32m8_t op1, int32_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmadc_vxm_i64m1_b64 (vint64m1_t op1, int64_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_i64m2_b32 (vint64m2_t op1, int64_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_i64m4_b16 (vint64m4_t op1, int64_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_i64m8_b8 (vint64m8_t op1, int64_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmadc_vxm_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_u8m1_b8 (vuint8m1_t op1, uint8_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmadc_vxm_u8m2_b4 (vuint8m2_t op1, uint8_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmadc_vxm_u8m4_b2 (vuint8m4_t op1, uint8_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmadc_vxm_u8m8_b1 (vuint8m8_t op1, uint8_t op2, vbool1_t carryin, size_t vl);
vbool1_t __riscv_vmadc_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmadc_vxm_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_u16m1_b16 (vuint16m1_t op1, uint16_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_u16m2_b8 (vuint16m2_t op1, uint16_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmadc_vxm_u16m4_b4 (vuint16m4_t op1, uint16_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmadc_vxm_u16m8_b2 (vuint16m8_t op1, uint16_t op2, vbool2_t carryin, size_t vl);
vbool2_t __riscv_vmadc_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmadc_vxm_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_u32m1_b32 (vuint32m1_t op1, uint32_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_u32m2_b16 (vuint32m2_t op1, uint32_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_u32m4_b8 (vuint32m4_t op1, uint32_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmadc_vxm_u32m8_b4 (vuint32m8_t op1, uint32_t op2, vbool4_t carryin, size_t vl);
vbool4_t __riscv_vmadc_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmadc_vxm_u64m1_b64 (vuint64m1_t op1, uint64_t op2, vbool64_t carryin, size_t vl);
vbool64_t __riscv_vmadc_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmadc_vxm_u64m2_b32 (vuint64m2_t op1, uint64_t op2, vbool32_t carryin, size_t vl);
vbool32_t __riscv_vmadc_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmadc_vxm_u64m4_b16 (vuint64m4_t op1, uint64_t op2, vbool16_t carryin, size_t vl);
vbool16_t __riscv_vmadc_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmadc_vxm_u64m8_b8 (vuint64m8_t op1, uint64_t op2, vbool8_t carryin, size_t vl);
vbool8_t __riscv_vmadc_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl);
13.9. vmadc.vi
- Mnemonic
vmadc.vi vd, vs2, imm
- Encoding
- Description
-
Produce carry out in mask register format, where imm is a 5-bit immediate sign-extended to SEW bits. vd.mask[i] = carry_out(vs2[i] + imm)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadc_vi.h
- Intrinsic Functions
Details
13.10. vsbc.vvm
- Mnemonic
vsbc.vvm vd, vs2, vs1, v0
- Encoding
- Description
-
Produce difference with borrow, using v0.mask[i] as the borrow-in. vd[i] = vs2[i] - vs1[i] - v0.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsbc_vvm.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsbc_vvm_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl);
vint8mf4_t __riscv_vsbc_vvm_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl);
vint8mf2_t __riscv_vsbc_vvm_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl);
vint8m1_t __riscv_vsbc_vvm_i8m1 (vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl);
vint8m2_t __riscv_vsbc_vvm_i8m2 (vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl);
vint8m4_t __riscv_vsbc_vvm_i8m4 (vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl);
vint8m8_t __riscv_vsbc_vvm_i8m8 (vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl);
vint16mf4_t __riscv_vsbc_vvm_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl);
vint16mf2_t __riscv_vsbc_vvm_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl);
vint16m1_t __riscv_vsbc_vvm_i16m1 (vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl);
vint16m2_t __riscv_vsbc_vvm_i16m2 (vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl);
vint16m4_t __riscv_vsbc_vvm_i16m4 (vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl);
vint16m8_t __riscv_vsbc_vvm_i16m8 (vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl);
vint32mf2_t __riscv_vsbc_vvm_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl);
vint32m1_t __riscv_vsbc_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl);
vint32m2_t __riscv_vsbc_vvm_i32m2 (vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl);
vint32m4_t __riscv_vsbc_vvm_i32m4 (vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl);
vint32m8_t __riscv_vsbc_vvm_i32m8 (vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl);
vint64m1_t __riscv_vsbc_vvm_i64m1 (vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl);
vint64m2_t __riscv_vsbc_vvm_i64m2 (vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl);
vint64m4_t __riscv_vsbc_vvm_i64m4 (vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl);
vint64m8_t __riscv_vsbc_vvm_i64m8 (vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl);
vuint8mf8_t __riscv_vsbc_vvm_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl);
vuint8mf4_t __riscv_vsbc_vvm_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl);
vuint8mf2_t __riscv_vsbc_vvm_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl);
vuint8m1_t __riscv_vsbc_vvm_u8m1 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl);
vuint8m2_t __riscv_vsbc_vvm_u8m2 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl);
vuint8m4_t __riscv_vsbc_vvm_u8m4 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl);
vuint8m8_t __riscv_vsbc_vvm_u8m8 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl);
vuint16mf4_t __riscv_vsbc_vvm_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl);
vuint16mf2_t __riscv_vsbc_vvm_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl);
vuint16m1_t __riscv_vsbc_vvm_u16m1 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl);
vuint16m2_t __riscv_vsbc_vvm_u16m2 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl);
vuint16m4_t __riscv_vsbc_vvm_u16m4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl);
vuint16m8_t __riscv_vsbc_vvm_u16m8 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl);
vuint32mf2_t __riscv_vsbc_vvm_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl);
vuint32m1_t __riscv_vsbc_vvm_u32m1 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl);
vuint32m2_t __riscv_vsbc_vvm_u32m2 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl);
vuint32m4_t __riscv_vsbc_vvm_u32m4 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl);
vuint32m8_t __riscv_vsbc_vvm_u32m8 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl);
vuint64m1_t __riscv_vsbc_vvm_u64m1 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl);
vuint64m2_t __riscv_vsbc_vvm_u64m2 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl);
vuint64m4_t __riscv_vsbc_vvm_u64m4 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl);
vuint64m8_t __riscv_vsbc_vvm_u64m8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl);
13.11. vsbc.vxm
- Mnemonic
vsbc.vxm vd, vs2, rs1, v0
- Encoding
- Description
-
Produce difference with borrow. vd[i] = vs2[i] - x[rs1] - v0.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsbc_vxm.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsbc_vxm_i8mf8 (vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl);
vint8mf4_t __riscv_vsbc_vxm_i8mf4 (vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl);
vint8mf2_t __riscv_vsbc_vxm_i8mf2 (vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl);
vint8m1_t __riscv_vsbc_vxm_i8m1 (vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl);
vint8m2_t __riscv_vsbc_vxm_i8m2 (vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl);
vint8m4_t __riscv_vsbc_vxm_i8m4 (vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl);
vint8m8_t __riscv_vsbc_vxm_i8m8 (vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl);
vint16mf4_t __riscv_vsbc_vxm_i16mf4 (vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl);
vint16mf2_t __riscv_vsbc_vxm_i16mf2 (vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl);
vint16m1_t __riscv_vsbc_vxm_i16m1 (vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl);
vint16m2_t __riscv_vsbc_vxm_i16m2 (vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl);
vint16m4_t __riscv_vsbc_vxm_i16m4 (vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl);
vint16m8_t __riscv_vsbc_vxm_i16m8 (vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl);
vint32mf2_t __riscv_vsbc_vxm_i32mf2 (vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl);
vint32m1_t __riscv_vsbc_vxm_i32m1 (vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl);
vint32m2_t __riscv_vsbc_vxm_i32m2 (vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl);
vint32m4_t __riscv_vsbc_vxm_i32m4 (vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl);
vint32m8_t __riscv_vsbc_vxm_i32m8 (vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl);
vint64m1_t __riscv_vsbc_vxm_i64m1 (vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl);
vint64m2_t __riscv_vsbc_vxm_i64m2 (vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl);
vint64m4_t __riscv_vsbc_vxm_i64m4 (vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl);
vint64m8_t __riscv_vsbc_vxm_i64m8 (vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl);
vuint8mf8_t __riscv_vsbc_vxm_u8mf8 (vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl);
vuint8mf4_t __riscv_vsbc_vxm_u8mf4 (vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl);
vuint8mf2_t __riscv_vsbc_vxm_u8mf2 (vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl);
vuint8m1_t __riscv_vsbc_vxm_u8m1 (vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl);
vuint8m2_t __riscv_vsbc_vxm_u8m2 (vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl);
vuint8m4_t __riscv_vsbc_vxm_u8m4 (vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl);
vuint8m8_t __riscv_vsbc_vxm_u8m8 (vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl);
vuint16mf4_t __riscv_vsbc_vxm_u16mf4 (vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl);
vuint16mf2_t __riscv_vsbc_vxm_u16mf2 (vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl);
vuint16m1_t __riscv_vsbc_vxm_u16m1 (vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl);
vuint16m2_t __riscv_vsbc_vxm_u16m2 (vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl);
vuint16m4_t __riscv_vsbc_vxm_u16m4 (vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl);
vuint16m8_t __riscv_vsbc_vxm_u16m8 (vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl);
vuint32mf2_t __riscv_vsbc_vxm_u32mf2 (vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl);
vuint32m1_t __riscv_vsbc_vxm_u32m1 (vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl);
vuint32m2_t __riscv_vsbc_vxm_u32m2 (vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl);
vuint32m4_t __riscv_vsbc_vxm_u32m4 (vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl);
vuint32m8_t __riscv_vsbc_vxm_u32m8 (vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl);
vuint64m1_t __riscv_vsbc_vxm_u64m1 (vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl);
vuint64m2_t __riscv_vsbc_vxm_u64m2 (vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl);
vuint64m4_t __riscv_vsbc_vxm_u64m4 (vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl);
vuint64m8_t __riscv_vsbc_vxm_u64m8 (vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl);
13.12. vmsbc.vvm
- Mnemonic
vmsbc.vvm vd, vs2, vs1, v0
- Encoding
- Description
-
Produce borrow out in mask register format, using v0.mask[i] as the borrow-in. vd.mask[i] = borrow_out(vs2[i] - vs1[i] - v0.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsbc_vvm.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsbc_vvm_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vvm_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vvm_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vvm_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vvm_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vvm_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vvm_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vvm_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vvm_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vvm_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vvm_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vvm_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vvm_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vvm_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vvm_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vvm_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl);
13.13. vmsbc.vxm
- Mnemonic
vmsbc.vxm vd, vs2, rs1, v0
- Encoding
- Description
-
Produce borrow out in mask register format. vd.mask[i] = borrow_out(vs2[i] - x[rs1] - v0.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsbc_vxm.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsbc_vxm_i8mf8_b64 (vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i8mf4_b32 (vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i8mf2_b16 (vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i8m1_b8 (vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vxm_i8m2_b4 (vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vxm_i8m4_b2 (vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vxm_i8m8_b1 (vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vxm_i16mf4_b64 (vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i16mf2_b32 (vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i16m1_b16 (vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i16m2_b8 (vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vxm_i16m4_b4 (vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vxm_i16m8_b2 (vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vxm_i32mf2_b64 (vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i32m1_b32 (vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i32m2_b16 (vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i32m4_b8 (vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vxm_i32m8_b4 (vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vxm_i64m1_b64 (vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i64m2_b32 (vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i64m4_b16 (vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i64m8_b8 (vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u8m1_b8 (vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vxm_u8m2_b4 (vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vxm_u8m4_b2 (vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vxm_u8m8_b1 (vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u16m1_b16 (vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u16m2_b8 (vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vxm_u16m4_b4 (vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vxm_u16m8_b2 (vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u32m1_b32 (vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u32m2_b16 (vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u32m4_b8 (vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vxm_u32m8_b4 (vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u64m1_b64 (vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u64m2_b32 (vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u64m4_b16 (vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u64m8_b8 (vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl);
13.14. vmsbc.vv
- Mnemonic
vmsbc.vv vd, vs2, vs1
- Encoding
- Description
-
Produce borrow out in mask register format. vd.mask[i] = borrow_out(vs2[i] - vs1[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsbc_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsbc_vvm_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vvm_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vvm_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsbc_vvm_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vvm_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vvm_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vvm_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vvm_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vvm_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vvm_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vvm_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vvm_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsbc_vvm_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vvm_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vvm_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vvm_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vvm_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vvm_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vvm_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vvm_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
13.15. vmsbc.vx
- Mnemonic
vmsbc.vx vd, vs2, rs1
- Encoding
- Description
-
Produce borrow out in mask register format. vd.mask[i] = borrow_out(vs2[i] - x[rs1])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsbc_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsbc_vxm_i8mf8_b64 (vint8mf8_t op1, int8_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i8mf4_b32 (vint8mf4_t op1, int8_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i8mf2_b16 (vint8mf2_t op1, int8_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i8m1_b8 (vint8m1_t op1, int8_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vxm_i8m2_b4 (vint8m2_t op1, int8_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vxm_i8m4_b2 (vint8m4_t op1, int8_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmsbc_vxm_i8m8_b1 (vint8m8_t op1, int8_t op2, vbool1_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vxm_i16mf4_b64 (vint16mf4_t op1, int16_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i16mf2_b32 (vint16mf2_t op1, int16_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i16m1_b16 (vint16m1_t op1, int16_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i16m2_b8 (vint16m2_t op1, int16_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vxm_i16m4_b4 (vint16m4_t op1, int16_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vxm_i16m8_b2 (vint16m8_t op1, int16_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vxm_i32mf2_b64 (vint32mf2_t op1, int32_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i32m1_b32 (vint32m1_t op1, int32_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i32m2_b16 (vint32m2_t op1, int32_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i32m4_b8 (vint32m4_t op1, int32_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vxm_i32m8_b4 (vint32m8_t op1, int32_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vxm_i64m1_b64 (vint64m1_t op1, int64_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_i64m2_b32 (vint64m2_t op1, int64_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_i64m4_b16 (vint64m4_t op1, int64_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_i64m8_b8 (vint64m8_t op1, int64_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u8m1_b8 (vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vxm_u8m2_b4 (vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vxm_u8m4_b2 (vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsbc_vxm_u8m8_b1 (vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, size_t vl);
vbool1_t __riscv_vmsbc_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u16m1_b16 (vuint16m1_t op1, uint16_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u16m2_b8 (vuint16m2_t op1, uint16_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vxm_u16m4_b4 (vuint16m4_t op1, uint16_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsbc_vxm_u16m8_b2 (vuint16m8_t op1, uint16_t op2, vbool2_t borrowin, size_t vl);
vbool2_t __riscv_vmsbc_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u32m1_b32 (vuint32m1_t op1, uint32_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u32m2_b16 (vuint32m2_t op1, uint32_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u32m4_b8 (vuint32m4_t op1, uint32_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsbc_vxm_u32m8_b4 (vuint32m8_t op1, uint32_t op2, vbool4_t borrowin, size_t vl);
vbool4_t __riscv_vmsbc_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsbc_vxm_u64m1_b64 (vuint64m1_t op1, uint64_t op2, vbool64_t borrowin, size_t vl);
vbool64_t __riscv_vmsbc_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsbc_vxm_u64m2_b32 (vuint64m2_t op1, uint64_t op2, vbool32_t borrowin, size_t vl);
vbool32_t __riscv_vmsbc_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsbc_vxm_u64m4_b16 (vuint64m4_t op1, uint64_t op2, vbool16_t borrowin, size_t vl);
vbool16_t __riscv_vmsbc_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsbc_vxm_u64m8_b8 (vuint64m8_t op1, uint64_t op2, vbool8_t borrowin, size_t vl);
vbool8_t __riscv_vmsbc_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl);
14. Vector Bitwise Logical Instructions
14.1. vand.vv
- Mnemonic
vand.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Bitwise logical operations, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vand_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vand_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vand_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vand_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vand_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vand_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vand_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vand_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vand_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vand_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vand_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vand_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vand_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vand_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vand_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vand_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vand_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vand_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vand_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vand_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vand_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vand_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vand_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vand_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vand_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vand_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vand_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vand_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vand_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vand_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vand_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vand_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vand_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vand_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vand_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vand_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vand_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vand_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vand_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vand_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vand_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vand_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vand_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vand_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vand_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vand_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vand_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vand_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vand_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vand_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vand_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vand_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vand_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vand_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vand_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vand_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vand_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vand_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vand_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vand_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vand_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vand_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vand_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vand_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vand_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vand_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vand_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vand_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vand_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vand_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vand_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vand_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vand_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vand_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vand_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vand_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vand_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vand_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vand_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vand_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vand_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vand_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vand_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vand_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vand_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vand_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vand_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vand_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vand_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.2. vand.vx
- Mnemonic
vand.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Bitwise logical operations, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vand_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vand_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vand_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vand_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vand_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vand_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vand_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vand_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vand_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vand_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vand_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vand_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vand_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vand_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vand_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vand_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vand_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vand_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vand_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vand_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vand_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vand_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vand_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vand_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vand_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vand_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vand_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vand_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vand_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vand_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vand_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vand_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vand_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vand_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vand_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vand_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vand_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vand_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vand_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vand_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vand_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vand_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vand_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vand_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vand_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vand_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vand_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vand_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vand_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vand_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vand_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vand_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vand_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vand_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vand_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vand_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vand_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vand_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vand_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vand_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vand_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vand_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vand_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vand_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vand_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vand_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vand_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vand_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vand_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vand_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vand_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vand_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vand_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vand_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vand_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vand_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vand_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vand_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vand_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vand_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vand_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vand_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vand_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vand_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vand_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vand_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vand_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vand_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vand_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.3. vand.vi
- Mnemonic
vand.vi vd, vs2, imm, vm
- Encoding
- Description
-
Bitwise logical operations, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vand_vi.h
- Intrinsic Functions
Details
There are no dedicated vand.vi intrinsic functions. Use the corresponding __riscv_vand_vx intrinsic with a constant scalar operand; when the constant fits in the 5-bit signed immediate range [-16, 15], the compiler emits the vand.vi encoding.
14.4. vor.vv
- Mnemonic
vor.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Bitwise logical operations, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vor_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vor_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vor_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vor_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vor_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vor_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vor_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vor_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vor_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vor_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vor_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vor_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vor_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vor_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vor_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vor_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vor_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vor_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vor_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vor_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vor_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vor_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vor_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vor_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vor_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vor_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vor_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vor_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vor_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vor_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vor_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vor_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vor_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vor_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vor_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vor_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vor_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vor_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vor_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vor_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vor_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vor_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vor_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vor_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vor_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vor_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vor_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vor_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vor_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vor_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vor_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vor_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vor_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vor_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vor_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vor_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vor_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vor_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vor_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vor_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vor_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vor_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vor_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vor_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vor_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vor_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vor_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vor_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vor_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vor_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vor_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vor_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vor_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vor_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vor_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vor_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vor_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vor_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vor_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vor_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vor_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vor_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vor_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vor_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vor_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vor_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vor_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vor_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vor_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.5. vor.vx
- Mnemonic
vor.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Bitwise logical operations, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vor_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vor_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vor_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vor_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vor_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vor_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vor_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vor_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vor_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vor_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vor_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vor_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vor_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vor_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vor_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vor_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vor_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vor_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vor_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vor_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vor_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vor_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vor_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vor_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vor_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vor_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vor_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vor_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vor_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vor_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vor_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vor_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vor_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vor_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vor_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vor_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vor_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vor_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vor_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vor_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vor_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vor_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vor_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vor_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vor_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vor_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vor_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vor_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vor_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vor_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vor_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vor_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vor_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vor_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vor_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vor_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vor_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vor_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vor_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vor_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vor_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vor_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vor_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vor_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vor_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vor_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vor_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vor_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vor_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vor_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vor_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vor_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vor_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vor_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vor_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vor_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vor_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vor_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vor_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vor_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vor_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vor_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vor_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vor_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vor_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vor_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vor_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vor_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vor_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.6. vor.vi
- Mnemonic
vor.vi vd, vs2, imm, vm
- Encoding
- Description
-
Bitwise logical operations, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vor_vi.h
- Intrinsic Functions
Details
14.7. vxor.vv
- Mnemonic
vxor.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Bitwise logical operations, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vxor_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vxor_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vxor_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vxor_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vxor_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vxor_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vxor_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vxor_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vxor_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vxor_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vxor_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vxor_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vxor_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vxor_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vxor_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vxor_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vxor_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vxor_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vxor_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vxor_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vxor_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vxor_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vxor_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vxor_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vxor_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vxor_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vxor_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vxor_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vxor_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vxor_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vxor_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vxor_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vxor_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vxor_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vxor_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vxor_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vxor_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vxor_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vxor_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vxor_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vxor_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vxor_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vxor_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vxor_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vxor_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vxor_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vxor_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vxor_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vxor_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vxor_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vxor_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vxor_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vxor_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vxor_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vxor_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vxor_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vxor_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vxor_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vxor_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vxor_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vxor_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vxor_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vxor_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vxor_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vxor_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vxor_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vxor_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vxor_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vxor_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vxor_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vxor_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vxor_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vxor_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vxor_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vxor_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vxor_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vxor_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vxor_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vxor_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vxor_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vxor_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vxor_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vxor_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vxor_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vxor_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vxor_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vxor_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vxor_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vxor_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.8. vxor.vx
- Mnemonic
vxor.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Bitwise logical operations, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vxor_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vxor_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vxor_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vxor_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vxor_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vxor_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vxor_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vxor_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vxor_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vxor_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vxor_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vxor_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vxor_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vxor_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vxor_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vxor_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vxor_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vxor_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vxor_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vxor_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vxor_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vxor_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vxor_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vxor_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vxor_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vxor_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vxor_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vxor_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vxor_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vxor_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vxor_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vxor_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vxor_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vxor_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vxor_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vxor_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vxor_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vxor_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vxor_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vxor_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vxor_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vxor_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vxor_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vxor_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vxor_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vxor_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vxor_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vxor_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vxor_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vxor_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vxor_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vxor_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vxor_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vxor_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vxor_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vxor_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vxor_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vxor_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vxor_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vxor_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vxor_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vxor_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vxor_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vxor_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vxor_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vxor_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vxor_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vxor_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vxor_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vxor_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vxor_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vxor_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vxor_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vxor_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vxor_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vxor_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vxor_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vxor_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vxor_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vxor_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vxor_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vxor_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vxor_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vxor_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vxor_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vxor_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vxor_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vxor_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vxor_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.9. vxor.vi
- Mnemonic
vxor.vi vd, vs2, imm, vm
- Encoding
- Description
-
Bitwise logical operations, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vxor_vi.h
- Intrinsic Functions
Details
14.10. Vector Single-Width Shift Instructions
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/Vector.h
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
14.11. vsll.vv
- Mnemonic
vsll.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Bit shift operations, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsll_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsll_vv_i8mf8 (vint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vint8mf4_t __riscv_vsll_vv_i8mf4 (vint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vint8mf2_t __riscv_vsll_vv_i8mf2 (vint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vint8m1_t __riscv_vsll_vv_i8m1 (vint8m1_t op1, vuint8m1_t shift, size_t vl);
vint8m2_t __riscv_vsll_vv_i8m2 (vint8m2_t op1, vuint8m2_t shift, size_t vl);
vint8m4_t __riscv_vsll_vv_i8m4 (vint8m4_t op1, vuint8m4_t shift, size_t vl);
vint8m8_t __riscv_vsll_vv_i8m8 (vint8m8_t op1, vuint8m8_t shift, size_t vl);
vint16mf4_t __riscv_vsll_vv_i16mf4 (vint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vint16mf2_t __riscv_vsll_vv_i16mf2 (vint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vint16m1_t __riscv_vsll_vv_i16m1 (vint16m1_t op1, vuint16m1_t shift, size_t vl);
vint16m2_t __riscv_vsll_vv_i16m2 (vint16m2_t op1, vuint16m2_t shift, size_t vl);
vint16m4_t __riscv_vsll_vv_i16m4 (vint16m4_t op1, vuint16m4_t shift, size_t vl);
vint16m8_t __riscv_vsll_vv_i16m8 (vint16m8_t op1, vuint16m8_t shift, size_t vl);
vint32mf2_t __riscv_vsll_vv_i32mf2 (vint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vint32m1_t __riscv_vsll_vv_i32m1 (vint32m1_t op1, vuint32m1_t shift, size_t vl);
vint32m2_t __riscv_vsll_vv_i32m2 (vint32m2_t op1, vuint32m2_t shift, size_t vl);
vint32m4_t __riscv_vsll_vv_i32m4 (vint32m4_t op1, vuint32m4_t shift, size_t vl);
vint32m8_t __riscv_vsll_vv_i32m8 (vint32m8_t op1, vuint32m8_t shift, size_t vl);
vint64m1_t __riscv_vsll_vv_i64m1 (vint64m1_t op1, vuint64m1_t shift, size_t vl);
vint64m2_t __riscv_vsll_vv_i64m2 (vint64m2_t op1, vuint64m2_t shift, size_t vl);
vint64m4_t __riscv_vsll_vv_i64m4 (vint64m4_t op1, vuint64m4_t shift, size_t vl);
vint64m8_t __riscv_vsll_vv_i64m8 (vint64m8_t op1, vuint64m8_t shift, size_t vl);
vuint8mf8_t __riscv_vsll_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vuint8mf4_t __riscv_vsll_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vuint8mf2_t __riscv_vsll_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vuint8m1_t __riscv_vsll_vv_u8m1 (vuint8m1_t op1, vuint8m1_t shift, size_t vl);
vuint8m2_t __riscv_vsll_vv_u8m2 (vuint8m2_t op1, vuint8m2_t shift, size_t vl);
vuint8m4_t __riscv_vsll_vv_u8m4 (vuint8m4_t op1, vuint8m4_t shift, size_t vl);
vuint8m8_t __riscv_vsll_vv_u8m8 (vuint8m8_t op1, vuint8m8_t shift, size_t vl);
vuint16mf4_t __riscv_vsll_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vuint16mf2_t __riscv_vsll_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vuint16m1_t __riscv_vsll_vv_u16m1 (vuint16m1_t op1, vuint16m1_t shift, size_t vl);
vuint16m2_t __riscv_vsll_vv_u16m2 (vuint16m2_t op1, vuint16m2_t shift, size_t vl);
vuint16m4_t __riscv_vsll_vv_u16m4 (vuint16m4_t op1, vuint16m4_t shift, size_t vl);
vuint16m8_t __riscv_vsll_vv_u16m8 (vuint16m8_t op1, vuint16m8_t shift, size_t vl);
vuint32mf2_t __riscv_vsll_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vuint32m1_t __riscv_vsll_vv_u32m1 (vuint32m1_t op1, vuint32m1_t shift, size_t vl);
vuint32m2_t __riscv_vsll_vv_u32m2 (vuint32m2_t op1, vuint32m2_t shift, size_t vl);
vuint32m4_t __riscv_vsll_vv_u32m4 (vuint32m4_t op1, vuint32m4_t shift, size_t vl);
vuint32m8_t __riscv_vsll_vv_u32m8 (vuint32m8_t op1, vuint32m8_t shift, size_t vl);
vuint64m1_t __riscv_vsll_vv_u64m1 (vuint64m1_t op1, vuint64m1_t shift, size_t vl);
vuint64m2_t __riscv_vsll_vv_u64m2 (vuint64m2_t op1, vuint64m2_t shift, size_t vl);
vuint64m4_t __riscv_vsll_vv_u64m4 (vuint64m4_t op1, vuint64m4_t shift, size_t vl);
vuint64m8_t __riscv_vsll_vv_u64m8 (vuint64m8_t op1, vuint64m8_t shift, size_t vl);
vint8mf8_t __riscv_vsll_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vint8mf4_t __riscv_vsll_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vint8mf2_t __riscv_vsll_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vint8m1_t __riscv_vsll_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl);
vint8m2_t __riscv_vsll_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl);
vint8m4_t __riscv_vsll_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl);
vint8m8_t __riscv_vsll_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl);
vint16mf4_t __riscv_vsll_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vint16mf2_t __riscv_vsll_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vint16m1_t __riscv_vsll_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl);
vint16m2_t __riscv_vsll_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl);
vint16m4_t __riscv_vsll_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl);
vint16m8_t __riscv_vsll_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl);
vint32mf2_t __riscv_vsll_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vint32m1_t __riscv_vsll_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl);
vint32m2_t __riscv_vsll_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl);
vint32m4_t __riscv_vsll_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl);
vint32m8_t __riscv_vsll_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl);
vint64m1_t __riscv_vsll_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl);
vint64m2_t __riscv_vsll_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl);
vint64m4_t __riscv_vsll_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl);
vint64m8_t __riscv_vsll_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl);
vuint8mf8_t __riscv_vsll_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vuint8mf4_t __riscv_vsll_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vuint8mf2_t __riscv_vsll_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vuint8m1_t __riscv_vsll_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl);
vuint8m2_t __riscv_vsll_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl);
vuint8m4_t __riscv_vsll_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl);
vuint8m8_t __riscv_vsll_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl);
vuint16mf4_t __riscv_vsll_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vuint16mf2_t __riscv_vsll_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vuint16m1_t __riscv_vsll_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl);
vuint16m2_t __riscv_vsll_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl);
vuint16m4_t __riscv_vsll_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl);
vuint16m8_t __riscv_vsll_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl);
vuint32mf2_t __riscv_vsll_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vuint32m1_t __riscv_vsll_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl);
vuint32m2_t __riscv_vsll_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl);
vuint32m4_t __riscv_vsll_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl);
vuint32m8_t __riscv_vsll_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl);
vuint64m1_t __riscv_vsll_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl);
vuint64m2_t __riscv_vsll_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl);
vuint64m4_t __riscv_vsll_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl);
vuint64m8_t __riscv_vsll_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl);
14.12. vsll.vx
- Mnemonic
vsll.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Bit shift operations, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsll_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsll_vx_i8mf8 (vint8mf8_t op1, size_t shift, size_t vl);
vint8mf4_t __riscv_vsll_vx_i8mf4 (vint8mf4_t op1, size_t shift, size_t vl);
vint8mf2_t __riscv_vsll_vx_i8mf2 (vint8mf2_t op1, size_t shift, size_t vl);
vint8m1_t __riscv_vsll_vx_i8m1 (vint8m1_t op1, size_t shift, size_t vl);
vint8m2_t __riscv_vsll_vx_i8m2 (vint8m2_t op1, size_t shift, size_t vl);
vint8m4_t __riscv_vsll_vx_i8m4 (vint8m4_t op1, size_t shift, size_t vl);
vint8m8_t __riscv_vsll_vx_i8m8 (vint8m8_t op1, size_t shift, size_t vl);
vint16mf4_t __riscv_vsll_vx_i16mf4 (vint16mf4_t op1, size_t shift, size_t vl);
vint16mf2_t __riscv_vsll_vx_i16mf2 (vint16mf2_t op1, size_t shift, size_t vl);
vint16m1_t __riscv_vsll_vx_i16m1 (vint16m1_t op1, size_t shift, size_t vl);
vint16m2_t __riscv_vsll_vx_i16m2 (vint16m2_t op1, size_t shift, size_t vl);
vint16m4_t __riscv_vsll_vx_i16m4 (vint16m4_t op1, size_t shift, size_t vl);
vint16m8_t __riscv_vsll_vx_i16m8 (vint16m8_t op1, size_t shift, size_t vl);
vint32mf2_t __riscv_vsll_vx_i32mf2 (vint32mf2_t op1, size_t shift, size_t vl);
vint32m1_t __riscv_vsll_vx_i32m1 (vint32m1_t op1, size_t shift, size_t vl);
vint32m2_t __riscv_vsll_vx_i32m2 (vint32m2_t op1, size_t shift, size_t vl);
vint32m4_t __riscv_vsll_vx_i32m4 (vint32m4_t op1, size_t shift, size_t vl);
vint32m8_t __riscv_vsll_vx_i32m8 (vint32m8_t op1, size_t shift, size_t vl);
vint64m1_t __riscv_vsll_vx_i64m1 (vint64m1_t op1, size_t shift, size_t vl);
vint64m2_t __riscv_vsll_vx_i64m2 (vint64m2_t op1, size_t shift, size_t vl);
vint64m4_t __riscv_vsll_vx_i64m4 (vint64m4_t op1, size_t shift, size_t vl);
vint64m8_t __riscv_vsll_vx_i64m8 (vint64m8_t op1, size_t shift, size_t vl);
vuint8mf8_t __riscv_vsll_vx_u8mf8 (vuint8mf8_t op1, size_t shift, size_t vl);
vuint8mf4_t __riscv_vsll_vx_u8mf4 (vuint8mf4_t op1, size_t shift, size_t vl);
vuint8mf2_t __riscv_vsll_vx_u8mf2 (vuint8mf2_t op1, size_t shift, size_t vl);
vuint8m1_t __riscv_vsll_vx_u8m1 (vuint8m1_t op1, size_t shift, size_t vl);
vuint8m2_t __riscv_vsll_vx_u8m2 (vuint8m2_t op1, size_t shift, size_t vl);
vuint8m4_t __riscv_vsll_vx_u8m4 (vuint8m4_t op1, size_t shift, size_t vl);
vuint8m8_t __riscv_vsll_vx_u8m8 (vuint8m8_t op1, size_t shift, size_t vl);
vuint16mf4_t __riscv_vsll_vx_u16mf4 (vuint16mf4_t op1, size_t shift, size_t vl);
vuint16mf2_t __riscv_vsll_vx_u16mf2 (vuint16mf2_t op1, size_t shift, size_t vl);
vuint16m1_t __riscv_vsll_vx_u16m1 (vuint16m1_t op1, size_t shift, size_t vl);
vuint16m2_t __riscv_vsll_vx_u16m2 (vuint16m2_t op1, size_t shift, size_t vl);
vuint16m4_t __riscv_vsll_vx_u16m4 (vuint16m4_t op1, size_t shift, size_t vl);
vuint16m8_t __riscv_vsll_vx_u16m8 (vuint16m8_t op1, size_t shift, size_t vl);
vuint32mf2_t __riscv_vsll_vx_u32mf2 (vuint32mf2_t op1, size_t shift, size_t vl);
vuint32m1_t __riscv_vsll_vx_u32m1 (vuint32m1_t op1, size_t shift, size_t vl);
vuint32m2_t __riscv_vsll_vx_u32m2 (vuint32m2_t op1, size_t shift, size_t vl);
vuint32m4_t __riscv_vsll_vx_u32m4 (vuint32m4_t op1, size_t shift, size_t vl);
vuint32m8_t __riscv_vsll_vx_u32m8 (vuint32m8_t op1, size_t shift, size_t vl);
vuint64m1_t __riscv_vsll_vx_u64m1 (vuint64m1_t op1, size_t shift, size_t vl);
vuint64m2_t __riscv_vsll_vx_u64m2 (vuint64m2_t op1, size_t shift, size_t vl);
vuint64m4_t __riscv_vsll_vx_u64m4 (vuint64m4_t op1, size_t shift, size_t vl);
vuint64m8_t __riscv_vsll_vx_u64m8 (vuint64m8_t op1, size_t shift, size_t vl);
vint8mf8_t __riscv_vsll_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl);
vint8mf4_t __riscv_vsll_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl);
vint8mf2_t __riscv_vsll_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl);
vint8m1_t __riscv_vsll_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl);
vint8m2_t __riscv_vsll_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl);
vint8m4_t __riscv_vsll_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl);
vint8m8_t __riscv_vsll_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl);
vint16mf4_t __riscv_vsll_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl);
vint16mf2_t __riscv_vsll_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl);
vint16m1_t __riscv_vsll_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl);
vint16m2_t __riscv_vsll_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl);
vint16m4_t __riscv_vsll_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl);
vint16m8_t __riscv_vsll_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl);
vint32mf2_t __riscv_vsll_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl);
vint32m1_t __riscv_vsll_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl);
vint32m2_t __riscv_vsll_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl);
vint32m4_t __riscv_vsll_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl);
vint32m8_t __riscv_vsll_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl);
vint64m1_t __riscv_vsll_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl);
vint64m2_t __riscv_vsll_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl);
vint64m4_t __riscv_vsll_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl);
vint64m8_t __riscv_vsll_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl);
vuint8mf8_t __riscv_vsll_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl);
vuint8mf4_t __riscv_vsll_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl);
vuint8mf2_t __riscv_vsll_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl);
vuint8m1_t __riscv_vsll_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl);
vuint8m2_t __riscv_vsll_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl);
vuint8m4_t __riscv_vsll_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl);
vuint8m8_t __riscv_vsll_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl);
vuint16mf4_t __riscv_vsll_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl);
vuint16mf2_t __riscv_vsll_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl);
vuint16m1_t __riscv_vsll_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl);
vuint16m2_t __riscv_vsll_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl);
vuint16m4_t __riscv_vsll_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl);
vuint16m8_t __riscv_vsll_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl);
vuint32mf2_t __riscv_vsll_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl);
vuint32m1_t __riscv_vsll_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl);
vuint32m2_t __riscv_vsll_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl);
vuint32m4_t __riscv_vsll_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl);
vuint32m8_t __riscv_vsll_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl);
vuint64m1_t __riscv_vsll_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl);
vuint64m2_t __riscv_vsll_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl);
vuint64m4_t __riscv_vsll_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl);
vuint64m8_t __riscv_vsll_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl);
14.13. vsll.vi
- Mnemonic
vsll.vi vd, vs2, uimm, vm
- Encoding
- Description
-
Bit shift operations, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsll_vi.h
- Intrinsic Functions
Details
14.14. vsrl.vv
- Mnemonic
vsrl.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Bit shift operations, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsrl_vv.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vsrl_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vuint8mf4_t __riscv_vsrl_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vuint8mf2_t __riscv_vsrl_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vuint8m1_t __riscv_vsrl_vv_u8m1 (vuint8m1_t op1, vuint8m1_t shift, size_t vl);
vuint8m2_t __riscv_vsrl_vv_u8m2 (vuint8m2_t op1, vuint8m2_t shift, size_t vl);
vuint8m4_t __riscv_vsrl_vv_u8m4 (vuint8m4_t op1, vuint8m4_t shift, size_t vl);
vuint8m8_t __riscv_vsrl_vv_u8m8 (vuint8m8_t op1, vuint8m8_t shift, size_t vl);
vuint16mf4_t __riscv_vsrl_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vuint16mf2_t __riscv_vsrl_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vuint16m1_t __riscv_vsrl_vv_u16m1 (vuint16m1_t op1, vuint16m1_t shift, size_t vl);
vuint16m2_t __riscv_vsrl_vv_u16m2 (vuint16m2_t op1, vuint16m2_t shift, size_t vl);
vuint16m4_t __riscv_vsrl_vv_u16m4 (vuint16m4_t op1, vuint16m4_t shift, size_t vl);
vuint16m8_t __riscv_vsrl_vv_u16m8 (vuint16m8_t op1, vuint16m8_t shift, size_t vl);
vuint32mf2_t __riscv_vsrl_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vuint32m1_t __riscv_vsrl_vv_u32m1 (vuint32m1_t op1, vuint32m1_t shift, size_t vl);
vuint32m2_t __riscv_vsrl_vv_u32m2 (vuint32m2_t op1, vuint32m2_t shift, size_t vl);
vuint32m4_t __riscv_vsrl_vv_u32m4 (vuint32m4_t op1, vuint32m4_t shift, size_t vl);
vuint32m8_t __riscv_vsrl_vv_u32m8 (vuint32m8_t op1, vuint32m8_t shift, size_t vl);
vuint64m1_t __riscv_vsrl_vv_u64m1 (vuint64m1_t op1, vuint64m1_t shift, size_t vl);
vuint64m2_t __riscv_vsrl_vv_u64m2 (vuint64m2_t op1, vuint64m2_t shift, size_t vl);
vuint64m4_t __riscv_vsrl_vv_u64m4 (vuint64m4_t op1, vuint64m4_t shift, size_t vl);
vuint64m8_t __riscv_vsrl_vv_u64m8 (vuint64m8_t op1, vuint64m8_t shift, size_t vl);
vuint8mf8_t __riscv_vsrl_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vuint8mf4_t __riscv_vsrl_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vuint8mf2_t __riscv_vsrl_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vuint8m1_t __riscv_vsrl_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, size_t vl);
vuint8m2_t __riscv_vsrl_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, size_t vl);
vuint8m4_t __riscv_vsrl_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, size_t vl);
vuint8m8_t __riscv_vsrl_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, size_t vl);
vuint16mf4_t __riscv_vsrl_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vuint16mf2_t __riscv_vsrl_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vuint16m1_t __riscv_vsrl_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, size_t vl);
vuint16m2_t __riscv_vsrl_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, size_t vl);
vuint16m4_t __riscv_vsrl_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, size_t vl);
vuint16m8_t __riscv_vsrl_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, size_t vl);
vuint32mf2_t __riscv_vsrl_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vuint32m1_t __riscv_vsrl_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, size_t vl);
vuint32m2_t __riscv_vsrl_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, size_t vl);
vuint32m4_t __riscv_vsrl_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, size_t vl);
vuint32m8_t __riscv_vsrl_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, size_t vl);
vuint64m1_t __riscv_vsrl_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, size_t vl);
vuint64m2_t __riscv_vsrl_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, size_t vl);
vuint64m4_t __riscv_vsrl_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, size_t vl);
vuint64m8_t __riscv_vsrl_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, size_t vl);
14.15. vsrl.vx
- Mnemonic
vsrl.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Bit shift operations, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsrl_vx.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vsrl_vx_u8mf8 (vuint8mf8_t op1, size_t shift, size_t vl);
vuint8mf4_t __riscv_vsrl_vx_u8mf4 (vuint8mf4_t op1, size_t shift, size_t vl);
vuint8mf2_t __riscv_vsrl_vx_u8mf2 (vuint8mf2_t op1, size_t shift, size_t vl);
vuint8m1_t __riscv_vsrl_vx_u8m1 (vuint8m1_t op1, size_t shift, size_t vl);
vuint8m2_t __riscv_vsrl_vx_u8m2 (vuint8m2_t op1, size_t shift, size_t vl);
vuint8m4_t __riscv_vsrl_vx_u8m4 (vuint8m4_t op1, size_t shift, size_t vl);
vuint8m8_t __riscv_vsrl_vx_u8m8 (vuint8m8_t op1, size_t shift, size_t vl);
vuint16mf4_t __riscv_vsrl_vx_u16mf4 (vuint16mf4_t op1, size_t shift, size_t vl);
vuint16mf2_t __riscv_vsrl_vx_u16mf2 (vuint16mf2_t op1, size_t shift, size_t vl);
vuint16m1_t __riscv_vsrl_vx_u16m1 (vuint16m1_t op1, size_t shift, size_t vl);
vuint16m2_t __riscv_vsrl_vx_u16m2 (vuint16m2_t op1, size_t shift, size_t vl);
vuint16m4_t __riscv_vsrl_vx_u16m4 (vuint16m4_t op1, size_t shift, size_t vl);
vuint16m8_t __riscv_vsrl_vx_u16m8 (vuint16m8_t op1, size_t shift, size_t vl);
vuint32mf2_t __riscv_vsrl_vx_u32mf2 (vuint32mf2_t op1, size_t shift, size_t vl);
vuint32m1_t __riscv_vsrl_vx_u32m1 (vuint32m1_t op1, size_t shift, size_t vl);
vuint32m2_t __riscv_vsrl_vx_u32m2 (vuint32m2_t op1, size_t shift, size_t vl);
vuint32m4_t __riscv_vsrl_vx_u32m4 (vuint32m4_t op1, size_t shift, size_t vl);
vuint32m8_t __riscv_vsrl_vx_u32m8 (vuint32m8_t op1, size_t shift, size_t vl);
vuint64m1_t __riscv_vsrl_vx_u64m1 (vuint64m1_t op1, size_t shift, size_t vl);
vuint64m2_t __riscv_vsrl_vx_u64m2 (vuint64m2_t op1, size_t shift, size_t vl);
vuint64m4_t __riscv_vsrl_vx_u64m4 (vuint64m4_t op1, size_t shift, size_t vl);
vuint64m8_t __riscv_vsrl_vx_u64m8 (vuint64m8_t op1, size_t shift, size_t vl);
vuint8mf8_t __riscv_vsrl_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, size_t shift, size_t vl);
vuint8mf4_t __riscv_vsrl_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, size_t shift, size_t vl);
vuint8mf2_t __riscv_vsrl_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, size_t shift, size_t vl);
vuint8m1_t __riscv_vsrl_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, size_t shift, size_t vl);
vuint8m2_t __riscv_vsrl_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, size_t shift, size_t vl);
vuint8m4_t __riscv_vsrl_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, size_t shift, size_t vl);
vuint8m8_t __riscv_vsrl_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, size_t shift, size_t vl);
vuint16mf4_t __riscv_vsrl_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl);
vuint16mf2_t __riscv_vsrl_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl);
vuint16m1_t __riscv_vsrl_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl);
vuint16m2_t __riscv_vsrl_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl);
vuint16m4_t __riscv_vsrl_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl);
vuint16m8_t __riscv_vsrl_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl);
vuint32mf2_t __riscv_vsrl_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl);
vuint32m1_t __riscv_vsrl_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl);
vuint32m2_t __riscv_vsrl_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl);
vuint32m4_t __riscv_vsrl_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl);
vuint32m8_t __riscv_vsrl_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl);
vuint64m1_t __riscv_vsrl_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl);
vuint64m2_t __riscv_vsrl_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl);
vuint64m4_t __riscv_vsrl_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl);
vuint64m8_t __riscv_vsrl_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl);
14.16. vsrl.vi
- Mnemonic
vsrl.vi vd, vs2, uimm, vm
- Encoding
- Description
-
Bit shift operations, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsrl_vi.h
- Intrinsic Functions
Details
14.17. vsra.vv
- Mnemonic
vsra.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Bit shift operations, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsra_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsra_vv_i8mf8 (vint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vint8mf4_t __riscv_vsra_vv_i8mf4 (vint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vint8mf2_t __riscv_vsra_vv_i8mf2 (vint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vint8m1_t __riscv_vsra_vv_i8m1 (vint8m1_t op1, vuint8m1_t shift, size_t vl);
vint8m2_t __riscv_vsra_vv_i8m2 (vint8m2_t op1, vuint8m2_t shift, size_t vl);
vint8m4_t __riscv_vsra_vv_i8m4 (vint8m4_t op1, vuint8m4_t shift, size_t vl);
vint8m8_t __riscv_vsra_vv_i8m8 (vint8m8_t op1, vuint8m8_t shift, size_t vl);
vint16mf4_t __riscv_vsra_vv_i16mf4 (vint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vint16mf2_t __riscv_vsra_vv_i16mf2 (vint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vint16m1_t __riscv_vsra_vv_i16m1 (vint16m1_t op1, vuint16m1_t shift, size_t vl);
vint16m2_t __riscv_vsra_vv_i16m2 (vint16m2_t op1, vuint16m2_t shift, size_t vl);
vint16m4_t __riscv_vsra_vv_i16m4 (vint16m4_t op1, vuint16m4_t shift, size_t vl);
vint16m8_t __riscv_vsra_vv_i16m8 (vint16m8_t op1, vuint16m8_t shift, size_t vl);
vint32mf2_t __riscv_vsra_vv_i32mf2 (vint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vint32m1_t __riscv_vsra_vv_i32m1 (vint32m1_t op1, vuint32m1_t shift, size_t vl);
vint32m2_t __riscv_vsra_vv_i32m2 (vint32m2_t op1, vuint32m2_t shift, size_t vl);
vint32m4_t __riscv_vsra_vv_i32m4 (vint32m4_t op1, vuint32m4_t shift, size_t vl);
vint32m8_t __riscv_vsra_vv_i32m8 (vint32m8_t op1, vuint32m8_t shift, size_t vl);
vint64m1_t __riscv_vsra_vv_i64m1 (vint64m1_t op1, vuint64m1_t shift, size_t vl);
vint64m2_t __riscv_vsra_vv_i64m2 (vint64m2_t op1, vuint64m2_t shift, size_t vl);
vint64m4_t __riscv_vsra_vv_i64m4 (vint64m4_t op1, vuint64m4_t shift, size_t vl);
vint64m8_t __riscv_vsra_vv_i64m8 (vint64m8_t op1, vuint64m8_t shift, size_t vl);
vint8mf8_t __riscv_vsra_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, size_t vl);
vint8mf4_t __riscv_vsra_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, size_t vl);
vint8mf2_t __riscv_vsra_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, size_t vl);
vint8m1_t __riscv_vsra_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, size_t vl);
vint8m2_t __riscv_vsra_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, size_t vl);
vint8m4_t __riscv_vsra_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, size_t vl);
vint8m8_t __riscv_vsra_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, size_t vl);
vint16mf4_t __riscv_vsra_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, size_t vl);
vint16mf2_t __riscv_vsra_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, size_t vl);
vint16m1_t __riscv_vsra_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, size_t vl);
vint16m2_t __riscv_vsra_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, size_t vl);
vint16m4_t __riscv_vsra_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, size_t vl);
vint16m8_t __riscv_vsra_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, size_t vl);
vint32mf2_t __riscv_vsra_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, size_t vl);
vint32m1_t __riscv_vsra_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, size_t vl);
vint32m2_t __riscv_vsra_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, size_t vl);
vint32m4_t __riscv_vsra_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, size_t vl);
vint32m8_t __riscv_vsra_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, size_t vl);
vint64m1_t __riscv_vsra_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, size_t vl);
vint64m2_t __riscv_vsra_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, size_t vl);
vint64m4_t __riscv_vsra_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, size_t vl);
vint64m8_t __riscv_vsra_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, size_t vl);
14.18. vsra.vx
- Mnemonic
vsra.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Bit shift operations, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsra_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsra_vx_i8mf8 (vint8mf8_t op1, size_t shift, size_t vl);
vint8mf4_t __riscv_vsra_vx_i8mf4 (vint8mf4_t op1, size_t shift, size_t vl);
vint8mf2_t __riscv_vsra_vx_i8mf2 (vint8mf2_t op1, size_t shift, size_t vl);
vint8m1_t __riscv_vsra_vx_i8m1 (vint8m1_t op1, size_t shift, size_t vl);
vint8m2_t __riscv_vsra_vx_i8m2 (vint8m2_t op1, size_t shift, size_t vl);
vint8m4_t __riscv_vsra_vx_i8m4 (vint8m4_t op1, size_t shift, size_t vl);
vint8m8_t __riscv_vsra_vx_i8m8 (vint8m8_t op1, size_t shift, size_t vl);
vint16mf4_t __riscv_vsra_vx_i16mf4 (vint16mf4_t op1, size_t shift, size_t vl);
vint16mf2_t __riscv_vsra_vx_i16mf2 (vint16mf2_t op1, size_t shift, size_t vl);
vint16m1_t __riscv_vsra_vx_i16m1 (vint16m1_t op1, size_t shift, size_t vl);
vint16m2_t __riscv_vsra_vx_i16m2 (vint16m2_t op1, size_t shift, size_t vl);
vint16m4_t __riscv_vsra_vx_i16m4 (vint16m4_t op1, size_t shift, size_t vl);
vint16m8_t __riscv_vsra_vx_i16m8 (vint16m8_t op1, size_t shift, size_t vl);
vint32mf2_t __riscv_vsra_vx_i32mf2 (vint32mf2_t op1, size_t shift, size_t vl);
vint32m1_t __riscv_vsra_vx_i32m1 (vint32m1_t op1, size_t shift, size_t vl);
vint32m2_t __riscv_vsra_vx_i32m2 (vint32m2_t op1, size_t shift, size_t vl);
vint32m4_t __riscv_vsra_vx_i32m4 (vint32m4_t op1, size_t shift, size_t vl);
vint32m8_t __riscv_vsra_vx_i32m8 (vint32m8_t op1, size_t shift, size_t vl);
vint64m1_t __riscv_vsra_vx_i64m1 (vint64m1_t op1, size_t shift, size_t vl);
vint64m2_t __riscv_vsra_vx_i64m2 (vint64m2_t op1, size_t shift, size_t vl);
vint64m4_t __riscv_vsra_vx_i64m4 (vint64m4_t op1, size_t shift, size_t vl);
vint64m8_t __riscv_vsra_vx_i64m8 (vint64m8_t op1, size_t shift, size_t vl);
vint8mf8_t __riscv_vsra_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, size_t shift, size_t vl);
vint8mf4_t __riscv_vsra_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, size_t shift, size_t vl);
vint8mf2_t __riscv_vsra_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, size_t shift, size_t vl);
vint8m1_t __riscv_vsra_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, size_t shift, size_t vl);
vint8m2_t __riscv_vsra_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, size_t shift, size_t vl);
vint8m4_t __riscv_vsra_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, size_t shift, size_t vl);
vint8m8_t __riscv_vsra_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, size_t shift, size_t vl);
vint16mf4_t __riscv_vsra_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl);
vint16mf2_t __riscv_vsra_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl);
vint16m1_t __riscv_vsra_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl);
vint16m2_t __riscv_vsra_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl);
vint16m4_t __riscv_vsra_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl);
vint16m8_t __riscv_vsra_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl);
vint32mf2_t __riscv_vsra_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl);
vint32m1_t __riscv_vsra_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl);
vint32m2_t __riscv_vsra_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl);
vint32m4_t __riscv_vsra_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl);
vint32m8_t __riscv_vsra_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl);
vint64m1_t __riscv_vsra_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl);
vint64m2_t __riscv_vsra_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl);
vint64m4_t __riscv_vsra_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl);
vint64m8_t __riscv_vsra_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl);
14.19. vsra.vi
- Mnemonic
vsra.vi vd, vs2, uimm, vm
- Encoding
- Description
-
Bit shift operations, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vsra_vi.h
- Intrinsic Functions
Details
14.20. Vector Narrowing Integer Right Shift Instructions
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/tree/master/riscv/insns (see the individual vnsrl_w*.h / vnsra_w*.h files)
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
14.21. vnsrl.wv
- Mnemonic
vnsrl.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Narrowing shift right logical, SEW = (2*SEW) >> SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnsrl_wv.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vnsrl_wv_u8mf8 (vuint16mf4_t op1, vuint8mf8_t shift, size_t vl);
vuint8mf4_t __riscv_vnsrl_wv_u8mf4 (vuint16mf2_t op1, vuint8mf4_t shift, size_t vl);
vuint8mf2_t __riscv_vnsrl_wv_u8mf2 (vuint16m1_t op1, vuint8mf2_t shift, size_t vl);
vuint8m1_t __riscv_vnsrl_wv_u8m1 (vuint16m2_t op1, vuint8m1_t shift, size_t vl);
vuint8m2_t __riscv_vnsrl_wv_u8m2 (vuint16m4_t op1, vuint8m2_t shift, size_t vl);
vuint8m4_t __riscv_vnsrl_wv_u8m4 (vuint16m8_t op1, vuint8m4_t shift, size_t vl);
vuint16mf4_t __riscv_vnsrl_wv_u16mf4 (vuint32mf2_t op1, vuint16mf4_t shift, size_t vl);
vuint16mf2_t __riscv_vnsrl_wv_u16mf2 (vuint32m1_t op1, vuint16mf2_t shift, size_t vl);
vuint16m1_t __riscv_vnsrl_wv_u16m1 (vuint32m2_t op1, vuint16m1_t shift, size_t vl);
vuint16m2_t __riscv_vnsrl_wv_u16m2 (vuint32m4_t op1, vuint16m2_t shift, size_t vl);
vuint16m4_t __riscv_vnsrl_wv_u16m4 (vuint32m8_t op1, vuint16m4_t shift, size_t vl);
vuint32mf2_t __riscv_vnsrl_wv_u32mf2 (vuint64m1_t op1, vuint32mf2_t shift, size_t vl);
vuint32m1_t __riscv_vnsrl_wv_u32m1 (vuint64m2_t op1, vuint32m1_t shift, size_t vl);
vuint32m2_t __riscv_vnsrl_wv_u32m2 (vuint64m4_t op1, vuint32m2_t shift, size_t vl);
vuint32m4_t __riscv_vnsrl_wv_u32m4 (vuint64m8_t op1, vuint32m4_t shift, size_t vl);
vuint8mf8_t __riscv_vnsrl_wv_u8mf8_m (vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl);
vuint8mf4_t __riscv_vnsrl_wv_u8mf4_m (vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl);
vuint8mf2_t __riscv_vnsrl_wv_u8mf2_m (vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, size_t vl);
vuint8m1_t __riscv_vnsrl_wv_u8m1_m (vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, size_t vl);
vuint8m2_t __riscv_vnsrl_wv_u8m2_m (vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, size_t vl);
vuint8m4_t __riscv_vnsrl_wv_u8m4_m (vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, size_t vl);
vuint16mf4_t __riscv_vnsrl_wv_u16mf4_m (vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl);
vuint16mf2_t __riscv_vnsrl_wv_u16mf2_m (vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, size_t vl);
vuint16m1_t __riscv_vnsrl_wv_u16m1_m (vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, size_t vl);
vuint16m2_t __riscv_vnsrl_wv_u16m2_m (vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, size_t vl);
vuint16m4_t __riscv_vnsrl_wv_u16m4_m (vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, size_t vl);
vuint32mf2_t __riscv_vnsrl_wv_u32mf2_m (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, size_t vl);
vuint32m1_t __riscv_vnsrl_wv_u32m1_m (vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, size_t vl);
vuint32m2_t __riscv_vnsrl_wv_u32m2_m (vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, size_t vl);
vuint32m4_t __riscv_vnsrl_wv_u32m4_m (vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, size_t vl);
14.22. vnsrl.wx
- Mnemonic
vnsrl.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Narrowing shift right logical, SEW = (2*SEW) >> SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnsrl_wx.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vnsrl_wx_u8mf8 (vuint16mf4_t op1, size_t shift, size_t vl);
vuint8mf4_t __riscv_vnsrl_wx_u8mf4 (vuint16mf2_t op1, size_t shift, size_t vl);
vuint8mf2_t __riscv_vnsrl_wx_u8mf2 (vuint16m1_t op1, size_t shift, size_t vl);
vuint8m1_t __riscv_vnsrl_wx_u8m1 (vuint16m2_t op1, size_t shift, size_t vl);
vuint8m2_t __riscv_vnsrl_wx_u8m2 (vuint16m4_t op1, size_t shift, size_t vl);
vuint8m4_t __riscv_vnsrl_wx_u8m4 (vuint16m8_t op1, size_t shift, size_t vl);
vuint16mf4_t __riscv_vnsrl_wx_u16mf4 (vuint32mf2_t op1, size_t shift, size_t vl);
vuint16mf2_t __riscv_vnsrl_wx_u16mf2 (vuint32m1_t op1, size_t shift, size_t vl);
vuint16m1_t __riscv_vnsrl_wx_u16m1 (vuint32m2_t op1, size_t shift, size_t vl);
vuint16m2_t __riscv_vnsrl_wx_u16m2 (vuint32m4_t op1, size_t shift, size_t vl);
vuint16m4_t __riscv_vnsrl_wx_u16m4 (vuint32m8_t op1, size_t shift, size_t vl);
vuint32mf2_t __riscv_vnsrl_wx_u32mf2 (vuint64m1_t op1, size_t shift, size_t vl);
vuint32m1_t __riscv_vnsrl_wx_u32m1 (vuint64m2_t op1, size_t shift, size_t vl);
vuint32m2_t __riscv_vnsrl_wx_u32m2 (vuint64m4_t op1, size_t shift, size_t vl);
vuint32m4_t __riscv_vnsrl_wx_u32m4 (vuint64m8_t op1, size_t shift, size_t vl);
vuint8mf8_t __riscv_vnsrl_wx_u8mf8_m (vbool64_t mask, vuint16mf4_t op1, size_t shift, size_t vl);
vuint8mf4_t __riscv_vnsrl_wx_u8mf4_m (vbool32_t mask, vuint16mf2_t op1, size_t shift, size_t vl);
vuint8mf2_t __riscv_vnsrl_wx_u8mf2_m (vbool16_t mask, vuint16m1_t op1, size_t shift, size_t vl);
vuint8m1_t __riscv_vnsrl_wx_u8m1_m (vbool8_t mask, vuint16m2_t op1, size_t shift, size_t vl);
vuint8m2_t __riscv_vnsrl_wx_u8m2_m (vbool4_t mask, vuint16m4_t op1, size_t shift, size_t vl);
vuint8m4_t __riscv_vnsrl_wx_u8m4_m (vbool2_t mask, vuint16m8_t op1, size_t shift, size_t vl);
vuint16mf4_t __riscv_vnsrl_wx_u16mf4_m (vbool64_t mask, vuint32mf2_t op1, size_t shift, size_t vl);
vuint16mf2_t __riscv_vnsrl_wx_u16mf2_m (vbool32_t mask, vuint32m1_t op1, size_t shift, size_t vl);
vuint16m1_t __riscv_vnsrl_wx_u16m1_m (vbool16_t mask, vuint32m2_t op1, size_t shift, size_t vl);
vuint16m2_t __riscv_vnsrl_wx_u16m2_m (vbool8_t mask, vuint32m4_t op1, size_t shift, size_t vl);
vuint16m4_t __riscv_vnsrl_wx_u16m4_m (vbool4_t mask, vuint32m8_t op1, size_t shift, size_t vl);
vuint32mf2_t __riscv_vnsrl_wx_u32mf2_m (vbool64_t mask, vuint64m1_t op1, size_t shift, size_t vl);
vuint32m1_t __riscv_vnsrl_wx_u32m1_m (vbool32_t mask, vuint64m2_t op1, size_t shift, size_t vl);
vuint32m2_t __riscv_vnsrl_wx_u32m2_m (vbool16_t mask, vuint64m4_t op1, size_t shift, size_t vl);
vuint32m4_t __riscv_vnsrl_wx_u32m4_m (vbool8_t mask, vuint64m8_t op1, size_t shift, size_t vl);
14.23. vnsrl.wi
- Mnemonic
vnsrl.wi vd, vs2, uimm, vm
- Encoding
- Description
-
Narrowing shift right logical, SEW = (2*SEW) >> SEW, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnsrl_wi.h
- Intrinsic Functions
Details
14.24. vnsra.wv
- Mnemonic
vnsra.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Narrowing shift right arithmetic, SEW = (2*SEW) >> SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnsra_wv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnsra_wv_i8mf8 (vint16mf4_t op1, vuint8mf8_t shift, size_t vl);
vint8mf4_t __riscv_vnsra_wv_i8mf4 (vint16mf2_t op1, vuint8mf4_t shift, size_t vl);
vint8mf2_t __riscv_vnsra_wv_i8mf2 (vint16m1_t op1, vuint8mf2_t shift, size_t vl);
vint8m1_t __riscv_vnsra_wv_i8m1 (vint16m2_t op1, vuint8m1_t shift, size_t vl);
vint8m2_t __riscv_vnsra_wv_i8m2 (vint16m4_t op1, vuint8m2_t shift, size_t vl);
vint8m4_t __riscv_vnsra_wv_i8m4 (vint16m8_t op1, vuint8m4_t shift, size_t vl);
vint16mf4_t __riscv_vnsra_wv_i16mf4 (vint32mf2_t op1, vuint16mf4_t shift, size_t vl);
vint16mf2_t __riscv_vnsra_wv_i16mf2 (vint32m1_t op1, vuint16mf2_t shift, size_t vl);
vint16m1_t __riscv_vnsra_wv_i16m1 (vint32m2_t op1, vuint16m1_t shift, size_t vl);
vint16m2_t __riscv_vnsra_wv_i16m2 (vint32m4_t op1, vuint16m2_t shift, size_t vl);
vint16m4_t __riscv_vnsra_wv_i16m4 (vint32m8_t op1, vuint16m4_t shift, size_t vl);
vint32mf2_t __riscv_vnsra_wv_i32mf2 (vint64m1_t op1, vuint32mf2_t shift, size_t vl);
vint32m1_t __riscv_vnsra_wv_i32m1 (vint64m2_t op1, vuint32m1_t shift, size_t vl);
vint32m2_t __riscv_vnsra_wv_i32m2 (vint64m4_t op1, vuint32m2_t shift, size_t vl);
vint32m4_t __riscv_vnsra_wv_i32m4 (vint64m8_t op1, vuint32m4_t shift, size_t vl);
vint8mf8_t __riscv_vnsra_wv_i8mf8_m (vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, size_t vl);
vint8mf4_t __riscv_vnsra_wv_i8mf4_m (vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, size_t vl);
vint8mf2_t __riscv_vnsra_wv_i8mf2_m (vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, size_t vl);
vint8m1_t __riscv_vnsra_wv_i8m1_m (vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, size_t vl);
vint8m2_t __riscv_vnsra_wv_i8m2_m (vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, size_t vl);
vint8m4_t __riscv_vnsra_wv_i8m4_m (vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, size_t vl);
vint16mf4_t __riscv_vnsra_wv_i16mf4_m (vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, size_t vl);
vint16mf2_t __riscv_vnsra_wv_i16mf2_m (vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, size_t vl);
vint16m1_t __riscv_vnsra_wv_i16m1_m (vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, size_t vl);
vint16m2_t __riscv_vnsra_wv_i16m2_m (vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, size_t vl);
vint16m4_t __riscv_vnsra_wv_i16m4_m (vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, size_t vl);
vint32mf2_t __riscv_vnsra_wv_i32mf2_m (vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, size_t vl);
vint32m1_t __riscv_vnsra_wv_i32m1_m (vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, size_t vl);
vint32m2_t __riscv_vnsra_wv_i32m2_m (vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, size_t vl);
vint32m4_t __riscv_vnsra_wv_i32m4_m (vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, size_t vl);
14.25. vnsra.wx
- Mnemonic
vnsra.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Narrowing shift right arithmetic, SEW = (2*SEW) >> SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnsra_wx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnsra_wx_i8mf8 (vint16mf4_t op1, size_t shift, size_t vl);
vint8mf4_t __riscv_vnsra_wx_i8mf4 (vint16mf2_t op1, size_t shift, size_t vl);
vint8mf2_t __riscv_vnsra_wx_i8mf2 (vint16m1_t op1, size_t shift, size_t vl);
vint8m1_t __riscv_vnsra_wx_i8m1 (vint16m2_t op1, size_t shift, size_t vl);
vint8m2_t __riscv_vnsra_wx_i8m2 (vint16m4_t op1, size_t shift, size_t vl);
vint8m4_t __riscv_vnsra_wx_i8m4 (vint16m8_t op1, size_t shift, size_t vl);
vint16mf4_t __riscv_vnsra_wx_i16mf4 (vint32mf2_t op1, size_t shift, size_t vl);
vint16mf2_t __riscv_vnsra_wx_i16mf2 (vint32m1_t op1, size_t shift, size_t vl);
vint16m1_t __riscv_vnsra_wx_i16m1 (vint32m2_t op1, size_t shift, size_t vl);
vint16m2_t __riscv_vnsra_wx_i16m2 (vint32m4_t op1, size_t shift, size_t vl);
vint16m4_t __riscv_vnsra_wx_i16m4 (vint32m8_t op1, size_t shift, size_t vl);
vint32mf2_t __riscv_vnsra_wx_i32mf2 (vint64m1_t op1, size_t shift, size_t vl);
vint32m1_t __riscv_vnsra_wx_i32m1 (vint64m2_t op1, size_t shift, size_t vl);
vint32m2_t __riscv_vnsra_wx_i32m2 (vint64m4_t op1, size_t shift, size_t vl);
vint32m4_t __riscv_vnsra_wx_i32m4 (vint64m8_t op1, size_t shift, size_t vl);
vint8mf8_t __riscv_vnsra_wx_i8mf8_m (vbool64_t mask, vint16mf4_t op1, size_t shift, size_t vl);
vint8mf4_t __riscv_vnsra_wx_i8mf4_m (vbool32_t mask, vint16mf2_t op1, size_t shift, size_t vl);
vint8mf2_t __riscv_vnsra_wx_i8mf2_m (vbool16_t mask, vint16m1_t op1, size_t shift, size_t vl);
vint8m1_t __riscv_vnsra_wx_i8m1_m (vbool8_t mask, vint16m2_t op1, size_t shift, size_t vl);
vint8m2_t __riscv_vnsra_wx_i8m2_m (vbool4_t mask, vint16m4_t op1, size_t shift, size_t vl);
vint8m4_t __riscv_vnsra_wx_i8m4_m (vbool2_t mask, vint16m8_t op1, size_t shift, size_t vl);
vint16mf4_t __riscv_vnsra_wx_i16mf4_m (vbool64_t mask, vint32mf2_t op1, size_t shift, size_t vl);
vint16mf2_t __riscv_vnsra_wx_i16mf2_m (vbool32_t mask, vint32m1_t op1, size_t shift, size_t vl);
vint16m1_t __riscv_vnsra_wx_i16m1_m (vbool16_t mask, vint32m2_t op1, size_t shift, size_t vl);
vint16m2_t __riscv_vnsra_wx_i16m2_m (vbool8_t mask, vint32m4_t op1, size_t shift, size_t vl);
vint16m4_t __riscv_vnsra_wx_i16m4_m (vbool4_t mask, vint32m8_t op1, size_t shift, size_t vl);
vint32mf2_t __riscv_vnsra_wx_i32mf2_m (vbool64_t mask, vint64m1_t op1, size_t shift, size_t vl);
vint32m1_t __riscv_vnsra_wx_i32m1_m (vbool32_t mask, vint64m2_t op1, size_t shift, size_t vl);
vint32m2_t __riscv_vnsra_wx_i32m2_m (vbool16_t mask, vint64m4_t op1, size_t shift, size_t vl);
vint32m4_t __riscv_vnsra_wx_i32m4_m (vbool8_t mask, vint64m8_t op1, size_t shift, size_t vl);
14.26. vnsra.wi
- Mnemonic
vnsra.wi vd, vs2, uimm, vm
- Encoding
- Description
-
Narrowing shift right arithmetic, SEW = (2*SEW) >> SEW, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnsra_wi.h
- Intrinsic Functions
Details
14.27. Vector Integer Compare Instructions
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/tree/master/riscv/insns
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
14.28. vmseq.vv
- Mnemonic
vmseq.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Set if equal, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmseq_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmseq_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmseq_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmseq_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmseq_vv_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmseq_vv_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmseq_vv_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmseq_vv_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vv_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmseq_vv_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmseq_vv_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmseq_vv_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.29. vmseq.vx
- Mnemonic
vmseq.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if equal, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmseq_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmseq_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmseq_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmseq_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmseq_vx_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmseq_vx_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmseq_vx_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmseq_vx_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmseq_vx_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmseq_vx_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmseq_vx_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmseq_vx_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.30. vmseq.vi
- Mnemonic
vmseq.vi vd, vs2, imm, vm
- Encoding
- Description
-
Set if equal, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmseq_vi.h
- Intrinsic Functions
Details
14.31. vmsne.vv
- Mnemonic
vmsne.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Set if not equal, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsne_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsne_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsne_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsne_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsne_vv_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsne_vv_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsne_vv_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsne_vv_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vv_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsne_vv_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsne_vv_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsne_vv_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.32. vmsne.vx
- Mnemonic
vmsne.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if not equal, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsne_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsne_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmsne_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsne_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmsne_vx_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsne_vx_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsne_vx_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsne_vx_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsne_vx_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsne_vx_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsne_vx_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsne_vx_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.33. vmsne.vi
- Mnemonic
vmsne.vi vd, vs2, imm, vm
- Encoding
- Description
-
Set if not equal, vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsne_vi.h
- Intrinsic Functions
Details
(No dedicated `_vi` intrinsics are listed for vmsne.vi; following the RVV intrinsics convention, immediate comparisons are expressed through the `__riscv_vmsne_vx` intrinsics with a constant `op2` — verify against the rvv-intrinsic-doc.)
14.34. vmsltu.vv
- Mnemonic
vmsltu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Set if less than, unsigned, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsltu_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsltu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsltu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vv_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vv_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vv_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsltu_vv_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vv_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vv_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vv_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vv_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vv_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vv_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vv_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vv_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vv_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.35. vmsltu.vx
- Mnemonic
vmsltu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if less than, unsigned, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsltu_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsltu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsltu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vx_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vx_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vx_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsltu_vx_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vx_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vx_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsltu_vx_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vx_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsltu_vx_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsltu_vx_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsltu_vx_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsltu_vx_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsltu_vx_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.36. vmslt.vv
- Mnemonic
vmslt.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Set if less than, signed, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmslt_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmslt_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmslt_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmslt_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmslt_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmslt_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmslt_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmslt_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vv_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmslt_vv_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmslt_vv_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmslt_vv_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vv_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmslt_vv_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmslt_vv_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vv_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmslt_vv_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vv_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmslt_vv_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmslt_vv_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmslt_vv_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
14.37. vmslt.vx
- Mnemonic
vmslt.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if less than, signed, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmslt_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmslt_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmslt_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmslt_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmslt_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmslt_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmslt_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmslt_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmslt_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmslt_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmslt_vx_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmslt_vx_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmslt_vx_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmslt_vx_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmslt_vx_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmslt_vx_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmslt_vx_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmslt_vx_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmslt_vx_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmslt_vx_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmslt_vx_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmslt_vx_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmslt_vx_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
14.38. vmsleu.vv
- Mnemonic
vmsleu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Set if less than or equal, unsigned, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsleu_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsleu_vv_u8mf8_b64 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u8mf4_b32 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u8mf2_b16 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u8m1_b8 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vv_u8m2_b4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vv_u8m4_b2 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsleu_vv_u8m8_b1 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vv_u16mf4_b64 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u16mf2_b32 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u16m1_b16 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u16m2_b8 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vv_u16m4_b4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vv_u16m8_b2 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vv_u32mf2_b64 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u32m1_b32 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u32m2_b16 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u32m4_b8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vv_u32m8_b4 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vv_u64m1_b64 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u64m2_b32 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u64m4_b16 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u64m8_b8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vv_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vv_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vv_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsleu_vv_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vv_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vv_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vv_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vv_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vv_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vv_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vv_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vv_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vv_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.39. vmsleu.vx
- Mnemonic
vmsleu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if less than or equal, unsigned, Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsleu_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsleu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsleu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vx_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vx_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vx_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsleu_vx_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vx_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vx_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsleu_vx_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vx_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsleu_vx_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsleu_vx_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsleu_vx_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsleu_vx_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsleu_vx_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.40. vmsleu.vi
- Mnemonic
vmsleu.vi vd, vs2, imm, vm
- Encoding
- Description
-
Set if less than or equal, unsigned, Vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsleu_vi.h
- Intrinsic Functions
Details
(No separate vmsleu.vi intrinsics are defined; the compiler selects the .vi encoding automatically when the scalar operand of the corresponding __riscv_vmsleu_vx intrinsic is a suitable immediate.)
14.41. vmsle.vv
- Mnemonic
vmsle.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Set if less than or equal, signed, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsle_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsle_vv_i8mf8_b64 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i8mf4_b32 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i8mf2_b16 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i8m1_b8 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsle_vv_i8m2_b4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsle_vv_i8m4_b2 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsle_vv_i8m8_b1 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vv_i16mf4_b64 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i16mf2_b32 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i16m1_b16 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i16m2_b8 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsle_vv_i16m4_b4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsle_vv_i16m8_b2 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vv_i32mf2_b64 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i32m1_b32 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i32m2_b16 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i32m4_b8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsle_vv_i32m8_b4 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vv_i64m1_b64 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i64m2_b32 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i64m4_b16 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i64m8_b8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vv_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vbool4_t __riscv_vmsle_vv_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vbool2_t __riscv_vmsle_vv_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vbool1_t __riscv_vmsle_vv_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vv_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vbool4_t __riscv_vmsle_vv_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vbool2_t __riscv_vmsle_vv_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vv_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vbool4_t __riscv_vmsle_vv_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vv_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vbool32_t __riscv_vmsle_vv_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vbool16_t __riscv_vmsle_vv_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vbool8_t __riscv_vmsle_vv_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
14.42. vmsle.vx
- Mnemonic
vmsle.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if less than or equal, signed, Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsle_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsle_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmsle_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmsle_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmsle_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmsle_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmsle_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmsle_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmsle_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmsle_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmsle_vx_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmsle_vx_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmsle_vx_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmsle_vx_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmsle_vx_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmsle_vx_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmsle_vx_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmsle_vx_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmsle_vx_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmsle_vx_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmsle_vx_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmsle_vx_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmsle_vx_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
14.43. vmsle.vi
- Mnemonic
vmsle.vi vd, vs2, imm, vm
- Encoding
- Description
-
Set if less than or equal, signed, Vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsle_vi.h
- Intrinsic Functions
Details
(No separate vmsle.vi intrinsics are defined; the compiler selects the .vi encoding automatically when the scalar operand of the corresponding __riscv_vmsle_vx intrinsic is a suitable immediate.)
14.44. vmsgtu.vx
- Mnemonic
vmsgtu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if greater than, unsigned, Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsgtu_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsgtu_vx_u8mf8_b64 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u8mf4_b32 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u8mf2_b16 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u8m1_b8 (vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsgtu_vx_u8m2_b4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsgtu_vx_u8m4_b2 (vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsgtu_vx_u8m8_b1 (vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsgtu_vx_u16mf4_b64 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u16mf2_b32 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u16m1_b16 (vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u16m2_b8 (vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsgtu_vx_u16m4_b4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsgtu_vx_u16m8_b2 (vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsgtu_vx_u32mf2_b64 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u32m1_b32 (vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u32m2_b16 (vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u32m4_b8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsgtu_vx_u32m8_b4 (vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsgtu_vx_u64m1_b64 (vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u64m2_b32 (vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u64m4_b16 (vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u64m8_b8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vbool64_t __riscv_vmsgtu_vx_u8mf8_b64_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u8mf4_b32_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u8mf2_b16_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u8m1_b8_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vbool4_t __riscv_vmsgtu_vx_u8m2_b4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vbool2_t __riscv_vmsgtu_vx_u8m4_b2_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vbool1_t __riscv_vmsgtu_vx_u8m8_b1_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vbool64_t __riscv_vmsgtu_vx_u16mf4_b64_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u16mf2_b32_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u16m1_b16_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u16m2_b8_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vbool4_t __riscv_vmsgtu_vx_u16m4_b4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vbool2_t __riscv_vmsgtu_vx_u16m8_b2_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vbool64_t __riscv_vmsgtu_vx_u32mf2_b64_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u32m1_b32_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u32m2_b16_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u32m4_b8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vbool4_t __riscv_vmsgtu_vx_u32m8_b4_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vbool64_t __riscv_vmsgtu_vx_u64m1_b64_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vbool32_t __riscv_vmsgtu_vx_u64m2_b32_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vbool16_t __riscv_vmsgtu_vx_u64m4_b16_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vbool8_t __riscv_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.45. vmsgtu.vi
- Mnemonic
vmsgtu.vi vd, vs2, imm, vm
- Encoding
- Description
-
Set if greater than, unsigned, Vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsgtu_vi.h
- Intrinsic Functions
Details
(No separate vmsgtu.vi intrinsics are defined; the compiler selects the .vi encoding automatically when the scalar operand of the corresponding __riscv_vmsgtu_vx intrinsic is a suitable immediate.)
14.46. vmsgt.vx
- Mnemonic
vmsgt.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Set if greater than, signed, Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsgt_vx.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmsgt_vx_i8mf8_b64 (vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i8mf4_b32 (vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i8mf2_b16 (vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i8m1_b8 (vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmsgt_vx_i8m2_b4 (vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmsgt_vx_i8m4_b2 (vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmsgt_vx_i8m8_b1 (vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmsgt_vx_i16mf4_b64 (vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i16mf2_b32 (vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i16m1_b16 (vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i16m2_b8 (vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmsgt_vx_i16m4_b4 (vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmsgt_vx_i16m8_b2 (vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmsgt_vx_i32mf2_b64 (vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i32m1_b32 (vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i32m2_b16 (vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i32m4_b8 (vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmsgt_vx_i32m8_b4 (vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmsgt_vx_i64m1_b64 (vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i64m2_b32 (vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i64m4_b16 (vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i64m8_b8 (vint64m8_t op1, int64_t op2, size_t vl);
vbool64_t __riscv_vmsgt_vx_i8mf8_b64_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i8mf4_b32_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i8mf2_b16_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i8m1_b8_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vbool4_t __riscv_vmsgt_vx_i8m2_b4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vbool2_t __riscv_vmsgt_vx_i8m4_b2_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vbool1_t __riscv_vmsgt_vx_i8m8_b1_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vbool64_t __riscv_vmsgt_vx_i16mf4_b64_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i16mf2_b32_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i16m1_b16_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i16m2_b8_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vbool4_t __riscv_vmsgt_vx_i16m4_b4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vbool2_t __riscv_vmsgt_vx_i16m8_b2_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vbool64_t __riscv_vmsgt_vx_i32mf2_b64_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i32m1_b32_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i32m2_b16_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i32m4_b8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vbool4_t __riscv_vmsgt_vx_i32m8_b4_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vbool64_t __riscv_vmsgt_vx_i64m1_b64_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vbool32_t __riscv_vmsgt_vx_i64m2_b32_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vbool16_t __riscv_vmsgt_vx_i64m4_b16_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vbool8_t __riscv_vmsgt_vx_i64m8_b8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
14.47. vmsgt.vi
- Mnemonic
vmsgt.vi vd, vs2, imm, vm
- Encoding
- Description
-
Set if greater than, signed, Vector-immediate
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsgt_vi.h
- Intrinsic Functions
Details
(No separate vmsgt.vi intrinsics are defined; the compiler selects the .vi encoding automatically when the scalar operand of the corresponding __riscv_vmsgt_vx intrinsic is a suitable immediate.)
14.48. Vector Integer Min/Max Instructions
Signed and unsigned integer minimum and maximum instructions are supported.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/tree/master/riscv/insns (see vminu_vv.h, vminu_vx.h, vmin_vv.h, vmin_vx.h, vmaxu_vv.h, vmaxu_vx.h, vmax_vv.h, vmax_vx.h)
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
14.49. vminu.vv
- Mnemonic
vminu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Unsigned minimum, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vminu_vv.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vminu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vminu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vminu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vminu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vminu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vminu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vminu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vminu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vminu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vminu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vminu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vminu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vminu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vminu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vminu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vminu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vminu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vminu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vminu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vminu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vminu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vminu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vminu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vminu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vminu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vminu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vminu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vminu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vminu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vminu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vminu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vminu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vminu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vminu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vminu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vminu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vminu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vminu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vminu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vminu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vminu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vminu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vminu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vminu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.50. vminu.vx
- Mnemonic
vminu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Unsigned minimum, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vminu_vx.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vminu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vminu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vminu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vminu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vminu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vminu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vminu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vminu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vminu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vminu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vminu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vminu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vminu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vminu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vminu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vminu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vminu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vminu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vminu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vminu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vminu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vminu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vuint8mf8_t __riscv_vminu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vminu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vminu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vminu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vminu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vminu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vminu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vminu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vminu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vminu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vminu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vminu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vminu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vminu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vminu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vminu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vminu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vminu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vminu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vminu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vminu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vminu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.51. vmin.vv
- Mnemonic
vmin.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed minimum, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmin_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmin_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmin_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmin_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmin_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmin_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmin_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmin_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmin_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmin_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmin_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmin_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmin_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmin_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmin_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmin_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmin_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmin_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmin_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmin_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmin_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmin_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmin_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vmin_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmin_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmin_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmin_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmin_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmin_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmin_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmin_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmin_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmin_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmin_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmin_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmin_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmin_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmin_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmin_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmin_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmin_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmin_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmin_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmin_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmin_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
14.52. vmin.vx
- Mnemonic
vmin.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed minimum, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmin_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmin_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmin_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmin_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmin_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmin_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmin_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmin_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmin_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmin_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmin_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmin_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmin_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmin_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmin_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmin_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmin_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmin_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmin_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmin_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmin_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmin_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmin_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vint8mf8_t __riscv_vmin_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmin_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmin_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmin_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmin_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmin_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmin_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmin_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmin_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmin_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmin_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmin_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmin_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmin_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmin_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmin_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmin_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmin_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmin_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmin_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmin_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmin_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
14.53. vmaxu.vv
- Mnemonic
vmaxu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Unsigned maximum, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmaxu_vv.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vmaxu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vmaxu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vmaxu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vmaxu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vmaxu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vmaxu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vmaxu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vmaxu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vmaxu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vmaxu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vmaxu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vmaxu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vmaxu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vmaxu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vmaxu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vmaxu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vmaxu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vmaxu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vmaxu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vmaxu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vmaxu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vmaxu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vmaxu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vmaxu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vmaxu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vmaxu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vmaxu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vmaxu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vmaxu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vmaxu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vmaxu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vmaxu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vmaxu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vmaxu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vmaxu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vmaxu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vmaxu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vmaxu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vmaxu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vmaxu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vmaxu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vmaxu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vmaxu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vmaxu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
14.54. vmaxu.vx
- Mnemonic
vmaxu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Unsigned maximum, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmaxu_vx.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vmaxu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vmaxu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vmaxu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vmaxu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vmaxu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vmaxu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vmaxu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vmaxu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vmaxu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vmaxu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vmaxu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vmaxu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vmaxu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vmaxu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vmaxu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vmaxu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vmaxu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vmaxu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vmaxu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vmaxu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vmaxu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vmaxu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vuint8mf8_t __riscv_vmaxu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vmaxu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vmaxu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vmaxu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vmaxu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vmaxu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vmaxu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vmaxu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vmaxu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vmaxu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vmaxu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vmaxu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vmaxu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vmaxu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vmaxu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vmaxu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vmaxu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vmaxu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vmaxu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vmaxu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vmaxu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vmaxu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
14.55. vmax.vv
- Mnemonic
vmax.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed maximum, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmax_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmax_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmax_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmax_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmax_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmax_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmax_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmax_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmax_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmax_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmax_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmax_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmax_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmax_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmax_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmax_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmax_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmax_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmax_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmax_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmax_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmax_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmax_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vmax_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmax_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmax_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmax_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmax_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmax_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmax_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmax_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmax_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmax_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmax_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmax_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmax_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmax_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmax_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmax_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmax_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmax_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmax_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmax_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmax_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmax_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
14.56. vmax.vx
- Mnemonic
vmax.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed maximum, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmax_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmax_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmax_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmax_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmax_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmax_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmax_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmax_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmax_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmax_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmax_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmax_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmax_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmax_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmax_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmax_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmax_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmax_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmax_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmax_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmax_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmax_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmax_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vint8mf8_t __riscv_vmax_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmax_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmax_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmax_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmax_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmax_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmax_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmax_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmax_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmax_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmax_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmax_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmax_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmax_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmax_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmax_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmax_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmax_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmax_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmax_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmax_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmax_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
15. Vector Single-Width Integer Multiply Instructions
15.1. vmul.vv
- Mnemonic
vmul.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed multiply, returning low bits of product, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmul_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmul_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmul_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmul_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmul_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmul_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmul_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmul_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmul_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmul_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmul_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmul_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmul_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmul_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmul_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmul_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmul_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmul_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmul_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmul_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmul_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmul_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmul_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vmul_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vmul_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vmul_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vmul_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vmul_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vmul_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vmul_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vmul_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vmul_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vmul_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vmul_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vmul_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vmul_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vmul_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vmul_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vmul_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vmul_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vmul_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vmul_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vmul_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vmul_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vmul_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vmul_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmul_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmul_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmul_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmul_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmul_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmul_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmul_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmul_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmul_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmul_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmul_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmul_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmul_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmul_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmul_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmul_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmul_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmul_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmul_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmul_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmul_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vmul_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vmul_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vmul_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vmul_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vmul_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vmul_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vmul_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vmul_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vmul_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vmul_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vmul_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vmul_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vmul_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vmul_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vmul_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vmul_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vmul_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vmul_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vmul_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vmul_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vmul_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vmul_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
15.2. vmul.vx
- Mnemonic
vmul.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed multiply, returning low bits of product, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmul_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmul_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmul_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmul_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmul_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmul_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmul_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmul_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmul_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmul_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmul_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmul_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmul_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmul_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmul_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmul_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmul_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmul_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmul_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmul_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmul_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmul_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmul_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vmul_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vmul_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vmul_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vmul_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vmul_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vmul_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vmul_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vmul_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vmul_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vmul_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vmul_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vmul_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vmul_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vmul_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vmul_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vmul_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vmul_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vmul_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vmul_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vmul_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vmul_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vmul_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vmul_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmul_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmul_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmul_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmul_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmul_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmul_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmul_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmul_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmul_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmul_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmul_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmul_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmul_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmul_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmul_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmul_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmul_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmul_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmul_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmul_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmul_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
vuint8mf8_t __riscv_vmul_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vmul_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vmul_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vmul_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vmul_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vmul_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vmul_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vmul_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vmul_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vmul_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vmul_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vmul_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vmul_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vmul_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vmul_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vmul_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vmul_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vmul_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vmul_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vmul_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vmul_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vmul_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
15.3. vmulh.vv
- Mnemonic
vmulh.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed multiply, returning high bits of product, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmulh_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmulh_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmulh_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmulh_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmulh_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmulh_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmulh_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmulh_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmulh_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmulh_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmulh_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmulh_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmulh_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmulh_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmulh_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmulh_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmulh_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmulh_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmulh_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmulh_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmulh_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmulh_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmulh_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vmulh_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmulh_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmulh_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmulh_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmulh_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmulh_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmulh_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmulh_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmulh_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmulh_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmulh_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmulh_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmulh_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmulh_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmulh_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmulh_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmulh_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmulh_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmulh_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmulh_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmulh_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmulh_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
15.4. vmulh.vx
- Mnemonic
vmulh.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed multiply, returning high bits of product, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmulh_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmulh_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmulh_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmulh_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmulh_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmulh_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmulh_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmulh_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmulh_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmulh_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmulh_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmulh_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmulh_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmulh_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmulh_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmulh_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmulh_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmulh_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmulh_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmulh_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmulh_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmulh_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmulh_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vint8mf8_t __riscv_vmulh_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vmulh_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vmulh_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vmulh_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vmulh_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vmulh_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vmulh_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vmulh_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vmulh_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vmulh_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vmulh_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vmulh_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vmulh_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vmulh_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vmulh_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vmulh_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vmulh_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vmulh_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vmulh_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vmulh_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vmulh_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vmulh_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
15.5. vmulhu.vv
- Mnemonic
vmulhu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Unsigned multiply, returning high bits of product, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmulhu_vv.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vmulhu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vmulhu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vmulhu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vmulhu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vmulhu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vmulhu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vmulhu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vmulhu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vmulhu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vmulhu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vmulhu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vmulhu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vmulhu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vmulhu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vmulhu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vmulhu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vmulhu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vmulhu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vmulhu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vmulhu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vmulhu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vmulhu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vmulhu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vmulhu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vmulhu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vmulhu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vmulhu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vmulhu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vmulhu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vmulhu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vmulhu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vmulhu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vmulhu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vmulhu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vmulhu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vmulhu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vmulhu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vmulhu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vmulhu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vmulhu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vmulhu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vmulhu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vmulhu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vmulhu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
15.6. vmulhu.vx
- Mnemonic
vmulhu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Unsigned multiply, returning high bits of product, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmulhu_vx.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vmulhu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vmulhu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vmulhu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vmulhu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vmulhu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vmulhu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vmulhu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vmulhu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vmulhu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vmulhu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vmulhu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vmulhu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vmulhu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vmulhu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vmulhu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vmulhu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vmulhu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vmulhu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vmulhu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vmulhu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vmulhu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vmulhu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vuint8mf8_t __riscv_vmulhu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vmulhu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vmulhu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vmulhu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vmulhu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vmulhu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vmulhu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vmulhu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vmulhu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vmulhu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vmulhu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vmulhu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vmulhu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vmulhu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vmulhu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vmulhu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vmulhu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vmulhu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vmulhu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vmulhu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vmulhu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vmulhu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
15.7. vmulhsu.vv
- Mnemonic
vmulhsu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed(vs2)-Unsigned(vs1) multiply, returning high bits of product, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmulhsu_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmulhsu_vv_i8mf8 (vint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmulhsu_vv_i8mf4 (vint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmulhsu_vv_i8mf2 (vint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmulhsu_vv_i8m1 (vint8m1_t op1, vuint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmulhsu_vv_i8m2 (vint8m2_t op1, vuint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmulhsu_vv_i8m4 (vint8m4_t op1, vuint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmulhsu_vv_i8m8 (vint8m8_t op1, vuint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmulhsu_vv_i16mf4 (vint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmulhsu_vv_i16mf2 (vint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmulhsu_vv_i16m1 (vint16m1_t op1, vuint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmulhsu_vv_i16m2 (vint16m2_t op1, vuint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmulhsu_vv_i16m4 (vint16m4_t op1, vuint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmulhsu_vv_i16m8 (vint16m8_t op1, vuint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmulhsu_vv_i32mf2 (vint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmulhsu_vv_i32m1 (vint32m1_t op1, vuint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmulhsu_vv_i32m2 (vint32m2_t op1, vuint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmulhsu_vv_i32m4 (vint32m4_t op1, vuint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmulhsu_vv_i32m8 (vint32m8_t op1, vuint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmulhsu_vv_i64m1 (vint64m1_t op1, vuint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmulhsu_vv_i64m2 (vint64m2_t op1, vuint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmulhsu_vv_i64m4 (vint64m4_t op1, vuint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmulhsu_vv_i64m8 (vint64m8_t op1, vuint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vmulhsu_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vmulhsu_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vmulhsu_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vmulhsu_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl);
vint8m2_t __riscv_vmulhsu_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl);
vint8m4_t __riscv_vmulhsu_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl);
vint8m8_t __riscv_vmulhsu_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vuint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vmulhsu_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vmulhsu_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vmulhsu_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl);
vint16m2_t __riscv_vmulhsu_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl);
vint16m4_t __riscv_vmulhsu_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl);
vint16m8_t __riscv_vmulhsu_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vuint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vmulhsu_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vmulhsu_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl);
vint32m2_t __riscv_vmulhsu_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl);
vint32m4_t __riscv_vmulhsu_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl);
vint32m8_t __riscv_vmulhsu_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vuint32m8_t op2, size_t vl);
vint64m1_t __riscv_vmulhsu_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vuint64m1_t op2, size_t vl);
vint64m2_t __riscv_vmulhsu_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vuint64m2_t op2, size_t vl);
vint64m4_t __riscv_vmulhsu_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vuint64m4_t op2, size_t vl);
vint64m8_t __riscv_vmulhsu_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vuint64m8_t op2, size_t vl);
15.8. vmulhsu.vx
- Mnemonic
vmulhsu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed(vs2)-Unsigned multiply, returning high bits of product, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmulhsu_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmulhsu_vx_i8mf8 (vint8mf8_t op1, uint8_t op2, size_t vl);
vint8mf4_t __riscv_vmulhsu_vx_i8mf4 (vint8mf4_t op1, uint8_t op2, size_t vl);
vint8mf2_t __riscv_vmulhsu_vx_i8mf2 (vint8mf2_t op1, uint8_t op2, size_t vl);
vint8m1_t __riscv_vmulhsu_vx_i8m1 (vint8m1_t op1, uint8_t op2, size_t vl);
vint8m2_t __riscv_vmulhsu_vx_i8m2 (vint8m2_t op1, uint8_t op2, size_t vl);
vint8m4_t __riscv_vmulhsu_vx_i8m4 (vint8m4_t op1, uint8_t op2, size_t vl);
vint8m8_t __riscv_vmulhsu_vx_i8m8 (vint8m8_t op1, uint8_t op2, size_t vl);
vint16mf4_t __riscv_vmulhsu_vx_i16mf4 (vint16mf4_t op1, uint16_t op2, size_t vl);
vint16mf2_t __riscv_vmulhsu_vx_i16mf2 (vint16mf2_t op1, uint16_t op2, size_t vl);
vint16m1_t __riscv_vmulhsu_vx_i16m1 (vint16m1_t op1, uint16_t op2, size_t vl);
vint16m2_t __riscv_vmulhsu_vx_i16m2 (vint16m2_t op1, uint16_t op2, size_t vl);
vint16m4_t __riscv_vmulhsu_vx_i16m4 (vint16m4_t op1, uint16_t op2, size_t vl);
vint16m8_t __riscv_vmulhsu_vx_i16m8 (vint16m8_t op1, uint16_t op2, size_t vl);
vint32mf2_t __riscv_vmulhsu_vx_i32mf2 (vint32mf2_t op1, uint32_t op2, size_t vl);
vint32m1_t __riscv_vmulhsu_vx_i32m1 (vint32m1_t op1, uint32_t op2, size_t vl);
vint32m2_t __riscv_vmulhsu_vx_i32m2 (vint32m2_t op1, uint32_t op2, size_t vl);
vint32m4_t __riscv_vmulhsu_vx_i32m4 (vint32m4_t op1, uint32_t op2, size_t vl);
vint32m8_t __riscv_vmulhsu_vx_i32m8 (vint32m8_t op1, uint32_t op2, size_t vl);
vint64m1_t __riscv_vmulhsu_vx_i64m1 (vint64m1_t op1, uint64_t op2, size_t vl);
vint64m2_t __riscv_vmulhsu_vx_i64m2 (vint64m2_t op1, uint64_t op2, size_t vl);
vint64m4_t __riscv_vmulhsu_vx_i64m4 (vint64m4_t op1, uint64_t op2, size_t vl);
vint64m8_t __riscv_vmulhsu_vx_i64m8 (vint64m8_t op1, uint64_t op2, size_t vl);
vint8mf8_t __riscv_vmulhsu_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl);
vint8mf4_t __riscv_vmulhsu_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl);
vint8mf2_t __riscv_vmulhsu_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl);
vint8m1_t __riscv_vmulhsu_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl);
vint8m2_t __riscv_vmulhsu_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl);
vint8m4_t __riscv_vmulhsu_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl);
vint8m8_t __riscv_vmulhsu_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, uint8_t op2, size_t vl);
vint16mf4_t __riscv_vmulhsu_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl);
vint16mf2_t __riscv_vmulhsu_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl);
vint16m1_t __riscv_vmulhsu_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl);
vint16m2_t __riscv_vmulhsu_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl);
vint16m4_t __riscv_vmulhsu_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl);
vint16m8_t __riscv_vmulhsu_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, uint16_t op2, size_t vl);
vint32mf2_t __riscv_vmulhsu_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl);
vint32m1_t __riscv_vmulhsu_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl);
vint32m2_t __riscv_vmulhsu_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl);
vint32m4_t __riscv_vmulhsu_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl);
vint32m8_t __riscv_vmulhsu_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, uint32_t op2, size_t vl);
vint64m1_t __riscv_vmulhsu_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, uint64_t op2, size_t vl);
vint64m2_t __riscv_vmulhsu_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, uint64_t op2, size_t vl);
vint64m4_t __riscv_vmulhsu_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, uint64_t op2, size_t vl);
vint64m8_t __riscv_vmulhsu_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, uint64_t op2, size_t vl);
16. Vector Integer Divide Instructions
16.1. vdivu.vv
- Mnemonic
vdivu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Unsigned divide, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vdivu_vv.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vdivu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vdivu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vdivu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vdivu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vdivu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vdivu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vdivu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vdivu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vdivu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vdivu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vdivu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vdivu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vdivu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vdivu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vdivu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vdivu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vdivu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vdivu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vdivu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vdivu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vdivu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vdivu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vdivu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vdivu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vdivu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vdivu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vdivu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vdivu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vdivu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vdivu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vdivu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vdivu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vdivu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vdivu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vdivu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vdivu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vdivu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vdivu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vdivu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vdivu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vdivu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vdivu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vdivu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vdivu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
16.2. vdivu.vx
- Mnemonic
vdivu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Unsigned divide, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vdivu_vx.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vdivu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vdivu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vdivu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vdivu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vdivu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vdivu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vdivu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vdivu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vdivu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vdivu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vdivu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vdivu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vdivu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vdivu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vdivu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vdivu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vdivu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vdivu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vdivu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vdivu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vdivu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vdivu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vuint8mf8_t __riscv_vdivu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vdivu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vdivu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vdivu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vdivu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vdivu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vdivu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vdivu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vdivu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vdivu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vdivu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vdivu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vdivu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vdivu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vdivu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vdivu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vdivu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vdivu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vdivu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vdivu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vdivu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vdivu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
16.3. vdiv.vv
- Mnemonic
vdiv.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed divide, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vdiv_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vdiv_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vdiv_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vdiv_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vdiv_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vdiv_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vdiv_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vdiv_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vdiv_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vdiv_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vdiv_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vdiv_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vdiv_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vdiv_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vdiv_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vdiv_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vdiv_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vdiv_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vdiv_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vdiv_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vdiv_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vdiv_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vdiv_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vdiv_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vdiv_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vdiv_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vdiv_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vdiv_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vdiv_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vdiv_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vdiv_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vdiv_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vdiv_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vdiv_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vdiv_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vdiv_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vdiv_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vdiv_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vdiv_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vdiv_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vdiv_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vdiv_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vdiv_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vdiv_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vdiv_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
16.4. vdiv.vx
- Mnemonic
vdiv.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed divide, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vdiv_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vdiv_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vdiv_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vdiv_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vdiv_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vdiv_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vdiv_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vdiv_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vdiv_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vdiv_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vdiv_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vdiv_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vdiv_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vdiv_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vdiv_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vdiv_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vdiv_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vdiv_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vdiv_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vdiv_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vdiv_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vdiv_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vdiv_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vint8mf8_t __riscv_vdiv_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vdiv_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vdiv_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vdiv_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vdiv_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vdiv_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vdiv_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vdiv_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vdiv_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vdiv_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vdiv_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vdiv_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vdiv_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vdiv_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vdiv_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vdiv_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vdiv_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vdiv_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vdiv_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vdiv_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vdiv_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vdiv_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
16.5. vremu.vv
- Mnemonic
vremu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Unsigned remainder, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vremu_vv.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vremu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vremu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vremu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vremu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vremu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vremu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vremu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vremu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vremu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vremu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vremu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vremu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vremu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vremu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vremu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vremu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vremu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vremu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vremu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vremu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vremu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vremu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vremu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vremu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vremu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vremu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vremu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vremu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vremu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vremu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vremu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vremu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vremu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vremu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vremu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vremu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vremu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vremu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vremu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vremu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vremu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vremu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vremu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vremu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
16.6. vremu.vx
- Mnemonic
vremu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Unsigned remainder, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vremu_vx.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vremu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vremu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vremu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vremu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vremu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vremu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vremu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vremu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vremu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vremu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vremu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vremu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vremu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vremu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vremu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vremu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vremu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vremu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vremu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vremu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vremu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vremu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vuint8mf8_t __riscv_vremu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vremu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vremu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vremu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vremu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vremu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vremu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vremu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vremu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vremu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vremu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vremu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vremu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vremu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vremu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vremu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vremu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vremu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vremu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vremu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vremu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vremu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
16.7. vrem.vv
- Mnemonic
vrem.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed remainder, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrem_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vrem_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vrem_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vrem_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vrem_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vrem_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vrem_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vrem_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vrem_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vrem_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vrem_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vrem_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vrem_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vrem_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vrem_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vrem_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vrem_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vrem_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vrem_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vrem_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vrem_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vrem_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vrem_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vrem_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vrem_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vrem_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vrem_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vrem_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vrem_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vrem_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vrem_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vrem_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vrem_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vrem_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vrem_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vrem_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vrem_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vrem_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vrem_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vrem_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vrem_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vrem_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vrem_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vrem_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vrem_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
16.8. vrem.vx
- Mnemonic
vrem.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed remainder, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrem_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vrem_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vrem_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vrem_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vrem_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vrem_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vrem_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vrem_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vrem_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vrem_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vrem_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vrem_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vrem_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vrem_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vrem_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vrem_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vrem_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vrem_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vrem_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vrem_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vrem_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vrem_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vrem_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vint8mf8_t __riscv_vrem_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vrem_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vrem_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vrem_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vrem_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vrem_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vrem_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vrem_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vrem_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vrem_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vrem_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vrem_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vrem_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vrem_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vrem_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vrem_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vrem_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vrem_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vrem_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vrem_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vrem_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vrem_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
17. Vector Widening Integer Multiply Instructions
17.1. vwmul.vv
- Mnemonic
vwmul.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening signed-integer multiply, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmul_vv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmul_vv_i16mf4 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwmul_vv_i16mf2 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwmul_vv_i16m1 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwmul_vv_i16m2 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwmul_vv_i16m4 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwmul_vv_i16m8 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwmul_vv_i32mf2 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwmul_vv_i32m1 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwmul_vv_i32m2 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwmul_vv_i32m4 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwmul_vv_i32m8 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwmul_vv_i64m1 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwmul_vv_i64m2 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwmul_vv_i64m4 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwmul_vv_i64m8 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint16mf4_t __riscv_vwmul_vv_i16mf4_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwmul_vv_i16mf2_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwmul_vv_i16m1_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwmul_vv_i16m2_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwmul_vv_i16m4_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwmul_vv_i16m8_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwmul_vv_i32mf2_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwmul_vv_i32m1_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwmul_vv_i32m2_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwmul_vv_i32m4_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwmul_vv_i32m8_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwmul_vv_i64m1_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwmul_vv_i64m2_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwmul_vv_i64m4_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwmul_vv_i64m8_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
17.2. vwmul.vx
- Mnemonic
vwmul.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening signed-integer multiply, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmul_vx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmul_vx_i16mf4 (vint8mf8_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwmul_vx_i16mf2 (vint8mf4_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwmul_vx_i16m1 (vint8mf2_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwmul_vx_i16m2 (vint8m1_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwmul_vx_i16m4 (vint8m2_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwmul_vx_i16m8 (vint8m4_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwmul_vx_i32mf2 (vint16mf4_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwmul_vx_i32m1 (vint16mf2_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwmul_vx_i32m2 (vint16m1_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwmul_vx_i32m4 (vint16m2_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwmul_vx_i32m8 (vint16m4_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwmul_vx_i64m1 (vint32mf2_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwmul_vx_i64m2 (vint32m1_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwmul_vx_i64m4 (vint32m2_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwmul_vx_i64m8 (vint32m4_t op1, int32_t op2, size_t vl);
vint16mf4_t __riscv_vwmul_vx_i16mf4_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint16mf2_t __riscv_vwmul_vx_i16mf2_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint16m1_t __riscv_vwmul_vx_i16m1_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint16m2_t __riscv_vwmul_vx_i16m2_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint16m4_t __riscv_vwmul_vx_i16m4_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint16m8_t __riscv_vwmul_vx_i16m8_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint32mf2_t __riscv_vwmul_vx_i32mf2_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint32m1_t __riscv_vwmul_vx_i32m1_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint32m2_t __riscv_vwmul_vx_i32m2_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint32m4_t __riscv_vwmul_vx_i32m4_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint32m8_t __riscv_vwmul_vx_i32m8_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint64m1_t __riscv_vwmul_vx_i64m1_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint64m2_t __riscv_vwmul_vx_i64m2_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint64m4_t __riscv_vwmul_vx_i64m4_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint64m8_t __riscv_vwmul_vx_i64m8_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
17.3. vwmulu.vv
- Mnemonic
vwmulu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening unsigned-integer multiply, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmulu_vv.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwmulu_vv_u16mf4 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwmulu_vv_u16mf2 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwmulu_vv_u16m1 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwmulu_vv_u16m2 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwmulu_vv_u16m4 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwmulu_vv_u16m8 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwmulu_vv_u32mf2 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwmulu_vv_u32m1 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwmulu_vv_u32m2 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwmulu_vv_u32m4 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwmulu_vv_u32m8 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwmulu_vv_u64m1 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwmulu_vv_u64m2 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwmulu_vv_u64m4 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwmulu_vv_u64m8 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint16mf4_t __riscv_vwmulu_vv_u16mf4_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint16mf2_t __riscv_vwmulu_vv_u16mf2_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint16m1_t __riscv_vwmulu_vv_u16m1_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint16m2_t __riscv_vwmulu_vv_u16m2_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint16m4_t __riscv_vwmulu_vv_u16m4_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint16m8_t __riscv_vwmulu_vv_u16m8_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint32mf2_t __riscv_vwmulu_vv_u32mf2_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint32m1_t __riscv_vwmulu_vv_u32m1_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint32m2_t __riscv_vwmulu_vv_u32m2_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint32m4_t __riscv_vwmulu_vv_u32m4_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint32m8_t __riscv_vwmulu_vv_u32m8_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint64m1_t __riscv_vwmulu_vv_u64m1_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint64m2_t __riscv_vwmulu_vv_u64m2_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint64m4_t __riscv_vwmulu_vv_u64m4_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint64m8_t __riscv_vwmulu_vv_u64m8_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
17.4. vwmulu.vx
- Mnemonic
vwmulu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening unsigned-integer multiply, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmulu_vx.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwmulu_vx_u16mf4 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwmulu_vx_u16mf2 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwmulu_vx_u16m1 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwmulu_vx_u16m2 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwmulu_vx_u16m4 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwmulu_vx_u16m8 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwmulu_vx_u32mf2 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwmulu_vx_u32m1 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwmulu_vx_u32m2 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwmulu_vx_u32m4 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwmulu_vx_u32m8 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwmulu_vx_u64m1 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwmulu_vx_u64m2 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwmulu_vx_u64m4 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwmulu_vx_u64m8 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint16mf4_t __riscv_vwmulu_vx_u16mf4_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint16mf2_t __riscv_vwmulu_vx_u16mf2_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint16m1_t __riscv_vwmulu_vx_u16m1_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint16m2_t __riscv_vwmulu_vx_u16m2_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint16m4_t __riscv_vwmulu_vx_u16m4_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint16m8_t __riscv_vwmulu_vx_u16m8_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint32mf2_t __riscv_vwmulu_vx_u32mf2_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint32m1_t __riscv_vwmulu_vx_u32m1_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint32m2_t __riscv_vwmulu_vx_u32m2_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint32m4_t __riscv_vwmulu_vx_u32m4_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint32m8_t __riscv_vwmulu_vx_u32m8_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint64m1_t __riscv_vwmulu_vx_u64m1_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint64m2_t __riscv_vwmulu_vx_u64m2_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint64m4_t __riscv_vwmulu_vx_u64m4_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint64m8_t __riscv_vwmulu_vx_u64m8_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
17.5. vwmulsu.vv
- Mnemonic
vwmulsu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening signed(vs2)-unsigned integer multiply, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmulsu_vv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmulsu_vv_i16mf4 (vint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwmulsu_vv_i16mf2 (vint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwmulsu_vv_i16m1 (vint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwmulsu_vv_i16m2 (vint8m1_t op1, vuint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwmulsu_vv_i16m4 (vint8m2_t op1, vuint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwmulsu_vv_i16m8 (vint8m4_t op1, vuint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwmulsu_vv_i32mf2 (vint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwmulsu_vv_i32m1 (vint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwmulsu_vv_i32m2 (vint16m1_t op1, vuint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwmulsu_vv_i32m4 (vint16m2_t op1, vuint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwmulsu_vv_i32m8 (vint16m4_t op1, vuint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwmulsu_vv_i64m1 (vint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwmulsu_vv_i64m2 (vint32m1_t op1, vuint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwmulsu_vv_i64m4 (vint32m2_t op1, vuint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwmulsu_vv_i64m8 (vint32m4_t op1, vuint32m4_t op2, size_t vl);
vint16mf4_t __riscv_vwmulsu_vv_i16mf4_m (vbool64_t mask, vint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vint16mf2_t __riscv_vwmulsu_vv_i16mf2_m (vbool32_t mask, vint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vint16m1_t __riscv_vwmulsu_vv_i16m1_m (vbool16_t mask, vint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vint16m2_t __riscv_vwmulsu_vv_i16m2_m (vbool8_t mask, vint8m1_t op1, vuint8m1_t op2, size_t vl);
vint16m4_t __riscv_vwmulsu_vv_i16m4_m (vbool4_t mask, vint8m2_t op1, vuint8m2_t op2, size_t vl);
vint16m8_t __riscv_vwmulsu_vv_i16m8_m (vbool2_t mask, vint8m4_t op1, vuint8m4_t op2, size_t vl);
vint32mf2_t __riscv_vwmulsu_vv_i32mf2_m (vbool64_t mask, vint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vint32m1_t __riscv_vwmulsu_vv_i32m1_m (vbool32_t mask, vint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vint32m2_t __riscv_vwmulsu_vv_i32m2_m (vbool16_t mask, vint16m1_t op1, vuint16m1_t op2, size_t vl);
vint32m4_t __riscv_vwmulsu_vv_i32m4_m (vbool8_t mask, vint16m2_t op1, vuint16m2_t op2, size_t vl);
vint32m8_t __riscv_vwmulsu_vv_i32m8_m (vbool4_t mask, vint16m4_t op1, vuint16m4_t op2, size_t vl);
vint64m1_t __riscv_vwmulsu_vv_i64m1_m (vbool64_t mask, vint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vint64m2_t __riscv_vwmulsu_vv_i64m2_m (vbool32_t mask, vint32m1_t op1, vuint32m1_t op2, size_t vl);
vint64m4_t __riscv_vwmulsu_vv_i64m4_m (vbool16_t mask, vint32m2_t op1, vuint32m2_t op2, size_t vl);
vint64m8_t __riscv_vwmulsu_vv_i64m8_m (vbool8_t mask, vint32m4_t op1, vuint32m4_t op2, size_t vl);
17.6. vwmulsu.vx
- Mnemonic
vwmulsu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Widening signed(vs2)-unsigned integer multiply, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmulsu_vx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmulsu_vx_i16mf4 (vint8mf8_t op1, uint8_t op2, size_t vl);
vint16mf2_t __riscv_vwmulsu_vx_i16mf2 (vint8mf4_t op1, uint8_t op2, size_t vl);
vint16m1_t __riscv_vwmulsu_vx_i16m1 (vint8mf2_t op1, uint8_t op2, size_t vl);
vint16m2_t __riscv_vwmulsu_vx_i16m2 (vint8m1_t op1, uint8_t op2, size_t vl);
vint16m4_t __riscv_vwmulsu_vx_i16m4 (vint8m2_t op1, uint8_t op2, size_t vl);
vint16m8_t __riscv_vwmulsu_vx_i16m8 (vint8m4_t op1, uint8_t op2, size_t vl);
vint32mf2_t __riscv_vwmulsu_vx_i32mf2 (vint16mf4_t op1, uint16_t op2, size_t vl);
vint32m1_t __riscv_vwmulsu_vx_i32m1 (vint16mf2_t op1, uint16_t op2, size_t vl);
vint32m2_t __riscv_vwmulsu_vx_i32m2 (vint16m1_t op1, uint16_t op2, size_t vl);
vint32m4_t __riscv_vwmulsu_vx_i32m4 (vint16m2_t op1, uint16_t op2, size_t vl);
vint32m8_t __riscv_vwmulsu_vx_i32m8 (vint16m4_t op1, uint16_t op2, size_t vl);
vint64m1_t __riscv_vwmulsu_vx_i64m1 (vint32mf2_t op1, uint32_t op2, size_t vl);
vint64m2_t __riscv_vwmulsu_vx_i64m2 (vint32m1_t op1, uint32_t op2, size_t vl);
vint64m4_t __riscv_vwmulsu_vx_i64m4 (vint32m2_t op1, uint32_t op2, size_t vl);
vint64m8_t __riscv_vwmulsu_vx_i64m8 (vint32m4_t op1, uint32_t op2, size_t vl);
vint16mf4_t __riscv_vwmulsu_vx_i16mf4_m (vbool64_t mask, vint8mf8_t op1, uint8_t op2, size_t vl);
vint16mf2_t __riscv_vwmulsu_vx_i16mf2_m (vbool32_t mask, vint8mf4_t op1, uint8_t op2, size_t vl);
vint16m1_t __riscv_vwmulsu_vx_i16m1_m (vbool16_t mask, vint8mf2_t op1, uint8_t op2, size_t vl);
vint16m2_t __riscv_vwmulsu_vx_i16m2_m (vbool8_t mask, vint8m1_t op1, uint8_t op2, size_t vl);
vint16m4_t __riscv_vwmulsu_vx_i16m4_m (vbool4_t mask, vint8m2_t op1, uint8_t op2, size_t vl);
vint16m8_t __riscv_vwmulsu_vx_i16m8_m (vbool2_t mask, vint8m4_t op1, uint8_t op2, size_t vl);
vint32mf2_t __riscv_vwmulsu_vx_i32mf2_m (vbool64_t mask, vint16mf4_t op1, uint16_t op2, size_t vl);
vint32m1_t __riscv_vwmulsu_vx_i32m1_m (vbool32_t mask, vint16mf2_t op1, uint16_t op2, size_t vl);
vint32m2_t __riscv_vwmulsu_vx_i32m2_m (vbool16_t mask, vint16m1_t op1, uint16_t op2, size_t vl);
vint32m4_t __riscv_vwmulsu_vx_i32m4_m (vbool8_t mask, vint16m2_t op1, uint16_t op2, size_t vl);
vint32m8_t __riscv_vwmulsu_vx_i32m8_m (vbool4_t mask, vint16m4_t op1, uint16_t op2, size_t vl);
vint64m1_t __riscv_vwmulsu_vx_i64m1_m (vbool64_t mask, vint32mf2_t op1, uint32_t op2, size_t vl);
vint64m2_t __riscv_vwmulsu_vx_i64m2_m (vbool32_t mask, vint32m1_t op1, uint32_t op2, size_t vl);
vint64m4_t __riscv_vwmulsu_vx_i64m4_m (vbool16_t mask, vint32m2_t op1, uint32_t op2, size_t vl);
vint64m8_t __riscv_vwmulsu_vx_i64m8_m (vbool8_t mask, vint32m4_t op1, uint32_t op2, size_t vl);
18. Vector Single-Width Integer Multiply-Add Instructions
18.1. vmacc.vv
- Mnemonic
vmacc.vv vd, vs1, vs2, vm
- Encoding
- Description
-
Integer multiply-add, overwrite addend. vd[i] = +(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmacc_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmacc_vv_i8mf8 (vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmacc_vv_i8mf4 (vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmacc_vv_i8mf2 (vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmacc_vv_i8m1 (vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmacc_vv_i8m2 (vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmacc_vv_i8m4 (vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmacc_vv_i8m8 (vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmacc_vv_i16mf4 (vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmacc_vv_i16mf2 (vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmacc_vv_i16m1 (vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmacc_vv_i16m2 (vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmacc_vv_i16m4 (vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmacc_vv_i16m8 (vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmacc_vv_i32mf2 (vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmacc_vv_i32m1 (vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmacc_vv_i32m2 (vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmacc_vv_i32m4 (vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmacc_vv_i32m8 (vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmacc_vv_i64m1 (vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmacc_vv_i64m2 (vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmacc_vv_i64m4 (vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmacc_vv_i64m8 (vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmacc_vv_u8mf8 (vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmacc_vv_u8mf4 (vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmacc_vv_u8mf2 (vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmacc_vv_u8m1 (vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmacc_vv_u8m2 (vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmacc_vv_u8m4 (vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmacc_vv_u8m8 (vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmacc_vv_u16mf4 (vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmacc_vv_u16mf2 (vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmacc_vv_u16m1 (vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmacc_vv_u16m2 (vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmacc_vv_u16m4 (vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmacc_vv_u16m8 (vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmacc_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmacc_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmacc_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmacc_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmacc_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmacc_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmacc_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmacc_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmacc_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vmacc_vv_i8mf8_m (vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmacc_vv_i8mf4_m (vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmacc_vv_i8mf2_m (vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmacc_vv_i8m1_m (vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmacc_vv_i8m2_m (vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmacc_vv_i8m4_m (vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmacc_vv_i8m8_m (vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmacc_vv_i16mf4_m (vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmacc_vv_i16mf2_m (vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmacc_vv_i16m1_m (vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmacc_vv_i16m2_m (vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmacc_vv_i16m4_m (vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmacc_vv_i16m8_m (vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmacc_vv_i32mf2_m (vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmacc_vv_i32m1_m (vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmacc_vv_i32m2_m (vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmacc_vv_i32m4_m (vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmacc_vv_i32m8_m (vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmacc_vv_i64m1_m (vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmacc_vv_i64m2_m (vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmacc_vv_i64m4_m (vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmacc_vv_i64m8_m (vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmacc_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmacc_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmacc_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmacc_vv_u8m1_m (vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmacc_vv_u8m2_m (vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmacc_vv_u8m4_m (vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmacc_vv_u8m8_m (vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmacc_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmacc_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmacc_vv_u16m1_m (vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmacc_vv_u16m2_m (vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmacc_vv_u16m4_m (vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmacc_vv_u16m8_m (vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmacc_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmacc_vv_u32m1_m (vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmacc_vv_u32m2_m (vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmacc_vv_u32m4_m (vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmacc_vv_u32m8_m (vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmacc_vv_u64m1_m (vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmacc_vv_u64m2_m (vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmacc_vv_u64m4_m (vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmacc_vv_u64m8_m (vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
18.2. vmacc.vx
- Mnemonic
vmacc.vx vd, rs1, vs2, vm
- Encoding
- Description
- Integer multiply-add, overwrite addend. vd[i] = +(x[rs1] * vs2[i]) + vd[i]
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmacc_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmacc_vx_i8mf8 (vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmacc_vx_i8mf4 (vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmacc_vx_i8mf2 (vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmacc_vx_i8m1 (vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmacc_vx_i8m2 (vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmacc_vx_i8m4 (vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmacc_vx_i8m8 (vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmacc_vx_i16mf4 (vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmacc_vx_i16mf2 (vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmacc_vx_i16m1 (vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmacc_vx_i16m2 (vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmacc_vx_i16m4 (vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmacc_vx_i16m8 (vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmacc_vx_i32mf2 (vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmacc_vx_i32m1 (vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmacc_vx_i32m2 (vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmacc_vx_i32m4 (vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmacc_vx_i32m8 (vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmacc_vx_i64m1 (vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmacc_vx_i64m2 (vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmacc_vx_i64m4 (vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmacc_vx_i64m8 (vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmacc_vx_u8mf8 (vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmacc_vx_u8mf4 (vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmacc_vx_u8mf2 (vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmacc_vx_u8m1 (vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmacc_vx_u8m2 (vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmacc_vx_u8m4 (vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmacc_vx_u8m8 (vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmacc_vx_u16mf4 (vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmacc_vx_u16mf2 (vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmacc_vx_u16m1 (vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmacc_vx_u16m2 (vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmacc_vx_u16m4 (vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmacc_vx_u16m8 (vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmacc_vx_u32mf2 (vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmacc_vx_u32m1 (vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmacc_vx_u32m2 (vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmacc_vx_u32m4 (vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmacc_vx_u32m8 (vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmacc_vx_u64m1 (vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmacc_vx_u64m2 (vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmacc_vx_u64m4 (vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmacc_vx_u64m8 (vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vmacc_vx_i8mf8_m (vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmacc_vx_i8mf4_m (vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmacc_vx_i8mf2_m (vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmacc_vx_i8m1_m (vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmacc_vx_i8m2_m (vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmacc_vx_i8m4_m (vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmacc_vx_i8m8_m (vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmacc_vx_i16mf4_m (vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmacc_vx_i16mf2_m (vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmacc_vx_i16m1_m (vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmacc_vx_i16m2_m (vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmacc_vx_i16m4_m (vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmacc_vx_i16m8_m (vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmacc_vx_i32mf2_m (vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmacc_vx_i32m1_m (vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmacc_vx_i32m2_m (vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmacc_vx_i32m4_m (vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmacc_vx_i32m8_m (vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmacc_vx_i64m1_m (vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmacc_vx_i64m2_m (vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmacc_vx_i64m4_m (vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmacc_vx_i64m8_m (vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmacc_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmacc_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmacc_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmacc_vx_u8m1_m (vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmacc_vx_u8m2_m (vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmacc_vx_u8m4_m (vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmacc_vx_u8m8_m (vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmacc_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmacc_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmacc_vx_u16m1_m (vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmacc_vx_u16m2_m (vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmacc_vx_u16m4_m (vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmacc_vx_u16m8_m (vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmacc_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmacc_vx_u32m1_m (vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmacc_vx_u32m2_m (vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmacc_vx_u32m4_m (vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmacc_vx_u32m8_m (vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmacc_vx_u64m1_m (vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmacc_vx_u64m2_m (vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmacc_vx_u64m4_m (vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmacc_vx_u64m8_m (vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
18.3. vnmsac.vv
- Mnemonic
vnmsac.vv vd, vs1, vs2, vm
- Encoding
- Description
- Integer multiply-sub, overwrite minuend. vd[i] = -(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnmsac_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnmsac_vv_i8mf8 (vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsac_vv_i8mf4 (vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsac_vv_i8mf2 (vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsac_vv_i8m1 (vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsac_vv_i8m2 (vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsac_vv_i8m4 (vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsac_vv_i8m8 (vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsac_vv_i16mf4 (vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsac_vv_i16mf2 (vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsac_vv_i16m1 (vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsac_vv_i16m2 (vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsac_vv_i16m4 (vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsac_vv_i16m8 (vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsac_vv_i32mf2 (vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsac_vv_i32m1 (vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsac_vv_i32m2 (vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsac_vv_i32m4 (vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsac_vv_i32m8 (vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsac_vv_i64m1 (vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsac_vv_i64m2 (vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsac_vv_i64m4 (vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsac_vv_i64m8 (vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsac_vv_u8mf8 (vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsac_vv_u8mf4 (vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsac_vv_u8mf2 (vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsac_vv_u8m1 (vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsac_vv_u8m2 (vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsac_vv_u8m4 (vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsac_vv_u8m8 (vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsac_vv_u16mf4 (vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsac_vv_u16mf2 (vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsac_vv_u16m1 (vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsac_vv_u16m2 (vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsac_vv_u16m4 (vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsac_vv_u16m8 (vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsac_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsac_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsac_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsac_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsac_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsac_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsac_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsac_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsac_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vnmsac_vv_i8mf8_m (vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsac_vv_i8mf4_m (vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsac_vv_i8mf2_m (vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsac_vv_i8m1_m (vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsac_vv_i8m2_m (vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsac_vv_i8m4_m (vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsac_vv_i8m8_m (vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsac_vv_i16mf4_m (vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsac_vv_i16mf2_m (vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsac_vv_i16m1_m (vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsac_vv_i16m2_m (vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsac_vv_i16m4_m (vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsac_vv_i16m8_m (vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsac_vv_i32mf2_m (vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsac_vv_i32m1_m (vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsac_vv_i32m2_m (vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsac_vv_i32m4_m (vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsac_vv_i32m8_m (vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsac_vv_i64m1_m (vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsac_vv_i64m2_m (vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsac_vv_i64m4_m (vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsac_vv_i64m8_m (vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsac_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsac_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsac_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsac_vv_u8m1_m (vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsac_vv_u8m2_m (vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsac_vv_u8m4_m (vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsac_vv_u8m8_m (vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsac_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsac_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsac_vv_u16m1_m (vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsac_vv_u16m2_m (vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsac_vv_u16m4_m (vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsac_vv_u16m8_m (vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsac_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsac_vv_u32m1_m (vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsac_vv_u32m2_m (vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsac_vv_u32m4_m (vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsac_vv_u32m8_m (vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsac_vv_u64m1_m (vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsac_vv_u64m2_m (vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsac_vv_u64m4_m (vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsac_vv_u64m8_m (vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
18.4. vnmsac.vx
- Mnemonic
vnmsac.vx vd, rs1, vs2, vm
- Encoding
- Description
- Integer multiply-sub, overwrite minuend. vd[i] = -(x[rs1] * vs2[i]) + vd[i]
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnmsac_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnmsac_vx_i8mf8 (vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsac_vx_i8mf4 (vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsac_vx_i8mf2 (vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsac_vx_i8m1 (vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsac_vx_i8m2 (vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsac_vx_i8m4 (vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsac_vx_i8m8 (vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsac_vx_i16mf4 (vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsac_vx_i16mf2 (vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsac_vx_i16m1 (vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsac_vx_i16m2 (vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsac_vx_i16m4 (vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsac_vx_i16m8 (vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsac_vx_i32mf2 (vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsac_vx_i32m1 (vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsac_vx_i32m2 (vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsac_vx_i32m4 (vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsac_vx_i32m8 (vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsac_vx_i64m1 (vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsac_vx_i64m2 (vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsac_vx_i64m4 (vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsac_vx_i64m8 (vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsac_vx_u8mf8 (vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsac_vx_u8mf4 (vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsac_vx_u8mf2 (vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsac_vx_u8m1 (vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsac_vx_u8m2 (vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsac_vx_u8m4 (vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsac_vx_u8m8 (vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsac_vx_u16mf4 (vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsac_vx_u16mf2 (vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsac_vx_u16m1 (vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsac_vx_u16m2 (vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsac_vx_u16m4 (vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsac_vx_u16m8 (vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsac_vx_u32mf2 (vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsac_vx_u32m1 (vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsac_vx_u32m2 (vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsac_vx_u32m4 (vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsac_vx_u32m8 (vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsac_vx_u64m1 (vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsac_vx_u64m2 (vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsac_vx_u64m4 (vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsac_vx_u64m8 (vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vnmsac_vx_i8mf8_m (vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsac_vx_i8mf4_m (vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsac_vx_i8mf2_m (vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsac_vx_i8m1_m (vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsac_vx_i8m2_m (vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsac_vx_i8m4_m (vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsac_vx_i8m8_m (vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsac_vx_i16mf4_m (vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsac_vx_i16mf2_m (vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsac_vx_i16m1_m (vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsac_vx_i16m2_m (vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsac_vx_i16m4_m (vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsac_vx_i16m8_m (vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsac_vx_i32mf2_m (vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsac_vx_i32m1_m (vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsac_vx_i32m2_m (vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsac_vx_i32m4_m (vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsac_vx_i32m8_m (vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsac_vx_i64m1_m (vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsac_vx_i64m2_m (vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsac_vx_i64m4_m (vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsac_vx_i64m8_m (vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsac_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsac_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsac_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsac_vx_u8m1_m (vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsac_vx_u8m2_m (vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsac_vx_u8m4_m (vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsac_vx_u8m8_m (vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsac_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsac_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsac_vx_u16m1_m (vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsac_vx_u16m2_m (vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsac_vx_u16m4_m (vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsac_vx_u16m8_m (vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsac_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsac_vx_u32m1_m (vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsac_vx_u32m2_m (vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsac_vx_u32m4_m (vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsac_vx_u32m8_m (vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsac_vx_u64m1_m (vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsac_vx_u64m2_m (vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsac_vx_u64m4_m (vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsac_vx_u64m8_m (vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
18.5. vmadd.vv
- Mnemonic
vmadd.vv vd, vs1, vs2, vm
- Encoding
- Description
- Integer multiply-add, overwrite multiplicand. vd[i] = (vs1[i] * vd[i]) + vs2[i]
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadd_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmadd_vv_i8mf8 (vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmadd_vv_i8mf4 (vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmadd_vv_i8mf2 (vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmadd_vv_i8m1 (vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmadd_vv_i8m2 (vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmadd_vv_i8m4 (vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmadd_vv_i8m8 (vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmadd_vv_i16mf4 (vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmadd_vv_i16mf2 (vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmadd_vv_i16m1 (vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmadd_vv_i16m2 (vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmadd_vv_i16m4 (vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmadd_vv_i16m8 (vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmadd_vv_i32mf2 (vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmadd_vv_i32m1 (vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmadd_vv_i32m2 (vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmadd_vv_i32m4 (vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmadd_vv_i32m8 (vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmadd_vv_i64m1 (vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmadd_vv_i64m2 (vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmadd_vv_i64m4 (vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmadd_vv_i64m8 (vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmadd_vv_u8mf8 (vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmadd_vv_u8mf4 (vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmadd_vv_u8mf2 (vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmadd_vv_u8m1 (vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmadd_vv_u8m2 (vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmadd_vv_u8m4 (vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmadd_vv_u8m8 (vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmadd_vv_u16mf4 (vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmadd_vv_u16mf2 (vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmadd_vv_u16m1 (vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmadd_vv_u16m2 (vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmadd_vv_u16m4 (vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmadd_vv_u16m8 (vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmadd_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmadd_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmadd_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmadd_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmadd_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmadd_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmadd_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmadd_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmadd_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vmadd_vv_i8mf8_m (vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmadd_vv_i8mf4_m (vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmadd_vv_i8mf2_m (vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmadd_vv_i8m1_m (vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmadd_vv_i8m2_m (vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmadd_vv_i8m4_m (vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmadd_vv_i8m8_m (vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmadd_vv_i16m1_m (vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmadd_vv_i16m2_m (vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmadd_vv_i16m4_m (vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmadd_vv_i16m8_m (vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmadd_vv_i32m1_m (vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmadd_vv_i32m2_m (vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmadd_vv_i32m4_m (vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmadd_vv_i32m8_m (vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmadd_vv_i64m1_m (vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmadd_vv_i64m2_m (vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmadd_vv_i64m4_m (vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmadd_vv_i64m8_m (vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmadd_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmadd_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmadd_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmadd_vv_u8m1_m (vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmadd_vv_u8m2_m (vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmadd_vv_u8m4_m (vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmadd_vv_u8m8_m (vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmadd_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmadd_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmadd_vv_u16m1_m (vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmadd_vv_u16m2_m (vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmadd_vv_u16m4_m (vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmadd_vv_u16m8_m (vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmadd_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmadd_vv_u32m1_m (vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmadd_vv_u32m2_m (vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmadd_vv_u32m4_m (vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmadd_vv_u32m8_m (vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmadd_vv_u64m1_m (vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmadd_vv_u64m2_m (vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmadd_vv_u64m4_m (vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmadd_vv_u64m8_m (vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
18.6. vmadd.vx
- Mnemonic
vmadd.vx vd, rs1, vs2, vm
- Encoding
- Description
-
Integer multiply-add, overwrite multiplicand. vd[i] = (x[rs1] * vd[i]) + vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmadd_vx.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmadd_vx_i8mf8 (vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmadd_vx_i8mf4 (vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmadd_vx_i8mf2 (vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmadd_vx_i8m1 (vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmadd_vx_i8m2 (vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmadd_vx_i8m4 (vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmadd_vx_i8m8 (vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmadd_vx_i16mf4 (vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmadd_vx_i16mf2 (vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmadd_vx_i16m1 (vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmadd_vx_i16m2 (vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmadd_vx_i16m4 (vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmadd_vx_i16m8 (vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmadd_vx_i32mf2 (vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmadd_vx_i32m1 (vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmadd_vx_i32m2 (vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmadd_vx_i32m4 (vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmadd_vx_i32m8 (vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmadd_vx_i64m1 (vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmadd_vx_i64m2 (vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmadd_vx_i64m4 (vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmadd_vx_i64m8 (vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmadd_vx_u8mf8 (vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmadd_vx_u8mf4 (vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmadd_vx_u8mf2 (vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmadd_vx_u8m1 (vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmadd_vx_u8m2 (vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmadd_vx_u8m4 (vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmadd_vx_u8m8 (vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmadd_vx_u16mf4 (vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmadd_vx_u16mf2 (vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmadd_vx_u16m1 (vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmadd_vx_u16m2 (vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmadd_vx_u16m4 (vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmadd_vx_u16m8 (vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmadd_vx_u32mf2 (vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmadd_vx_u32m1 (vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmadd_vx_u32m2 (vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmadd_vx_u32m4 (vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmadd_vx_u32m8 (vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmadd_vx_u64m1 (vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmadd_vx_u64m2 (vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmadd_vx_u64m4 (vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmadd_vx_u64m8 (vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vmadd_vx_i8mf8_m (vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vmadd_vx_i8mf4_m (vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vmadd_vx_i8mf2_m (vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vmadd_vx_i8m1_m (vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vmadd_vx_i8m2_m (vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vmadd_vx_i8m4_m (vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vmadd_vx_i8m8_m (vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vmadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vmadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vmadd_vx_i16m1_m (vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vmadd_vx_i16m2_m (vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vmadd_vx_i16m4_m (vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vmadd_vx_i16m8_m (vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vmadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vmadd_vx_i32m1_m (vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vmadd_vx_i32m2_m (vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vmadd_vx_i32m4_m (vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vmadd_vx_i32m8_m (vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vmadd_vx_i64m1_m (vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vmadd_vx_i64m2_m (vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vmadd_vx_i64m4_m (vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vmadd_vx_i64m8_m (vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vmadd_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vmadd_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vmadd_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vmadd_vx_u8m1_m (vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vmadd_vx_u8m2_m (vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vmadd_vx_u8m4_m (vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vmadd_vx_u8m8_m (vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vmadd_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vmadd_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vmadd_vx_u16m1_m (vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vmadd_vx_u16m2_m (vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vmadd_vx_u16m4_m (vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vmadd_vx_u16m8_m (vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vmadd_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vmadd_vx_u32m1_m (vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vmadd_vx_u32m2_m (vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vmadd_vx_u32m4_m (vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vmadd_vx_u32m8_m (vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vmadd_vx_u64m1_m (vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vmadd_vx_u64m2_m (vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vmadd_vx_u64m4_m (vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vmadd_vx_u64m8_m (vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
18.7. vnmsub.vv
- Mnemonic
vnmsub.vv vd, vs1, vs2, vm
- Encoding
- Description
-
Integer multiply-sub, overwrite multiplicand. vd[i] = -(vs1[i] * vd[i]) + vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnmsub_vv.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnmsub_vv_i8mf8 (vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsub_vv_i8mf4 (vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsub_vv_i8mf2 (vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsub_vv_i8m1 (vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsub_vv_i8m2 (vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsub_vv_i8m4 (vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsub_vv_i8m8 (vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsub_vv_i16mf4 (vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsub_vv_i16mf2 (vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsub_vv_i16m1 (vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsub_vv_i16m2 (vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsub_vv_i16m4 (vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsub_vv_i16m8 (vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsub_vv_i32mf2 (vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsub_vv_i32m1 (vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsub_vv_i32m2 (vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsub_vv_i32m4 (vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsub_vv_i32m8 (vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsub_vv_i64m1 (vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsub_vv_i64m2 (vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsub_vv_i64m4 (vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsub_vv_i64m8 (vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsub_vv_u8mf8 (vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsub_vv_u8mf4 (vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsub_vv_u8mf2 (vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsub_vv_u8m1 (vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsub_vv_u8m2 (vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsub_vv_u8m4 (vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsub_vv_u8m8 (vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsub_vv_u16mf4 (vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsub_vv_u16mf2 (vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsub_vv_u16m1 (vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsub_vv_u16m2 (vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsub_vv_u16m4 (vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsub_vv_u16m8 (vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsub_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsub_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsub_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsub_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsub_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsub_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsub_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsub_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsub_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vnmsub_vv_i8mf8_m (vbool64_t mask, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsub_vv_i8mf4_m (vbool32_t mask, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsub_vv_i8mf2_m (vbool16_t mask, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsub_vv_i8m1_m (vbool8_t mask, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsub_vv_i8m2_m (vbool4_t mask, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsub_vv_i8m4_m (vbool2_t mask, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsub_vv_i8m8_m (vbool1_t mask, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsub_vv_i16m1_m (vbool16_t mask, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsub_vv_i16m2_m (vbool8_t mask, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsub_vv_i16m4_m (vbool4_t mask, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsub_vv_i16m8_m (vbool2_t mask, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsub_vv_i32m1_m (vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsub_vv_i32m2_m (vbool16_t mask, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsub_vv_i32m4_m (vbool8_t mask, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsub_vv_i32m8_m (vbool4_t mask, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsub_vv_i64m1_m (vbool64_t mask, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsub_vv_i64m2_m (vbool32_t mask, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsub_vv_i64m4_m (vbool16_t mask, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsub_vv_i64m8_m (vbool8_t mask, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsub_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsub_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsub_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsub_vv_u8m1_m (vbool8_t mask, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsub_vv_u8m2_m (vbool4_t mask, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsub_vv_u8m4_m (vbool2_t mask, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsub_vv_u8m8_m (vbool1_t mask, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsub_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsub_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsub_vv_u16m1_m (vbool16_t mask, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsub_vv_u16m2_m (vbool8_t mask, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsub_vv_u16m4_m (vbool4_t mask, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsub_vv_u16m8_m (vbool2_t mask, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsub_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsub_vv_u32m1_m (vbool32_t mask, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsub_vv_u32m2_m (vbool16_t mask, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsub_vv_u32m4_m (vbool8_t mask, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsub_vv_u32m8_m (vbool4_t mask, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsub_vv_u64m1_m (vbool64_t mask, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsub_vv_u64m2_m (vbool32_t mask, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsub_vv_u64m4_m (vbool16_t mask, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsub_vv_u64m8_m (vbool8_t mask, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl);
18.8. vnmsub.vx
- Mnemonic
vnmsub.vx vd, rs1, vs2, vm
- Encoding
- Description
-
Integer multiply-sub, overwrite multiplicand. vd[i] = -(x[rs1] * vd[i]) + vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vnmsub_vx.h
19. Vector Widening Integer Multiply-Add Instructions
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnmsub_vx_i8mf8 (vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsub_vx_i8mf4 (vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsub_vx_i8mf2 (vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsub_vx_i8m1 (vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsub_vx_i8m2 (vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsub_vx_i8m4 (vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsub_vx_i8m8 (vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsub_vx_i16mf4 (vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsub_vx_i16mf2 (vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsub_vx_i16m1 (vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsub_vx_i16m2 (vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsub_vx_i16m4 (vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsub_vx_i16m8 (vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsub_vx_i32mf2 (vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsub_vx_i32m1 (vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsub_vx_i32m2 (vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsub_vx_i32m4 (vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsub_vx_i32m8 (vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsub_vx_i64m1 (vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsub_vx_i64m2 (vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsub_vx_i64m4 (vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsub_vx_i64m8 (vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsub_vx_u8mf8 (vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsub_vx_u8mf4 (vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsub_vx_u8mf2 (vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsub_vx_u8m1 (vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsub_vx_u8m2 (vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsub_vx_u8m4 (vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsub_vx_u8m8 (vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsub_vx_u16mf4 (vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsub_vx_u16mf2 (vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsub_vx_u16m1 (vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsub_vx_u16m2 (vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsub_vx_u16m4 (vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsub_vx_u16m8 (vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsub_vx_u32mf2 (vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsub_vx_u32m1 (vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsub_vx_u32m2 (vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsub_vx_u32m4 (vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsub_vx_u32m8 (vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsub_vx_u64m1 (vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsub_vx_u64m2 (vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsub_vx_u64m4 (vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsub_vx_u64m8 (vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
vint8mf8_t __riscv_vnmsub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint8mf4_t __riscv_vnmsub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint8mf2_t __riscv_vnmsub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint8m1_t __riscv_vnmsub_vx_i8m1_m (vbool8_t mask, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint8m2_t __riscv_vnmsub_vx_i8m2_m (vbool4_t mask, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint8m4_t __riscv_vnmsub_vx_i8m4_m (vbool2_t mask, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint8m8_t __riscv_vnmsub_vx_i8m8_m (vbool1_t mask, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl);
vint16mf4_t __riscv_vnmsub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint16mf2_t __riscv_vnmsub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint16m1_t __riscv_vnmsub_vx_i16m1_m (vbool16_t mask, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint16m2_t __riscv_vnmsub_vx_i16m2_m (vbool8_t mask, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint16m4_t __riscv_vnmsub_vx_i16m4_m (vbool4_t mask, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint16m8_t __riscv_vnmsub_vx_i16m8_m (vbool2_t mask, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl);
vint32mf2_t __riscv_vnmsub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint32m1_t __riscv_vnmsub_vx_i32m1_m (vbool32_t mask, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint32m2_t __riscv_vnmsub_vx_i32m2_m (vbool16_t mask, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint32m4_t __riscv_vnmsub_vx_i32m4_m (vbool8_t mask, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint32m8_t __riscv_vnmsub_vx_i32m8_m (vbool4_t mask, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl);
vint64m1_t __riscv_vnmsub_vx_i64m1_m (vbool64_t mask, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl);
vint64m2_t __riscv_vnmsub_vx_i64m2_m (vbool32_t mask, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl);
vint64m4_t __riscv_vnmsub_vx_i64m4_m (vbool16_t mask, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl);
vint64m8_t __riscv_vnmsub_vx_i64m8_m (vbool8_t mask, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl);
vuint8mf8_t __riscv_vnmsub_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint8mf4_t __riscv_vnmsub_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint8mf2_t __riscv_vnmsub_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint8m1_t __riscv_vnmsub_vx_u8m1_m (vbool8_t mask, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint8m2_t __riscv_vnmsub_vx_u8m2_m (vbool4_t mask, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint8m4_t __riscv_vnmsub_vx_u8m4_m (vbool2_t mask, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint8m8_t __riscv_vnmsub_vx_u8m8_m (vbool1_t mask, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl);
vuint16mf4_t __riscv_vnmsub_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint16mf2_t __riscv_vnmsub_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint16m1_t __riscv_vnmsub_vx_u16m1_m (vbool16_t mask, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint16m2_t __riscv_vnmsub_vx_u16m2_m (vbool8_t mask, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint16m4_t __riscv_vnmsub_vx_u16m4_m (vbool4_t mask, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint16m8_t __riscv_vnmsub_vx_u16m8_m (vbool2_t mask, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl);
vuint32mf2_t __riscv_vnmsub_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint32m1_t __riscv_vnmsub_vx_u32m1_m (vbool32_t mask, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint32m2_t __riscv_vnmsub_vx_u32m2_m (vbool16_t mask, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint32m4_t __riscv_vnmsub_vx_u32m4_m (vbool8_t mask, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint32m8_t __riscv_vnmsub_vx_u32m8_m (vbool4_t mask, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl);
vuint64m1_t __riscv_vnmsub_vx_u64m1_m (vbool64_t mask, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl);
vuint64m2_t __riscv_vnmsub_vx_u64m2_m (vbool32_t mask, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl);
vuint64m4_t __riscv_vnmsub_vx_u64m4_m (vbool16_t mask, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl);
vuint64m8_t __riscv_vnmsub_vx_u64m8_m (vbool8_t mask, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl);
19.1. vwmaccu.vv
- Mnemonic
vwmaccu.vv vd, vs1, vs2, vm
- Encoding
- Description
-
Widening unsigned-integer multiply-add, overwrite addend. vd[i] = +(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmaccu_vv.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwmaccu_vv_u16mf4 (vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint16mf2_t __riscv_vwmaccu_vv_u16mf2 (vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint16m1_t __riscv_vwmaccu_vv_u16m1 (vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint16m2_t __riscv_vwmaccu_vv_u16m2 (vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint16m4_t __riscv_vwmaccu_vv_u16m4 (vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint16m8_t __riscv_vwmaccu_vv_u16m8 (vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint32mf2_t __riscv_vwmaccu_vv_u32mf2 (vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint32m1_t __riscv_vwmaccu_vv_u32m1 (vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint32m2_t __riscv_vwmaccu_vv_u32m2 (vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint32m4_t __riscv_vwmaccu_vv_u32m4 (vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint32m8_t __riscv_vwmaccu_vv_u32m8 (vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint64m1_t __riscv_vwmaccu_vv_u64m1 (vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint64m2_t __riscv_vwmaccu_vv_u64m2 (vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint64m4_t __riscv_vwmaccu_vv_u64m4 (vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint64m8_t __riscv_vwmaccu_vv_u64m8 (vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vuint16mf4_t __riscv_vwmaccu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vuint16mf2_t __riscv_vwmaccu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vuint16m1_t __riscv_vwmaccu_vv_u16m1_m (vbool16_t mask, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vuint16m2_t __riscv_vwmaccu_vv_u16m2_m (vbool8_t mask, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vuint16m4_t __riscv_vwmaccu_vv_u16m4_m (vbool4_t mask, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vuint16m8_t __riscv_vwmaccu_vv_u16m8_m (vbool2_t mask, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vuint32mf2_t __riscv_vwmaccu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vuint32m1_t __riscv_vwmaccu_vv_u32m1_m (vbool32_t mask, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vuint32m2_t __riscv_vwmaccu_vv_u32m2_m (vbool16_t mask, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vuint32m4_t __riscv_vwmaccu_vv_u32m4_m (vbool8_t mask, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vuint32m8_t __riscv_vwmaccu_vv_u32m8_m (vbool4_t mask, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vuint64m1_t __riscv_vwmaccu_vv_u64m1_m (vbool64_t mask, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vuint64m2_t __riscv_vwmaccu_vv_u64m2_m (vbool32_t mask, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vuint64m4_t __riscv_vwmaccu_vv_u64m4_m (vbool16_t mask, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vuint64m8_t __riscv_vwmaccu_vv_u64m8_m (vbool8_t mask, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl);
19.2. vwmaccu.vx
- Mnemonic
vwmaccu.vx vd, rs1, vs2, vm
- Encoding
- Description
-
Widening unsigned-integer multiply-add, overwrite addend. vd[i] = +(x[rs1] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmaccu_vx.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vwmaccu_vx_u16mf4 (vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint16mf2_t __riscv_vwmaccu_vx_u16mf2 (vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint16m1_t __riscv_vwmaccu_vx_u16m1 (vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint16m2_t __riscv_vwmaccu_vx_u16m2 (vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint16m4_t __riscv_vwmaccu_vx_u16m4 (vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint16m8_t __riscv_vwmaccu_vx_u16m8 (vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint32mf2_t __riscv_vwmaccu_vx_u32mf2 (vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint32m1_t __riscv_vwmaccu_vx_u32m1 (vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint32m2_t __riscv_vwmaccu_vx_u32m2 (vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint32m4_t __riscv_vwmaccu_vx_u32m4 (vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint32m8_t __riscv_vwmaccu_vx_u32m8 (vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint64m1_t __riscv_vwmaccu_vx_u64m1 (vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint64m2_t __riscv_vwmaccu_vx_u64m2 (vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint64m4_t __riscv_vwmaccu_vx_u64m4 (vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint64m8_t __riscv_vwmaccu_vx_u64m8 (vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
vuint16mf4_t __riscv_vwmaccu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl);
vuint16mf2_t __riscv_vwmaccu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl);
vuint16m1_t __riscv_vwmaccu_vx_u16m1_m (vbool16_t mask, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl);
vuint16m2_t __riscv_vwmaccu_vx_u16m2_m (vbool8_t mask, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl);
vuint16m4_t __riscv_vwmaccu_vx_u16m4_m (vbool4_t mask, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl);
vuint16m8_t __riscv_vwmaccu_vx_u16m8_m (vbool2_t mask, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl);
vuint32mf2_t __riscv_vwmaccu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl);
vuint32m1_t __riscv_vwmaccu_vx_u32m1_m (vbool32_t mask, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl);
vuint32m2_t __riscv_vwmaccu_vx_u32m2_m (vbool16_t mask, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl);
vuint32m4_t __riscv_vwmaccu_vx_u32m4_m (vbool8_t mask, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl);
vuint32m8_t __riscv_vwmaccu_vx_u32m8_m (vbool4_t mask, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl);
vuint64m1_t __riscv_vwmaccu_vx_u64m1_m (vbool64_t mask, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl);
vuint64m2_t __riscv_vwmaccu_vx_u64m2_m (vbool32_t mask, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl);
vuint64m4_t __riscv_vwmaccu_vx_u64m4_m (vbool16_t mask, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl);
vuint64m8_t __riscv_vwmaccu_vx_u64m8_m (vbool8_t mask, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl);
19.3. vwmacc.vv
- Mnemonic
vwmacc.vv vd, vs1, vs2, vm
- Encoding
- Description
-
Widening signed-integer multiply-add, overwrite addend. vd[i] = +(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmacc_vv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmacc_vv_i16mf4 (vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmacc_vv_i16mf2 (vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmacc_vv_i16m1 (vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmacc_vv_i16m2 (vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmacc_vv_i16m4 (vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmacc_vv_i16m8 (vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmacc_vv_i32mf2 (vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmacc_vv_i32m1 (vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmacc_vv_i32m2 (vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmacc_vv_i32m4 (vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmacc_vv_i32m8 (vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmacc_vv_i64m1 (vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmacc_vv_i64m2 (vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmacc_vv_i64m4 (vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmacc_vv_i64m8 (vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
vint16mf4_t __riscv_vwmacc_vv_i16mf4_m (vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmacc_vv_i16mf2_m (vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmacc_vv_i16m1_m (vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmacc_vv_i16m2_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmacc_vv_i16m4_m (vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmacc_vv_i16m8_m (vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmacc_vv_i32mf2_m (vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmacc_vv_i32m1_m (vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmacc_vv_i32m2_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmacc_vv_i32m4_m (vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmacc_vv_i32m8_m (vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmacc_vv_i64m1_m (vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmacc_vv_i64m2_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmacc_vv_i64m4_m (vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmacc_vv_i64m8_m (vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl);
19.4. vwmacc.vx
- Mnemonic
vwmacc.vx vd, rs1, vs2, vm
- Encoding
- Description
-
Widening signed-integer multiply-add, overwrite addend. vd[i] = +(x[rs1] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmacc_vx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmacc_vx_i16mf4 (vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmacc_vx_i16mf2 (vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmacc_vx_i16m1 (vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmacc_vx_i16m2 (vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmacc_vx_i16m4 (vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmacc_vx_i16m8 (vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmacc_vx_i32mf2 (vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmacc_vx_i32m1 (vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmacc_vx_i32m2 (vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmacc_vx_i32m4 (vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmacc_vx_i32m8 (vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmacc_vx_i64m1 (vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmacc_vx_i64m2 (vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmacc_vx_i64m4 (vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmacc_vx_i64m8 (vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
vint16mf4_t __riscv_vwmacc_vx_i16mf4_m (vbool64_t mask, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmacc_vx_i16mf2_m (vbool32_t mask, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmacc_vx_i16m1_m (vbool16_t mask, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmacc_vx_i16m2_m (vbool8_t mask, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmacc_vx_i16m4_m (vbool4_t mask, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmacc_vx_i16m8_m (vbool2_t mask, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmacc_vx_i32mf2_m (vbool64_t mask, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmacc_vx_i32m1_m (vbool32_t mask, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmacc_vx_i32m2_m (vbool16_t mask, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmacc_vx_i32m4_m (vbool8_t mask, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmacc_vx_i32m8_m (vbool4_t mask, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmacc_vx_i64m1_m (vbool64_t mask, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmacc_vx_i64m2_m (vbool32_t mask, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmacc_vx_i64m4_m (vbool16_t mask, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmacc_vx_i64m8_m (vbool8_t mask, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl);
19.5. vwmaccsu.vv
- Mnemonic
vwmaccsu.vv vd, vs1, vs2, vm
- Encoding
- Description
-
Widening signed-unsigned-integer multiply-add, overwrite addend. vd[i] = +(signed(vs1[i]) * unsigned(vs2[i])) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmaccsu_vv.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmaccsu_vv_i16mf4 (vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmaccsu_vv_i16mf2 (vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmaccsu_vv_i16m1 (vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmaccsu_vv_i16m2 (vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmaccsu_vv_i16m4 (vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmaccsu_vv_i16m8 (vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmaccsu_vv_i32mf2 (vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmaccsu_vv_i32m1 (vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmaccsu_vv_i32m2 (vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmaccsu_vv_i32m4 (vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmaccsu_vv_i32m8 (vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmaccsu_vv_i64m1 (vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmaccsu_vv_i64m2 (vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmaccsu_vv_i64m4 (vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmaccsu_vv_i64m8 (vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl);
vint16mf4_t __riscv_vwmaccsu_vv_i16mf4_m (vbool64_t mask, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmaccsu_vv_i16mf2_m (vbool32_t mask, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmaccsu_vv_i16m1_m (vbool16_t mask, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmaccsu_vv_i16m2_m (vbool8_t mask, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmaccsu_vv_i16m4_m (vbool4_t mask, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmaccsu_vv_i16m8_m (vbool2_t mask, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmaccsu_vv_i32mf2_m (vbool64_t mask, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmaccsu_vv_i32m1_m (vbool32_t mask, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmaccsu_vv_i32m2_m (vbool16_t mask, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmaccsu_vv_i32m4_m (vbool8_t mask, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmaccsu_vv_i32m8_m (vbool4_t mask, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmaccsu_vv_i64m1_m (vbool64_t mask, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmaccsu_vv_i64m2_m (vbool32_t mask, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmaccsu_vv_i64m4_m (vbool16_t mask, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmaccsu_vv_i64m8_m (vbool8_t mask, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl);
19.6. vwmaccsu.vx
- Mnemonic
vwmaccsu.vx vd, rs1, vs2, vm
- Encoding
- Description
-
Widening signed-unsigned-integer multiply-add, overwrite addend. vd[i] = +(signed(x[rs1]) * unsigned(vs2[i])) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmaccsu_vx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmaccsu_vx_i16mf4 (vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmaccsu_vx_i16mf2 (vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmaccsu_vx_i16m1 (vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmaccsu_vx_i16m2 (vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmaccsu_vx_i16m4 (vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmaccsu_vx_i16m8 (vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmaccsu_vx_i32mf2 (vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmaccsu_vx_i32m1 (vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmaccsu_vx_i32m2 (vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmaccsu_vx_i32m4 (vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmaccsu_vx_i32m8 (vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmaccsu_vx_i64m1 (vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmaccsu_vx_i64m2 (vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmaccsu_vx_i64m4 (vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmaccsu_vx_i64m8 (vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl);
vint16mf4_t __riscv_vwmaccsu_vx_i16mf4_m (vbool64_t mask, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmaccsu_vx_i16mf2_m (vbool32_t mask, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmaccsu_vx_i16m1_m (vbool16_t mask, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmaccsu_vx_i16m2_m (vbool8_t mask, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmaccsu_vx_i16m4_m (vbool4_t mask, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmaccsu_vx_i16m8_m (vbool2_t mask, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmaccsu_vx_i32mf2_m (vbool64_t mask, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmaccsu_vx_i32m1_m (vbool32_t mask, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmaccsu_vx_i32m2_m (vbool16_t mask, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmaccsu_vx_i32m4_m (vbool8_t mask, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmaccsu_vx_i32m8_m (vbool4_t mask, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmaccsu_vx_i64m1_m (vbool64_t mask, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmaccsu_vx_i64m2_m (vbool32_t mask, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmaccsu_vx_i64m4_m (vbool16_t mask, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmaccsu_vx_i64m8_m (vbool8_t mask, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl);
19.7. vwmaccus.vx
- Mnemonic
vwmaccus.vx vd, rs1, vs2, vm
- Encoding
- Description
-
Widening unsigned-signed-integer multiply-add, overwrite addend. vd[i] = +(unsigned(x[rs1]) * signed(vs2[i])) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwmaccus_vx.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vwmaccus_vx_i16mf4 (vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmaccus_vx_i16mf2 (vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmaccus_vx_i16m1 (vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmaccus_vx_i16m2 (vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmaccus_vx_i16m4 (vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmaccus_vx_i16m8 (vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmaccus_vx_i32mf2 (vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmaccus_vx_i32m1 (vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmaccus_vx_i32m2 (vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmaccus_vx_i32m4 (vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmaccus_vx_i32m8 (vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmaccus_vx_i64m1 (vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmaccus_vx_i64m2 (vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmaccus_vx_i64m4 (vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmaccus_vx_i64m8 (vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl);
vint16mf4_t __riscv_vwmaccus_vx_i16mf4_m (vbool64_t mask, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl);
vint16mf2_t __riscv_vwmaccus_vx_i16mf2_m (vbool32_t mask, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl);
vint16m1_t __riscv_vwmaccus_vx_i16m1_m (vbool16_t mask, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl);
vint16m2_t __riscv_vwmaccus_vx_i16m2_m (vbool8_t mask, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl);
vint16m4_t __riscv_vwmaccus_vx_i16m4_m (vbool4_t mask, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl);
vint16m8_t __riscv_vwmaccus_vx_i16m8_m (vbool2_t mask, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl);
vint32mf2_t __riscv_vwmaccus_vx_i32mf2_m (vbool64_t mask, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl);
vint32m1_t __riscv_vwmaccus_vx_i32m1_m (vbool32_t mask, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl);
vint32m2_t __riscv_vwmaccus_vx_i32m2_m (vbool16_t mask, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl);
vint32m4_t __riscv_vwmaccus_vx_i32m4_m (vbool8_t mask, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl);
vint32m8_t __riscv_vwmaccus_vx_i32m8_m (vbool4_t mask, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl);
vint64m1_t __riscv_vwmaccus_vx_i64m1_m (vbool64_t mask, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl);
vint64m2_t __riscv_vwmaccus_vx_i64m2_m (vbool32_t mask, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl);
vint64m4_t __riscv_vwmaccus_vx_i64m4_m (vbool16_t mask, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl);
vint64m8_t __riscv_vwmaccus_vx_i64m8_m (vbool8_t mask, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl);
20. Vector Integer Merge Instructions
20.1. vmerge.vvm
- Mnemonic
vmerge.vvm vd, vs2, vs1, v0
- Encoding
- Description
-
vd[i] = v0.mask[i] ? vs1[i] : vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmerge_vvm.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmerge_vvm_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl);
vint8mf4_t __riscv_vmerge_vvm_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl);
vint8mf2_t __riscv_vmerge_vvm_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl);
vint8m1_t __riscv_vmerge_vvm_i8m1 (vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl);
vint8m2_t __riscv_vmerge_vvm_i8m2 (vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl);
vint8m4_t __riscv_vmerge_vvm_i8m4 (vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl);
vint8m8_t __riscv_vmerge_vvm_i8m8 (vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl);
vint16mf4_t __riscv_vmerge_vvm_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl);
vint16mf2_t __riscv_vmerge_vvm_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl);
vint16m1_t __riscv_vmerge_vvm_i16m1 (vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl);
vint16m2_t __riscv_vmerge_vvm_i16m2 (vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl);
vint16m4_t __riscv_vmerge_vvm_i16m4 (vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl);
vint16m8_t __riscv_vmerge_vvm_i16m8 (vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl);
vint32mf2_t __riscv_vmerge_vvm_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl);
vint32m1_t __riscv_vmerge_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl);
vint32m2_t __riscv_vmerge_vvm_i32m2 (vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl);
vint32m4_t __riscv_vmerge_vvm_i32m4 (vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl);
vint32m8_t __riscv_vmerge_vvm_i32m8 (vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl);
vint64m1_t __riscv_vmerge_vvm_i64m1 (vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl);
vint64m2_t __riscv_vmerge_vvm_i64m2 (vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl);
vint64m4_t __riscv_vmerge_vvm_i64m4 (vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl);
vint64m8_t __riscv_vmerge_vvm_i64m8 (vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl);
vuint8mf8_t __riscv_vmerge_vvm_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl);
vuint8mf4_t __riscv_vmerge_vvm_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl);
vuint8mf2_t __riscv_vmerge_vvm_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl);
vuint8m1_t __riscv_vmerge_vvm_u8m1 (vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl);
vuint8m2_t __riscv_vmerge_vvm_u8m2 (vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl);
vuint8m4_t __riscv_vmerge_vvm_u8m4 (vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl);
vuint8m8_t __riscv_vmerge_vvm_u8m8 (vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl);
vuint16mf4_t __riscv_vmerge_vvm_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl);
vuint16mf2_t __riscv_vmerge_vvm_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl);
vuint16m1_t __riscv_vmerge_vvm_u16m1 (vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl);
vuint16m2_t __riscv_vmerge_vvm_u16m2 (vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl);
vuint16m4_t __riscv_vmerge_vvm_u16m4 (vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl);
vuint16m8_t __riscv_vmerge_vvm_u16m8 (vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl);
vuint32mf2_t __riscv_vmerge_vvm_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl);
vuint32m1_t __riscv_vmerge_vvm_u32m1 (vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl);
vuint32m2_t __riscv_vmerge_vvm_u32m2 (vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl);
vuint32m4_t __riscv_vmerge_vvm_u32m4 (vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl);
vuint32m8_t __riscv_vmerge_vvm_u32m8 (vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl);
vuint64m1_t __riscv_vmerge_vvm_u64m1 (vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl);
vuint64m2_t __riscv_vmerge_vvm_u64m2 (vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl);
vuint64m4_t __riscv_vmerge_vvm_u64m4 (vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl);
vuint64m8_t __riscv_vmerge_vvm_u64m8 (vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl);
vfloat16mf4_t __riscv_vmerge_vvm_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl);
vfloat16mf2_t __riscv_vmerge_vvm_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl);
vfloat16m1_t __riscv_vmerge_vvm_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl);
vfloat16m2_t __riscv_vmerge_vvm_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl);
vfloat16m4_t __riscv_vmerge_vvm_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl);
vfloat16m8_t __riscv_vmerge_vvm_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl);
vfloat32mf2_t __riscv_vmerge_vvm_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl);
vfloat32m1_t __riscv_vmerge_vvm_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl);
vfloat32m2_t __riscv_vmerge_vvm_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl);
vfloat32m4_t __riscv_vmerge_vvm_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl);
vfloat32m8_t __riscv_vmerge_vvm_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl);
vfloat64m1_t __riscv_vmerge_vvm_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl);
vfloat64m2_t __riscv_vmerge_vvm_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl);
vfloat64m4_t __riscv_vmerge_vvm_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl);
vfloat64m8_t __riscv_vmerge_vvm_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl);
20.2. vmerge.vxm
- Mnemonic
vmerge.vxm vd, vs2, rs1, v0
- Encoding
- Description
-
vd[i] = v0.mask[i] ? x[rs1] : vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmerge_vxm.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmerge_vxm_i8mf8 (vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl);
vint8mf4_t __riscv_vmerge_vxm_i8mf4 (vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl);
vint8mf2_t __riscv_vmerge_vxm_i8mf2 (vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl);
vint8m1_t __riscv_vmerge_vxm_i8m1 (vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl);
vint8m2_t __riscv_vmerge_vxm_i8m2 (vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl);
vint8m4_t __riscv_vmerge_vxm_i8m4 (vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl);
vint8m8_t __riscv_vmerge_vxm_i8m8 (vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl);
vint16mf4_t __riscv_vmerge_vxm_i16mf4 (vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl);
vint16mf2_t __riscv_vmerge_vxm_i16mf2 (vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl);
vint16m1_t __riscv_vmerge_vxm_i16m1 (vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl);
vint16m2_t __riscv_vmerge_vxm_i16m2 (vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl);
vint16m4_t __riscv_vmerge_vxm_i16m4 (vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl);
vint16m8_t __riscv_vmerge_vxm_i16m8 (vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl);
vint32mf2_t __riscv_vmerge_vxm_i32mf2 (vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl);
vint32m1_t __riscv_vmerge_vxm_i32m1 (vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl);
vint32m2_t __riscv_vmerge_vxm_i32m2 (vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl);
vint32m4_t __riscv_vmerge_vxm_i32m4 (vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl);
vint32m8_t __riscv_vmerge_vxm_i32m8 (vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl);
vint64m1_t __riscv_vmerge_vxm_i64m1 (vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl);
vint64m2_t __riscv_vmerge_vxm_i64m2 (vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl);
vint64m4_t __riscv_vmerge_vxm_i64m4 (vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl);
vint64m8_t __riscv_vmerge_vxm_i64m8 (vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl);
vuint8mf8_t __riscv_vmerge_vxm_u8mf8 (vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl);
vuint8mf4_t __riscv_vmerge_vxm_u8mf4 (vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl);
vuint8mf2_t __riscv_vmerge_vxm_u8mf2 (vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl);
vuint8m1_t __riscv_vmerge_vxm_u8m1 (vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl);
vuint8m2_t __riscv_vmerge_vxm_u8m2 (vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl);
vuint8m4_t __riscv_vmerge_vxm_u8m4 (vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl);
vuint8m8_t __riscv_vmerge_vxm_u8m8 (vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl);
vuint16mf4_t __riscv_vmerge_vxm_u16mf4 (vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl);
vuint16mf2_t __riscv_vmerge_vxm_u16mf2 (vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl);
vuint16m1_t __riscv_vmerge_vxm_u16m1 (vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl);
vuint16m2_t __riscv_vmerge_vxm_u16m2 (vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl);
vuint16m4_t __riscv_vmerge_vxm_u16m4 (vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl);
vuint16m8_t __riscv_vmerge_vxm_u16m8 (vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl);
vuint32mf2_t __riscv_vmerge_vxm_u32mf2 (vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl);
vuint32m1_t __riscv_vmerge_vxm_u32m1 (vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl);
vuint32m2_t __riscv_vmerge_vxm_u32m2 (vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl);
vuint32m4_t __riscv_vmerge_vxm_u32m4 (vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl);
vuint32m8_t __riscv_vmerge_vxm_u32m8 (vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl);
vuint64m1_t __riscv_vmerge_vxm_u64m1 (vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl);
vuint64m2_t __riscv_vmerge_vxm_u64m2 (vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl);
vuint64m4_t __riscv_vmerge_vxm_u64m4 (vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl);
vuint64m8_t __riscv_vmerge_vxm_u64m8 (vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl);
20.3. vmerge.vim
- Mnemonic
vmerge.vim vd, vs2, imm, v0
- Encoding
- Description
-
vd[i] = v0.mask[i] ? imm : vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmerge_vim.h
- Intrinsic Functions
Details
21. Vector Integer Move Instructions
21.1. vmv.v.v
- Mnemonic
vmv.v.v vd, vs1
- Encoding
- Description
-
vd[i] = vs1[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv_v_v.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmv_v_v_i8mf8 (vint8mf8_t src, size_t vl);
vint8mf4_t __riscv_vmv_v_v_i8mf4 (vint8mf4_t src, size_t vl);
vint8mf2_t __riscv_vmv_v_v_i8mf2 (vint8mf2_t src, size_t vl);
vint8m1_t __riscv_vmv_v_v_i8m1 (vint8m1_t src, size_t vl);
vint8m2_t __riscv_vmv_v_v_i8m2 (vint8m2_t src, size_t vl);
vint8m4_t __riscv_vmv_v_v_i8m4 (vint8m4_t src, size_t vl);
vint8m8_t __riscv_vmv_v_v_i8m8 (vint8m8_t src, size_t vl);
vint16mf4_t __riscv_vmv_v_v_i16mf4 (vint16mf4_t src, size_t vl);
vint16mf2_t __riscv_vmv_v_v_i16mf2 (vint16mf2_t src, size_t vl);
vint16m1_t __riscv_vmv_v_v_i16m1 (vint16m1_t src, size_t vl);
vint16m2_t __riscv_vmv_v_v_i16m2 (vint16m2_t src, size_t vl);
vint16m4_t __riscv_vmv_v_v_i16m4 (vint16m4_t src, size_t vl);
vint16m8_t __riscv_vmv_v_v_i16m8 (vint16m8_t src, size_t vl);
vint32mf2_t __riscv_vmv_v_v_i32mf2 (vint32mf2_t src, size_t vl);
vint32m1_t __riscv_vmv_v_v_i32m1 (vint32m1_t src, size_t vl);
vint32m2_t __riscv_vmv_v_v_i32m2 (vint32m2_t src, size_t vl);
vint32m4_t __riscv_vmv_v_v_i32m4 (vint32m4_t src, size_t vl);
vint32m8_t __riscv_vmv_v_v_i32m8 (vint32m8_t src, size_t vl);
vint64m1_t __riscv_vmv_v_v_i64m1 (vint64m1_t src, size_t vl);
vint64m2_t __riscv_vmv_v_v_i64m2 (vint64m2_t src, size_t vl);
vint64m4_t __riscv_vmv_v_v_i64m4 (vint64m4_t src, size_t vl);
vint64m8_t __riscv_vmv_v_v_i64m8 (vint64m8_t src, size_t vl);
vuint8mf8_t __riscv_vmv_v_v_u8mf8 (vuint8mf8_t src, size_t vl);
vuint8mf4_t __riscv_vmv_v_v_u8mf4 (vuint8mf4_t src, size_t vl);
vuint8mf2_t __riscv_vmv_v_v_u8mf2 (vuint8mf2_t src, size_t vl);
vuint8m1_t __riscv_vmv_v_v_u8m1 (vuint8m1_t src, size_t vl);
vuint8m2_t __riscv_vmv_v_v_u8m2 (vuint8m2_t src, size_t vl);
vuint8m4_t __riscv_vmv_v_v_u8m4 (vuint8m4_t src, size_t vl);
vuint8m8_t __riscv_vmv_v_v_u8m8 (vuint8m8_t src, size_t vl);
vuint16mf4_t __riscv_vmv_v_v_u16mf4 (vuint16mf4_t src, size_t vl);
vuint16mf2_t __riscv_vmv_v_v_u16mf2 (vuint16mf2_t src, size_t vl);
vuint16m1_t __riscv_vmv_v_v_u16m1 (vuint16m1_t src, size_t vl);
vuint16m2_t __riscv_vmv_v_v_u16m2 (vuint16m2_t src, size_t vl);
vuint16m4_t __riscv_vmv_v_v_u16m4 (vuint16m4_t src, size_t vl);
vuint16m8_t __riscv_vmv_v_v_u16m8 (vuint16m8_t src, size_t vl);
vuint32mf2_t __riscv_vmv_v_v_u32mf2 (vuint32mf2_t src, size_t vl);
vuint32m1_t __riscv_vmv_v_v_u32m1 (vuint32m1_t src, size_t vl);
vuint32m2_t __riscv_vmv_v_v_u32m2 (vuint32m2_t src, size_t vl);
vuint32m4_t __riscv_vmv_v_v_u32m4 (vuint32m4_t src, size_t vl);
vuint32m8_t __riscv_vmv_v_v_u32m8 (vuint32m8_t src, size_t vl);
vuint64m1_t __riscv_vmv_v_v_u64m1 (vuint64m1_t src, size_t vl);
vuint64m2_t __riscv_vmv_v_v_u64m2 (vuint64m2_t src, size_t vl);
vuint64m4_t __riscv_vmv_v_v_u64m4 (vuint64m4_t src, size_t vl);
vuint64m8_t __riscv_vmv_v_v_u64m8 (vuint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl);
vfloat16mf2_t __riscv_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl);
vfloat16m1_t __riscv_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl);
vfloat16m2_t __riscv_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl);
vfloat16m4_t __riscv_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl);
vfloat16m8_t __riscv_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl);
vfloat32mf2_t __riscv_vmv_v_v_f32mf2 (vfloat32mf2_t src, size_t vl);
vfloat32m1_t __riscv_vmv_v_v_f32m1 (vfloat32m1_t src, size_t vl);
vfloat32m2_t __riscv_vmv_v_v_f32m2 (vfloat32m2_t src, size_t vl);
vfloat32m4_t __riscv_vmv_v_v_f32m4 (vfloat32m4_t src, size_t vl);
vfloat32m8_t __riscv_vmv_v_v_f32m8 (vfloat32m8_t src, size_t vl);
vfloat64m1_t __riscv_vmv_v_v_f64m1 (vfloat64m1_t src, size_t vl);
vfloat64m2_t __riscv_vmv_v_v_f64m2 (vfloat64m2_t src, size_t vl);
vfloat64m4_t __riscv_vmv_v_v_f64m4 (vfloat64m4_t src, size_t vl);
vfloat64m8_t __riscv_vmv_v_v_f64m8 (vfloat64m8_t src, size_t vl);
21.2. vmv.v.x
- Mnemonic
vmv.v.x vd, rs1
- Encoding
- Description
-
vd[i] = x[rs1]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv_v_x.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vmv_v_x_i8mf8 (int8_t src, size_t vl);
vint8mf4_t __riscv_vmv_v_x_i8mf4 (int8_t src, size_t vl);
vint8mf2_t __riscv_vmv_v_x_i8mf2 (int8_t src, size_t vl);
vint8m1_t __riscv_vmv_v_x_i8m1 (int8_t src, size_t vl);
vint8m2_t __riscv_vmv_v_x_i8m2 (int8_t src, size_t vl);
vint8m4_t __riscv_vmv_v_x_i8m4 (int8_t src, size_t vl);
vint8m8_t __riscv_vmv_v_x_i8m8 (int8_t src, size_t vl);
vint16mf4_t __riscv_vmv_v_x_i16mf4 (int16_t src, size_t vl);
vint16mf2_t __riscv_vmv_v_x_i16mf2 (int16_t src, size_t vl);
vint16m1_t __riscv_vmv_v_x_i16m1 (int16_t src, size_t vl);
vint16m2_t __riscv_vmv_v_x_i16m2 (int16_t src, size_t vl);
vint16m4_t __riscv_vmv_v_x_i16m4 (int16_t src, size_t vl);
vint16m8_t __riscv_vmv_v_x_i16m8 (int16_t src, size_t vl);
vint32mf2_t __riscv_vmv_v_x_i32mf2 (int32_t src, size_t vl);
vint32m1_t __riscv_vmv_v_x_i32m1 (int32_t src, size_t vl);
vint32m2_t __riscv_vmv_v_x_i32m2 (int32_t src, size_t vl);
vint32m4_t __riscv_vmv_v_x_i32m4 (int32_t src, size_t vl);
vint32m8_t __riscv_vmv_v_x_i32m8 (int32_t src, size_t vl);
vint64m1_t __riscv_vmv_v_x_i64m1 (int64_t src, size_t vl);
vint64m2_t __riscv_vmv_v_x_i64m2 (int64_t src, size_t vl);
vint64m4_t __riscv_vmv_v_x_i64m4 (int64_t src, size_t vl);
vint64m8_t __riscv_vmv_v_x_i64m8 (int64_t src, size_t vl);
vuint8mf8_t __riscv_vmv_v_x_u8mf8 (uint8_t src, size_t vl);
vuint8mf4_t __riscv_vmv_v_x_u8mf4 (uint8_t src, size_t vl);
vuint8mf2_t __riscv_vmv_v_x_u8mf2 (uint8_t src, size_t vl);
vuint8m1_t __riscv_vmv_v_x_u8m1 (uint8_t src, size_t vl);
vuint8m2_t __riscv_vmv_v_x_u8m2 (uint8_t src, size_t vl);
vuint8m4_t __riscv_vmv_v_x_u8m4 (uint8_t src, size_t vl);
vuint8m8_t __riscv_vmv_v_x_u8m8 (uint8_t src, size_t vl);
vuint16mf4_t __riscv_vmv_v_x_u16mf4 (uint16_t src, size_t vl);
vuint16mf2_t __riscv_vmv_v_x_u16mf2 (uint16_t src, size_t vl);
vuint16m1_t __riscv_vmv_v_x_u16m1 (uint16_t src, size_t vl);
vuint16m2_t __riscv_vmv_v_x_u16m2 (uint16_t src, size_t vl);
vuint16m4_t __riscv_vmv_v_x_u16m4 (uint16_t src, size_t vl);
vuint16m8_t __riscv_vmv_v_x_u16m8 (uint16_t src, size_t vl);
vuint32mf2_t __riscv_vmv_v_x_u32mf2 (uint32_t src, size_t vl);
vuint32m1_t __riscv_vmv_v_x_u32m1 (uint32_t src, size_t vl);
vuint32m2_t __riscv_vmv_v_x_u32m2 (uint32_t src, size_t vl);
vuint32m4_t __riscv_vmv_v_x_u32m4 (uint32_t src, size_t vl);
vuint32m8_t __riscv_vmv_v_x_u32m8 (uint32_t src, size_t vl);
vuint64m1_t __riscv_vmv_v_x_u64m1 (uint64_t src, size_t vl);
vuint64m2_t __riscv_vmv_v_x_u64m2 (uint64_t src, size_t vl);
vuint64m4_t __riscv_vmv_v_x_u64m4 (uint64_t src, size_t vl);
vuint64m8_t __riscv_vmv_v_x_u64m8 (uint64_t src, size_t vl);
21.3. vmv.v.i
- Mnemonic
vmv.v.i vd, imm
- Encoding
- Description
-
vd[i] = imm
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv_v_i.h
- Intrinsic Functions
Details
22. Vector Fixed-Point Arithmetic Instructions
22.1. Vector Single-Width Saturating Add and Subtract
22.2. vsaddu.vv
- Mnemonic
vsaddu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Saturating adds of unsigned integers, vector-vector
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vsaddu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vsaddu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vsaddu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vsaddu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vsaddu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vsaddu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vsaddu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vsaddu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vsaddu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vsaddu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vsaddu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vsaddu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vsaddu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vsaddu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vsaddu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vsaddu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vsaddu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vsaddu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vsaddu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vsaddu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vsaddu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vsaddu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vsaddu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vsaddu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vsaddu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vsaddu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vsaddu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vsaddu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vsaddu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vsaddu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vsaddu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vsaddu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vsaddu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vsaddu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vsaddu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vsaddu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vsaddu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vsaddu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vsaddu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vsaddu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vsaddu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vsaddu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vsaddu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vsaddu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
22.3. vsaddu.vx
- Mnemonic
vsaddu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Saturating adds of unsigned integers, vector-scalar
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vsaddu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vsaddu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vsaddu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vsaddu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vsaddu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vsaddu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vsaddu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vsaddu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vsaddu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vsaddu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vsaddu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vsaddu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vsaddu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vsaddu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vsaddu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vsaddu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vsaddu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vsaddu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vsaddu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vsaddu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vsaddu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vsaddu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vuint8mf8_t __riscv_vsaddu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vsaddu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vsaddu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vsaddu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vsaddu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vsaddu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vsaddu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vsaddu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vsaddu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vsaddu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vsaddu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vsaddu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vsaddu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vsaddu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vsaddu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vsaddu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vsaddu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vsaddu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vsaddu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vsaddu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vsaddu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vsaddu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
22.4. vsaddu.vi
- Mnemonic
vsaddu.vi vd, vs2, imm, vm
- Encoding
- Description
-
Saturating adds of unsigned integers, vector-immediate
- Intrinsic Functions
Details
22.5. vsadd.vv
- Mnemonic
vsadd.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Saturating adds of signed integers, vector-vector
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsadd_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vsadd_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vsadd_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vsadd_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vsadd_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vsadd_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vsadd_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vsadd_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vsadd_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vsadd_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vsadd_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vsadd_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vsadd_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vsadd_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vsadd_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vsadd_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vsadd_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vsadd_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vsadd_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vsadd_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vsadd_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vsadd_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vsadd_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vsadd_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vsadd_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vsadd_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vsadd_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vsadd_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vsadd_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vsadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vsadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vsadd_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vsadd_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vsadd_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vsadd_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vsadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vsadd_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vsadd_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vsadd_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vsadd_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vsadd_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vsadd_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vsadd_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vsadd_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
22.6. vsadd.vx
- Mnemonic
vsadd.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Saturating adds of signed integers, vector-scalar
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsadd_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vsadd_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vsadd_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vsadd_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vsadd_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vsadd_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vsadd_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vsadd_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vsadd_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vsadd_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vsadd_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vsadd_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vsadd_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vsadd_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vsadd_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vsadd_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vsadd_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vsadd_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vsadd_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vsadd_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vsadd_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vsadd_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vint8mf8_t __riscv_vsadd_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vsadd_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vsadd_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vsadd_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vsadd_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vsadd_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vsadd_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vsadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vsadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vsadd_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vsadd_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vsadd_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vsadd_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vsadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vsadd_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vsadd_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vsadd_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vsadd_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vsadd_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vsadd_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vsadd_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vsadd_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
22.7. vsadd.vi
- Mnemonic
vsadd.vi vd, vs2, imm, vm
- Encoding
- Description
-
Saturating adds of signed integers, vector-immediate
- Intrinsic Functions
Details
22.8. vssubu.vv
- Mnemonic
vssubu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Saturating subtract of unsigned integers, vector-vector
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vssubu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vssubu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vssubu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vssubu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vssubu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vssubu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vssubu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vssubu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vssubu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vssubu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vssubu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vssubu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vssubu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vssubu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vssubu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vssubu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vssubu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vssubu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vssubu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vssubu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vssubu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vssubu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, size_t vl);
vuint8mf8_t __riscv_vssubu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl);
vuint8mf4_t __riscv_vssubu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl);
vuint8mf2_t __riscv_vssubu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl);
vuint8m1_t __riscv_vssubu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl);
vuint8m2_t __riscv_vssubu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl);
vuint8m4_t __riscv_vssubu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl);
vuint8m8_t __riscv_vssubu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl);
vuint16mf4_t __riscv_vssubu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl);
vuint16mf2_t __riscv_vssubu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl);
vuint16m1_t __riscv_vssubu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl);
vuint16m2_t __riscv_vssubu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl);
vuint16m4_t __riscv_vssubu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl);
vuint16m8_t __riscv_vssubu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl);
vuint32mf2_t __riscv_vssubu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl);
vuint32m1_t __riscv_vssubu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl);
vuint32m2_t __riscv_vssubu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl);
vuint32m4_t __riscv_vssubu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl);
vuint32m8_t __riscv_vssubu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl);
vuint64m1_t __riscv_vssubu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl);
vuint64m2_t __riscv_vssubu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl);
vuint64m4_t __riscv_vssubu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl);
vuint64m8_t __riscv_vssubu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl);
22.9. vssubu.vx
- Mnemonic
vssubu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Saturating subtract of unsigned integers, vector-scalar
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vssubu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vssubu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vssubu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vssubu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vssubu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vssubu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vssubu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vssubu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vssubu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vssubu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vssubu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vssubu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vssubu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vssubu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vssubu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vssubu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vssubu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vssubu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vssubu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vssubu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vssubu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vssubu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl);
vuint8mf8_t __riscv_vssubu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl);
vuint8mf4_t __riscv_vssubu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl);
vuint8mf2_t __riscv_vssubu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl);
vuint8m1_t __riscv_vssubu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl);
vuint8m2_t __riscv_vssubu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl);
vuint8m4_t __riscv_vssubu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl);
vuint8m8_t __riscv_vssubu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl);
vuint16mf4_t __riscv_vssubu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl);
vuint16mf2_t __riscv_vssubu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl);
vuint16m1_t __riscv_vssubu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl);
vuint16m2_t __riscv_vssubu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl);
vuint16m4_t __riscv_vssubu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl);
vuint16m8_t __riscv_vssubu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl);
vuint32mf2_t __riscv_vssubu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl);
vuint32m1_t __riscv_vssubu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl);
vuint32m2_t __riscv_vssubu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl);
vuint32m4_t __riscv_vssubu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl);
vuint32m8_t __riscv_vssubu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl);
vuint64m1_t __riscv_vssubu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl);
vuint64m2_t __riscv_vssubu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl);
vuint64m4_t __riscv_vssubu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl);
vuint64m8_t __riscv_vssubu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl);
22.10. vssub.vv
- Mnemonic
vssub.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Saturating subtract of signed integers, vector-vector
- Intrinsic Functions
Details
vint8mf8_t __riscv_vssub_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vssub_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vssub_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vssub_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vssub_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vssub_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vssub_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vssub_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vssub_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vssub_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vssub_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vssub_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vssub_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vssub_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vssub_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vssub_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vssub_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vssub_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vssub_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vssub_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vssub_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vssub_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, size_t vl);
vint8mf8_t __riscv_vssub_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl);
vint8mf4_t __riscv_vssub_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl);
vint8mf2_t __riscv_vssub_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl);
vint8m1_t __riscv_vssub_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl);
vint8m2_t __riscv_vssub_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl);
vint8m4_t __riscv_vssub_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl);
vint8m8_t __riscv_vssub_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl);
vint16mf4_t __riscv_vssub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl);
vint16mf2_t __riscv_vssub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl);
vint16m1_t __riscv_vssub_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl);
vint16m2_t __riscv_vssub_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl);
vint16m4_t __riscv_vssub_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl);
vint16m8_t __riscv_vssub_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl);
vint32mf2_t __riscv_vssub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl);
vint32m1_t __riscv_vssub_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
vint32m2_t __riscv_vssub_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl);
vint32m4_t __riscv_vssub_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl);
vint32m8_t __riscv_vssub_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl);
vint64m1_t __riscv_vssub_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl);
vint64m2_t __riscv_vssub_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl);
vint64m4_t __riscv_vssub_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl);
vint64m8_t __riscv_vssub_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl);
22.11. vssub.vx
- Mnemonic
vssub.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Saturating subtract of signed integers, vector-scalar
- Intrinsic Functions
Details
vint8mf8_t __riscv_vssub_vx_i8mf8 (vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vssub_vx_i8mf4 (vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vssub_vx_i8mf2 (vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vssub_vx_i8m1 (vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vssub_vx_i8m2 (vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vssub_vx_i8m4 (vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vssub_vx_i8m8 (vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vssub_vx_i16mf4 (vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vssub_vx_i16mf2 (vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vssub_vx_i16m1 (vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vssub_vx_i16m2 (vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vssub_vx_i16m4 (vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vssub_vx_i16m8 (vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vssub_vx_i32mf2 (vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vssub_vx_i32m1 (vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vssub_vx_i32m2 (vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vssub_vx_i32m4 (vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vssub_vx_i32m8 (vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vssub_vx_i64m1 (vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vssub_vx_i64m2 (vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vssub_vx_i64m4 (vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vssub_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl);
vint8mf8_t __riscv_vssub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl);
vint8mf4_t __riscv_vssub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl);
vint8mf2_t __riscv_vssub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl);
vint8m1_t __riscv_vssub_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl);
vint8m2_t __riscv_vssub_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl);
vint8m4_t __riscv_vssub_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl);
vint8m8_t __riscv_vssub_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl);
vint16mf4_t __riscv_vssub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl);
vint16mf2_t __riscv_vssub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl);
vint16m1_t __riscv_vssub_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl);
vint16m2_t __riscv_vssub_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl);
vint16m4_t __riscv_vssub_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl);
vint16m8_t __riscv_vssub_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl);
vint32mf2_t __riscv_vssub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl);
vint32m1_t __riscv_vssub_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl);
vint32m2_t __riscv_vssub_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl);
vint32m4_t __riscv_vssub_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl);
vint32m8_t __riscv_vssub_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl);
vint64m1_t __riscv_vssub_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl);
vint64m2_t __riscv_vssub_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl);
vint64m4_t __riscv_vssub_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl);
vint64m8_t __riscv_vssub_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl);
22.12. Vector Single-Width Averaging Add and Subtract
22.13. vaaddu.vv
- Mnemonic
vaaddu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Averaging adds of unsigned integers, roundoff_unsigned(vs2[i] + vs1[i], 1)
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vaaddu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vaaddu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vaaddu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vaaddu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vaaddu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vaaddu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vaaddu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vaaddu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vaaddu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vaaddu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vaaddu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vaaddu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vaaddu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vaaddu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vaaddu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vaaddu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vaaddu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vaaddu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vaaddu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vaaddu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vaaddu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vaaddu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vaaddu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vaaddu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vaaddu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vaaddu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vaaddu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vaaddu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vaaddu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vaaddu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vaaddu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vaaddu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vaaddu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vaaddu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vaaddu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vaaddu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vaaddu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vaaddu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vaaddu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vaaddu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vaaddu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vaaddu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vaaddu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vaaddu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, unsigned int vxrm, size_t vl);
22.14. vaaddu.vx
- Mnemonic
vaaddu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Averaging adds of unsigned integers, roundoff_unsigned(vs2[i] + x[rs1], 1)
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vaaddu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vaaddu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vaaddu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vaaddu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vaaddu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vaaddu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vaaddu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vaaddu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vaaddu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vaaddu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vaaddu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vaaddu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vaaddu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vaaddu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vaaddu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vaaddu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vaaddu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vaaddu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vaaddu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vaaddu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vaaddu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vaaddu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vaaddu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vaaddu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vaaddu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vaaddu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vaaddu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vaaddu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vaaddu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vaaddu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vaaddu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vaaddu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vaaddu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vaaddu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vaaddu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vaaddu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vaaddu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vaaddu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vaaddu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vaaddu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vaaddu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vaaddu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vaaddu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vaaddu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
22.15. vaadd.vv
- Mnemonic
vaadd.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Averaging adds of signed integers, roundoff_signed(vs2[i] + vs1[i], 1)
- Intrinsic Functions
Details
vint8mf8_t __riscv_vaadd_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vaadd_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vaadd_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vaadd_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vaadd_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vaadd_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vaadd_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vaadd_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vaadd_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vaadd_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vaadd_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vaadd_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vaadd_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vaadd_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vaadd_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vaadd_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vaadd_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vaadd_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vaadd_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vaadd_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vaadd_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vaadd_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vaadd_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vaadd_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vaadd_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vaadd_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vaadd_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vaadd_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vaadd_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vaadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vaadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vaadd_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vaadd_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vaadd_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vaadd_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vaadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vaadd_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vaadd_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vaadd_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vaadd_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vaadd_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vaadd_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vaadd_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vaadd_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, unsigned int vxrm, size_t vl);
22.16. vaadd.vx
- Mnemonic
vaadd.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Averaging adds of signed integers, roundoff_signed(vs2[i] + x[rs1], 1)
- Intrinsic Functions
Details
vint8mf8_t __riscv_vaadd_vx_i8mf8 (vint8mf8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vaadd_vx_i8mf4 (vint8mf4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vaadd_vx_i8mf2 (vint8mf2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vaadd_vx_i8m1 (vint8m1_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vaadd_vx_i8m2 (vint8m2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vaadd_vx_i8m4 (vint8m4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vaadd_vx_i8m8 (vint8m8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vaadd_vx_i16mf4 (vint16mf4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vaadd_vx_i16mf2 (vint16mf2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vaadd_vx_i16m1 (vint16m1_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vaadd_vx_i16m2 (vint16m2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vaadd_vx_i16m4 (vint16m4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vaadd_vx_i16m8 (vint16m8_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vaadd_vx_i32mf2 (vint32mf2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vaadd_vx_i32m1 (vint32m1_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vaadd_vx_i32m2 (vint32m2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vaadd_vx_i32m4 (vint32m4_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vaadd_vx_i32m8 (vint32m8_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vaadd_vx_i64m1 (vint64m1_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vaadd_vx_i64m2 (vint64m2_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vaadd_vx_i64m4 (vint64m4_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vaadd_vx_i64m8 (vint64m8_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vaadd_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vaadd_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vaadd_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vaadd_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vaadd_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vaadd_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vaadd_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vaadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vaadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vaadd_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vaadd_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vaadd_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vaadd_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vaadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vaadd_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vaadd_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vaadd_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vaadd_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vaadd_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vaadd_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vaadd_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vaadd_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, unsigned int vxrm, size_t vl);
22.17. vasubu.vv
- Mnemonic
vasubu.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Averaging subtract of unsigned integers, roundoff_unsigned(vs2[i] - vs1[i], 1)
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vasubu_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vasubu_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vasubu_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vasubu_vv_u8m1 (vuint8m1_t op1, vuint8m1_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vasubu_vv_u8m2 (vuint8m2_t op1, vuint8m2_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vasubu_vv_u8m4 (vuint8m4_t op1, vuint8m4_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vasubu_vv_u8m8 (vuint8m8_t op1, vuint8m8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vasubu_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vasubu_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vasubu_vv_u16m1 (vuint16m1_t op1, vuint16m1_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vasubu_vv_u16m2 (vuint16m2_t op1, vuint16m2_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vasubu_vv_u16m4 (vuint16m4_t op1, vuint16m4_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vasubu_vv_u16m8 (vuint16m8_t op1, vuint16m8_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vasubu_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vasubu_vv_u32m1 (vuint32m1_t op1, vuint32m1_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vasubu_vv_u32m2 (vuint32m2_t op1, vuint32m2_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vasubu_vv_u32m4 (vuint32m4_t op1, vuint32m4_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vasubu_vv_u32m8 (vuint32m8_t op1, vuint32m8_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vasubu_vv_u64m1 (vuint64m1_t op1, vuint64m1_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vasubu_vv_u64m2 (vuint64m2_t op1, vuint64m2_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vasubu_vv_u64m4 (vuint64m4_t op1, vuint64m4_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vasubu_vv_u64m8 (vuint64m8_t op1, vuint64m8_t op2, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vasubu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vasubu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vasubu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vasubu_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vasubu_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vasubu_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vasubu_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vasubu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vasubu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vasubu_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vasubu_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vasubu_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vasubu_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vasubu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vasubu_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vasubu_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vasubu_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vasubu_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vasubu_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vasubu_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vasubu_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vasubu_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, unsigned int vxrm, size_t vl);
22.18. vasubu.vx
- Mnemonic
vasubu.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Averaging subtract of unsigned integers, roundoff_unsigned(vs2[i] - x[rs1], 1)
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vasubu_vx_u8mf8 (vuint8mf8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vasubu_vx_u8mf4 (vuint8mf4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vasubu_vx_u8mf2 (vuint8mf2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vasubu_vx_u8m1 (vuint8m1_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vasubu_vx_u8m2 (vuint8m2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vasubu_vx_u8m4 (vuint8m4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vasubu_vx_u8m8 (vuint8m8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vasubu_vx_u16mf4 (vuint16mf4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vasubu_vx_u16mf2 (vuint16mf2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vasubu_vx_u16m1 (vuint16m1_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vasubu_vx_u16m2 (vuint16m2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vasubu_vx_u16m4 (vuint16m4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vasubu_vx_u16m8 (vuint16m8_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vasubu_vx_u32mf2 (vuint32mf2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vasubu_vx_u32m1 (vuint32m1_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vasubu_vx_u32m2 (vuint32m2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vasubu_vx_u32m4 (vuint32m4_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vasubu_vx_u32m8 (vuint32m8_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vasubu_vx_u64m1 (vuint64m1_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vasubu_vx_u64m2 (vuint64m2_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vasubu_vx_u64m4 (vuint64m4_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vasubu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vasubu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vasubu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vasubu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vasubu_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vasubu_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vasubu_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vasubu_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, uint8_t op2, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vasubu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vasubu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vasubu_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vasubu_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vasubu_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vasubu_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, uint16_t op2, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vasubu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vasubu_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vasubu_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vasubu_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vasubu_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, uint32_t op2, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vasubu_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vasubu_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vasubu_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vasubu_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, uint64_t op2, unsigned int vxrm, size_t vl);
22.19. vasub.vv
- Mnemonic
vasub.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Averaging subtract of signed integers, roundoff_signed(vs2[i] - vs1[i], 1)
- Intrinsic Functions
Details
vint8mf8_t __riscv_vasub_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vasub_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vasub_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vasub_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vasub_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vasub_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vasub_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vasub_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vasub_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vasub_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vasub_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vasub_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vasub_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vasub_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vasub_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vasub_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vasub_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vasub_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vasub_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vasub_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vasub_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vasub_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vasub_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vasub_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vasub_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vasub_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vasub_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vasub_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vasub_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vasub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vasub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vasub_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vasub_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vasub_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vasub_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vasub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vasub_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vasub_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vasub_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vasub_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vasub_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vasub_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vasub_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vasub_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, unsigned int vxrm, size_t vl);
22.20. vasub.vx
- Mnemonic
vasub.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Averaging subtract of signed integers, roundoff_signed(vs2[i] - x[rs1], 1)
- Intrinsic Functions
Details
vint8mf8_t __riscv_vasub_vx_i8mf8 (vint8mf8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vasub_vx_i8mf4 (vint8mf4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vasub_vx_i8mf2 (vint8mf2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vasub_vx_i8m1 (vint8m1_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vasub_vx_i8m2 (vint8m2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vasub_vx_i8m4 (vint8m4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vasub_vx_i8m8 (vint8m8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vasub_vx_i16mf4 (vint16mf4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vasub_vx_i16mf2 (vint16mf2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vasub_vx_i16m1 (vint16m1_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vasub_vx_i16m2 (vint16m2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vasub_vx_i16m4 (vint16m4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vasub_vx_i16m8 (vint16m8_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vasub_vx_i32mf2 (vint32mf2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vasub_vx_i32m1 (vint32m1_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vasub_vx_i32m2 (vint32m2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vasub_vx_i32m4 (vint32m4_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vasub_vx_i32m8 (vint32m8_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vasub_vx_i64m1 (vint64m1_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vasub_vx_i64m2 (vint64m2_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vasub_vx_i64m4 (vint64m4_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vasub_vx_i64m8 (vint64m8_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vasub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vasub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vasub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vasub_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vasub_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vasub_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vasub_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vasub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vasub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vasub_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vasub_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vasub_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vasub_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vasub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vasub_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vasub_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vasub_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vasub_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vasub_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vasub_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vasub_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vasub_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, unsigned int vxrm, size_t vl);
22.21. Vector Single-Width Fractional Multiply with Rounding and Saturation
22.22. vsmul.vv
- Mnemonic
vsmul.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Signed saturating and rounding fractional multiply vd[i] = clip(roundoff_signed(vs2[i]*vs1[i], SEW-1))
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsmul_vv_i8mf8 (vint8mf8_t op1, vint8mf8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vsmul_vv_i8mf4 (vint8mf4_t op1, vint8mf4_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vsmul_vv_i8mf2 (vint8mf2_t op1, vint8mf2_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vsmul_vv_i8m1 (vint8m1_t op1, vint8m1_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vsmul_vv_i8m2 (vint8m2_t op1, vint8m2_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vsmul_vv_i8m4 (vint8m4_t op1, vint8m4_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vsmul_vv_i8m8 (vint8m8_t op1, vint8m8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vsmul_vv_i16mf4 (vint16mf4_t op1, vint16mf4_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vsmul_vv_i16mf2 (vint16mf2_t op1, vint16mf2_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vsmul_vv_i16m1 (vint16m1_t op1, vint16m1_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vsmul_vv_i16m2 (vint16m2_t op1, vint16m2_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vsmul_vv_i16m4 (vint16m4_t op1, vint16m4_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vsmul_vv_i16m8 (vint16m8_t op1, vint16m8_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vsmul_vv_i32mf2 (vint32mf2_t op1, vint32mf2_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vsmul_vv_i32m1 (vint32m1_t op1, vint32m1_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vsmul_vv_i32m2 (vint32m2_t op1, vint32m2_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vsmul_vv_i32m4 (vint32m4_t op1, vint32m4_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vsmul_vv_i32m8 (vint32m8_t op1, vint32m8_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vsmul_vv_i64m1 (vint64m1_t op1, vint64m1_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vsmul_vv_i64m2 (vint64m2_t op1, vint64m2_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vsmul_vv_i64m4 (vint64m4_t op1, vint64m4_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vsmul_vv_i64m8 (vint64m8_t op1, vint64m8_t op2, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vsmul_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vsmul_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vsmul_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vsmul_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vint8m1_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vsmul_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vint8m2_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vsmul_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vint8m4_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vsmul_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vint8m8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vsmul_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vsmul_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vsmul_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vint16m1_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vsmul_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vint16m2_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vsmul_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vint16m4_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vsmul_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vint16m8_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vsmul_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vsmul_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vsmul_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vint32m2_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vsmul_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vint32m4_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vsmul_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vint32m8_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vsmul_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vint64m1_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vsmul_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vint64m2_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vsmul_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vint64m4_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vsmul_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vint64m8_t op2, unsigned int vxrm, size_t vl);
22.23. vsmul.vx
- Mnemonic
vsmul.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Signed saturating and rounding fractional multiply vd[i] = clip(roundoff_signed(vs2[i]*x[rs1], SEW-1))
- Intrinsic Functions
Details
vint8mf8_t __riscv_vsmul_vx_i8mf8 (vint8mf8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vsmul_vx_i8mf4 (vint8mf4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vsmul_vx_i8mf2 (vint8mf2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vsmul_vx_i8m1 (vint8m1_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vsmul_vx_i8m2 (vint8m2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vsmul_vx_i8m4 (vint8m4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vsmul_vx_i8m8 (vint8m8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vsmul_vx_i16mf4 (vint16mf4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vsmul_vx_i16mf2 (vint16mf2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vsmul_vx_i16m1 (vint16m1_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vsmul_vx_i16m2 (vint16m2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vsmul_vx_i16m4 (vint16m4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vsmul_vx_i16m8 (vint16m8_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vsmul_vx_i32mf2 (vint32mf2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vsmul_vx_i32m1 (vint32m1_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vsmul_vx_i32m2 (vint32m2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vsmul_vx_i32m4 (vint32m4_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vsmul_vx_i32m8 (vint32m8_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vsmul_vx_i64m1 (vint64m1_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vsmul_vx_i64m2 (vint64m2_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vsmul_vx_i64m4 (vint64m4_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vsmul_vx_i64m8 (vint64m8_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vsmul_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vsmul_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vsmul_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vsmul_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vsmul_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vsmul_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vsmul_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, int8_t op2, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vsmul_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vsmul_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vsmul_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vsmul_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vsmul_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vsmul_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, int16_t op2, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vsmul_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vsmul_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vsmul_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vsmul_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vsmul_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, int32_t op2, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vsmul_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vsmul_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vsmul_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, int64_t op2, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vsmul_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, int64_t op2, unsigned int vxrm, size_t vl);
22.24. Vector Single-Width Scaling Shift Instructions
22.25. vssrl.vv
- Mnemonic
vssrl.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Scaling shift right logical.
vd[i] = roundoff_unsigned(vs2[i], vs1[i])
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vssrl_vv_u8mf8 (vuint8mf8_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vssrl_vv_u8mf4 (vuint8mf4_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vssrl_vv_u8mf2 (vuint8mf2_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vssrl_vv_u8m1 (vuint8m1_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vssrl_vv_u8m2 (vuint8m2_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vssrl_vv_u8m4 (vuint8m4_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vssrl_vv_u8m8 (vuint8m8_t op1, vuint8m8_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vssrl_vv_u16mf4 (vuint16mf4_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vssrl_vv_u16mf2 (vuint16mf2_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vssrl_vv_u16m1 (vuint16m1_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vssrl_vv_u16m2 (vuint16m2_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vssrl_vv_u16m4 (vuint16m4_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vssrl_vv_u16m8 (vuint16m8_t op1, vuint16m8_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vssrl_vv_u32mf2 (vuint32mf2_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vssrl_vv_u32m1 (vuint32m1_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vssrl_vv_u32m2 (vuint32m2_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vssrl_vv_u32m4 (vuint32m4_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vssrl_vv_u32m8 (vuint32m8_t op1, vuint32m8_t shift, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vssrl_vv_u64m1 (vuint64m1_t op1, vuint64m1_t shift, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vssrl_vv_u64m2 (vuint64m2_t op1, vuint64m2_t shift, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vssrl_vv_u64m4 (vuint64m4_t op1, vuint64m4_t shift, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vssrl_vv_u64m8 (vuint64m8_t op1, vuint64m8_t shift, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vssrl_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vssrl_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vssrl_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vssrl_vv_u8m1_m (vbool8_t mask, vuint8m1_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vssrl_vv_u8m2_m (vbool4_t mask, vuint8m2_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vssrl_vv_u8m4_m (vbool2_t mask, vuint8m4_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vssrl_vv_u8m8_m (vbool1_t mask, vuint8m8_t op1, vuint8m8_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vssrl_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vssrl_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vssrl_vv_u16m1_m (vbool16_t mask, vuint16m1_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vssrl_vv_u16m2_m (vbool8_t mask, vuint16m2_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vssrl_vv_u16m4_m (vbool4_t mask, vuint16m4_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vssrl_vv_u16m8_m (vbool2_t mask, vuint16m8_t op1, vuint16m8_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vssrl_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vssrl_vv_u32m1_m (vbool32_t mask, vuint32m1_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vssrl_vv_u32m2_m (vbool16_t mask, vuint32m2_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vssrl_vv_u32m4_m (vbool8_t mask, vuint32m4_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vssrl_vv_u32m8_m (vbool4_t mask, vuint32m8_t op1, vuint32m8_t shift, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vssrl_vv_u64m1_m (vbool64_t mask, vuint64m1_t op1, vuint64m1_t shift, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vssrl_vv_u64m2_m (vbool32_t mask, vuint64m2_t op1, vuint64m2_t shift, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vssrl_vv_u64m4_m (vbool16_t mask, vuint64m4_t op1, vuint64m4_t shift, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vssrl_vv_u64m8_m (vbool8_t mask, vuint64m8_t op1, vuint64m8_t shift, unsigned int vxrm, size_t vl);
22.26. vssrl.vx
- Mnemonic
vssrl.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Scaling shift right logical.
vd[i] = roundoff_unsigned(vs2[i], x[rs1])
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vssrl_vx_u8mf8 (vuint8mf8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vssrl_vx_u8mf4 (vuint8mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vssrl_vx_u8mf2 (vuint8mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vssrl_vx_u8m1 (vuint8m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vssrl_vx_u8m2 (vuint8m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vssrl_vx_u8m4 (vuint8m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vssrl_vx_u8m8 (vuint8m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vssrl_vx_u16mf4 (vuint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vssrl_vx_u16mf2 (vuint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vssrl_vx_u16m1 (vuint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vssrl_vx_u16m2 (vuint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vssrl_vx_u16m4 (vuint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vssrl_vx_u16m8 (vuint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vssrl_vx_u32mf2 (vuint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vssrl_vx_u32m1 (vuint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vssrl_vx_u32m2 (vuint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vssrl_vx_u32m4 (vuint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vssrl_vx_u32m8 (vuint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vssrl_vx_u64m1 (vuint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vssrl_vx_u64m2 (vuint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vssrl_vx_u64m4 (vuint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vssrl_vx_u64m8 (vuint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vssrl_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vssrl_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vssrl_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vssrl_vx_u8m1_m (vbool8_t mask, vuint8m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vssrl_vx_u8m2_m (vbool4_t mask, vuint8m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vssrl_vx_u8m4_m (vbool2_t mask, vuint8m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m8_t __riscv_vssrl_vx_u8m8_m (vbool1_t mask, vuint8m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vssrl_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vssrl_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vssrl_vx_u16m1_m (vbool16_t mask, vuint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vssrl_vx_u16m2_m (vbool8_t mask, vuint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vssrl_vx_u16m4_m (vbool4_t mask, vuint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m8_t __riscv_vssrl_vx_u16m8_m (vbool2_t mask, vuint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vssrl_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vssrl_vx_u32m1_m (vbool32_t mask, vuint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vssrl_vx_u32m2_m (vbool16_t mask, vuint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vssrl_vx_u32m4_m (vbool8_t mask, vuint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m8_t __riscv_vssrl_vx_u32m8_m (vbool4_t mask, vuint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m1_t __riscv_vssrl_vx_u64m1_m (vbool64_t mask, vuint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m2_t __riscv_vssrl_vx_u64m2_m (vbool32_t mask, vuint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m4_t __riscv_vssrl_vx_u64m4_m (vbool16_t mask, vuint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint64m8_t __riscv_vssrl_vx_u64m8_m (vbool8_t mask, vuint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
22.27. vssrl.vi
- Mnemonic
vssrl.vi vd, vs2, uimm, vm
- Encoding
- Description
-
Scaling shift right logical.
vd[i] = roundoff_unsigned(vs2[i], uimm)
- Intrinsic Functions
Details
22.28. vssra.vv
- Mnemonic
vssra.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Scaling shift right arithmetic.
vd[i] = roundoff_signed(vs2[i], vs1[i])
- Intrinsic Functions
Details
vint8mf8_t __riscv_vssra_vv_i8mf8 (vint8mf8_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vssra_vv_i8mf4 (vint8mf4_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vssra_vv_i8mf2 (vint8mf2_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vssra_vv_i8m1 (vint8m1_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vssra_vv_i8m2 (vint8m2_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vssra_vv_i8m4 (vint8m4_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vssra_vv_i8m8 (vint8m8_t op1, vuint8m8_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vssra_vv_i16mf4 (vint16mf4_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vssra_vv_i16mf2 (vint16mf2_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vssra_vv_i16m1 (vint16m1_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vssra_vv_i16m2 (vint16m2_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vssra_vv_i16m4 (vint16m4_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vssra_vv_i16m8 (vint16m8_t op1, vuint16m8_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vssra_vv_i32mf2 (vint32mf2_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vssra_vv_i32m1 (vint32m1_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vssra_vv_i32m2 (vint32m2_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vssra_vv_i32m4 (vint32m4_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vssra_vv_i32m8 (vint32m8_t op1, vuint32m8_t shift, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vssra_vv_i64m1 (vint64m1_t op1, vuint64m1_t shift, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vssra_vv_i64m2 (vint64m2_t op1, vuint64m2_t shift, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vssra_vv_i64m4 (vint64m4_t op1, vuint64m4_t shift, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vssra_vv_i64m8 (vint64m8_t op1, vuint64m8_t shift, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vssra_vv_i8mf8_m (vbool64_t mask, vint8mf8_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vssra_vv_i8mf4_m (vbool32_t mask, vint8mf4_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vssra_vv_i8mf2_m (vbool16_t mask, vint8mf2_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vssra_vv_i8m1_m (vbool8_t mask, vint8m1_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vssra_vv_i8m2_m (vbool4_t mask, vint8m2_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vssra_vv_i8m4_m (vbool2_t mask, vint8m4_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vssra_vv_i8m8_m (vbool1_t mask, vint8m8_t op1, vuint8m8_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vssra_vv_i16mf4_m (vbool64_t mask, vint16mf4_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vssra_vv_i16mf2_m (vbool32_t mask, vint16mf2_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vssra_vv_i16m1_m (vbool16_t mask, vint16m1_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vssra_vv_i16m2_m (vbool8_t mask, vint16m2_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vssra_vv_i16m4_m (vbool4_t mask, vint16m4_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vssra_vv_i16m8_m (vbool2_t mask, vint16m8_t op1, vuint16m8_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vssra_vv_i32mf2_m (vbool64_t mask, vint32mf2_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vssra_vv_i32m1_m (vbool32_t mask, vint32m1_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vssra_vv_i32m2_m (vbool16_t mask, vint32m2_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vssra_vv_i32m4_m (vbool8_t mask, vint32m4_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vssra_vv_i32m8_m (vbool4_t mask, vint32m8_t op1, vuint32m8_t shift, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vssra_vv_i64m1_m (vbool64_t mask, vint64m1_t op1, vuint64m1_t shift, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vssra_vv_i64m2_m (vbool32_t mask, vint64m2_t op1, vuint64m2_t shift, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vssra_vv_i64m4_m (vbool16_t mask, vint64m4_t op1, vuint64m4_t shift, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vssra_vv_i64m8_m (vbool8_t mask, vint64m8_t op1, vuint64m8_t shift, unsigned int vxrm, size_t vl);
22.29. vssra.vx
- Mnemonic
vssra.vx vd, vs2, rs1, vm
- Encoding
- Description
-
Scaling shift right arithmetic.
vd[i] = roundoff_signed(vs2[i], x[rs1])
- Intrinsic Functions
Details
vint8mf8_t __riscv_vssra_vx_i8mf8 (vint8mf8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vssra_vx_i8mf4 (vint8mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vssra_vx_i8mf2 (vint8mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vssra_vx_i8m1 (vint8m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vssra_vx_i8m2 (vint8m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vssra_vx_i8m4 (vint8m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vssra_vx_i8m8 (vint8m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vssra_vx_i16mf4 (vint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vssra_vx_i16mf2 (vint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vssra_vx_i16m1 (vint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vssra_vx_i16m2 (vint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vssra_vx_i16m4 (vint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vssra_vx_i16m8 (vint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vssra_vx_i32mf2 (vint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vssra_vx_i32m1 (vint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vssra_vx_i32m2 (vint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vssra_vx_i32m4 (vint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vssra_vx_i32m8 (vint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vssra_vx_i64m1 (vint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vssra_vx_i64m2 (vint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vssra_vx_i64m4 (vint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vssra_vx_i64m8 (vint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vssra_vx_i8mf8_m (vbool64_t mask, vint8mf8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vssra_vx_i8mf4_m (vbool32_t mask, vint8mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vssra_vx_i8mf2_m (vbool16_t mask, vint8mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vssra_vx_i8m1_m (vbool8_t mask, vint8m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vssra_vx_i8m2_m (vbool4_t mask, vint8m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vssra_vx_i8m4_m (vbool2_t mask, vint8m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m8_t __riscv_vssra_vx_i8m8_m (vbool1_t mask, vint8m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vssra_vx_i16mf4_m (vbool64_t mask, vint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vssra_vx_i16mf2_m (vbool32_t mask, vint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vssra_vx_i16m1_m (vbool16_t mask, vint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vssra_vx_i16m2_m (vbool8_t mask, vint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vssra_vx_i16m4_m (vbool4_t mask, vint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m8_t __riscv_vssra_vx_i16m8_m (vbool2_t mask, vint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vssra_vx_i32mf2_m (vbool64_t mask, vint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vssra_vx_i32m1_m (vbool32_t mask, vint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vssra_vx_i32m2_m (vbool16_t mask, vint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vssra_vx_i32m4_m (vbool8_t mask, vint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m8_t __riscv_vssra_vx_i32m8_m (vbool4_t mask, vint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m1_t __riscv_vssra_vx_i64m1_m (vbool64_t mask, vint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m2_t __riscv_vssra_vx_i64m2_m (vbool32_t mask, vint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m4_t __riscv_vssra_vx_i64m4_m (vbool16_t mask, vint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint64m8_t __riscv_vssra_vx_i64m8_m (vbool8_t mask, vint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
22.30. vssra.vi
- Mnemonic
vssra.vi vd, vs2, uimm, vm
- Encoding
- Description
-
Scaling shift right arithmetic.
vd[i] = roundoff_signed(vs2[i], uimm)
- Intrinsic Functions
Details
22.31. Vector Narrowing Fixed-Point Clip Instructions
22.32. vnclipu.wv
- Mnemonic
vnclipu.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Narrowing unsigned clip.
vd[i] = clip(roundoff_unsigned(vs2[i], vs1[i]))
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vnclipu_wv_u8mf8 (vuint16mf4_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vnclipu_wv_u8mf4 (vuint16mf2_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vnclipu_wv_u8mf2 (vuint16m1_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vnclipu_wv_u8m1 (vuint16m2_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vnclipu_wv_u8m2 (vuint16m4_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vnclipu_wv_u8m4 (vuint16m8_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vnclipu_wv_u16mf4 (vuint32mf2_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vnclipu_wv_u16mf2 (vuint32m1_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vnclipu_wv_u16m1 (vuint32m2_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vnclipu_wv_u16m2 (vuint32m4_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vnclipu_wv_u16m4 (vuint32m8_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vnclipu_wv_u32mf2 (vuint64m1_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vnclipu_wv_u32m1 (vuint64m2_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vnclipu_wv_u32m2 (vuint64m4_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vnclipu_wv_u32m4 (vuint64m8_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vnclipu_wv_u8mf8_m (vbool64_t mask, vuint16mf4_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vnclipu_wv_u8mf4_m (vbool32_t mask, vuint16mf2_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vnclipu_wv_u8mf2_m (vbool16_t mask, vuint16m1_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vnclipu_wv_u8m1_m (vbool8_t mask, vuint16m2_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vnclipu_wv_u8m2_m (vbool4_t mask, vuint16m4_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vnclipu_wv_u8m4_m (vbool2_t mask, vuint16m8_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vnclipu_wv_u16mf4_m (vbool64_t mask, vuint32mf2_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vnclipu_wv_u16mf2_m (vbool32_t mask, vuint32m1_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vnclipu_wv_u16m1_m (vbool16_t mask, vuint32m2_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vnclipu_wv_u16m2_m (vbool8_t mask, vuint32m4_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vnclipu_wv_u16m4_m (vbool4_t mask, vuint32m8_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vnclipu_wv_u32mf2_m (vbool64_t mask, vuint64m1_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vnclipu_wv_u32m1_m (vbool32_t mask, vuint64m2_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vnclipu_wv_u32m2_m (vbool16_t mask, vuint64m4_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vnclipu_wv_u32m4_m (vbool8_t mask, vuint64m8_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
22.33. vnclipu.wx
- Mnemonic
vnclipu.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Narrowing unsigned clip.
vd[i] = clip(roundoff_unsigned(vs2[i], x[rs1]))
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vnclipu_wx_u8mf8 (vuint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vnclipu_wx_u8mf4 (vuint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vnclipu_wx_u8mf2 (vuint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vnclipu_wx_u8m1 (vuint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vnclipu_wx_u8m2 (vuint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vnclipu_wx_u8m4 (vuint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vnclipu_wx_u16mf4 (vuint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vnclipu_wx_u16mf2 (vuint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vnclipu_wx_u16m1 (vuint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vnclipu_wx_u16m2 (vuint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vnclipu_wx_u16m4 (vuint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vnclipu_wx_u32mf2 (vuint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vnclipu_wx_u32m1 (vuint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vnclipu_wx_u32m2 (vuint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vnclipu_wx_u32m4 (vuint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf8_t __riscv_vnclipu_wx_u8mf8_m (vbool64_t mask, vuint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf4_t __riscv_vnclipu_wx_u8mf4_m (vbool32_t mask, vuint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8mf2_t __riscv_vnclipu_wx_u8mf2_m (vbool16_t mask, vuint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m1_t __riscv_vnclipu_wx_u8m1_m (vbool8_t mask, vuint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m2_t __riscv_vnclipu_wx_u8m2_m (vbool4_t mask, vuint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint8m4_t __riscv_vnclipu_wx_u8m4_m (vbool2_t mask, vuint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf4_t __riscv_vnclipu_wx_u16mf4_m (vbool64_t mask, vuint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16mf2_t __riscv_vnclipu_wx_u16mf2_m (vbool32_t mask, vuint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m1_t __riscv_vnclipu_wx_u16m1_m (vbool16_t mask, vuint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m2_t __riscv_vnclipu_wx_u16m2_m (vbool8_t mask, vuint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint16m4_t __riscv_vnclipu_wx_u16m4_m (vbool4_t mask, vuint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32mf2_t __riscv_vnclipu_wx_u32mf2_m (vbool64_t mask, vuint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m1_t __riscv_vnclipu_wx_u32m1_m (vbool32_t mask, vuint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m2_t __riscv_vnclipu_wx_u32m2_m (vbool16_t mask, vuint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vuint32m4_t __riscv_vnclipu_wx_u32m4_m (vbool8_t mask, vuint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
22.34. vnclipu.wi
- Mnemonic
vnclipu.wi vd, vs2, uimm, vm
- Encoding
- Description
-
Narrowing unsigned clip.
vd[i] = clip(roundoff_unsigned(vs2[i], uimm))
- Intrinsic Functions
Details
22.35. vnclip.wv
- Mnemonic
vnclip.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Narrowing signed clip.
vd[i] = clip(roundoff_signed(vs2[i], vs1[i]))
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnclip_wv_i8mf8 (vint16mf4_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vnclip_wv_i8mf4 (vint16mf2_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vnclip_wv_i8mf2 (vint16m1_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vnclip_wv_i8m1 (vint16m2_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vnclip_wv_i8m2 (vint16m4_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vnclip_wv_i8m4 (vint16m8_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vnclip_wv_i16mf4 (vint32mf2_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vnclip_wv_i16mf2 (vint32m1_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vnclip_wv_i16m1 (vint32m2_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vnclip_wv_i16m2 (vint32m4_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vnclip_wv_i16m4 (vint32m8_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vnclip_wv_i32mf2 (vint64m1_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vnclip_wv_i32m1 (vint64m2_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vnclip_wv_i32m2 (vint64m4_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vnclip_wv_i32m4 (vint64m8_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vnclip_wv_i8mf8_m (vbool64_t mask, vint16mf4_t op1, vuint8mf8_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vnclip_wv_i8mf4_m (vbool32_t mask, vint16mf2_t op1, vuint8mf4_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vnclip_wv_i8mf2_m (vbool16_t mask, vint16m1_t op1, vuint8mf2_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vnclip_wv_i8m1_m (vbool8_t mask, vint16m2_t op1, vuint8m1_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vnclip_wv_i8m2_m (vbool4_t mask, vint16m4_t op1, vuint8m2_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vnclip_wv_i8m4_m (vbool2_t mask, vint16m8_t op1, vuint8m4_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vnclip_wv_i16mf4_m (vbool64_t mask, vint32mf2_t op1, vuint16mf4_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vnclip_wv_i16mf2_m (vbool32_t mask, vint32m1_t op1, vuint16mf2_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vnclip_wv_i16m1_m (vbool16_t mask, vint32m2_t op1, vuint16m1_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vnclip_wv_i16m2_m (vbool8_t mask, vint32m4_t op1, vuint16m2_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vnclip_wv_i16m4_m (vbool4_t mask, vint32m8_t op1, vuint16m4_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vnclip_wv_i32mf2_m (vbool64_t mask, vint64m1_t op1, vuint32mf2_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vnclip_wv_i32m1_m (vbool32_t mask, vint64m2_t op1, vuint32m1_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vnclip_wv_i32m2_m (vbool16_t mask, vint64m4_t op1, vuint32m2_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vnclip_wv_i32m4_m (vbool8_t mask, vint64m8_t op1, vuint32m4_t shift, unsigned int vxrm, size_t vl);
22.36. vnclip.wx
- Mnemonic
vnclip.wx vd, vs2, rs1, vm
- Encoding
- Description
-
Narrowing signed clip.
vd[i] = clip(roundoff_signed(vs2[i], x[rs1]))
- Intrinsic Functions
Details
vint8mf8_t __riscv_vnclip_wx_i8mf8 (vint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vnclip_wx_i8mf4 (vint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vnclip_wx_i8mf2 (vint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vnclip_wx_i8m1 (vint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vnclip_wx_i8m2 (vint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vnclip_wx_i8m4 (vint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vnclip_wx_i16mf4 (vint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vnclip_wx_i16mf2 (vint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vnclip_wx_i16m1 (vint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vnclip_wx_i16m2 (vint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vnclip_wx_i16m4 (vint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vnclip_wx_i32mf2 (vint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vnclip_wx_i32m1 (vint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vnclip_wx_i32m2 (vint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vnclip_wx_i32m4 (vint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf8_t __riscv_vnclip_wx_i8mf8_m (vbool64_t mask, vint16mf4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf4_t __riscv_vnclip_wx_i8mf4_m (vbool32_t mask, vint16mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8mf2_t __riscv_vnclip_wx_i8mf2_m (vbool16_t mask, vint16m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m1_t __riscv_vnclip_wx_i8m1_m (vbool8_t mask, vint16m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m2_t __riscv_vnclip_wx_i8m2_m (vbool4_t mask, vint16m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint8m4_t __riscv_vnclip_wx_i8m4_m (vbool2_t mask, vint16m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf4_t __riscv_vnclip_wx_i16mf4_m (vbool64_t mask, vint32mf2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16mf2_t __riscv_vnclip_wx_i16mf2_m (vbool32_t mask, vint32m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m1_t __riscv_vnclip_wx_i16m1_m (vbool16_t mask, vint32m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m2_t __riscv_vnclip_wx_i16m2_m (vbool8_t mask, vint32m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint16m4_t __riscv_vnclip_wx_i16m4_m (vbool4_t mask, vint32m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32mf2_t __riscv_vnclip_wx_i32mf2_m (vbool64_t mask, vint64m1_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m1_t __riscv_vnclip_wx_i32m1_m (vbool32_t mask, vint64m2_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m2_t __riscv_vnclip_wx_i32m2_m (vbool16_t mask, vint64m4_t op1, size_t shift, unsigned int vxrm, size_t vl);
vint32m4_t __riscv_vnclip_wx_i32m4_m (vbool8_t mask, vint64m8_t op1, size_t shift, unsigned int vxrm, size_t vl);
22.37. vnclip.wi
- Mnemonic
vnclip.wi vd, vs2, uimm, vm
- Encoding
- Description
-
Narrowing signed clip.
vd[i] = clip(roundoff_signed(vs2[i], uimm))
- Intrinsic Functions
Details
23. Vector Floating-Point Instructions
23.1. Vector Single-Width Floating-Point Add/Subtract Instructions
23.2. vfadd.vv
- Mnemonic
vfadd.vv vd, vs2, vs1, vm
- Description
-
Floating-point add, vector-vector. vd[i] = vs2[i] + vs1[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfadd_vv.h
- Encoding
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfadd_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfadd_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfadd_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfadd_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfadd_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfadd_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfadd_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfadd_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfadd_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfadd_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfadd_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfadd_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfadd_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfadd_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfadd_vv_f16mf4_rm (vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfadd_vv_f16mf2_rm (vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfadd_vv_f16m1_rm (vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfadd_vv_f16m2_rm (vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfadd_vv_f16m4_rm (vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfadd_vv_f16m8_rm (vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfadd_vv_f32mf2_rm (vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfadd_vv_f32m1_rm (vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfadd_vv_f32m2_rm (vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfadd_vv_f32m4_rm (vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfadd_vv_f32m8_rm (vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfadd_vv_f64m1_rm (vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfadd_vv_f64m2_rm (vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfadd_vv_f64m4_rm (vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfadd_vv_f64m8_rm (vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfadd_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfadd_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfadd_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfadd_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfadd_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfadd_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfadd_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfadd_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfadd_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfadd_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfadd_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfadd_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfadd_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfadd_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfadd_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
23.3. vfadd.vf
- Mnemonic
vfadd.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Floating-point add, vector-scalar. vd[i] = vs2[i] + f[rs1]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfadd_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfadd_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfadd_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfadd_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfadd_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfadd_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfadd_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfadd_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfadd_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfadd_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfadd_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfadd_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfadd_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfadd_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfadd_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfadd_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfadd_vf_f16mf4_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfadd_vf_f16mf2_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfadd_vf_f16m1_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfadd_vf_f16m2_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfadd_vf_f16m4_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfadd_vf_f16m8_rm (vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfadd_vf_f32mf2_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfadd_vf_f32m1_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfadd_vf_f32m2_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfadd_vf_f32m4_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfadd_vf_f32m8_rm (vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfadd_vf_f64m1_rm (vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfadd_vf_f64m2_rm (vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfadd_vf_f64m4_rm (vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfadd_vf_f64m8_rm (vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfadd_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfadd_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfadd_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfadd_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfadd_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfadd_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfadd_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfadd_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfadd_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfadd_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfadd_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfadd_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfadd_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfadd_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfadd_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
23.4. vfsub.vv
- Mnemonic
vfsub.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Floating-point subtract, vector-vector. vd[i] = vs2[i] - vs1[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsub_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsub_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsub_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsub_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsub_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsub_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsub_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsub_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsub_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsub_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsub_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsub_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsub_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsub_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsub_vv_f16mf4_rm (vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfsub_vv_f16mf2_rm (vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfsub_vv_f16m1_rm (vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfsub_vv_f16m2_rm (vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfsub_vv_f16m4_rm (vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfsub_vv_f16m8_rm (vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfsub_vv_f32mf2_rm (vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfsub_vv_f32m1_rm (vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfsub_vv_f32m2_rm (vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfsub_vv_f32m4_rm (vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfsub_vv_f32m8_rm (vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfsub_vv_f64m1_rm (vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfsub_vv_f64m2_rm (vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfsub_vv_f64m4_rm (vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfsub_vv_f64m8_rm (vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfsub_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfsub_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfsub_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfsub_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfsub_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfsub_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfsub_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfsub_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfsub_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfsub_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfsub_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfsub_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfsub_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfsub_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfsub_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
23.5. vfsub.vf
- Mnemonic
vfsub.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Floating-point subtract, vector-scalar. vd[i] = vs2[i] - f[rs1]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsub_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsub_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsub_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsub_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsub_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsub_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsub_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsub_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsub_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsub_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsub_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsub_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsub_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsub_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsub_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsub_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsub_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsub_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsub_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsub_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsub_vf_f16mf4_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfsub_vf_f16mf2_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfsub_vf_f16m1_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfsub_vf_f16m2_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfsub_vf_f16m4_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfsub_vf_f16m8_rm (vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfsub_vf_f32mf2_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfsub_vf_f32m1_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfsub_vf_f32m2_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfsub_vf_f32m4_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfsub_vf_f32m8_rm (vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfsub_vf_f64m1_rm (vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfsub_vf_f64m2_rm (vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfsub_vf_f64m4_rm (vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfsub_vf_f64m8_rm (vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfsub_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfsub_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfsub_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfsub_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfsub_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfsub_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfsub_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfsub_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfsub_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfsub_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfsub_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfsub_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfsub_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfsub_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfsub_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
23.6. vfrsub.vf
- Mnemonic
vfrsub.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Floating-point reverse subtract, scalar-vector. vd[i] = f[rs1] - vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfrsub_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfrsub_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfrsub_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfrsub_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfrsub_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfrsub_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfrsub_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfrsub_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfrsub_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfrsub_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfrsub_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfrsub_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfrsub_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfrsub_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfrsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfrsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfrsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfrsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfrsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfrsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfrsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfrsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfrsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfrsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfrsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfrsub_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfrsub_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfrsub_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfrsub_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfrsub_vf_f16mf4_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfrsub_vf_f16mf2_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfrsub_vf_f16m1_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfrsub_vf_f16m2_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfrsub_vf_f16m4_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfrsub_vf_f16m8_rm (vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfrsub_vf_f32mf2_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfrsub_vf_f32m1_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfrsub_vf_f32m2_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfrsub_vf_f32m4_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfrsub_vf_f32m8_rm (vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfrsub_vf_f64m1_rm (vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfrsub_vf_f64m2_rm (vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfrsub_vf_f64m4_rm (vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfrsub_vf_f64m8_rm (vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfrsub_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfrsub_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfrsub_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfrsub_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfrsub_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfrsub_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfrsub_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfrsub_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfrsub_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfrsub_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfrsub_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfrsub_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfrsub_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfrsub_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfrsub_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
23.7. Vector Widening Floating-Point Add/Subtract Instructions
23.8. vfwadd.vv
- Mnemonic
vfwadd.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = SEW +/- SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwadd_vv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_vv_f64m1 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_vv_f64m2 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_vv_f64m4 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_vv_f64m8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_vv_f32mf2_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_vv_f32m1_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_vv_f32m2_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_vv_f32m4_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_vv_f32m8_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_vv_f64m1_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_vv_f64m2_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_vv_f64m4_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_vv_f64m8_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_vv_f32mf2_rm (vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_vv_f32m1_rm (vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_vv_f32m2_rm (vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_vv_f32m4_rm (vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_vv_f32m8_rm (vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_vv_f64m1_rm (vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_vv_f64m2_rm (vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_vv_f64m4_rm (vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_vv_f64m8_rm (vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwadd_vv_f32mf2_rm_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_vv_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_vv_f32m2_rm_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_vv_f32m4_rm_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_vv_f32m8_rm_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_vv_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_vv_f64m2_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_vv_f64m4_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_vv_f64m8_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
23.9. vfwadd.vf
- Mnemonic
vfwadd.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = SEW +/- SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwadd_vf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_vf_f32m1 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_vf_f32m2 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_vf_f32m4 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_vf_f32m8 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_vf_f64m1 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_vf_f64m2 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_vf_f64m4 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_vf_f64m8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_vf_f32mf2_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_vf_f32m1_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_vf_f32m2_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_vf_f32m4_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_vf_f32m8_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_vf_f64m1_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_vf_f64m2_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_vf_f64m4_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_vf_f64m8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_vf_f32mf2_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_vf_f32m1_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_vf_f32m2_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_vf_f32m4_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_vf_f32m8_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_vf_f64m1_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_vf_f64m2_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_vf_f64m4_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_vf_f64m8_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwadd_vf_f32mf2_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_vf_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_vf_f32m2_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_vf_f32m4_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_vf_f32m8_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_vf_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_vf_f64m2_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_vf_f64m4_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_vf_f64m8_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
23.10. vfwsub.vv
- Mnemonic
vfwsub.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = SEW +/- SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwsub_vv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_vv_f64m1 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_vv_f64m2 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_vv_f64m4 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_vv_f64m8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_vv_f32mf2_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_vv_f32m1_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_vv_f32m2_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_vv_f32m4_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_vv_f32m8_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_vv_f64m1_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_vv_f64m2_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_vv_f64m4_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_vv_f64m8_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_vv_f32mf2_rm (vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_vv_f32m1_rm (vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_vv_f32m2_rm (vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_vv_f32m4_rm (vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_vv_f32m8_rm (vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_vv_f64m1_rm (vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_vv_f64m2_rm (vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_vv_f64m4_rm (vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_vv_f64m8_rm (vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwsub_vv_f32mf2_rm_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_vv_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_vv_f32m2_rm_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_vv_f32m4_rm_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_vv_f32m8_rm_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_vv_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_vv_f64m2_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_vv_f64m4_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_vv_f64m8_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
23.11. vfwsub.vf
- Mnemonic
vfwsub.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = SEW +/- SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwsub_vf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_vf_f32m1 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_vf_f32m2 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_vf_f32m4 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_vf_f32m8 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_vf_f64m1 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_vf_f64m2 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_vf_f64m4 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_vf_f64m8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_vf_f32mf2_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_vf_f32m1_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_vf_f32m2_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_vf_f32m4_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_vf_f32m8_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_vf_f64m1_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_vf_f64m2_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_vf_f64m4_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_vf_f64m8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_vf_f32mf2_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_vf_f32m1_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_vf_f32m2_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_vf_f32m4_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_vf_f32m8_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_vf_f64m1_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_vf_f64m2_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_vf_f64m4_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_vf_f64m8_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwsub_vf_f32mf2_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_vf_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_vf_f32m2_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_vf_f32m4_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_vf_f32m8_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_vf_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_vf_f64m2_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_vf_f64m4_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_vf_f64m8_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
23.12. vfwadd.wv
- Mnemonic
vfwadd.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwadd_wv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_wv_f64m1 (vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_wv_f64m2 (vfloat64m2_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_wv_f64m4 (vfloat64m4_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_wv_f64m8 (vfloat64m8_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_wv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_wv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_wv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_wv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_wv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_wv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_wv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_wv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_wv_f32mf2_rm (vfloat32mf2_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_wv_f32m1_rm (vfloat32m1_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_wv_f32m2_rm (vfloat32m2_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_wv_f32m4_rm (vfloat32m4_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_wv_f32m8_rm (vfloat32m8_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_wv_f64m1_rm (vfloat64m1_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_wv_f64m2_rm (vfloat64m2_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_wv_f64m4_rm (vfloat64m4_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_wv_f64m8_rm (vfloat64m8_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwadd_wv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_wv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_wv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_wv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_wv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_wv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_wv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_wv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_wv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
23.13. vfwadd.wf
- Mnemonic
vfwadd.wf vd, vs2, rs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwadd_wf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_wf_f32m1 (vfloat32m1_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_wf_f32m2 (vfloat32m2_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_wf_f32m4 (vfloat32m4_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_wf_f32m8 (vfloat32m8_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_wf_f64m1 (vfloat64m1_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_wf_f64m2 (vfloat64m2_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_wf_f64m4 (vfloat64m4_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_wf_f64m8 (vfloat64m8_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwadd_wf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwadd_wf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwadd_wf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwadd_wf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwadd_wf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwadd_wf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwadd_wf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwadd_wf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwadd_wf_f32mf2_rm (vfloat32mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_wf_f32m1_rm (vfloat32m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_wf_f32m2_rm (vfloat32m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_wf_f32m4_rm (vfloat32m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_wf_f32m8_rm (vfloat32m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_wf_f64m1_rm (vfloat64m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_wf_f64m2_rm (vfloat64m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_wf_f64m4_rm (vfloat64m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_wf_f64m8_rm (vfloat64m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwadd_wf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwadd_wf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwadd_wf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwadd_wf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwadd_wf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwadd_wf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwadd_wf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwadd_wf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwadd_wf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float32_t op2, unsigned int frm, size_t vl);
23.14. vfwsub.wv
- Mnemonic
vfwsub.wv vd, vs2, vs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwsub_wv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_wv_f64m1 (vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_wv_f64m2 (vfloat64m2_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_wv_f64m4 (vfloat64m4_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_wv_f64m8 (vfloat64m8_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_wv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_wv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_wv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_wv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_wv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_wv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_wv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_wv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_wv_f32mf2_rm (vfloat32mf2_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_wv_f32m1_rm (vfloat32m1_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_wv_f32m2_rm (vfloat32m2_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_wv_f32m4_rm (vfloat32m4_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_wv_f32m8_rm (vfloat32m8_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_wv_f64m1_rm (vfloat64m1_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_wv_f64m2_rm (vfloat64m2_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_wv_f64m4_rm (vfloat64m4_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_wv_f64m8_rm (vfloat64m8_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwsub_wv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_wv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_wv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_wv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_wv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_wv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_wv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_wv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_wv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
23.15. vfwsub.wf
- Mnemonic
vfwsub.wf vd, vs2, rs1, vm
- Encoding
- Description
-
Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwsub_wf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_wf_f32m1 (vfloat32m1_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_wf_f32m2 (vfloat32m2_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_wf_f32m4 (vfloat32m4_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_wf_f32m8 (vfloat32m8_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_wf_f64m1 (vfloat64m1_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_wf_f64m2 (vfloat64m2_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_wf_f64m4 (vfloat64m4_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_wf_f64m8 (vfloat64m8_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwsub_wf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwsub_wf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwsub_wf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwsub_wf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwsub_wf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwsub_wf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwsub_wf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwsub_wf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwsub_wf_f32mf2_rm (vfloat32mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_wf_f32m1_rm (vfloat32m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_wf_f32m2_rm (vfloat32m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_wf_f32m4_rm (vfloat32m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_wf_f32m8_rm (vfloat32m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_wf_f64m1_rm (vfloat64m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_wf_f64m2_rm (vfloat64m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_wf_f64m4_rm (vfloat64m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_wf_f64m8_rm (vfloat64m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwsub_wf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwsub_wf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwsub_wf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwsub_wf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwsub_wf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwsub_wf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwsub_wf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwsub_wf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwsub_wf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float32_t op2, unsigned int frm, size_t vl);
23.16. Vector Single-Width Floating-Point Multiply/Divide Instructions
23.17. vfmul.vv
- Mnemonic
vfmul.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Floating-point multiply, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmul_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmul_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfmul_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfmul_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfmul_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfmul_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfmul_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfmul_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfmul_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfmul_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmul_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmul_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfmul_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfmul_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfmul_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfmul_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmul_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfmul_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfmul_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfmul_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfmul_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfmul_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfmul_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfmul_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfmul_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmul_vv_f16mf4_rm (vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmul_vv_f16mf2_rm (vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmul_vv_f16m1_rm (vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmul_vv_f16m2_rm (vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmul_vv_f16m4_rm (vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmul_vv_f16m8_rm (vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmul_vv_f32mf2_rm (vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmul_vv_f32m1_rm (vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmul_vv_f32m2_rm (vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmul_vv_f32m4_rm (vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmul_vv_f32m8_rm (vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmul_vv_f64m1_rm (vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmul_vv_f64m2_rm (vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmul_vv_f64m4_rm (vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmul_vv_f64m8_rm (vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmul_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmul_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmul_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmul_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmul_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmul_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmul_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmul_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmul_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmul_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmul_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmul_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmul_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmul_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmul_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
23.18. vfmul.vf
- Mnemonic
vfmul.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Floating-point multiply, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmul_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmul_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmul_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfmul_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfmul_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfmul_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfmul_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmul_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfmul_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfmul_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfmul_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfmul_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfmul_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfmul_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfmul_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfmul_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmul_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmul_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfmul_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfmul_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfmul_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfmul_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmul_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfmul_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfmul_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfmul_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfmul_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfmul_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfmul_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfmul_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfmul_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmul_vf_f16mf4_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmul_vf_f16mf2_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmul_vf_f16m1_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmul_vf_f16m2_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmul_vf_f16m4_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmul_vf_f16m8_rm (vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmul_vf_f32mf2_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmul_vf_f32m1_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmul_vf_f32m2_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmul_vf_f32m4_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmul_vf_f32m8_rm (vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmul_vf_f64m1_rm (vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmul_vf_f64m2_rm (vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmul_vf_f64m4_rm (vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmul_vf_f64m8_rm (vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmul_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmul_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmul_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmul_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmul_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmul_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmul_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmul_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmul_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmul_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmul_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmul_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmul_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmul_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmul_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
23.19. vfdiv.vv
- Mnemonic
vfdiv.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Floating-point divide, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfdiv_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfdiv_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfdiv_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfdiv_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfdiv_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfdiv_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfdiv_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfdiv_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfdiv_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfdiv_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfdiv_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfdiv_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfdiv_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfdiv_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfdiv_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfdiv_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfdiv_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfdiv_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfdiv_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfdiv_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfdiv_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfdiv_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfdiv_vv_f16mf4_rm (vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vv_f16mf2_rm (vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfdiv_vv_f16m1_rm (vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfdiv_vv_f16m2_rm (vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfdiv_vv_f16m4_rm (vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfdiv_vv_f16m8_rm (vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vv_f32mf2_rm (vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfdiv_vv_f32m1_rm (vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfdiv_vv_f32m2_rm (vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfdiv_vv_f32m4_rm (vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfdiv_vv_f32m8_rm (vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfdiv_vv_f64m1_rm (vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfdiv_vv_f64m2_rm (vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfdiv_vv_f64m4_rm (vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfdiv_vv_f64m8_rm (vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfdiv_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfdiv_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfdiv_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfdiv_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfdiv_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfdiv_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfdiv_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfdiv_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfdiv_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfdiv_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfdiv_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfdiv_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfdiv_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, unsigned int frm, size_t vl);
23.20. vfdiv.vf
- Mnemonic
vfdiv.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Floating-point divide, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfdiv_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfdiv_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfdiv_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfdiv_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfdiv_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfdiv_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfdiv_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfdiv_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfdiv_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfdiv_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfdiv_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfdiv_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfdiv_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfdiv_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfdiv_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfdiv_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfdiv_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfdiv_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfdiv_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfdiv_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfdiv_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfdiv_vf_f16mf4_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vf_f16mf2_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfdiv_vf_f16m1_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfdiv_vf_f16m2_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfdiv_vf_f16m4_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfdiv_vf_f16m8_rm (vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vf_f32mf2_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfdiv_vf_f32m1_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfdiv_vf_f32m2_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfdiv_vf_f32m4_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfdiv_vf_f32m8_rm (vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfdiv_vf_f64m1_rm (vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfdiv_vf_f64m2_rm (vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfdiv_vf_f64m4_rm (vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfdiv_vf_f64m8_rm (vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfdiv_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfdiv_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfdiv_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfdiv_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfdiv_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfdiv_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfdiv_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfdiv_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfdiv_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfdiv_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfdiv_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfdiv_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfdiv_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfdiv_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfdiv_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
23.21. vfrdiv.vf
- Mnemonic
vfrdiv.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Reverse floating-point divide (vector = scalar / vector), scalar-vector, vd[i] = f[rs1] / vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfrdiv_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfrdiv_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfrdiv_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfrdiv_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfrdiv_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfrdiv_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfrdiv_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfrdiv_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfrdiv_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfrdiv_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfrdiv_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfrdiv_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfrdiv_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfrdiv_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfrdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfrdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfrdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfrdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfrdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfrdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfrdiv_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfrdiv_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfrdiv_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfrdiv_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfrdiv_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfrdiv_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfrdiv_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfrdiv_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfrdiv_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfrdiv_vf_f16mf4_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfrdiv_vf_f16mf2_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfrdiv_vf_f16m1_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfrdiv_vf_f16m2_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfrdiv_vf_f16m4_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfrdiv_vf_f16m8_rm (vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfrdiv_vf_f32mf2_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfrdiv_vf_f32m1_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfrdiv_vf_f32m2_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfrdiv_vf_f32m4_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfrdiv_vf_f32m8_rm (vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfrdiv_vf_f64m1_rm (vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfrdiv_vf_f64m2_rm (vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfrdiv_vf_f64m4_rm (vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfrdiv_vf_f64m8_rm (vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfrdiv_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfrdiv_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfrdiv_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfrdiv_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfrdiv_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfrdiv_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfrdiv_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfrdiv_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfrdiv_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfrdiv_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfrdiv_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfrdiv_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfrdiv_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfrdiv_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfrdiv_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, unsigned int frm, size_t vl);
23.22. Vector Widening Floating-Point Multiply
23.23. vfwmul.vv
- Mnemonic
vfwmul.vv vd, vs2, vs1, vm
- Encoding
- Description
- Widening floating-point multiply, vector-vector
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwmul_vv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwmul_vv_f64m1 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwmul_vv_f64m2 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwmul_vv_f64m4 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwmul_vv_f64m8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwmul_vv_f32mf2_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat32m1_t __riscv_vfwmul_vv_f32m1_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat32m2_t __riscv_vfwmul_vv_f32m2_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat32m4_t __riscv_vfwmul_vv_f32m4_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat32m8_t __riscv_vfwmul_vv_f32m8_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat64m1_t __riscv_vfwmul_vv_f64m1_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat64m2_t __riscv_vfwmul_vv_f64m2_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat64m4_t __riscv_vfwmul_vv_f64m4_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat64m8_t __riscv_vfwmul_vv_f64m8_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwmul_vv_f32mf2_rm (vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmul_vv_f32m1_rm (vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmul_vv_f32m2_rm (vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmul_vv_f32m4_rm (vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmul_vv_f32m8_rm (vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmul_vv_f64m1_rm (vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmul_vv_f64m2_rm (vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmul_vv_f64m4_rm (vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmul_vv_f64m8_rm (vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwmul_vv_f32mf2_rm_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmul_vv_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmul_vv_f32m2_rm_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmul_vv_f32m4_rm_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmul_vv_f32m8_rm_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmul_vv_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmul_vv_f64m2_rm_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmul_vv_f64m4_rm_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmul_vv_f64m8_rm_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, unsigned int frm, size_t vl);
23.24. vfwmul.vf
- Mnemonic
vfwmul.vf vd, vs2, rs1, vm
- Encoding
- Description
- Widening floating-point multiply, vector-scalar
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwmul_vf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwmul_vf_f32m1 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwmul_vf_f32m2 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwmul_vf_f32m4 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwmul_vf_f32m8 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwmul_vf_f64m1 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwmul_vf_f64m2 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwmul_vf_f64m4 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwmul_vf_f64m8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwmul_vf_f32mf2_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat32m1_t __riscv_vfwmul_vf_f32m1_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat32m2_t __riscv_vfwmul_vf_f32m2_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat32m4_t __riscv_vfwmul_vf_f32m4_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat32m8_t __riscv_vfwmul_vf_f32m8_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat64m1_t __riscv_vfwmul_vf_f64m1_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat64m2_t __riscv_vfwmul_vf_f64m2_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat64m4_t __riscv_vfwmul_vf_f64m4_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat64m8_t __riscv_vfwmul_vf_f64m8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32mf2_t __riscv_vfwmul_vf_f32mf2_rm (vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmul_vf_f32m1_rm (vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmul_vf_f32m2_rm (vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmul_vf_f32m4_rm (vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmul_vf_f32m8_rm (vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmul_vf_f64m1_rm (vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmul_vf_f64m2_rm (vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmul_vf_f64m4_rm (vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmul_vf_f64m8_rm (vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwmul_vf_f32mf2_rm_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmul_vf_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmul_vf_f32m2_rm_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmul_vf_f32m4_rm_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmul_vf_f32m8_rm_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmul_vf_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmul_vf_f64m2_rm_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmul_vf_f64m4_rm_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmul_vf_f64m8_rm_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, unsigned int frm, size_t vl);
23.25. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
23.26. vfmacc.vv
- Mnemonic
vfmacc.vv vd, vs1, vs2, vm
- Encoding
- Description
- FP multiply-accumulate, overwrites addend, vd[i] = +(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmacc_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmacc_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmacc_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmacc_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmacc_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmacc_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmacc_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmacc_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmacc_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmacc_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmacc_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmacc_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmacc_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmacc_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmacc_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmacc_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmacc_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmacc_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmacc_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmacc_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmacc_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmacc_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmacc_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmacc_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmacc_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmacc_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmacc_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmacc_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmacc_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmacc_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmacc_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmacc_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmacc_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmacc_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmacc_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmacc_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmacc_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmacc_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmacc_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.27. vfmacc.vf
- Mnemonic
vfmacc.vf vd, rs1, vs2, vm
- Encoding
- Description
- FP multiply-accumulate, overwrites addend, vd[i] = +(f[rs1] * vs2[i]) + vd[i]
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmacc_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmacc_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmacc_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmacc_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmacc_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmacc_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmacc_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmacc_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmacc_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmacc_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmacc_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmacc_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmacc_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmacc_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmacc_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmacc_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmacc_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmacc_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmacc_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmacc_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmacc_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmacc_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmacc_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmacc_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmacc_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmacc_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmacc_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmacc_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmacc_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmacc_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmacc_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmacc_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmacc_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmacc_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmacc_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmacc_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmacc_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmacc_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmacc_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmacc_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmacc_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmacc_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmacc_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmacc_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmacc_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmacc_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.28. vfnmacc.vv
- Mnemonic
vfnmacc.vv vd, vs1, vs2, vm
- Encoding
- Description
- FP negate-(multiply-accumulate), overwrites subtrahend, vd[i] = -(vs1[i] * vs2[i]) - vd[i]
- Spike Implementation
- https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmacc_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmacc_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmacc_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.29. vfnmacc.vf
- Mnemonic
vfnmacc.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP negate-(multiply-accumulate), overwrites subtrahend, vd[i] = -(f[rs1] * vs2[i]) - vd[i], where f[rs1] is the scalar operand in floating-point register rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmacc_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmacc_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmacc_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmacc_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmacc_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmacc_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmacc_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmacc_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmacc_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmacc_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmacc_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmacc_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmacc_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmacc_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmacc_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmacc_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmacc_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmacc_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.30. vfmsac.vv
- Mnemonic
vfmsac.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP multiply-subtract-accumulator, overwrites subtrahend, vd[i] = +(vs1[i] * vs2[i]) - vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmsac_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsac_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsac_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsac_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsac_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsac_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsac_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsac_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsac_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsac_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsac_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsac_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsac_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsac_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsac_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsac_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsac_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsac_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsac_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsac_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsac_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsac_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsac_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsac_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsac_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsac_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmsac_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsac_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsac_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsac_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsac_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsac_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsac_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsac_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsac_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsac_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsac_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsac_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsac_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.31. vfmsac.vf
- Mnemonic
vfmsac.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP multiply-subtract-accumulator, overwrites subtrahend, vd[i] = +(f[rs1] * vs2[i]) - vd[i], where f[rs1] is the scalar operand in floating-point register rs1
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmsac_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmsac_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsac_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsac_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsac_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsac_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsac_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsac_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsac_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsac_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsac_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsac_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsac_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsac_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsac_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsac_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsac_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsac_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsac_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsac_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsac_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsac_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsac_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsac_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsac_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsac_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsac_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsac_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsac_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsac_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsac_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmsac_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsac_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsac_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsac_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsac_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsac_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsac_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsac_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsac_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsac_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsac_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsac_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsac_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsac_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsac_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.32. vfnmsac.vv
- Mnemonic
vfnmsac.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP negate-(multiply-subtract-accumulator), overwrites minuend, vd[i] = -(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmsac_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsac_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmsac_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.33. vfnmsac.vf
- Mnemonic
vfnmsac.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP negate-(multiply-subtract-accumulator), overwrites the minuend: vd[i] = -(f[rs1] * vs2[i]) + vd[i], where f[rs1] is the scalar floating-point operand broadcast to all active elements
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmsac_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmsac_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsac_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmsac_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsac_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsac_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsac_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsac_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsac_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsac_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsac_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsac_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsac_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsac_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsac_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsac_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsac_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsac_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.34. vfmadd.vv
- Mnemonic
vfmadd.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP multiply-add, overwrites the multiplicand: vd[i] = +(vs1[i] * vd[i]) + vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmadd_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmadd_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmadd_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmadd_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmadd_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmadd_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmadd_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmadd_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmadd_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmadd_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmadd_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmadd_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmadd_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmadd_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmadd_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmadd_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmadd_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmadd_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmadd_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmadd_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmadd_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmadd_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmadd_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmadd_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmadd_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmadd_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmadd_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmadd_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmadd_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmadd_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmadd_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmadd_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmadd_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmadd_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmadd_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.35. vfmadd.vf
- Mnemonic
vfmadd.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP multiply-add, overwrites the multiplicand: vd[i] = +(f[rs1] * vd[i]) + vs2[i], where f[rs1] is the scalar floating-point operand broadcast to all active elements
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmadd_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmadd_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmadd_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmadd_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmadd_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmadd_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmadd_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmadd_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmadd_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmadd_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmadd_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmadd_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmadd_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmadd_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmadd_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmadd_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmadd_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmadd_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmadd_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmadd_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmadd_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmadd_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmadd_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmadd_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmadd_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmadd_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmadd_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmadd_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmadd_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmadd_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmadd_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmadd_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmadd_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmadd_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmadd_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmadd_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmadd_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmadd_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmadd_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmadd_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmadd_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmadd_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.36. vfnmadd.vv
- Mnemonic
vfnmadd.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP negate-(multiply-add), overwrites the multiplicand: vd[i] = -(vs1[i] * vd[i]) - vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmadd_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmadd_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmadd_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.37. vfnmadd.vf
- Mnemonic
vfnmadd.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP negate-(multiply-add), overwrites the multiplicand: vd[i] = -(f[rs1] * vd[i]) - vs2[i], where f[rs1] is the scalar floating-point operand broadcast to all active elements
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmadd_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmadd_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmadd_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmadd_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmadd_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmadd_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmadd_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmadd_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmadd_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmadd_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmadd_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmadd_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmadd_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmadd_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmadd_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmadd_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmadd_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmadd_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.38. vfmsub.vv
- Mnemonic
vfmsub.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP multiply-subtract; vd serves as both the multiplicand and the destination (it is overwritten): vd[i] = +(vs1[i] * vd[i]) - vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmsub_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsub_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsub_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsub_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsub_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsub_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsub_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsub_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsub_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsub_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsub_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsub_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsub_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsub_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsub_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsub_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsub_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsub_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsub_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsub_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsub_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsub_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsub_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsub_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsub_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsub_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmsub_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsub_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsub_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsub_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsub_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsub_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsub_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsub_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsub_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsub_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsub_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsub_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsub_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.39. vfmsub.vf
- Mnemonic
vfmsub.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP multiply-subtract; vd serves as both the multiplicand and the destination (it is overwritten): vd[i] = +(f[rs1] * vd[i]) - vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmsub_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmsub_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsub_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsub_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsub_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsub_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsub_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsub_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsub_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsub_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsub_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsub_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsub_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsub_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfmsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfmsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfmsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfmsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfmsub_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfmsub_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfmsub_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfmsub_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfmsub_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsub_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsub_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsub_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsub_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsub_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsub_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsub_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsub_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsub_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsub_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsub_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsub_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfmsub_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfmsub_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfmsub_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfmsub_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfmsub_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfmsub_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfmsub_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfmsub_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfmsub_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfmsub_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfmsub_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfmsub_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfmsub_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfmsub_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfmsub_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.40. vfnmsub.vv
- Mnemonic
vfnmsub.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP negate-(multiply-subtract); vd serves as both the multiplicand and the destination (it is overwritten): vd[i] = -(vs1[i] * vd[i]) + vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmsub_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vv_f32mf2 (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vv_f32m1 (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vv_f32m2 (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vv_f32m4 (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vv_f32m8 (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vv_f64m1 (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vv_f64m2 (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vv_f64m4 (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vv_f64m8 (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsub_vv_f16mf4_rm (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vv_f16mf2_rm (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vv_f16m1_rm (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vv_f16m2_rm (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vv_f16m4_rm (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vv_f16m8_rm (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vv_f32m1_rm (vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vv_f32m2_rm (vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vv_f32m4_rm (vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vv_f32m8_rm (vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vv_f64m1_rm (vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vv_f64m2_rm (vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vv_f64m4_rm (vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vv_f64m8_rm (vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmsub_vv_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vv_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vv_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vv_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vv_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vv_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.41. vfnmsub.vf
- Mnemonic
vfnmsub.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP negate-(multiply-subtract); vd serves as both the multiplicand and the destination (it is overwritten): vd[i] = -(f[rs1] * vd[i]) + vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfnmsub_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfnmsub_vf_f16mf4 (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vf_f16mf2 (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vf_f16m1 (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vf_f16m2 (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vf_f16m4 (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vf_f16m8 (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vf_f32mf2 (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vf_f32m1 (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vf_f32m2 (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vf_f32m4 (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vf_f32m8 (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vf_f64m1 (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vf_f64m2 (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vf_f64m4 (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vf_f64m8 (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, size_t vl);
vfloat16mf4_t __riscv_vfnmsub_vf_f16mf4_rm (vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vf_f16mf2_rm (vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vf_f16m1_rm (vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vf_f16m2_rm (vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vf_f16m4_rm (vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vf_f16m8_rm (vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vf_f32mf2_rm (vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vf_f32m1_rm (vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vf_f32m2_rm (vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vf_f32m4_rm (vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vf_f32m8_rm (vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vf_f64m1_rm (vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vf_f64m2_rm (vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vf_f64m4_rm (vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vf_f64m8_rm (vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfnmsub_vf_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t vd, float16_t rs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfnmsub_vf_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t vd, float16_t rs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfnmsub_vf_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vd, float16_t rs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfnmsub_vf_f16m2_rm_m (vbool8_t mask, vfloat16m2_t vd, float16_t rs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfnmsub_vf_f16m4_rm_m (vbool4_t mask, vfloat16m4_t vd, float16_t rs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfnmsub_vf_f16m8_rm_m (vbool2_t mask, vfloat16m8_t vd, float16_t rs1, vfloat16m8_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfnmsub_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float32_t rs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfnmsub_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float32_t rs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfnmsub_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float32_t rs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfnmsub_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float32_t rs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfnmsub_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float32_t rs1, vfloat32m8_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfnmsub_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float64_t rs1, vfloat64m1_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfnmsub_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float64_t rs1, vfloat64m2_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfnmsub_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float64_t rs1, vfloat64m4_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfnmsub_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float64_t rs1, vfloat64m8_t vs2, unsigned int frm, size_t vl);
23.42. Vector Widening Floating-Point Fused Multiply-Add Instructions
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.43. vfwmacc.vv
- Mnemonic
vfwmacc.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP widening multiply-accumulate, overwrites addend, vd[i] = +(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwmacc_vv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vv_f64m1 (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vv_f64m2 (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vv_f64m4 (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vv_f64m8 (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmacc_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vv_f32m1_rm (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vv_f32m2_rm (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vv_f32m4_rm (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vv_f32m8_rm (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vv_f64m1_rm (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vv_f64m2_rm (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vv_f64m4_rm (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vv_f64m8_rm (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwmacc_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.44. vfwmacc.vf
- Mnemonic
vfwmacc.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP widening multiply-accumulate, overwrites addend, vd[i] = +(f[rs1] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwmacc_vf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwmacc_vf_f32mf2 (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vf_f32m1 (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vf_f32m2 (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vf_f32m4 (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vf_f32m8 (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vf_f64m1 (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vf_f64m2 (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vf_f64m4 (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vf_f64m8 (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmacc_vf_f32mf2_rm (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vf_f32m1_rm (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vf_f32m2_rm (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vf_f32m4_rm (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vf_f32m8_rm (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vf_f64m1_rm (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vf_f64m2_rm (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vf_f64m4_rm (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vf_f64m8_rm (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwmacc_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmacc_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmacc_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmacc_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmacc_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmacc_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmacc_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmacc_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmacc_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.45. vfwnmacc.vv
- Mnemonic
vfwnmacc.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP widening negate-(multiply-accumulate), overwrites addend, vd[i] = -(vs1[i] * vs2[i]) - vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwnmacc_vv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwnmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vv_f64m1 (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vv_f64m2 (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vv_f64m4 (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vv_f64m8 (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmacc_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vv_f32m1_rm (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vv_f32m2_rm (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vv_f32m4_rm (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vv_f32m8_rm (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vv_f64m1_rm (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vv_f64m2_rm (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vv_f64m4_rm (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vv_f64m8_rm (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwnmacc_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.46. vfwnmacc.vf
- Mnemonic
vfwnmacc.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP widening negate-(multiply-accumulate), overwrites addend, vd[i] = -(f[rs1] * vs2[i]) - vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwnmacc_vf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwnmacc_vf_f32mf2 (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vf_f32m1 (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vf_f32m2 (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vf_f32m4 (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vf_f32m8 (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vf_f64m1 (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vf_f64m2 (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vf_f64m4 (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vf_f64m8 (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmacc_vf_f32mf2_rm (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vf_f32m1_rm (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vf_f32m2_rm (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vf_f32m4_rm (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vf_f32m8_rm (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vf_f64m1_rm (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vf_f64m2_rm (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vf_f64m4_rm (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vf_f64m8_rm (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwnmacc_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmacc_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmacc_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmacc_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmacc_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmacc_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmacc_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmacc_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmacc_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.47. vfwmsac.vv
- Mnemonic
vfwmsac.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP widening multiply-subtract-accumulator, overwrites addend, vd[i] = +(vs1[i] * vs2[i]) - vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwmsac_vv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vv_f64m1 (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vv_f64m2 (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vv_f64m4 (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vv_f64m8 (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmsac_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vv_f32m1_rm (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vv_f32m2_rm (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vv_f32m4_rm (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vv_f32m8_rm (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vv_f64m1_rm (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vv_f64m2_rm (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vv_f64m4_rm (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vv_f64m8_rm (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwmsac_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.48. vfwmsac.vf
- Mnemonic
vfwmsac.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP widening multiply-subtract-accumulator, overwrites addend, vd[i] = +(f[rs1] * vs2[i]) - vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwmsac_vf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwmsac_vf_f32mf2 (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vf_f32m1 (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vf_f32m2 (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vf_f32m4 (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vf_f32m8 (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vf_f64m1 (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vf_f64m2 (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vf_f64m4 (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vf_f64m8 (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwmsac_vf_f32mf2_rm (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vf_f32m1_rm (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vf_f32m2_rm (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vf_f32m4_rm (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vf_f32m8_rm (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vf_f64m1_rm (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vf_f64m2_rm (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vf_f64m4_rm (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vf_f64m8_rm (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwmsac_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwmsac_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwmsac_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwmsac_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwmsac_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwmsac_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwmsac_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwmsac_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwmsac_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.49. vfwnmsac.vv
- Mnemonic
vfwnmsac.vv vd, vs1, vs2, vm
- Encoding
- Description
-
FP widening negate-(multiply-subtract-accumulator), overwrites addend, vd[i] = -(vs1[i] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwnmsac_vv.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwnmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vv_f64m1 (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vv_f64m2 (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vv_f64m4 (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vv_f64m8 (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vv_f64m1_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vv_f64m2_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vv_f64m4_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vv_f64m8_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmsac_vv_f32mf2_rm (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vv_f32m1_rm (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vv_f32m2_rm (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vv_f32m4_rm (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vv_f32m8_rm (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vv_f64m1_rm (vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vv_f64m2_rm (vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vv_f64m4_rm (vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vv_f64m8_rm (vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwnmsac_vv_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vv_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vv_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vv_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vv_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vv_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vv_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vv_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vv_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.50. vfwnmsac.vf
- Mnemonic
vfwnmsac.vf vd, rs1, vs2, vm
- Encoding
- Description
-
FP widening negate-(multiply-subtract-accumulator), overwrites addend, vd[i] = -(f[rs1] * vs2[i]) + vd[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwnmsac_vf.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwnmsac_vf_f32mf2 (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vf_f32m1 (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vf_f32m2 (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vf_f32m4 (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vf_f32m8 (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vf_f64m1 (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vf_f64m2 (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vf_f64m4 (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vf_f64m8 (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vf_f64m1_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vf_f64m2_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vf_f64m4_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vf_f64m8_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, size_t vl);
vfloat32mf2_t __riscv_vfwnmsac_vf_f32mf2_rm (vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vf_f32m1_rm (vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vf_f32m2_rm (vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vf_f32m4_rm (vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vf_f32m8_rm (vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vf_f64m1_rm (vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vf_f64m2_rm (vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vf_f64m4_rm (vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vf_f64m8_rm (vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfwnmsac_vf_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t vd, float16_t vs1, vfloat16mf4_t vs2, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwnmsac_vf_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vd, float16_t vs1, vfloat16mf2_t vs2, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfwnmsac_vf_f32m2_rm_m (vbool16_t mask, vfloat32m2_t vd, float16_t vs1, vfloat16m1_t vs2, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfwnmsac_vf_f32m4_rm_m (vbool8_t mask, vfloat32m4_t vd, float16_t vs1, vfloat16m2_t vs2, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfwnmsac_vf_f32m8_rm_m (vbool4_t mask, vfloat32m8_t vd, float16_t vs1, vfloat16m4_t vs2, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwnmsac_vf_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vd, float32_t vs1, vfloat32mf2_t vs2, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfwnmsac_vf_f64m2_rm_m (vbool32_t mask, vfloat64m2_t vd, float32_t vs1, vfloat32m1_t vs2, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfwnmsac_vf_f64m4_rm_m (vbool16_t mask, vfloat64m4_t vd, float32_t vs1, vfloat32m2_t vs2, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfwnmsac_vf_f64m8_rm_m (vbool8_t mask, vfloat64m8_t vd, float32_t vs1, vfloat32m4_t vs2, unsigned int frm, size_t vl);
23.51. Vector Floating-Point Square-Root Instruction
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.52. vfsqrt.v
- Mnemonic
vfsqrt.v vd, vs2, vm
- Encoding
- Description
-
Floating-point square root, vector-vector, vd[i] = sqrt(vs2[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsqrt_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl);
vfloat16mf2_t __riscv_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl);
vfloat16m1_t __riscv_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl);
vfloat16m2_t __riscv_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl);
vfloat16m4_t __riscv_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl);
vfloat16m8_t __riscv_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl);
vfloat32mf2_t __riscv_vfsqrt_v_f32mf2 (vfloat32mf2_t op1, size_t vl);
vfloat32m1_t __riscv_vfsqrt_v_f32m1 (vfloat32m1_t op1, size_t vl);
vfloat32m2_t __riscv_vfsqrt_v_f32m2 (vfloat32m2_t op1, size_t vl);
vfloat32m4_t __riscv_vfsqrt_v_f32m4 (vfloat32m4_t op1, size_t vl);
vfloat32m8_t __riscv_vfsqrt_v_f32m8 (vfloat32m8_t op1, size_t vl);
vfloat64m1_t __riscv_vfsqrt_v_f64m1 (vfloat64m1_t op1, size_t vl);
vfloat64m2_t __riscv_vfsqrt_v_f64m2 (vfloat64m2_t op1, size_t vl);
vfloat64m4_t __riscv_vfsqrt_v_f64m4 (vfloat64m4_t op1, size_t vl);
vfloat64m8_t __riscv_vfsqrt_v_f64m8 (vfloat64m8_t op1, size_t vl);
vfloat16mf4_t __riscv_vfsqrt_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, size_t vl);
vfloat16mf2_t __riscv_vfsqrt_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, size_t vl);
vfloat16m1_t __riscv_vfsqrt_v_f16m1_m (vbool16_t mask, vfloat16m1_t op1, size_t vl);
vfloat16m2_t __riscv_vfsqrt_v_f16m2_m (vbool8_t mask, vfloat16m2_t op1, size_t vl);
vfloat16m4_t __riscv_vfsqrt_v_f16m4_m (vbool4_t mask, vfloat16m4_t op1, size_t vl);
vfloat16m8_t __riscv_vfsqrt_v_f16m8_m (vbool2_t mask, vfloat16m8_t op1, size_t vl);
vfloat32mf2_t __riscv_vfsqrt_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, size_t vl);
vfloat32m1_t __riscv_vfsqrt_v_f32m1_m (vbool32_t mask, vfloat32m1_t op1, size_t vl);
vfloat32m2_t __riscv_vfsqrt_v_f32m2_m (vbool16_t mask, vfloat32m2_t op1, size_t vl);
vfloat32m4_t __riscv_vfsqrt_v_f32m4_m (vbool8_t mask, vfloat32m4_t op1, size_t vl);
vfloat32m8_t __riscv_vfsqrt_v_f32m8_m (vbool4_t mask, vfloat32m8_t op1, size_t vl);
vfloat64m1_t __riscv_vfsqrt_v_f64m1_m (vbool64_t mask, vfloat64m1_t op1, size_t vl);
vfloat64m2_t __riscv_vfsqrt_v_f64m2_m (vbool32_t mask, vfloat64m2_t op1, size_t vl);
vfloat64m4_t __riscv_vfsqrt_v_f64m4_m (vbool16_t mask, vfloat64m4_t op1, size_t vl);
vfloat64m8_t __riscv_vfsqrt_v_f64m8_m (vbool8_t mask, vfloat64m8_t op1, size_t vl);
vfloat16mf4_t __riscv_vfsqrt_v_f16mf4_rm (vfloat16mf4_t op1, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfsqrt_v_f16mf2_rm (vfloat16mf2_t op1, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfsqrt_v_f16m1_rm (vfloat16m1_t op1, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfsqrt_v_f16m2_rm (vfloat16m2_t op1, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfsqrt_v_f16m4_rm (vfloat16m4_t op1, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfsqrt_v_f16m8_rm (vfloat16m8_t op1, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfsqrt_v_f32mf2_rm (vfloat32mf2_t op1, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfsqrt_v_f32m1_rm (vfloat32m1_t op1, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfsqrt_v_f32m2_rm (vfloat32m2_t op1, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfsqrt_v_f32m4_rm (vfloat32m4_t op1, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfsqrt_v_f32m8_rm (vfloat32m8_t op1, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfsqrt_v_f64m1_rm (vfloat64m1_t op1, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfsqrt_v_f64m2_rm (vfloat64m2_t op1, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfsqrt_v_f64m4_rm (vfloat64m4_t op1, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfsqrt_v_f64m8_rm (vfloat64m8_t op1, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfsqrt_v_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfsqrt_v_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfsqrt_v_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfsqrt_v_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfsqrt_v_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfsqrt_v_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfsqrt_v_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfsqrt_v_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfsqrt_v_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfsqrt_v_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfsqrt_v_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfsqrt_v_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfsqrt_v_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfsqrt_v_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfsqrt_v_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, unsigned int frm, size_t vl);
23.53. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.54. vfrsqrt7.v
- Mnemonic
vfrsqrt7.v vd, vs2, vm
- Encoding
- Description
-
Floating-point reciprocal square-root estimate to 7 bits.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfrsqrt7_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl);
vfloat16mf2_t __riscv_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl);
vfloat16m1_t __riscv_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl);
vfloat16m2_t __riscv_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl);
vfloat16m4_t __riscv_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl);
vfloat16m8_t __riscv_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl);
vfloat32mf2_t __riscv_vfrsqrt7_v_f32mf2 (vfloat32mf2_t op1, size_t vl);
vfloat32m1_t __riscv_vfrsqrt7_v_f32m1 (vfloat32m1_t op1, size_t vl);
vfloat32m2_t __riscv_vfrsqrt7_v_f32m2 (vfloat32m2_t op1, size_t vl);
vfloat32m4_t __riscv_vfrsqrt7_v_f32m4 (vfloat32m4_t op1, size_t vl);
vfloat32m8_t __riscv_vfrsqrt7_v_f32m8 (vfloat32m8_t op1, size_t vl);
vfloat64m1_t __riscv_vfrsqrt7_v_f64m1 (vfloat64m1_t op1, size_t vl);
vfloat64m2_t __riscv_vfrsqrt7_v_f64m2 (vfloat64m2_t op1, size_t vl);
vfloat64m4_t __riscv_vfrsqrt7_v_f64m4 (vfloat64m4_t op1, size_t vl);
vfloat64m8_t __riscv_vfrsqrt7_v_f64m8 (vfloat64m8_t op1, size_t vl);
vfloat16mf4_t __riscv_vfrsqrt7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, size_t vl);
vfloat16mf2_t __riscv_vfrsqrt7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, size_t vl);
vfloat16m1_t __riscv_vfrsqrt7_v_f16m1_m (vbool16_t mask, vfloat16m1_t op1, size_t vl);
vfloat16m2_t __riscv_vfrsqrt7_v_f16m2_m (vbool8_t mask, vfloat16m2_t op1, size_t vl);
vfloat16m4_t __riscv_vfrsqrt7_v_f16m4_m (vbool4_t mask, vfloat16m4_t op1, size_t vl);
vfloat16m8_t __riscv_vfrsqrt7_v_f16m8_m (vbool2_t mask, vfloat16m8_t op1, size_t vl);
vfloat32mf2_t __riscv_vfrsqrt7_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, size_t vl);
vfloat32m1_t __riscv_vfrsqrt7_v_f32m1_m (vbool32_t mask, vfloat32m1_t op1, size_t vl);
vfloat32m2_t __riscv_vfrsqrt7_v_f32m2_m (vbool16_t mask, vfloat32m2_t op1, size_t vl);
vfloat32m4_t __riscv_vfrsqrt7_v_f32m4_m (vbool8_t mask, vfloat32m4_t op1, size_t vl);
vfloat32m8_t __riscv_vfrsqrt7_v_f32m8_m (vbool4_t mask, vfloat32m8_t op1, size_t vl);
vfloat64m1_t __riscv_vfrsqrt7_v_f64m1_m (vbool64_t mask, vfloat64m1_t op1, size_t vl);
vfloat64m2_t __riscv_vfrsqrt7_v_f64m2_m (vbool32_t mask, vfloat64m2_t op1, size_t vl);
vfloat64m4_t __riscv_vfrsqrt7_v_f64m4_m (vbool16_t mask, vfloat64m4_t op1, size_t vl);
vfloat64m8_t __riscv_vfrsqrt7_v_f64m8_m (vbool8_t mask, vfloat64m8_t op1, size_t vl);
23.55. Vector Floating-Point Reciprocal Estimate Instruction
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.56. vfrec7.v
- Mnemonic
vfrec7.v vd, vs2, vm
- Description
-
Floating-point reciprocal estimate to 7 bits.
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfrec7_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl);
vfloat16mf2_t __riscv_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl);
vfloat16m1_t __riscv_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl);
vfloat16m2_t __riscv_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl);
vfloat16m4_t __riscv_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl);
vfloat16m8_t __riscv_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl);
vfloat32mf2_t __riscv_vfrec7_v_f32mf2 (vfloat32mf2_t op1, size_t vl);
vfloat32m1_t __riscv_vfrec7_v_f32m1 (vfloat32m1_t op1, size_t vl);
vfloat32m2_t __riscv_vfrec7_v_f32m2 (vfloat32m2_t op1, size_t vl);
vfloat32m4_t __riscv_vfrec7_v_f32m4 (vfloat32m4_t op1, size_t vl);
vfloat32m8_t __riscv_vfrec7_v_f32m8 (vfloat32m8_t op1, size_t vl);
vfloat64m1_t __riscv_vfrec7_v_f64m1 (vfloat64m1_t op1, size_t vl);
vfloat64m2_t __riscv_vfrec7_v_f64m2 (vfloat64m2_t op1, size_t vl);
vfloat64m4_t __riscv_vfrec7_v_f64m4 (vfloat64m4_t op1, size_t vl);
vfloat64m8_t __riscv_vfrec7_v_f64m8 (vfloat64m8_t op1, size_t vl);
vfloat16mf4_t __riscv_vfrec7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, size_t vl);
vfloat16mf2_t __riscv_vfrec7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, size_t vl);
vfloat16m1_t __riscv_vfrec7_v_f16m1_m (vbool16_t mask, vfloat16m1_t op1, size_t vl);
vfloat16m2_t __riscv_vfrec7_v_f16m2_m (vbool8_t mask, vfloat16m2_t op1, size_t vl);
vfloat16m4_t __riscv_vfrec7_v_f16m4_m (vbool4_t mask, vfloat16m4_t op1, size_t vl);
vfloat16m8_t __riscv_vfrec7_v_f16m8_m (vbool2_t mask, vfloat16m8_t op1, size_t vl);
vfloat32mf2_t __riscv_vfrec7_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, size_t vl);
vfloat32m1_t __riscv_vfrec7_v_f32m1_m (vbool32_t mask, vfloat32m1_t op1, size_t vl);
vfloat32m2_t __riscv_vfrec7_v_f32m2_m (vbool16_t mask, vfloat32m2_t op1, size_t vl);
vfloat32m4_t __riscv_vfrec7_v_f32m4_m (vbool8_t mask, vfloat32m4_t op1, size_t vl);
vfloat32m8_t __riscv_vfrec7_v_f32m8_m (vbool4_t mask, vfloat32m8_t op1, size_t vl);
vfloat64m1_t __riscv_vfrec7_v_f64m1_m (vbool64_t mask, vfloat64m1_t op1, size_t vl);
vfloat64m2_t __riscv_vfrec7_v_f64m2_m (vbool32_t mask, vfloat64m2_t op1, size_t vl);
vfloat64m4_t __riscv_vfrec7_v_f64m4_m (vbool16_t mask, vfloat64m4_t op1, size_t vl);
vfloat64m8_t __riscv_vfrec7_v_f64m8_m (vbool8_t mask, vfloat64m8_t op1, size_t vl);
vfloat16mf4_t __riscv_vfrec7_v_f16mf4_rm (vfloat16mf4_t op1, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfrec7_v_f16mf2_rm (vfloat16mf2_t op1, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfrec7_v_f16m1_rm (vfloat16m1_t op1, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfrec7_v_f16m2_rm (vfloat16m2_t op1, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfrec7_v_f16m4_rm (vfloat16m4_t op1, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfrec7_v_f16m8_rm (vfloat16m8_t op1, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfrec7_v_f32mf2_rm (vfloat32mf2_t op1, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfrec7_v_f32m1_rm (vfloat32m1_t op1, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfrec7_v_f32m2_rm (vfloat32m2_t op1, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfrec7_v_f32m4_rm (vfloat32m4_t op1, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfrec7_v_f32m8_rm (vfloat32m8_t op1, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfrec7_v_f64m1_rm (vfloat64m1_t op1, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfrec7_v_f64m2_rm (vfloat64m2_t op1, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfrec7_v_f64m4_rm (vfloat64m4_t op1, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfrec7_v_f64m8_rm (vfloat64m8_t op1, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfrec7_v_f16mf4_rm_m (vbool64_t mask, vfloat16mf4_t op1, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfrec7_v_f16mf2_rm_m (vbool32_t mask, vfloat16mf2_t op1, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfrec7_v_f16m1_rm_m (vbool16_t mask, vfloat16m1_t op1, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfrec7_v_f16m2_rm_m (vbool8_t mask, vfloat16m2_t op1, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfrec7_v_f16m4_rm_m (vbool4_t mask, vfloat16m4_t op1, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfrec7_v_f16m8_rm_m (vbool2_t mask, vfloat16m8_t op1, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfrec7_v_f32mf2_rm_m (vbool64_t mask, vfloat32mf2_t op1, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfrec7_v_f32m1_rm_m (vbool32_t mask, vfloat32m1_t op1, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfrec7_v_f32m2_rm_m (vbool16_t mask, vfloat32m2_t op1, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfrec7_v_f32m4_rm_m (vbool8_t mask, vfloat32m4_t op1, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfrec7_v_f32m8_rm_m (vbool4_t mask, vfloat32m8_t op1, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfrec7_v_f64m1_rm_m (vbool64_t mask, vfloat64m1_t op1, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfrec7_v_f64m2_rm_m (vbool32_t mask, vfloat64m2_t op1, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfrec7_v_f64m4_rm_m (vbool16_t mask, vfloat64m4_t op1, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfrec7_v_f64m8_rm_m (vbool8_t mask, vfloat64m8_t op1, unsigned int frm, size_t vl);
23.57. Vector Floating-Point MIN/MAX Instructions
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.58. vfmin.vv
- Mnemonic
vfmin.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Floating-point minimum, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmin_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmin_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfmin_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfmin_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfmin_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfmin_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfmin_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfmin_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfmin_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfmin_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmin_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmin_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfmin_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfmin_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfmin_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfmin_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmin_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfmin_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfmin_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfmin_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfmin_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfmin_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfmin_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfmin_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfmin_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.59. vfmin.vf
- Mnemonic
vfmin.vf vd, vs2, rs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmin_vf.h
- Description
-
Floating-point minimum, vector-scalar
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmin_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmin_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfmin_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfmin_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfmin_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfmin_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmin_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfmin_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfmin_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfmin_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfmin_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfmin_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfmin_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfmin_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfmin_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmin_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmin_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfmin_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfmin_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfmin_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfmin_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmin_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfmin_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfmin_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfmin_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfmin_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfmin_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfmin_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfmin_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfmin_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.60. vfmax.vv
- Mnemonic
vfmax.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Floating-point maximum, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmax_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmax_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfmax_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfmax_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfmax_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfmax_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfmax_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfmax_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfmax_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfmax_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmax_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmax_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfmax_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfmax_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfmax_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfmax_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmax_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfmax_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfmax_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfmax_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfmax_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfmax_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfmax_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfmax_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfmax_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.61. vfmax.vf
- Mnemonic
vfmax.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Floating-point maximum, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmax_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmax_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmax_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfmax_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfmax_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfmax_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfmax_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmax_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfmax_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfmax_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfmax_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfmax_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfmax_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfmax_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfmax_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfmax_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfmax_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfmax_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfmax_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfmax_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfmax_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfmax_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfmax_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfmax_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfmax_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfmax_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfmax_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfmax_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfmax_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfmax_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfmax_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.62. Vector Floating-Point Sign-Injection Instructions
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.63. vfsgnj.vv
- Mnemonic
vfsgnj.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Floating-point sign-injection, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsgnj_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnj_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnj_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnj_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnj_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnj_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnj_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnj_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnj_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnj_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsgnj_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnj_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnj_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnj_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnj_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnj_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnj_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnj_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnj_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnj_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnj_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnj_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnj_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnj_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnj_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.64. vfsgnj.vf
- Mnemonic
vfsgnj.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Floating-point sign-injection, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsgnj_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnj_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnj_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnj_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnj_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnj_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnj_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnj_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnj_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnj_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnj_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnj_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnj_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnj_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsgnj_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnj_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnj_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnj_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnj_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnj_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnj_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnj_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnj_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnj_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnj_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnj_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnj_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnj_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnj_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.65. vfsgnjn.vv
- Mnemonic
vfsgnjn.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsgnjn_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjn_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjn_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjn_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjn_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjn_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjn_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjn_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjn_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjn_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsgnjn_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjn_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjn_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjn_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjn_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjn_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjn_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjn_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjn_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjn_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjn_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjn_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjn_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjn_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjn_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.66. vfsgnjn.vf
- Mnemonic
vfsgnjn.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsgnjn_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjn_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjn_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjn_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjn_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjn_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjn_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjn_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjn_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjn_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsgnjn_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjn_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjn_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjn_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjn_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjn_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjn_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjn_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjn_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjn_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjn_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjn_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjn_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjn_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjn_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.67. vfsgnjx.vv
- Mnemonic
vfsgnjx.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsgnjx_vv.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjx_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjx_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjx_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjx_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjx_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjx_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjx_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjx_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjx_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsgnjx_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjx_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjx_vv_f16m1_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjx_vv_f16m2_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjx_vv_f16m4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjx_vv_f16m8_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjx_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjx_vv_f32m1_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjx_vv_f32m2_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjx_vv_f32m4_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjx_vv_f32m8_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjx_vv_f64m1_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjx_vv_f64m2_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjx_vv_f64m4_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjx_vv_f64m8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.68. vfsgnjx.vf
- Mnemonic
vfsgnjx.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfsgnjx_vf.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjx_vf_f32mf2 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjx_vf_f32m1 (vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjx_vf_f32m2 (vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjx_vf_f32m4 (vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjx_vf_f32m8 (vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjx_vf_f64m1 (vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjx_vf_f64m2 (vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjx_vf_f64m4 (vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjx_vf_f64m8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vfloat16mf4_t __riscv_vfsgnjx_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vfloat16mf2_t __riscv_vfsgnjx_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vfloat16m1_t __riscv_vfsgnjx_vf_f16m1_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vfloat16m2_t __riscv_vfsgnjx_vf_f16m2_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vfloat16m4_t __riscv_vfsgnjx_vf_f16m4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vfloat16m8_t __riscv_vfsgnjx_vf_f16m8_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vfloat32mf2_t __riscv_vfsgnjx_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vfloat32m1_t __riscv_vfsgnjx_vf_f32m1_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vfloat32m2_t __riscv_vfsgnjx_vf_f32m2_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vfloat32m4_t __riscv_vfsgnjx_vf_f32m4_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vfloat32m8_t __riscv_vfsgnjx_vf_f32m8_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vfloat64m1_t __riscv_vfsgnjx_vf_f64m1_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vfloat64m2_t __riscv_vfsgnjx_vf_f64m2_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vfloat64m4_t __riscv_vfsgnjx_vf_f64m4_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vfloat64m8_t __riscv_vfsgnjx_vf_f64m8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.69. Vector Floating-Point Compare Instructions
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.70. vmfeq.vv
- Mnemonic
vmfeq.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Compare equal, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfeq_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfeq_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmfeq_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vv_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vv_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vv_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vv_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vv_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmfeq_vv_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vv_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vv_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vv_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vv_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vv_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vv_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vv_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vv_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vv_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.71. vmfeq.vf
- Mnemonic
vmfeq.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Compare equal, Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfeq_vf.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfeq_vf_f16mf4_b64 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vf_f16mf2_b32 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vf_f16m1_b16 (vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vf_f16m2_b8 (vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vf_f16m4_b4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfeq_vf_f16m8_b2 (vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vf_f32mf2_b64 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vf_f32m1_b32 (vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vf_f32m2_b16 (vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vf_f32m4_b8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vf_f32m8_b4 (vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vf_f64m1_b64 (vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vf_f64m2_b32 (vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vf_f64m4_b16 (vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vf_f64m8_b8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vf_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vf_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vf_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vf_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vf_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfeq_vf_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vf_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vf_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vf_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vf_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfeq_vf_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfeq_vf_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfeq_vf_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfeq_vf_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfeq_vf_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.72. vmfne.vv
- Mnemonic
vmfne.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Compare not equal, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfne_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfne_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmfne_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmfne_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmfne_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmfne_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmfne_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmfne_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmfne_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmfne_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmfne_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmfne_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmfne_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmfne_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmfne_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmfne_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vbool64_t __riscv_vmfne_vv_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmfne_vv_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmfne_vv_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmfne_vv_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmfne_vv_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmfne_vv_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmfne_vv_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmfne_vv_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmfne_vv_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmfne_vv_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmfne_vv_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmfne_vv_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmfne_vv_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmfne_vv_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmfne_vv_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.73. vmfne.vf
- Mnemonic
vmfne.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Compare not equal, Vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfne_vf.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfne_vf_f16mf4_b64 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfne_vf_f16mf2_b32 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfne_vf_f16m1_b16 (vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfne_vf_f16m2_b8 (vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfne_vf_f16m4_b4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfne_vf_f16m8_b2 (vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfne_vf_f32mf2_b64 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfne_vf_f32m1_b32 (vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfne_vf_f32m2_b16 (vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfne_vf_f32m4_b8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfne_vf_f32m8_b4 (vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfne_vf_f64m1_b64 (vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfne_vf_f64m2_b32 (vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfne_vf_f64m4_b16 (vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfne_vf_f64m8_b8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vbool64_t __riscv_vmfne_vf_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfne_vf_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfne_vf_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfne_vf_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfne_vf_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfne_vf_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfne_vf_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfne_vf_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfne_vf_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfne_vf_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfne_vf_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfne_vf_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfne_vf_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfne_vf_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfne_vf_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.74. vmflt.vv
- Mnemonic
vmflt.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Compare less than, Vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmflt_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmflt_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmflt_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmflt_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmflt_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmflt_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmflt_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmflt_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmflt_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmflt_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmflt_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmflt_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmflt_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmflt_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmflt_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmflt_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vbool64_t __riscv_vmflt_vv_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmflt_vv_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmflt_vv_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmflt_vv_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmflt_vv_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmflt_vv_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmflt_vv_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmflt_vv_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmflt_vv_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmflt_vv_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmflt_vv_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmflt_vv_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmflt_vv_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmflt_vv_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmflt_vv_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.75. vmflt.vf
- Mnemonic
vmflt.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Compare less than, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmflt_vf.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmflt_vf_f16mf4_b64 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmflt_vf_f16mf2_b32 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmflt_vf_f16m1_b16 (vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmflt_vf_f16m2_b8 (vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmflt_vf_f16m4_b4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmflt_vf_f16m8_b2 (vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmflt_vf_f32mf2_b64 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmflt_vf_f32m1_b32 (vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmflt_vf_f32m2_b16 (vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmflt_vf_f32m4_b8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmflt_vf_f32m8_b4 (vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmflt_vf_f64m1_b64 (vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmflt_vf_f64m2_b32 (vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmflt_vf_f64m4_b16 (vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmflt_vf_f64m8_b8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vbool64_t __riscv_vmflt_vf_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmflt_vf_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmflt_vf_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmflt_vf_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmflt_vf_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmflt_vf_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmflt_vf_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmflt_vf_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmflt_vf_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmflt_vf_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmflt_vf_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmflt_vf_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmflt_vf_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmflt_vf_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmflt_vf_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.76. vmfle.vv
- Mnemonic
vmfle.vv vd, vs2, vs1, vm
- Encoding
- Description
-
Compare less than or equal, vector-vector
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfle_vv.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfle_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmfle_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmfle_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmfle_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmfle_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmfle_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmfle_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmfle_vv_f32m1_b32 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmfle_vv_f32m2_b16 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmfle_vv_f32m4_b8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmfle_vv_f32m8_b4 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmfle_vv_f64m1_b64 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmfle_vv_f64m2_b32 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmfle_vv_f64m4_b16 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmfle_vv_f64m8_b8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
vbool64_t __riscv_vmfle_vv_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl);
vbool32_t __riscv_vmfle_vv_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl);
vbool16_t __riscv_vmfle_vv_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl);
vbool8_t __riscv_vmfle_vv_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl);
vbool4_t __riscv_vmfle_vv_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl);
vbool2_t __riscv_vmfle_vv_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl);
vbool64_t __riscv_vmfle_vv_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl);
vbool32_t __riscv_vmfle_vv_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl);
vbool16_t __riscv_vmfle_vv_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl);
vbool8_t __riscv_vmfle_vv_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl);
vbool4_t __riscv_vmfle_vv_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl);
vbool64_t __riscv_vmfle_vv_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl);
vbool32_t __riscv_vmfle_vv_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl);
vbool16_t __riscv_vmfle_vv_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl);
vbool8_t __riscv_vmfle_vv_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl);
23.77. vmfle.vf
- Mnemonic
vmfle.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Compare less than or equal, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfle_vf.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfle_vf_f16mf4_b64 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfle_vf_f16mf2_b32 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfle_vf_f16m1_b16 (vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfle_vf_f16m2_b8 (vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfle_vf_f16m4_b4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfle_vf_f16m8_b2 (vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfle_vf_f32mf2_b64 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfle_vf_f32m1_b32 (vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfle_vf_f32m2_b16 (vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfle_vf_f32m4_b8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfle_vf_f32m8_b4 (vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfle_vf_f64m1_b64 (vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfle_vf_f64m2_b32 (vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfle_vf_f64m4_b16 (vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfle_vf_f64m8_b8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vbool64_t __riscv_vmfle_vf_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfle_vf_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfle_vf_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfle_vf_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfle_vf_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfle_vf_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfle_vf_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfle_vf_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfle_vf_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfle_vf_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfle_vf_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfle_vf_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfle_vf_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfle_vf_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfle_vf_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.78. vmfgt.vf
- Mnemonic
vmfgt.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Compare greater than, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfgt_vf.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfgt_vf_f16mf4_b64 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfgt_vf_f16mf2_b32 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfgt_vf_f16m1_b16 (vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfgt_vf_f16m2_b8 (vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfgt_vf_f16m4_b4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfgt_vf_f16m8_b2 (vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfgt_vf_f32mf2_b64 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfgt_vf_f32m1_b32 (vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfgt_vf_f32m2_b16 (vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfgt_vf_f32m4_b8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfgt_vf_f32m8_b4 (vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfgt_vf_f64m1_b64 (vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfgt_vf_f64m2_b32 (vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfgt_vf_f64m4_b16 (vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfgt_vf_f64m8_b8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vbool64_t __riscv_vmfgt_vf_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfgt_vf_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfgt_vf_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfgt_vf_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfgt_vf_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfgt_vf_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfgt_vf_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfgt_vf_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfgt_vf_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfgt_vf_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfgt_vf_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfgt_vf_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfgt_vf_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.79. vmfge.vf
- Mnemonic
vmfge.vf vd, vs2, rs1, vm
- Encoding
- Description
-
Compare greater than or equal, vector-scalar
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmfge_vf.h
- Intrinsic Functions
Details
vbool64_t __riscv_vmfge_vf_f16mf4_b64 (vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfge_vf_f16mf2_b32 (vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfge_vf_f16m1_b16 (vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfge_vf_f16m2_b8 (vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfge_vf_f16m4_b4 (vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfge_vf_f16m8_b2 (vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfge_vf_f32mf2_b64 (vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfge_vf_f32m1_b32 (vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfge_vf_f32m2_b16 (vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfge_vf_f32m4_b8 (vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfge_vf_f32m8_b4 (vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfge_vf_f64m1_b64 (vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfge_vf_f64m2_b32 (vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfge_vf_f64m4_b16 (vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfge_vf_f64m8_b8 (vfloat64m8_t op1, float64_t op2, size_t vl);
vbool64_t __riscv_vmfge_vf_f16mf4_b64_m (vbool64_t mask, vfloat16mf4_t op1, float16_t op2, size_t vl);
vbool32_t __riscv_vmfge_vf_f16mf2_b32_m (vbool32_t mask, vfloat16mf2_t op1, float16_t op2, size_t vl);
vbool16_t __riscv_vmfge_vf_f16m1_b16_m (vbool16_t mask, vfloat16m1_t op1, float16_t op2, size_t vl);
vbool8_t __riscv_vmfge_vf_f16m2_b8_m (vbool8_t mask, vfloat16m2_t op1, float16_t op2, size_t vl);
vbool4_t __riscv_vmfge_vf_f16m4_b4_m (vbool4_t mask, vfloat16m4_t op1, float16_t op2, size_t vl);
vbool2_t __riscv_vmfge_vf_f16m8_b2_m (vbool2_t mask, vfloat16m8_t op1, float16_t op2, size_t vl);
vbool64_t __riscv_vmfge_vf_f32mf2_b64_m (vbool64_t mask, vfloat32mf2_t op1, float32_t op2, size_t vl);
vbool32_t __riscv_vmfge_vf_f32m1_b32_m (vbool32_t mask, vfloat32m1_t op1, float32_t op2, size_t vl);
vbool16_t __riscv_vmfge_vf_f32m2_b16_m (vbool16_t mask, vfloat32m2_t op1, float32_t op2, size_t vl);
vbool8_t __riscv_vmfge_vf_f32m4_b8_m (vbool8_t mask, vfloat32m4_t op1, float32_t op2, size_t vl);
vbool4_t __riscv_vmfge_vf_f32m8_b4_m (vbool4_t mask, vfloat32m8_t op1, float32_t op2, size_t vl);
vbool64_t __riscv_vmfge_vf_f64m1_b64_m (vbool64_t mask, vfloat64m1_t op1, float64_t op2, size_t vl);
vbool32_t __riscv_vmfge_vf_f64m2_b32_m (vbool32_t mask, vfloat64m2_t op1, float64_t op2, size_t vl);
vbool16_t __riscv_vmfge_vf_f64m4_b16_m (vbool16_t mask, vfloat64m4_t op1, float64_t op2, size_t vl);
vbool8_t __riscv_vmfge_vf_f64m8_b8_m (vbool8_t mask, vfloat64m8_t op1, float64_t op2, size_t vl);
23.80. Vector Floating-Point Classify Instruction
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.81. vfclass.v
- Mnemonic
vfclass.v vd, vs2, vm
- Encoding
- Description
-
Classify the floating-point elements of vs2 (vd[i] = classify(vs2[i]))
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfclass_v.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vfclass_v_u16mf4 (vfloat16mf4_t op1, size_t vl);
vuint16mf2_t __riscv_vfclass_v_u16mf2 (vfloat16mf2_t op1, size_t vl);
vuint16m1_t __riscv_vfclass_v_u16m1 (vfloat16m1_t op1, size_t vl);
vuint16m2_t __riscv_vfclass_v_u16m2 (vfloat16m2_t op1, size_t vl);
vuint16m4_t __riscv_vfclass_v_u16m4 (vfloat16m4_t op1, size_t vl);
vuint16m8_t __riscv_vfclass_v_u16m8 (vfloat16m8_t op1, size_t vl);
vuint32mf2_t __riscv_vfclass_v_u32mf2 (vfloat32mf2_t op1, size_t vl);
vuint32m1_t __riscv_vfclass_v_u32m1 (vfloat32m1_t op1, size_t vl);
vuint32m2_t __riscv_vfclass_v_u32m2 (vfloat32m2_t op1, size_t vl);
vuint32m4_t __riscv_vfclass_v_u32m4 (vfloat32m4_t op1, size_t vl);
vuint32m8_t __riscv_vfclass_v_u32m8 (vfloat32m8_t op1, size_t vl);
vuint64m1_t __riscv_vfclass_v_u64m1 (vfloat64m1_t op1, size_t vl);
vuint64m2_t __riscv_vfclass_v_u64m2 (vfloat64m2_t op1, size_t vl);
vuint64m4_t __riscv_vfclass_v_u64m4 (vfloat64m4_t op1, size_t vl);
vuint64m8_t __riscv_vfclass_v_u64m8 (vfloat64m8_t op1, size_t vl);
vuint16mf4_t __riscv_vfclass_v_u16mf4_m (vbool64_t mask, vfloat16mf4_t op1, size_t vl);
vuint16mf2_t __riscv_vfclass_v_u16mf2_m (vbool32_t mask, vfloat16mf2_t op1, size_t vl);
vuint16m1_t __riscv_vfclass_v_u16m1_m (vbool16_t mask, vfloat16m1_t op1, size_t vl);
vuint16m2_t __riscv_vfclass_v_u16m2_m (vbool8_t mask, vfloat16m2_t op1, size_t vl);
vuint16m4_t __riscv_vfclass_v_u16m4_m (vbool4_t mask, vfloat16m4_t op1, size_t vl);
vuint16m8_t __riscv_vfclass_v_u16m8_m (vbool2_t mask, vfloat16m8_t op1, size_t vl);
vuint32mf2_t __riscv_vfclass_v_u32mf2_m (vbool64_t mask, vfloat32mf2_t op1, size_t vl);
vuint32m1_t __riscv_vfclass_v_u32m1_m (vbool32_t mask, vfloat32m1_t op1, size_t vl);
vuint32m2_t __riscv_vfclass_v_u32m2_m (vbool16_t mask, vfloat32m2_t op1, size_t vl);
vuint32m4_t __riscv_vfclass_v_u32m4_m (vbool8_t mask, vfloat32m4_t op1, size_t vl);
vuint32m8_t __riscv_vfclass_v_u32m8_m (vbool4_t mask, vfloat32m8_t op1, size_t vl);
vuint64m1_t __riscv_vfclass_v_u64m1_m (vbool64_t mask, vfloat64m1_t op1, size_t vl);
vuint64m2_t __riscv_vfclass_v_u64m2_m (vbool32_t mask, vfloat64m2_t op1, size_t vl);
vuint64m4_t __riscv_vfclass_v_u64m4_m (vbool16_t mask, vfloat64m4_t op1, size_t vl);
vuint64m8_t __riscv_vfclass_v_u64m8_m (vbool8_t mask, vfloat64m8_t op1, size_t vl);
23.82. Vector Floating-Point Merge Instruction
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.83. vfmerge.vfm
- Mnemonic
vfmerge.vfm vd, vs2, rs1, v0
- Encoding
- Description
-
vd[i] = v0.mask[i] ? f[rs1] : vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmerge_vfm.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmerge_vfm_f16mf4 (vfloat16mf4_t op1, float16_t op2, vbool64_t mask, size_t vl);
vfloat16mf2_t __riscv_vfmerge_vfm_f16mf2 (vfloat16mf2_t op1, float16_t op2, vbool32_t mask, size_t vl);
vfloat16m1_t __riscv_vfmerge_vfm_f16m1 (vfloat16m1_t op1, float16_t op2, vbool16_t mask, size_t vl);
vfloat16m2_t __riscv_vfmerge_vfm_f16m2 (vfloat16m2_t op1, float16_t op2, vbool8_t mask, size_t vl);
vfloat16m4_t __riscv_vfmerge_vfm_f16m4 (vfloat16m4_t op1, float16_t op2, vbool4_t mask, size_t vl);
vfloat16m8_t __riscv_vfmerge_vfm_f16m8 (vfloat16m8_t op1, float16_t op2, vbool2_t mask, size_t vl);
vfloat32mf2_t __riscv_vfmerge_vfm_f32mf2 (vfloat32mf2_t op1, float32_t op2, vbool64_t mask, size_t vl);
vfloat32m1_t __riscv_vfmerge_vfm_f32m1 (vfloat32m1_t op1, float32_t op2, vbool32_t mask, size_t vl);
vfloat32m2_t __riscv_vfmerge_vfm_f32m2 (vfloat32m2_t op1, float32_t op2, vbool16_t mask, size_t vl);
vfloat32m4_t __riscv_vfmerge_vfm_f32m4 (vfloat32m4_t op1, float32_t op2, vbool8_t mask, size_t vl);
vfloat32m8_t __riscv_vfmerge_vfm_f32m8 (vfloat32m8_t op1, float32_t op2, vbool4_t mask, size_t vl);
vfloat64m1_t __riscv_vfmerge_vfm_f64m1 (vfloat64m1_t op1, float64_t op2, vbool64_t mask, size_t vl);
vfloat64m2_t __riscv_vfmerge_vfm_f64m2 (vfloat64m2_t op1, float64_t op2, vbool32_t mask, size_t vl);
vfloat64m4_t __riscv_vfmerge_vfm_f64m4 (vfloat64m4_t op1, float64_t op2, vbool16_t mask, size_t vl);
vfloat64m8_t __riscv_vfmerge_vfm_f64m8 (vfloat64m8_t op1, float64_t op2, vbool8_t mask, size_t vl);
23.84. Vector Floating-Point Move Instruction
- Intrinsic Functions
Details
== Vector Loads and Stores Intrinsics
=== Vector Unit-Stride Load Intrinsics
=== Vector Unit-Stride Store Intrinsics
=== Vector Mask Load/Store Intrinsics
=== Vector Strided Load Intrinsics
=== Vector Strided Store Intrinsics
=== Vector Indexed Load Intrinsics
=== Vector Indexed Store Intrinsics
== Vector Loads and Stores Segment Instructions
=== Vector Unit-Stride Segment Load Intrinsics
=== Vector Unit-Stride Segment Store Intrinsics
=== Vector Strided Segment Load Intrinsics
=== Vector Strided Segment Store Intrinsics
=== Vector Indexed Segment Load Intrinsics
=== Vector Indexed Segment Store Intrinsics
== Vector Integer Arithmetic Instructions
=== Vector Single-Width Integer Add and Subtract Intrinsics
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Integer Extension Intrinsics
=== Vector Integer Add-with-Carry / Subtract-with-Borrow Intrinsics
=== Vector Bitwise Binary Logical Intrinsics
=== Vector Bitwise Unary Logical Intrinsics
=== Vector Single-Width Bit Shift Intrinsics
=== Vector Narrowing Integer Right Shift Intrinsics
=== Vector Integer Narrowing Intrinsics
=== Vector Integer Compare Intrinsics
=== Vector Integer Min/Max Intrinsics
=== Vector Single-Width Integer Multiply Intrinsics
=== Vector Integer Divide Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Single-Width Integer Multiply-Add Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Integer Merge Intrinsics
=== Vector Integer Move Intrinsics
== Vector Fixed-Point Arithmetic Instructions
=== Vector Single-Width Saturating Add and Subtract Intrinsics
=== Vector Single-Width Averaging Add and Subtract Intrinsics
=== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics
=== Vector Single-Width Scaling Shift Intrinsics
=== Vector Narrowing Fixed-Point Clip Intrinsics
== Vector Floating-Point Instructions
=== Vector Single-Width Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Single-Width Floating-Point Multiply/Divide Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Single-Width Floating-Point Fused Multiply-Add Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Vector Floating-Point Square-Root Intrinsics
=== Vector Floating-Point Reciprocal Square-Root Estimate Intrinsics
=== Vector Floating-Point Reciprocal Estimate Intrinsics
=== Vector Floating-Point MIN/MAX Intrinsics
=== Vector Floating-Point Sign-Injection Intrinsics
=== Vector Floating-Point Absolute Value Intrinsics
=== Vector Floating-Point Compare Intrinsics
=== Vector Floating-Point Classify Intrinsics
=== Vector Floating-Point Merge Intrinsics
=== Vector Floating-Point Move Intrinsics
== Vector Reduction Operations
=== Vector Single-Width Integer Reduction Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Single-Width Floating-Point Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
== Vector Mask Instructions
=== Vector Mask-Register Logical
=== Vector count population in mask `vcpop.m`
=== Vector Iota Intrinsics
=== Vector Element Index Intrinsics
== Vector Permutation Instructions
=== Vector Slideup Intrinsics
=== Vector Slidedown Intrinsics
=== Vector Slide1up and Slide1down Intrinsics
=== Vector Register Gather Intrinsics
=== Vector Compress Intrinsics
== Miscellaneous Vector Utility Intrinsics
=== Vector LMUL Extension Intrinsics
=== Vector LMUL Truncation Intrinsics
=== Vector Initialization Intrinsics
=== Vector Insertion Intrinsics
=== Vector Extraction Intrinsics
23.85. vfmv.v.f
- Mnemonic
vfmv.v.f vd, rs1
- Encoding
- Description
-
vd[i] = f[rs1]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmv_v_f.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfmv_v_f_f16mf4 (float16_t src, size_t vl);
vfloat16mf2_t __riscv_vfmv_v_f_f16mf2 (float16_t src, size_t vl);
vfloat16m1_t __riscv_vfmv_v_f_f16m1 (float16_t src, size_t vl);
vfloat16m2_t __riscv_vfmv_v_f_f16m2 (float16_t src, size_t vl);
vfloat16m4_t __riscv_vfmv_v_f_f16m4 (float16_t src, size_t vl);
vfloat16m8_t __riscv_vfmv_v_f_f16m8 (float16_t src, size_t vl);
vfloat32mf2_t __riscv_vfmv_v_f_f32mf2 (float32_t src, size_t vl);
vfloat32m1_t __riscv_vfmv_v_f_f32m1 (float32_t src, size_t vl);
vfloat32m2_t __riscv_vfmv_v_f_f32m2 (float32_t src, size_t vl);
vfloat32m4_t __riscv_vfmv_v_f_f32m4 (float32_t src, size_t vl);
vfloat32m8_t __riscv_vfmv_v_f_f32m8 (float32_t src, size_t vl);
vfloat64m1_t __riscv_vfmv_v_f_f64m1 (float64_t src, size_t vl);
vfloat64m2_t __riscv_vfmv_v_f_f64m2 (float64_t src, size_t vl);
vfloat64m4_t __riscv_vfmv_v_f_f64m4 (float64_t src, size_t vl);
vfloat64m8_t __riscv_vfmv_v_f_f64m8 (float64_t src, size_t vl);
23.86. Single-Width Floating-Point/Integer Type-Convert Instructions
23.87. vfcvt.xu.f.v
- Mnemonic
vfcvt.xu.f.v vd, vs2, vm
- Description
-
Convert float to unsigned integer.
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfcvt_xu_f_v.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl);
vuint16mf2_t __riscv_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl);
vuint16m1_t __riscv_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl);
vuint16m2_t __riscv_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl);
vuint16m4_t __riscv_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl);
vuint16m8_t __riscv_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl);
vuint32mf2_t __riscv_vfcvt_xu_f_v_u32mf2 (vfloat32mf2_t src, size_t vl);
vuint32m1_t __riscv_vfcvt_xu_f_v_u32m1 (vfloat32m1_t src, size_t vl);
vuint32m2_t __riscv_vfcvt_xu_f_v_u32m2 (vfloat32m2_t src, size_t vl);
vuint32m4_t __riscv_vfcvt_xu_f_v_u32m4 (vfloat32m4_t src, size_t vl);
vuint32m8_t __riscv_vfcvt_xu_f_v_u32m8 (vfloat32m8_t src, size_t vl);
vuint64m1_t __riscv_vfcvt_xu_f_v_u64m1 (vfloat64m1_t src, size_t vl);
vuint64m2_t __riscv_vfcvt_xu_f_v_u64m2 (vfloat64m2_t src, size_t vl);
vuint64m4_t __riscv_vfcvt_xu_f_v_u64m4 (vfloat64m4_t src, size_t vl);
vuint64m8_t __riscv_vfcvt_xu_f_v_u64m8 (vfloat64m8_t src, size_t vl);
vuint16mf4_t __riscv_vfcvt_xu_f_v_u16mf4_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vuint16mf2_t __riscv_vfcvt_xu_f_v_u16mf2_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vuint16m1_t __riscv_vfcvt_xu_f_v_u16m1_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vuint16m2_t __riscv_vfcvt_xu_f_v_u16m2_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vuint16m4_t __riscv_vfcvt_xu_f_v_u16m4_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vuint16m8_t __riscv_vfcvt_xu_f_v_u16m8_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vuint32mf2_t __riscv_vfcvt_xu_f_v_u32mf2_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vuint32m1_t __riscv_vfcvt_xu_f_v_u32m1_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vuint32m2_t __riscv_vfcvt_xu_f_v_u32m2_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vuint32m4_t __riscv_vfcvt_xu_f_v_u32m4_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vuint32m8_t __riscv_vfcvt_xu_f_v_u32m8_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vuint64m1_t __riscv_vfcvt_xu_f_v_u64m1_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vuint64m2_t __riscv_vfcvt_xu_f_v_u64m2_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vuint64m4_t __riscv_vfcvt_xu_f_v_u64m4_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vuint64m8_t __riscv_vfcvt_xu_f_v_u64m8_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
vuint16mf4_t __riscv_vfcvt_xu_f_v_u16mf4_rm (vfloat16mf4_t src, unsigned int frm, size_t vl);
vuint16mf2_t __riscv_vfcvt_xu_f_v_u16mf2_rm (vfloat16mf2_t src, unsigned int frm, size_t vl);
vuint16m1_t __riscv_vfcvt_xu_f_v_u16m1_rm (vfloat16m1_t src, unsigned int frm, size_t vl);
vuint16m2_t __riscv_vfcvt_xu_f_v_u16m2_rm (vfloat16m2_t src, unsigned int frm, size_t vl);
vuint16m4_t __riscv_vfcvt_xu_f_v_u16m4_rm (vfloat16m4_t src, unsigned int frm, size_t vl);
vuint16m8_t __riscv_vfcvt_xu_f_v_u16m8_rm (vfloat16m8_t src, unsigned int frm, size_t vl);
vuint32mf2_t __riscv_vfcvt_xu_f_v_u32mf2_rm (vfloat32mf2_t src, unsigned int frm, size_t vl);
vuint32m1_t __riscv_vfcvt_xu_f_v_u32m1_rm (vfloat32m1_t src, unsigned int frm, size_t vl);
vuint32m2_t __riscv_vfcvt_xu_f_v_u32m2_rm (vfloat32m2_t src, unsigned int frm, size_t vl);
vuint32m4_t __riscv_vfcvt_xu_f_v_u32m4_rm (vfloat32m4_t src, unsigned int frm, size_t vl);
vuint32m8_t __riscv_vfcvt_xu_f_v_u32m8_rm (vfloat32m8_t src, unsigned int frm, size_t vl);
vuint64m1_t __riscv_vfcvt_xu_f_v_u64m1_rm (vfloat64m1_t src, unsigned int frm, size_t vl);
vuint64m2_t __riscv_vfcvt_xu_f_v_u64m2_rm (vfloat64m2_t src, unsigned int frm, size_t vl);
vuint64m4_t __riscv_vfcvt_xu_f_v_u64m4_rm (vfloat64m4_t src, unsigned int frm, size_t vl);
vuint64m8_t __riscv_vfcvt_xu_f_v_u64m8_rm (vfloat64m8_t src, unsigned int frm, size_t vl);
vuint16mf4_t __riscv_vfcvt_xu_f_v_u16mf4_rm_m (vbool64_t mask, vfloat16mf4_t src, unsigned int frm, size_t vl);
vuint16mf2_t __riscv_vfcvt_xu_f_v_u16mf2_rm_m (vbool32_t mask, vfloat16mf2_t src, unsigned int frm, size_t vl);
vuint16m1_t __riscv_vfcvt_xu_f_v_u16m1_rm_m (vbool16_t mask, vfloat16m1_t src, unsigned int frm, size_t vl);
vuint16m2_t __riscv_vfcvt_xu_f_v_u16m2_rm_m (vbool8_t mask, vfloat16m2_t src, unsigned int frm, size_t vl);
vuint16m4_t __riscv_vfcvt_xu_f_v_u16m4_rm_m (vbool4_t mask, vfloat16m4_t src, unsigned int frm, size_t vl);
vuint16m8_t __riscv_vfcvt_xu_f_v_u16m8_rm_m (vbool2_t mask, vfloat16m8_t src, unsigned int frm, size_t vl);
vuint32mf2_t __riscv_vfcvt_xu_f_v_u32mf2_rm_m (vbool64_t mask, vfloat32mf2_t src, unsigned int frm, size_t vl);
vuint32m1_t __riscv_vfcvt_xu_f_v_u32m1_rm_m (vbool32_t mask, vfloat32m1_t src, unsigned int frm, size_t vl);
vuint32m2_t __riscv_vfcvt_xu_f_v_u32m2_rm_m (vbool16_t mask, vfloat32m2_t src, unsigned int frm, size_t vl);
vuint32m4_t __riscv_vfcvt_xu_f_v_u32m4_rm_m (vbool8_t mask, vfloat32m4_t src, unsigned int frm, size_t vl);
vuint32m8_t __riscv_vfcvt_xu_f_v_u32m8_rm_m (vbool4_t mask, vfloat32m8_t src, unsigned int frm, size_t vl);
vuint64m1_t __riscv_vfcvt_xu_f_v_u64m1_rm_m (vbool64_t mask, vfloat64m1_t src, unsigned int frm, size_t vl);
vuint64m2_t __riscv_vfcvt_xu_f_v_u64m2_rm_m (vbool32_t mask, vfloat64m2_t src, unsigned int frm, size_t vl);
vuint64m4_t __riscv_vfcvt_xu_f_v_u64m4_rm_m (vbool16_t mask, vfloat64m4_t src, unsigned int frm, size_t vl);
vuint64m8_t __riscv_vfcvt_xu_f_v_u64m8_rm_m (vbool8_t mask, vfloat64m8_t src, unsigned int frm, size_t vl);
23.88. vfcvt.x.f.v
- Mnemonic
vfcvt.x.f.v vd, vs2, vm
- Encoding
- Description
-
Convert float to signed integer.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfcvt_x_f_v.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl);
vint16mf2_t __riscv_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl);
vint16m1_t __riscv_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl);
vint16m2_t __riscv_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl);
vint16m4_t __riscv_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl);
vint16m8_t __riscv_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl);
vint32mf2_t __riscv_vfcvt_x_f_v_i32mf2 (vfloat32mf2_t src, size_t vl);
vint32m1_t __riscv_vfcvt_x_f_v_i32m1 (vfloat32m1_t src, size_t vl);
vint32m2_t __riscv_vfcvt_x_f_v_i32m2 (vfloat32m2_t src, size_t vl);
vint32m4_t __riscv_vfcvt_x_f_v_i32m4 (vfloat32m4_t src, size_t vl);
vint32m8_t __riscv_vfcvt_x_f_v_i32m8 (vfloat32m8_t src, size_t vl);
vint64m1_t __riscv_vfcvt_x_f_v_i64m1 (vfloat64m1_t src, size_t vl);
vint64m2_t __riscv_vfcvt_x_f_v_i64m2 (vfloat64m2_t src, size_t vl);
vint64m4_t __riscv_vfcvt_x_f_v_i64m4 (vfloat64m4_t src, size_t vl);
vint64m8_t __riscv_vfcvt_x_f_v_i64m8 (vfloat64m8_t src, size_t vl);
vint16mf4_t __riscv_vfcvt_x_f_v_i16mf4_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vint16mf2_t __riscv_vfcvt_x_f_v_i16mf2_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vint16m1_t __riscv_vfcvt_x_f_v_i16m1_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vint16m2_t __riscv_vfcvt_x_f_v_i16m2_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vint16m4_t __riscv_vfcvt_x_f_v_i16m4_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vint16m8_t __riscv_vfcvt_x_f_v_i16m8_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vint32mf2_t __riscv_vfcvt_x_f_v_i32mf2_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vint32m1_t __riscv_vfcvt_x_f_v_i32m1_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vint32m2_t __riscv_vfcvt_x_f_v_i32m2_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vint32m4_t __riscv_vfcvt_x_f_v_i32m4_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vint32m8_t __riscv_vfcvt_x_f_v_i32m8_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vint64m1_t __riscv_vfcvt_x_f_v_i64m1_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vint64m2_t __riscv_vfcvt_x_f_v_i64m2_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vint64m4_t __riscv_vfcvt_x_f_v_i64m4_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vint64m8_t __riscv_vfcvt_x_f_v_i64m8_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
vint16mf4_t __riscv_vfcvt_x_f_v_i16mf4_rm (vfloat16mf4_t src, unsigned int frm, size_t vl);
vint16mf2_t __riscv_vfcvt_x_f_v_i16mf2_rm (vfloat16mf2_t src, unsigned int frm, size_t vl);
vint16m1_t __riscv_vfcvt_x_f_v_i16m1_rm (vfloat16m1_t src, unsigned int frm, size_t vl);
vint16m2_t __riscv_vfcvt_x_f_v_i16m2_rm (vfloat16m2_t src, unsigned int frm, size_t vl);
vint16m4_t __riscv_vfcvt_x_f_v_i16m4_rm (vfloat16m4_t src, unsigned int frm, size_t vl);
vint16m8_t __riscv_vfcvt_x_f_v_i16m8_rm (vfloat16m8_t src, unsigned int frm, size_t vl);
vint32mf2_t __riscv_vfcvt_x_f_v_i32mf2_rm (vfloat32mf2_t src, unsigned int frm, size_t vl);
vint32m1_t __riscv_vfcvt_x_f_v_i32m1_rm (vfloat32m1_t src, unsigned int frm, size_t vl);
vint32m2_t __riscv_vfcvt_x_f_v_i32m2_rm (vfloat32m2_t src, unsigned int frm, size_t vl);
vint32m4_t __riscv_vfcvt_x_f_v_i32m4_rm (vfloat32m4_t src, unsigned int frm, size_t vl);
vint32m8_t __riscv_vfcvt_x_f_v_i32m8_rm (vfloat32m8_t src, unsigned int frm, size_t vl);
vint64m1_t __riscv_vfcvt_x_f_v_i64m1_rm (vfloat64m1_t src, unsigned int frm, size_t vl);
vint64m2_t __riscv_vfcvt_x_f_v_i64m2_rm (vfloat64m2_t src, unsigned int frm, size_t vl);
vint64m4_t __riscv_vfcvt_x_f_v_i64m4_rm (vfloat64m4_t src, unsigned int frm, size_t vl);
vint64m8_t __riscv_vfcvt_x_f_v_i64m8_rm (vfloat64m8_t src, unsigned int frm, size_t vl);
vint16mf4_t __riscv_vfcvt_x_f_v_i16mf4_rm_m (vbool64_t mask, vfloat16mf4_t src, unsigned int frm, size_t vl);
vint16mf2_t __riscv_vfcvt_x_f_v_i16mf2_rm_m (vbool32_t mask, vfloat16mf2_t src, unsigned int frm, size_t vl);
vint16m1_t __riscv_vfcvt_x_f_v_i16m1_rm_m (vbool16_t mask, vfloat16m1_t src, unsigned int frm, size_t vl);
vint16m2_t __riscv_vfcvt_x_f_v_i16m2_rm_m (vbool8_t mask, vfloat16m2_t src, unsigned int frm, size_t vl);
vint16m4_t __riscv_vfcvt_x_f_v_i16m4_rm_m (vbool4_t mask, vfloat16m4_t src, unsigned int frm, size_t vl);
vint16m8_t __riscv_vfcvt_x_f_v_i16m8_rm_m (vbool2_t mask, vfloat16m8_t src, unsigned int frm, size_t vl);
vint32mf2_t __riscv_vfcvt_x_f_v_i32mf2_rm_m (vbool64_t mask, vfloat32mf2_t src, unsigned int frm, size_t vl);
vint32m1_t __riscv_vfcvt_x_f_v_i32m1_rm_m (vbool32_t mask, vfloat32m1_t src, unsigned int frm, size_t vl);
vint32m2_t __riscv_vfcvt_x_f_v_i32m2_rm_m (vbool16_t mask, vfloat32m2_t src, unsigned int frm, size_t vl);
vint32m4_t __riscv_vfcvt_x_f_v_i32m4_rm_m (vbool8_t mask, vfloat32m4_t src, unsigned int frm, size_t vl);
vint32m8_t __riscv_vfcvt_x_f_v_i32m8_rm_m (vbool4_t mask, vfloat32m8_t src, unsigned int frm, size_t vl);
vint64m1_t __riscv_vfcvt_x_f_v_i64m1_rm_m (vbool64_t mask, vfloat64m1_t src, unsigned int frm, size_t vl);
vint64m2_t __riscv_vfcvt_x_f_v_i64m2_rm_m (vbool32_t mask, vfloat64m2_t src, unsigned int frm, size_t vl);
vint64m4_t __riscv_vfcvt_x_f_v_i64m4_rm_m (vbool16_t mask, vfloat64m4_t src, unsigned int frm, size_t vl);
vint64m8_t __riscv_vfcvt_x_f_v_i64m8_rm_m (vbool8_t mask, vfloat64m8_t src, unsigned int frm, size_t vl);
23.89. vfcvt.rtz.xu.f.v
- Mnemonic
vfcvt.rtz.xu.f.v vd, vs2, vm
- Encoding
- Description
-
Convert float to unsigned integer, truncating.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfcvt_rtz_xu_f_v.h
- Intrinsic Functions
Details
vuint16mf4_t __riscv_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl);
vuint16mf2_t __riscv_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl);
vuint16m1_t __riscv_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl);
vuint16m2_t __riscv_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl);
vuint16m4_t __riscv_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl);
vuint16m8_t __riscv_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl);
vuint32mf2_t __riscv_vfcvt_rtz_xu_f_v_u32mf2 (vfloat32mf2_t src, size_t vl);
vuint32m1_t __riscv_vfcvt_rtz_xu_f_v_u32m1 (vfloat32m1_t src, size_t vl);
vuint32m2_t __riscv_vfcvt_rtz_xu_f_v_u32m2 (vfloat32m2_t src, size_t vl);
vuint32m4_t __riscv_vfcvt_rtz_xu_f_v_u32m4 (vfloat32m4_t src, size_t vl);
vuint32m8_t __riscv_vfcvt_rtz_xu_f_v_u32m8 (vfloat32m8_t src, size_t vl);
vuint64m1_t __riscv_vfcvt_rtz_xu_f_v_u64m1 (vfloat64m1_t src, size_t vl);
vuint64m2_t __riscv_vfcvt_rtz_xu_f_v_u64m2 (vfloat64m2_t src, size_t vl);
vuint64m4_t __riscv_vfcvt_rtz_xu_f_v_u64m4 (vfloat64m4_t src, size_t vl);
vuint64m8_t __riscv_vfcvt_rtz_xu_f_v_u64m8 (vfloat64m8_t src, size_t vl);
vuint16mf4_t __riscv_vfcvt_rtz_xu_f_v_u16mf4_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vuint16mf2_t __riscv_vfcvt_rtz_xu_f_v_u16mf2_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vuint16m1_t __riscv_vfcvt_rtz_xu_f_v_u16m1_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vuint16m2_t __riscv_vfcvt_rtz_xu_f_v_u16m2_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vuint16m4_t __riscv_vfcvt_rtz_xu_f_v_u16m4_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vuint16m8_t __riscv_vfcvt_rtz_xu_f_v_u16m8_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vuint32mf2_t __riscv_vfcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vuint32m1_t __riscv_vfcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vuint32m2_t __riscv_vfcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vuint32m4_t __riscv_vfcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vuint32m8_t __riscv_vfcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vuint64m1_t __riscv_vfcvt_rtz_xu_f_v_u64m1_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vuint64m2_t __riscv_vfcvt_rtz_xu_f_v_u64m2_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vuint64m4_t __riscv_vfcvt_rtz_xu_f_v_u64m4_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vuint64m8_t __riscv_vfcvt_rtz_xu_f_v_u64m8_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
23.90. vfcvt.rtz.x.f.v
- Mnemonic
vfcvt.rtz.x.f.v vd, vs2, vm
- Encoding
- Description
-
Convert float to signed integer, truncating.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfcvt_rtz_x_f_v.h
- Intrinsic Functions
Details
vint16mf4_t __riscv_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl);
vint16mf2_t __riscv_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl);
vint16m1_t __riscv_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl);
vint16m2_t __riscv_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl);
vint16m4_t __riscv_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl);
vint16m8_t __riscv_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl);
vint32mf2_t __riscv_vfcvt_rtz_x_f_v_i32mf2 (vfloat32mf2_t src, size_t vl);
vint32m1_t __riscv_vfcvt_rtz_x_f_v_i32m1 (vfloat32m1_t src, size_t vl);
vint32m2_t __riscv_vfcvt_rtz_x_f_v_i32m2 (vfloat32m2_t src, size_t vl);
vint32m4_t __riscv_vfcvt_rtz_x_f_v_i32m4 (vfloat32m4_t src, size_t vl);
vint32m8_t __riscv_vfcvt_rtz_x_f_v_i32m8 (vfloat32m8_t src, size_t vl);
vint64m1_t __riscv_vfcvt_rtz_x_f_v_i64m1 (vfloat64m1_t src, size_t vl);
vint64m2_t __riscv_vfcvt_rtz_x_f_v_i64m2 (vfloat64m2_t src, size_t vl);
vint64m4_t __riscv_vfcvt_rtz_x_f_v_i64m4 (vfloat64m4_t src, size_t vl);
vint64m8_t __riscv_vfcvt_rtz_x_f_v_i64m8 (vfloat64m8_t src, size_t vl);
vint16mf4_t __riscv_vfcvt_rtz_x_f_v_i16mf4_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vint16mf2_t __riscv_vfcvt_rtz_x_f_v_i16mf2_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vint16m1_t __riscv_vfcvt_rtz_x_f_v_i16m1_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vint16m2_t __riscv_vfcvt_rtz_x_f_v_i16m2_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vint16m4_t __riscv_vfcvt_rtz_x_f_v_i16m4_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vint16m8_t __riscv_vfcvt_rtz_x_f_v_i16m8_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vint32mf2_t __riscv_vfcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vint32m1_t __riscv_vfcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vint32m2_t __riscv_vfcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vint32m4_t __riscv_vfcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vint32m8_t __riscv_vfcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vint64m1_t __riscv_vfcvt_rtz_x_f_v_i64m1_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vint64m2_t __riscv_vfcvt_rtz_x_f_v_i64m2_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vint64m4_t __riscv_vfcvt_rtz_x_f_v_i64m4_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vint64m8_t __riscv_vfcvt_rtz_x_f_v_i64m8_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
23.91. vfcvt.f.xu.v
- Mnemonic
vfcvt.f.xu.v vd, vs2, vm
- Encoding
- Description
-
Convert unsigned integer to float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfcvt_f_xu_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_xu_v_f32mf2 (vuint32mf2_t src, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_xu_v_f32m1 (vuint32m1_t src, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_xu_v_f32m2 (vuint32m2_t src, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_xu_v_f32m4 (vuint32m4_t src, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_xu_v_f32m8 (vuint32m8_t src, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_xu_v_f64m1 (vuint64m1_t src, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_xu_v_f64m2 (vuint64m2_t src, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_xu_v_f64m4 (vuint64m4_t src, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_xu_v_f64m8 (vuint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfcvt_f_xu_v_f16mf4_m (vbool64_t mask, vuint16mf4_t src, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_xu_v_f16mf2_m (vbool32_t mask, vuint16mf2_t src, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_xu_v_f16m1_m (vbool16_t mask, vuint16m1_t src, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_xu_v_f16m2_m (vbool8_t mask, vuint16m2_t src, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_xu_v_f16m4_m (vbool4_t mask, vuint16m4_t src, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_xu_v_f16m8_m (vbool2_t mask, vuint16m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_xu_v_f32mf2_m (vbool64_t mask, vuint32mf2_t src, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_xu_v_f32m1_m (vbool32_t mask, vuint32m1_t src, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_xu_v_f32m2_m (vbool16_t mask, vuint32m2_t src, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_xu_v_f32m4_m (vbool8_t mask, vuint32m4_t src, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_xu_v_f32m8_m (vbool4_t mask, vuint32m8_t src, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_xu_v_f64m1_m (vbool64_t mask, vuint64m1_t src, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_xu_v_f64m2_m (vbool32_t mask, vuint64m2_t src, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_xu_v_f64m4_m (vbool16_t mask, vuint64m4_t src, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_xu_v_f64m8_m (vbool8_t mask, vuint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfcvt_f_xu_v_f16mf4_rm (vuint16mf4_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_xu_v_f16mf2_rm (vuint16mf2_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_xu_v_f16m1_rm (vuint16m1_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_xu_v_f16m2_rm (vuint16m2_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_xu_v_f16m4_rm (vuint16m4_t src, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_xu_v_f16m8_rm (vuint16m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_xu_v_f32mf2_rm (vuint32mf2_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_xu_v_f32m1_rm (vuint32m1_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_xu_v_f32m2_rm (vuint32m2_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_xu_v_f32m4_rm (vuint32m4_t src, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_xu_v_f32m8_rm (vuint32m8_t src, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_xu_v_f64m1_rm (vuint64m1_t src, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_xu_v_f64m2_rm (vuint64m2_t src, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_xu_v_f64m4_rm (vuint64m4_t src, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_xu_v_f64m8_rm (vuint64m8_t src, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfcvt_f_xu_v_f16mf4_rm_m (vbool64_t mask, vuint16mf4_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_xu_v_f16mf2_rm_m (vbool32_t mask, vuint16mf2_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_xu_v_f16m1_rm_m (vbool16_t mask, vuint16m1_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_xu_v_f16m2_rm_m (vbool8_t mask, vuint16m2_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_xu_v_f16m4_rm_m (vbool4_t mask, vuint16m4_t src, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_xu_v_f16m8_rm_m (vbool2_t mask, vuint16m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_xu_v_f32mf2_rm_m (vbool64_t mask, vuint32mf2_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_xu_v_f32m1_rm_m (vbool32_t mask, vuint32m1_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_xu_v_f32m2_rm_m (vbool16_t mask, vuint32m2_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_xu_v_f32m4_rm_m (vbool8_t mask, vuint32m4_t src, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_xu_v_f32m8_rm_m (vbool4_t mask, vuint32m8_t src, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_xu_v_f64m1_rm_m (vbool64_t mask, vuint64m1_t src, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_xu_v_f64m2_rm_m (vbool32_t mask, vuint64m2_t src, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_xu_v_f64m4_rm_m (vbool16_t mask, vuint64m4_t src, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_xu_v_f64m8_rm_m (vbool8_t mask, vuint64m8_t src, unsigned int frm, size_t vl);
23.92. vfcvt.f.x.v
- Mnemonic
vfcvt.f.x.v vd, vs2, vm
- Encoding
- Description
-
Convert signed integer to float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfcvt_f_x_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_x_v_f32mf2 (vint32mf2_t src, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_x_v_f32m1 (vint32m1_t src, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_x_v_f32m2 (vint32m2_t src, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_x_v_f32m4 (vint32m4_t src, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_x_v_f32m8 (vint32m8_t src, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_x_v_f64m1 (vint64m1_t src, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_x_v_f64m2 (vint64m2_t src, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_x_v_f64m4 (vint64m4_t src, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_x_v_f64m8 (vint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfcvt_f_x_v_f16mf4_m (vbool64_t mask, vint16mf4_t src, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_x_v_f16mf2_m (vbool32_t mask, vint16mf2_t src, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_x_v_f16m1_m (vbool16_t mask, vint16m1_t src, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_x_v_f16m2_m (vbool8_t mask, vint16m2_t src, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_x_v_f16m4_m (vbool4_t mask, vint16m4_t src, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_x_v_f16m8_m (vbool2_t mask, vint16m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_x_v_f32mf2_m (vbool64_t mask, vint32mf2_t src, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_x_v_f32m1_m (vbool32_t mask, vint32m1_t src, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_x_v_f32m2_m (vbool16_t mask, vint32m2_t src, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_x_v_f32m4_m (vbool8_t mask, vint32m4_t src, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_x_v_f32m8_m (vbool4_t mask, vint32m8_t src, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_x_v_f64m1_m (vbool64_t mask, vint64m1_t src, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_x_v_f64m2_m (vbool32_t mask, vint64m2_t src, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_x_v_f64m4_m (vbool16_t mask, vint64m4_t src, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_x_v_f64m8_m (vbool8_t mask, vint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfcvt_f_x_v_f16mf4_rm (vint16mf4_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_x_v_f16mf2_rm (vint16mf2_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_x_v_f16m1_rm (vint16m1_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_x_v_f16m2_rm (vint16m2_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_x_v_f16m4_rm (vint16m4_t src, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_x_v_f16m8_rm (vint16m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_x_v_f32mf2_rm (vint32mf2_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_x_v_f32m1_rm (vint32m1_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_x_v_f32m2_rm (vint32m2_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_x_v_f32m4_rm (vint32m4_t src, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_x_v_f32m8_rm (vint32m8_t src, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_x_v_f64m1_rm (vint64m1_t src, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_x_v_f64m2_rm (vint64m2_t src, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_x_v_f64m4_rm (vint64m4_t src, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_x_v_f64m8_rm (vint64m8_t src, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfcvt_f_x_v_f16mf4_rm_m (vbool64_t mask, vint16mf4_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfcvt_f_x_v_f16mf2_rm_m (vbool32_t mask, vint16mf2_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfcvt_f_x_v_f16m1_rm_m (vbool16_t mask, vint16m1_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfcvt_f_x_v_f16m2_rm_m (vbool8_t mask, vint16m2_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfcvt_f_x_v_f16m4_rm_m (vbool4_t mask, vint16m4_t src, unsigned int frm, size_t vl);
vfloat16m8_t __riscv_vfcvt_f_x_v_f16m8_rm_m (vbool2_t mask, vint16m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfcvt_f_x_v_f32mf2_rm_m (vbool64_t mask, vint32mf2_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfcvt_f_x_v_f32m1_rm_m (vbool32_t mask, vint32m1_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfcvt_f_x_v_f32m2_rm_m (vbool16_t mask, vint32m2_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfcvt_f_x_v_f32m4_rm_m (vbool8_t mask, vint32m4_t src, unsigned int frm, size_t vl);
vfloat32m8_t __riscv_vfcvt_f_x_v_f32m8_rm_m (vbool4_t mask, vint32m8_t src, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfcvt_f_x_v_f64m1_rm_m (vbool64_t mask, vint64m1_t src, unsigned int frm, size_t vl);
vfloat64m2_t __riscv_vfcvt_f_x_v_f64m2_rm_m (vbool32_t mask, vint64m2_t src, unsigned int frm, size_t vl);
vfloat64m4_t __riscv_vfcvt_f_x_v_f64m4_rm_m (vbool16_t mask, vint64m4_t src, unsigned int frm, size_t vl);
vfloat64m8_t __riscv_vfcvt_f_x_v_f64m8_rm_m (vbool8_t mask, vint64m8_t src, unsigned int frm, size_t vl);
23.93. Widening Floating-Point/Integer Type-Convert Instructions
- Intrinsic Functions
Details
=== Vector Widening Integer Add/Subtract Intrinsics
=== Vector Integer Widening Intrinsics
=== Vector Widening Integer Multiply Intrinsics
=== Vector Widening Integer Multiply-Add Intrinsics
=== Vector Widening Floating-Point Add/Subtract Intrinsics
=== Vector Widening Floating-Point Multiply Intrinsics
=== Vector Widening Floating-Point Fused Multiply-Add Intrinsics
=== Widening Floating-Point/Integer Type-Convert Intrinsics
=== Vector Widening Integer Reduction Intrinsics
=== Vector Widening Floating-Point Reduction Intrinsics
23.94. vfwcvt.xu.f.v
- Mnemonic
vfwcvt.xu.f.v vd, vs2, vm
- Encoding
- Description
-
Convert float to double-width unsigned integer.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwcvt_xu_f_v.h
- Intrinsic Functions
Details
vuint32mf2_t __riscv_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl);
vuint32m1_t __riscv_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl);
vuint32m2_t __riscv_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl);
vuint32m4_t __riscv_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl);
vuint32m8_t __riscv_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl);
vuint64m1_t __riscv_vfwcvt_xu_f_v_u64m1 (vfloat32mf2_t src, size_t vl);
vuint64m2_t __riscv_vfwcvt_xu_f_v_u64m2 (vfloat32m1_t src, size_t vl);
vuint64m4_t __riscv_vfwcvt_xu_f_v_u64m4 (vfloat32m2_t src, size_t vl);
vuint64m8_t __riscv_vfwcvt_xu_f_v_u64m8 (vfloat32m4_t src, size_t vl);
vuint32mf2_t __riscv_vfwcvt_xu_f_v_u32mf2_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vuint32m1_t __riscv_vfwcvt_xu_f_v_u32m1_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vuint32m2_t __riscv_vfwcvt_xu_f_v_u32m2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vuint32m4_t __riscv_vfwcvt_xu_f_v_u32m4_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vuint32m8_t __riscv_vfwcvt_xu_f_v_u32m8_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vuint64m1_t __riscv_vfwcvt_xu_f_v_u64m1_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vuint64m2_t __riscv_vfwcvt_xu_f_v_u64m2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vuint64m4_t __riscv_vfwcvt_xu_f_v_u64m4_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vuint64m8_t __riscv_vfwcvt_xu_f_v_u64m8_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vuint32mf2_t __riscv_vfwcvt_xu_f_v_u32mf2_rm (vfloat16mf4_t src, unsigned int frm, size_t vl);
vuint32m1_t __riscv_vfwcvt_xu_f_v_u32m1_rm (vfloat16mf2_t src, unsigned int frm, size_t vl);
vuint32m2_t __riscv_vfwcvt_xu_f_v_u32m2_rm (vfloat16m1_t src, unsigned int frm, size_t vl);
vuint32m4_t __riscv_vfwcvt_xu_f_v_u32m4_rm (vfloat16m2_t src, unsigned int frm, size_t vl);
vuint32m8_t __riscv_vfwcvt_xu_f_v_u32m8_rm (vfloat16m4_t src, unsigned int frm, size_t vl);
vuint64m1_t __riscv_vfwcvt_xu_f_v_u64m1_rm (vfloat32mf2_t src, unsigned int frm, size_t vl);
vuint64m2_t __riscv_vfwcvt_xu_f_v_u64m2_rm (vfloat32m1_t src, unsigned int frm, size_t vl);
vuint64m4_t __riscv_vfwcvt_xu_f_v_u64m4_rm (vfloat32m2_t src, unsigned int frm, size_t vl);
vuint64m8_t __riscv_vfwcvt_xu_f_v_u64m8_rm (vfloat32m4_t src, unsigned int frm, size_t vl);
vuint32mf2_t __riscv_vfwcvt_xu_f_v_u32mf2_rm_m (vbool64_t mask, vfloat16mf4_t src, unsigned int frm, size_t vl);
vuint32m1_t __riscv_vfwcvt_xu_f_v_u32m1_rm_m (vbool32_t mask, vfloat16mf2_t src, unsigned int frm, size_t vl);
vuint32m2_t __riscv_vfwcvt_xu_f_v_u32m2_rm_m (vbool16_t mask, vfloat16m1_t src, unsigned int frm, size_t vl);
vuint32m4_t __riscv_vfwcvt_xu_f_v_u32m4_rm_m (vbool8_t mask, vfloat16m2_t src, unsigned int frm, size_t vl);
vuint32m8_t __riscv_vfwcvt_xu_f_v_u32m8_rm_m (vbool4_t mask, vfloat16m4_t src, unsigned int frm, size_t vl);
vuint64m1_t __riscv_vfwcvt_xu_f_v_u64m1_rm_m (vbool64_t mask, vfloat32mf2_t src, unsigned int frm, size_t vl);
vuint64m2_t __riscv_vfwcvt_xu_f_v_u64m2_rm_m (vbool32_t mask, vfloat32m1_t src, unsigned int frm, size_t vl);
vuint64m4_t __riscv_vfwcvt_xu_f_v_u64m4_rm_m (vbool16_t mask, vfloat32m2_t src, unsigned int frm, size_t vl);
vuint64m8_t __riscv_vfwcvt_xu_f_v_u64m8_rm_m (vbool8_t mask, vfloat32m4_t src, unsigned int frm, size_t vl);
23.95. vfwcvt.x.f.v
- Mnemonic
vfwcvt.x.f.v vd, vs2, vm
- Encoding
- Description
-
Convert single-width float to double-width signed integer.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwcvt_x_f_v.h
- Intrinsic Functions
Details
vint32mf2_t __riscv_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl);
vint32m1_t __riscv_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl);
vint32m2_t __riscv_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl);
vint32m4_t __riscv_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl);
vint32m8_t __riscv_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl);
vint64m1_t __riscv_vfwcvt_x_f_v_i64m1 (vfloat32mf2_t src, size_t vl);
vint64m2_t __riscv_vfwcvt_x_f_v_i64m2 (vfloat32m1_t src, size_t vl);
vint64m4_t __riscv_vfwcvt_x_f_v_i64m4 (vfloat32m2_t src, size_t vl);
vint64m8_t __riscv_vfwcvt_x_f_v_i64m8 (vfloat32m4_t src, size_t vl);
vint32mf2_t __riscv_vfwcvt_x_f_v_i32mf2_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vint32m1_t __riscv_vfwcvt_x_f_v_i32m1_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vint32m2_t __riscv_vfwcvt_x_f_v_i32m2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vint32m4_t __riscv_vfwcvt_x_f_v_i32m4_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vint32m8_t __riscv_vfwcvt_x_f_v_i32m8_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vint64m1_t __riscv_vfwcvt_x_f_v_i64m1_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vint64m2_t __riscv_vfwcvt_x_f_v_i64m2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vint64m4_t __riscv_vfwcvt_x_f_v_i64m4_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vint64m8_t __riscv_vfwcvt_x_f_v_i64m8_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vint32mf2_t __riscv_vfwcvt_x_f_v_i32mf2_rm (vfloat16mf4_t src, unsigned int frm, size_t vl);
vint32m1_t __riscv_vfwcvt_x_f_v_i32m1_rm (vfloat16mf2_t src, unsigned int frm, size_t vl);
vint32m2_t __riscv_vfwcvt_x_f_v_i32m2_rm (vfloat16m1_t src, unsigned int frm, size_t vl);
vint32m4_t __riscv_vfwcvt_x_f_v_i32m4_rm (vfloat16m2_t src, unsigned int frm, size_t vl);
vint32m8_t __riscv_vfwcvt_x_f_v_i32m8_rm (vfloat16m4_t src, unsigned int frm, size_t vl);
vint64m1_t __riscv_vfwcvt_x_f_v_i64m1_rm (vfloat32mf2_t src, unsigned int frm, size_t vl);
vint64m2_t __riscv_vfwcvt_x_f_v_i64m2_rm (vfloat32m1_t src, unsigned int frm, size_t vl);
vint64m4_t __riscv_vfwcvt_x_f_v_i64m4_rm (vfloat32m2_t src, unsigned int frm, size_t vl);
vint64m8_t __riscv_vfwcvt_x_f_v_i64m8_rm (vfloat32m4_t src, unsigned int frm, size_t vl);
vint32mf2_t __riscv_vfwcvt_x_f_v_i32mf2_rm_m (vbool64_t mask, vfloat16mf4_t src, unsigned int frm, size_t vl);
vint32m1_t __riscv_vfwcvt_x_f_v_i32m1_rm_m (vbool32_t mask, vfloat16mf2_t src, unsigned int frm, size_t vl);
vint32m2_t __riscv_vfwcvt_x_f_v_i32m2_rm_m (vbool16_t mask, vfloat16m1_t src, unsigned int frm, size_t vl);
vint32m4_t __riscv_vfwcvt_x_f_v_i32m4_rm_m (vbool8_t mask, vfloat16m2_t src, unsigned int frm, size_t vl);
vint32m8_t __riscv_vfwcvt_x_f_v_i32m8_rm_m (vbool4_t mask, vfloat16m4_t src, unsigned int frm, size_t vl);
vint64m1_t __riscv_vfwcvt_x_f_v_i64m1_rm_m (vbool64_t mask, vfloat32mf2_t src, unsigned int frm, size_t vl);
vint64m2_t __riscv_vfwcvt_x_f_v_i64m2_rm_m (vbool32_t mask, vfloat32m1_t src, unsigned int frm, size_t vl);
vint64m4_t __riscv_vfwcvt_x_f_v_i64m4_rm_m (vbool16_t mask, vfloat32m2_t src, unsigned int frm, size_t vl);
vint64m8_t __riscv_vfwcvt_x_f_v_i64m8_rm_m (vbool8_t mask, vfloat32m4_t src, unsigned int frm, size_t vl);
23.96. vfwcvt.rtz.xu.f.v
- Mnemonic
vfwcvt.rtz.xu.f.v vd, vs2, vm
- Encoding
- Description
-
Convert single-width float to double-width unsigned integer, truncating toward zero.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwcvt_rtz_xu_f_v.h
- Intrinsic Functions
Details
vuint32mf2_t __riscv_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl);
vuint32m1_t __riscv_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl);
vuint32m2_t __riscv_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl);
vuint32m4_t __riscv_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl);
vuint32m8_t __riscv_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl);
vuint64m1_t __riscv_vfwcvt_rtz_xu_f_v_u64m1 (vfloat32mf2_t src, size_t vl);
vuint64m2_t __riscv_vfwcvt_rtz_xu_f_v_u64m2 (vfloat32m1_t src, size_t vl);
vuint64m4_t __riscv_vfwcvt_rtz_xu_f_v_u64m4 (vfloat32m2_t src, size_t vl);
vuint64m8_t __riscv_vfwcvt_rtz_xu_f_v_u64m8 (vfloat32m4_t src, size_t vl);
vuint32mf2_t __riscv_vfwcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vuint32m1_t __riscv_vfwcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vuint32m2_t __riscv_vfwcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vuint32m4_t __riscv_vfwcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vuint32m8_t __riscv_vfwcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vuint64m1_t __riscv_vfwcvt_rtz_xu_f_v_u64m1_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vuint64m2_t __riscv_vfwcvt_rtz_xu_f_v_u64m2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vuint64m4_t __riscv_vfwcvt_rtz_xu_f_v_u64m4_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vuint64m8_t __riscv_vfwcvt_rtz_xu_f_v_u64m8_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
23.97. vfwcvt.rtz.x.f.v
- Mnemonic
vfwcvt.rtz.x.f.v vd, vs2, vm
- Encoding
- Description
-
Convert single-width float to double-width signed integer, truncating toward zero.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwcvt_rtz_x_f_v.h
- Intrinsic Functions
Details
vint32mf2_t __riscv_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl);
vint32m1_t __riscv_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl);
vint32m2_t __riscv_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl);
vint32m4_t __riscv_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl);
vint32m8_t __riscv_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl);
vint64m1_t __riscv_vfwcvt_rtz_x_f_v_i64m1 (vfloat32mf2_t src, size_t vl);
vint64m2_t __riscv_vfwcvt_rtz_x_f_v_i64m2 (vfloat32m1_t src, size_t vl);
vint64m4_t __riscv_vfwcvt_rtz_x_f_v_i64m4 (vfloat32m2_t src, size_t vl);
vint64m8_t __riscv_vfwcvt_rtz_x_f_v_i64m8 (vfloat32m4_t src, size_t vl);
vint32mf2_t __riscv_vfwcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vint32m1_t __riscv_vfwcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vint32m2_t __riscv_vfwcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vint32m4_t __riscv_vfwcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vint32m8_t __riscv_vfwcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vint64m1_t __riscv_vfwcvt_rtz_x_f_v_i64m1_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vint64m2_t __riscv_vfwcvt_rtz_x_f_v_i64m2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vint64m4_t __riscv_vfwcvt_rtz_x_f_v_i64m4_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vint64m8_t __riscv_vfwcvt_rtz_x_f_v_i64m8_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
23.98. vfwcvt.f.xu.v
- Mnemonic
vfwcvt.f.xu.v vd, vs2, vm
- Encoding
- Description
-
Convert single-width unsigned integer to double-width float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwcvt_f_xu_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl);
vfloat16mf2_t __riscv_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl);
vfloat16m1_t __riscv_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl);
vfloat16m2_t __riscv_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl);
vfloat16m4_t __riscv_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl);
vfloat16m8_t __riscv_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl);
vfloat32mf2_t __riscv_vfwcvt_f_xu_v_f32mf2 (vuint16mf4_t src, size_t vl);
vfloat32m1_t __riscv_vfwcvt_f_xu_v_f32m1 (vuint16mf2_t src, size_t vl);
vfloat32m2_t __riscv_vfwcvt_f_xu_v_f32m2 (vuint16m1_t src, size_t vl);
vfloat32m4_t __riscv_vfwcvt_f_xu_v_f32m4 (vuint16m2_t src, size_t vl);
vfloat32m8_t __riscv_vfwcvt_f_xu_v_f32m8 (vuint16m4_t src, size_t vl);
vfloat64m1_t __riscv_vfwcvt_f_xu_v_f64m1 (vuint32mf2_t src, size_t vl);
vfloat64m2_t __riscv_vfwcvt_f_xu_v_f64m2 (vuint32m1_t src, size_t vl);
vfloat64m4_t __riscv_vfwcvt_f_xu_v_f64m4 (vuint32m2_t src, size_t vl);
vfloat64m8_t __riscv_vfwcvt_f_xu_v_f64m8 (vuint32m4_t src, size_t vl);
vfloat16mf4_t __riscv_vfwcvt_f_xu_v_f16mf4_m (vbool64_t mask, vuint8mf8_t src, size_t vl);
vfloat16mf2_t __riscv_vfwcvt_f_xu_v_f16mf2_m (vbool32_t mask, vuint8mf4_t src, size_t vl);
vfloat16m1_t __riscv_vfwcvt_f_xu_v_f16m1_m (vbool16_t mask, vuint8mf2_t src, size_t vl);
vfloat16m2_t __riscv_vfwcvt_f_xu_v_f16m2_m (vbool8_t mask, vuint8m1_t src, size_t vl);
vfloat16m4_t __riscv_vfwcvt_f_xu_v_f16m4_m (vbool4_t mask, vuint8m2_t src, size_t vl);
vfloat16m8_t __riscv_vfwcvt_f_xu_v_f16m8_m (vbool2_t mask, vuint8m4_t src, size_t vl);
vfloat32mf2_t __riscv_vfwcvt_f_xu_v_f32mf2_m (vbool64_t mask, vuint16mf4_t src, size_t vl);
vfloat32m1_t __riscv_vfwcvt_f_xu_v_f32m1_m (vbool32_t mask, vuint16mf2_t src, size_t vl);
vfloat32m2_t __riscv_vfwcvt_f_xu_v_f32m2_m (vbool16_t mask, vuint16m1_t src, size_t vl);
vfloat32m4_t __riscv_vfwcvt_f_xu_v_f32m4_m (vbool8_t mask, vuint16m2_t src, size_t vl);
vfloat32m8_t __riscv_vfwcvt_f_xu_v_f32m8_m (vbool4_t mask, vuint16m4_t src, size_t vl);
vfloat64m1_t __riscv_vfwcvt_f_xu_v_f64m1_m (vbool64_t mask, vuint32mf2_t src, size_t vl);
vfloat64m2_t __riscv_vfwcvt_f_xu_v_f64m2_m (vbool32_t mask, vuint32m1_t src, size_t vl);
vfloat64m4_t __riscv_vfwcvt_f_xu_v_f64m4_m (vbool16_t mask, vuint32m2_t src, size_t vl);
vfloat64m8_t __riscv_vfwcvt_f_xu_v_f64m8_m (vbool8_t mask, vuint32m4_t src, size_t vl);
23.99. vfwcvt.f.x.v
- Mnemonic
vfwcvt.f.x.v vd, vs2, vm
- Encoding
- Description
-
Convert single-width signed integer to double-width float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwcvt_f_x_v.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl);
vfloat16mf2_t __riscv_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl);
vfloat16m1_t __riscv_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl);
vfloat16m2_t __riscv_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl);
vfloat16m4_t __riscv_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl);
vfloat16m8_t __riscv_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl);
vfloat32mf2_t __riscv_vfwcvt_f_x_v_f32mf2 (vint16mf4_t src, size_t vl);
vfloat32m1_t __riscv_vfwcvt_f_x_v_f32m1 (vint16mf2_t src, size_t vl);
vfloat32m2_t __riscv_vfwcvt_f_x_v_f32m2 (vint16m1_t src, size_t vl);
vfloat32m4_t __riscv_vfwcvt_f_x_v_f32m4 (vint16m2_t src, size_t vl);
vfloat32m8_t __riscv_vfwcvt_f_x_v_f32m8 (vint16m4_t src, size_t vl);
vfloat64m1_t __riscv_vfwcvt_f_x_v_f64m1 (vint32mf2_t src, size_t vl);
vfloat64m2_t __riscv_vfwcvt_f_x_v_f64m2 (vint32m1_t src, size_t vl);
vfloat64m4_t __riscv_vfwcvt_f_x_v_f64m4 (vint32m2_t src, size_t vl);
vfloat64m8_t __riscv_vfwcvt_f_x_v_f64m8 (vint32m4_t src, size_t vl);
vfloat16mf4_t __riscv_vfwcvt_f_x_v_f16mf4_m (vbool64_t mask, vint8mf8_t src, size_t vl);
vfloat16mf2_t __riscv_vfwcvt_f_x_v_f16mf2_m (vbool32_t mask, vint8mf4_t src, size_t vl);
vfloat16m1_t __riscv_vfwcvt_f_x_v_f16m1_m (vbool16_t mask, vint8mf2_t src, size_t vl);
vfloat16m2_t __riscv_vfwcvt_f_x_v_f16m2_m (vbool8_t mask, vint8m1_t src, size_t vl);
vfloat16m4_t __riscv_vfwcvt_f_x_v_f16m4_m (vbool4_t mask, vint8m2_t src, size_t vl);
vfloat16m8_t __riscv_vfwcvt_f_x_v_f16m8_m (vbool2_t mask, vint8m4_t src, size_t vl);
vfloat32mf2_t __riscv_vfwcvt_f_x_v_f32mf2_m (vbool64_t mask, vint16mf4_t src, size_t vl);
vfloat32m1_t __riscv_vfwcvt_f_x_v_f32m1_m (vbool32_t mask, vint16mf2_t src, size_t vl);
vfloat32m2_t __riscv_vfwcvt_f_x_v_f32m2_m (vbool16_t mask, vint16m1_t src, size_t vl);
vfloat32m4_t __riscv_vfwcvt_f_x_v_f32m4_m (vbool8_t mask, vint16m2_t src, size_t vl);
vfloat32m8_t __riscv_vfwcvt_f_x_v_f32m8_m (vbool4_t mask, vint16m4_t src, size_t vl);
vfloat64m1_t __riscv_vfwcvt_f_x_v_f64m1_m (vbool64_t mask, vint32mf2_t src, size_t vl);
vfloat64m2_t __riscv_vfwcvt_f_x_v_f64m2_m (vbool32_t mask, vint32m1_t src, size_t vl);
vfloat64m4_t __riscv_vfwcvt_f_x_v_f64m4_m (vbool16_t mask, vint32m2_t src, size_t vl);
vfloat64m8_t __riscv_vfwcvt_f_x_v_f64m8_m (vbool8_t mask, vint32m4_t src, size_t vl);
23.100. vfwcvt.f.f.v
- Mnemonic
vfwcvt.f.f.v vd, vs2, vm
- Encoding
- Description
-
Convert single-width float to double-width float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwcvt_f_f_v.h
- Intrinsic Functions
Details
vfloat32mf2_t __riscv_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl);
vfloat32m1_t __riscv_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl);
vfloat32m2_t __riscv_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl);
vfloat32m4_t __riscv_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl);
vfloat32m8_t __riscv_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl);
vfloat64m1_t __riscv_vfwcvt_f_f_v_f64m1 (vfloat32mf2_t src, size_t vl);
vfloat64m2_t __riscv_vfwcvt_f_f_v_f64m2 (vfloat32m1_t src, size_t vl);
vfloat64m4_t __riscv_vfwcvt_f_f_v_f64m4 (vfloat32m2_t src, size_t vl);
vfloat64m8_t __riscv_vfwcvt_f_f_v_f64m8 (vfloat32m4_t src, size_t vl);
vfloat32mf2_t __riscv_vfwcvt_f_f_v_f32mf2_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vfloat32m1_t __riscv_vfwcvt_f_f_v_f32m1_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vfloat32m2_t __riscv_vfwcvt_f_f_v_f32m2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vfloat32m4_t __riscv_vfwcvt_f_f_v_f32m4_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vfloat32m8_t __riscv_vfwcvt_f_f_v_f32m8_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vfloat64m1_t __riscv_vfwcvt_f_f_v_f64m1_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vfloat64m2_t __riscv_vfwcvt_f_f_v_f64m2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vfloat64m4_t __riscv_vfwcvt_f_f_v_f64m4_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vfloat64m8_t __riscv_vfwcvt_f_f_v_f64m8_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
23.101. Narrowing Floating-Point/Integer Type-Convert Instructions
- Intrinsic Functions
Details
- Vector Narrowing Integer Right Shift Intrinsics
- Vector Integer Narrowing Intrinsics
- Vector Narrowing Fixed-Point Clip Intrinsics
- Narrowing Floating-Point/Integer Type-Convert Intrinsics
23.102. vfncvt.xu.f.w
- Mnemonic
vfncvt.xu.f.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width float to unsigned integer.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_xu_f_w.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl);
vuint8mf4_t __riscv_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl);
vuint8mf2_t __riscv_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl);
vuint8m1_t __riscv_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl);
vuint8m2_t __riscv_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl);
vuint8m4_t __riscv_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl);
vuint16mf4_t __riscv_vfncvt_xu_f_w_u16mf4 (vfloat32mf2_t src, size_t vl);
vuint16mf2_t __riscv_vfncvt_xu_f_w_u16mf2 (vfloat32m1_t src, size_t vl);
vuint16m1_t __riscv_vfncvt_xu_f_w_u16m1 (vfloat32m2_t src, size_t vl);
vuint16m2_t __riscv_vfncvt_xu_f_w_u16m2 (vfloat32m4_t src, size_t vl);
vuint16m4_t __riscv_vfncvt_xu_f_w_u16m4 (vfloat32m8_t src, size_t vl);
vuint32mf2_t __riscv_vfncvt_xu_f_w_u32mf2 (vfloat64m1_t src, size_t vl);
vuint32m1_t __riscv_vfncvt_xu_f_w_u32m1 (vfloat64m2_t src, size_t vl);
vuint32m2_t __riscv_vfncvt_xu_f_w_u32m2 (vfloat64m4_t src, size_t vl);
vuint32m4_t __riscv_vfncvt_xu_f_w_u32m4 (vfloat64m8_t src, size_t vl);
vuint8mf8_t __riscv_vfncvt_xu_f_w_u8mf8_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vuint8mf4_t __riscv_vfncvt_xu_f_w_u8mf4_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vuint8mf2_t __riscv_vfncvt_xu_f_w_u8mf2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vuint8m1_t __riscv_vfncvt_xu_f_w_u8m1_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vuint8m2_t __riscv_vfncvt_xu_f_w_u8m2_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vuint8m4_t __riscv_vfncvt_xu_f_w_u8m4_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vuint16mf4_t __riscv_vfncvt_xu_f_w_u16mf4_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vuint16mf2_t __riscv_vfncvt_xu_f_w_u16mf2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vuint16m1_t __riscv_vfncvt_xu_f_w_u16m1_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vuint16m2_t __riscv_vfncvt_xu_f_w_u16m2_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vuint16m4_t __riscv_vfncvt_xu_f_w_u16m4_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vuint32mf2_t __riscv_vfncvt_xu_f_w_u32mf2_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vuint32m1_t __riscv_vfncvt_xu_f_w_u32m1_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vuint32m2_t __riscv_vfncvt_xu_f_w_u32m2_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vuint32m4_t __riscv_vfncvt_xu_f_w_u32m4_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
vuint8mf8_t __riscv_vfncvt_xu_f_w_u8mf8_rm (vfloat16mf4_t src, unsigned int frm, size_t vl);
vuint8mf4_t __riscv_vfncvt_xu_f_w_u8mf4_rm (vfloat16mf2_t src, unsigned int frm, size_t vl);
vuint8mf2_t __riscv_vfncvt_xu_f_w_u8mf2_rm (vfloat16m1_t src, unsigned int frm, size_t vl);
vuint8m1_t __riscv_vfncvt_xu_f_w_u8m1_rm (vfloat16m2_t src, unsigned int frm, size_t vl);
vuint8m2_t __riscv_vfncvt_xu_f_w_u8m2_rm (vfloat16m4_t src, unsigned int frm, size_t vl);
vuint8m4_t __riscv_vfncvt_xu_f_w_u8m4_rm (vfloat16m8_t src, unsigned int frm, size_t vl);
vuint16mf4_t __riscv_vfncvt_xu_f_w_u16mf4_rm (vfloat32mf2_t src, unsigned int frm, size_t vl);
vuint16mf2_t __riscv_vfncvt_xu_f_w_u16mf2_rm (vfloat32m1_t src, unsigned int frm, size_t vl);
vuint16m1_t __riscv_vfncvt_xu_f_w_u16m1_rm (vfloat32m2_t src, unsigned int frm, size_t vl);
vuint16m2_t __riscv_vfncvt_xu_f_w_u16m2_rm (vfloat32m4_t src, unsigned int frm, size_t vl);
vuint16m4_t __riscv_vfncvt_xu_f_w_u16m4_rm (vfloat32m8_t src, unsigned int frm, size_t vl);
vuint32mf2_t __riscv_vfncvt_xu_f_w_u32mf2_rm (vfloat64m1_t src, unsigned int frm, size_t vl);
vuint32m1_t __riscv_vfncvt_xu_f_w_u32m1_rm (vfloat64m2_t src, unsigned int frm, size_t vl);
vuint32m2_t __riscv_vfncvt_xu_f_w_u32m2_rm (vfloat64m4_t src, unsigned int frm, size_t vl);
vuint32m4_t __riscv_vfncvt_xu_f_w_u32m4_rm (vfloat64m8_t src, unsigned int frm, size_t vl);
vuint8mf8_t __riscv_vfncvt_xu_f_w_u8mf8_rm_m (vbool64_t mask, vfloat16mf4_t src, unsigned int frm, size_t vl);
vuint8mf4_t __riscv_vfncvt_xu_f_w_u8mf4_rm_m (vbool32_t mask, vfloat16mf2_t src, unsigned int frm, size_t vl);
vuint8mf2_t __riscv_vfncvt_xu_f_w_u8mf2_rm_m (vbool16_t mask, vfloat16m1_t src, unsigned int frm, size_t vl);
vuint8m1_t __riscv_vfncvt_xu_f_w_u8m1_rm_m (vbool8_t mask, vfloat16m2_t src, unsigned int frm, size_t vl);
vuint8m2_t __riscv_vfncvt_xu_f_w_u8m2_rm_m (vbool4_t mask, vfloat16m4_t src, unsigned int frm, size_t vl);
vuint8m4_t __riscv_vfncvt_xu_f_w_u8m4_rm_m (vbool2_t mask, vfloat16m8_t src, unsigned int frm, size_t vl);
vuint16mf4_t __riscv_vfncvt_xu_f_w_u16mf4_rm_m (vbool64_t mask, vfloat32mf2_t src, unsigned int frm, size_t vl);
vuint16mf2_t __riscv_vfncvt_xu_f_w_u16mf2_rm_m (vbool32_t mask, vfloat32m1_t src, unsigned int frm, size_t vl);
vuint16m1_t __riscv_vfncvt_xu_f_w_u16m1_rm_m (vbool16_t mask, vfloat32m2_t src, unsigned int frm, size_t vl);
vuint16m2_t __riscv_vfncvt_xu_f_w_u16m2_rm_m (vbool8_t mask, vfloat32m4_t src, unsigned int frm, size_t vl);
vuint16m4_t __riscv_vfncvt_xu_f_w_u16m4_rm_m (vbool4_t mask, vfloat32m8_t src, unsigned int frm, size_t vl);
vuint32mf2_t __riscv_vfncvt_xu_f_w_u32mf2_rm_m (vbool64_t mask, vfloat64m1_t src, unsigned int frm, size_t vl);
vuint32m1_t __riscv_vfncvt_xu_f_w_u32m1_rm_m (vbool32_t mask, vfloat64m2_t src, unsigned int frm, size_t vl);
vuint32m2_t __riscv_vfncvt_xu_f_w_u32m2_rm_m (vbool16_t mask, vfloat64m4_t src, unsigned int frm, size_t vl);
vuint32m4_t __riscv_vfncvt_xu_f_w_u32m4_rm_m (vbool8_t mask, vfloat64m8_t src, unsigned int frm, size_t vl);
23.103. vfncvt.x.f.w
- Mnemonic
vfncvt.x.f.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width float to signed integer.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_x_f_w.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl);
vint8mf4_t __riscv_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl);
vint8mf2_t __riscv_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl);
vint8m1_t __riscv_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl);
vint8m2_t __riscv_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl);
vint8m4_t __riscv_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl);
vint16mf4_t __riscv_vfncvt_x_f_w_i16mf4 (vfloat32mf2_t src, size_t vl);
vint16mf2_t __riscv_vfncvt_x_f_w_i16mf2 (vfloat32m1_t src, size_t vl);
vint16m1_t __riscv_vfncvt_x_f_w_i16m1 (vfloat32m2_t src, size_t vl);
vint16m2_t __riscv_vfncvt_x_f_w_i16m2 (vfloat32m4_t src, size_t vl);
vint16m4_t __riscv_vfncvt_x_f_w_i16m4 (vfloat32m8_t src, size_t vl);
vint32mf2_t __riscv_vfncvt_x_f_w_i32mf2 (vfloat64m1_t src, size_t vl);
vint32m1_t __riscv_vfncvt_x_f_w_i32m1 (vfloat64m2_t src, size_t vl);
vint32m2_t __riscv_vfncvt_x_f_w_i32m2 (vfloat64m4_t src, size_t vl);
vint32m4_t __riscv_vfncvt_x_f_w_i32m4 (vfloat64m8_t src, size_t vl);
vint8mf8_t __riscv_vfncvt_x_f_w_i8mf8_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vint8mf4_t __riscv_vfncvt_x_f_w_i8mf4_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vint8mf2_t __riscv_vfncvt_x_f_w_i8mf2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vint8m1_t __riscv_vfncvt_x_f_w_i8m1_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vint8m2_t __riscv_vfncvt_x_f_w_i8m2_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vint8m4_t __riscv_vfncvt_x_f_w_i8m4_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vint16mf4_t __riscv_vfncvt_x_f_w_i16mf4_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vint16mf2_t __riscv_vfncvt_x_f_w_i16mf2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vint16m1_t __riscv_vfncvt_x_f_w_i16m1_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vint16m2_t __riscv_vfncvt_x_f_w_i16m2_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vint16m4_t __riscv_vfncvt_x_f_w_i16m4_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vint32mf2_t __riscv_vfncvt_x_f_w_i32mf2_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vint32m1_t __riscv_vfncvt_x_f_w_i32m1_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vint32m2_t __riscv_vfncvt_x_f_w_i32m2_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vint32m4_t __riscv_vfncvt_x_f_w_i32m4_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
vint8mf8_t __riscv_vfncvt_x_f_w_i8mf8_rm (vfloat16mf4_t src, unsigned int frm, size_t vl);
vint8mf4_t __riscv_vfncvt_x_f_w_i8mf4_rm (vfloat16mf2_t src, unsigned int frm, size_t vl);
vint8mf2_t __riscv_vfncvt_x_f_w_i8mf2_rm (vfloat16m1_t src, unsigned int frm, size_t vl);
vint8m1_t __riscv_vfncvt_x_f_w_i8m1_rm (vfloat16m2_t src, unsigned int frm, size_t vl);
vint8m2_t __riscv_vfncvt_x_f_w_i8m2_rm (vfloat16m4_t src, unsigned int frm, size_t vl);
vint8m4_t __riscv_vfncvt_x_f_w_i8m4_rm (vfloat16m8_t src, unsigned int frm, size_t vl);
vint16mf4_t __riscv_vfncvt_x_f_w_i16mf4_rm (vfloat32mf2_t src, unsigned int frm, size_t vl);
vint16mf2_t __riscv_vfncvt_x_f_w_i16mf2_rm (vfloat32m1_t src, unsigned int frm, size_t vl);
vint16m1_t __riscv_vfncvt_x_f_w_i16m1_rm (vfloat32m2_t src, unsigned int frm, size_t vl);
vint16m2_t __riscv_vfncvt_x_f_w_i16m2_rm (vfloat32m4_t src, unsigned int frm, size_t vl);
vint16m4_t __riscv_vfncvt_x_f_w_i16m4_rm (vfloat32m8_t src, unsigned int frm, size_t vl);
vint32mf2_t __riscv_vfncvt_x_f_w_i32mf2_rm (vfloat64m1_t src, unsigned int frm, size_t vl);
vint32m1_t __riscv_vfncvt_x_f_w_i32m1_rm (vfloat64m2_t src, unsigned int frm, size_t vl);
vint32m2_t __riscv_vfncvt_x_f_w_i32m2_rm (vfloat64m4_t src, unsigned int frm, size_t vl);
vint32m4_t __riscv_vfncvt_x_f_w_i32m4_rm (vfloat64m8_t src, unsigned int frm, size_t vl);
vint8mf8_t __riscv_vfncvt_x_f_w_i8mf8_rm_m (vbool64_t mask, vfloat16mf4_t src, unsigned int frm, size_t vl);
vint8mf4_t __riscv_vfncvt_x_f_w_i8mf4_rm_m (vbool32_t mask, vfloat16mf2_t src, unsigned int frm, size_t vl);
vint8mf2_t __riscv_vfncvt_x_f_w_i8mf2_rm_m (vbool16_t mask, vfloat16m1_t src, unsigned int frm, size_t vl);
vint8m1_t __riscv_vfncvt_x_f_w_i8m1_rm_m (vbool8_t mask, vfloat16m2_t src, unsigned int frm, size_t vl);
vint8m2_t __riscv_vfncvt_x_f_w_i8m2_rm_m (vbool4_t mask, vfloat16m4_t src, unsigned int frm, size_t vl);
vint8m4_t __riscv_vfncvt_x_f_w_i8m4_rm_m (vbool2_t mask, vfloat16m8_t src, unsigned int frm, size_t vl);
vint16mf4_t __riscv_vfncvt_x_f_w_i16mf4_rm_m (vbool64_t mask, vfloat32mf2_t src, unsigned int frm, size_t vl);
vint16mf2_t __riscv_vfncvt_x_f_w_i16mf2_rm_m (vbool32_t mask, vfloat32m1_t src, unsigned int frm, size_t vl);
vint16m1_t __riscv_vfncvt_x_f_w_i16m1_rm_m (vbool16_t mask, vfloat32m2_t src, unsigned int frm, size_t vl);
vint16m2_t __riscv_vfncvt_x_f_w_i16m2_rm_m (vbool8_t mask, vfloat32m4_t src, unsigned int frm, size_t vl);
vint16m4_t __riscv_vfncvt_x_f_w_i16m4_rm_m (vbool4_t mask, vfloat32m8_t src, unsigned int frm, size_t vl);
vint32mf2_t __riscv_vfncvt_x_f_w_i32mf2_rm_m (vbool64_t mask, vfloat64m1_t src, unsigned int frm, size_t vl);
vint32m1_t __riscv_vfncvt_x_f_w_i32m1_rm_m (vbool32_t mask, vfloat64m2_t src, unsigned int frm, size_t vl);
vint32m2_t __riscv_vfncvt_x_f_w_i32m2_rm_m (vbool16_t mask, vfloat64m4_t src, unsigned int frm, size_t vl);
vint32m4_t __riscv_vfncvt_x_f_w_i32m4_rm_m (vbool8_t mask, vfloat64m8_t src, unsigned int frm, size_t vl);
23.104. vfncvt.rtz.xu.f.w
- Mnemonic
vfncvt.rtz.xu.f.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width float to unsigned integer, truncating toward zero.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_rtz_xu_f_w.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl);
vuint8mf4_t __riscv_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl);
vuint8mf2_t __riscv_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl);
vuint8m1_t __riscv_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl);
vuint8m2_t __riscv_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl);
vuint8m4_t __riscv_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl);
vuint16mf4_t __riscv_vfncvt_rtz_xu_f_w_u16mf4 (vfloat32mf2_t src, size_t vl);
vuint16mf2_t __riscv_vfncvt_rtz_xu_f_w_u16mf2 (vfloat32m1_t src, size_t vl);
vuint16m1_t __riscv_vfncvt_rtz_xu_f_w_u16m1 (vfloat32m2_t src, size_t vl);
vuint16m2_t __riscv_vfncvt_rtz_xu_f_w_u16m2 (vfloat32m4_t src, size_t vl);
vuint16m4_t __riscv_vfncvt_rtz_xu_f_w_u16m4 (vfloat32m8_t src, size_t vl);
vuint32mf2_t __riscv_vfncvt_rtz_xu_f_w_u32mf2 (vfloat64m1_t src, size_t vl);
vuint32m1_t __riscv_vfncvt_rtz_xu_f_w_u32m1 (vfloat64m2_t src, size_t vl);
vuint32m2_t __riscv_vfncvt_rtz_xu_f_w_u32m2 (vfloat64m4_t src, size_t vl);
vuint32m4_t __riscv_vfncvt_rtz_xu_f_w_u32m4 (vfloat64m8_t src, size_t vl);
vuint8mf8_t __riscv_vfncvt_rtz_xu_f_w_u8mf8_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vuint8mf4_t __riscv_vfncvt_rtz_xu_f_w_u8mf4_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vuint8mf2_t __riscv_vfncvt_rtz_xu_f_w_u8mf2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vuint8m1_t __riscv_vfncvt_rtz_xu_f_w_u8m1_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vuint8m2_t __riscv_vfncvt_rtz_xu_f_w_u8m2_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vuint8m4_t __riscv_vfncvt_rtz_xu_f_w_u8m4_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vuint16mf4_t __riscv_vfncvt_rtz_xu_f_w_u16mf4_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vuint16mf2_t __riscv_vfncvt_rtz_xu_f_w_u16mf2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vuint16m1_t __riscv_vfncvt_rtz_xu_f_w_u16m1_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vuint16m2_t __riscv_vfncvt_rtz_xu_f_w_u16m2_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vuint16m4_t __riscv_vfncvt_rtz_xu_f_w_u16m4_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vuint32mf2_t __riscv_vfncvt_rtz_xu_f_w_u32mf2_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vuint32m1_t __riscv_vfncvt_rtz_xu_f_w_u32m1_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vuint32m2_t __riscv_vfncvt_rtz_xu_f_w_u32m2_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vuint32m4_t __riscv_vfncvt_rtz_xu_f_w_u32m4_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
23.105. vfncvt.rtz.x.f.w
- Mnemonic
vfncvt.rtz.x.f.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width float to signed integer, truncating toward zero.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_rtz_x_f_w.h
- Intrinsic Functions
Details
vint8mf8_t __riscv_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl);
vint8mf4_t __riscv_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl);
vint8mf2_t __riscv_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl);
vint8m1_t __riscv_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl);
vint8m2_t __riscv_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl);
vint8m4_t __riscv_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl);
vint16mf4_t __riscv_vfncvt_rtz_x_f_w_i16mf4 (vfloat32mf2_t src, size_t vl);
vint16mf2_t __riscv_vfncvt_rtz_x_f_w_i16mf2 (vfloat32m1_t src, size_t vl);
vint16m1_t __riscv_vfncvt_rtz_x_f_w_i16m1 (vfloat32m2_t src, size_t vl);
vint16m2_t __riscv_vfncvt_rtz_x_f_w_i16m2 (vfloat32m4_t src, size_t vl);
vint16m4_t __riscv_vfncvt_rtz_x_f_w_i16m4 (vfloat32m8_t src, size_t vl);
vint32mf2_t __riscv_vfncvt_rtz_x_f_w_i32mf2 (vfloat64m1_t src, size_t vl);
vint32m1_t __riscv_vfncvt_rtz_x_f_w_i32m1 (vfloat64m2_t src, size_t vl);
vint32m2_t __riscv_vfncvt_rtz_x_f_w_i32m2 (vfloat64m4_t src, size_t vl);
vint32m4_t __riscv_vfncvt_rtz_x_f_w_i32m4 (vfloat64m8_t src, size_t vl);
vint8mf8_t __riscv_vfncvt_rtz_x_f_w_i8mf8_m (vbool64_t mask, vfloat16mf4_t src, size_t vl);
vint8mf4_t __riscv_vfncvt_rtz_x_f_w_i8mf4_m (vbool32_t mask, vfloat16mf2_t src, size_t vl);
vint8mf2_t __riscv_vfncvt_rtz_x_f_w_i8mf2_m (vbool16_t mask, vfloat16m1_t src, size_t vl);
vint8m1_t __riscv_vfncvt_rtz_x_f_w_i8m1_m (vbool8_t mask, vfloat16m2_t src, size_t vl);
vint8m2_t __riscv_vfncvt_rtz_x_f_w_i8m2_m (vbool4_t mask, vfloat16m4_t src, size_t vl);
vint8m4_t __riscv_vfncvt_rtz_x_f_w_i8m4_m (vbool2_t mask, vfloat16m8_t src, size_t vl);
vint16mf4_t __riscv_vfncvt_rtz_x_f_w_i16mf4_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vint16mf2_t __riscv_vfncvt_rtz_x_f_w_i16mf2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vint16m1_t __riscv_vfncvt_rtz_x_f_w_i16m1_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vint16m2_t __riscv_vfncvt_rtz_x_f_w_i16m2_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vint16m4_t __riscv_vfncvt_rtz_x_f_w_i16m4_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vint32mf2_t __riscv_vfncvt_rtz_x_f_w_i32mf2_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vint32m1_t __riscv_vfncvt_rtz_x_f_w_i32m1_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vint32m2_t __riscv_vfncvt_rtz_x_f_w_i32m2_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vint32m4_t __riscv_vfncvt_rtz_x_f_w_i32m4_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
23.106. vfncvt.f.xu.w
- Mnemonic
vfncvt.f.xu.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width unsigned integer to single-width float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_f_xu_w.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_xu_w_f32mf2 (vuint64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_xu_w_f32m1 (vuint64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_xu_w_f32m2 (vuint64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_xu_w_f32m4 (vuint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_xu_w_f16mf4_m (vbool64_t mask, vuint32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_xu_w_f16mf2_m (vbool32_t mask, vuint32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_xu_w_f16m1_m (vbool16_t mask, vuint32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_xu_w_f16m2_m (vbool8_t mask, vuint32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_xu_w_f16m4_m (vbool4_t mask, vuint32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_xu_w_f32mf2_m (vbool64_t mask, vuint64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_xu_w_f32m1_m (vbool32_t mask, vuint64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_xu_w_f32m2_m (vbool16_t mask, vuint64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_xu_w_f32m4_m (vbool8_t mask, vuint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_xu_w_f16mf4_rm (vuint32mf2_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_xu_w_f16mf2_rm (vuint32m1_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_xu_w_f16m1_rm (vuint32m2_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_xu_w_f16m2_rm (vuint32m4_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_xu_w_f16m4_rm (vuint32m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_xu_w_f32mf2_rm (vuint64m1_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_xu_w_f32m1_rm (vuint64m2_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_xu_w_f32m2_rm (vuint64m4_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_xu_w_f32m4_rm (vuint64m8_t src, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_xu_w_f16mf4_rm_m (vbool64_t mask, vuint32mf2_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_xu_w_f16mf2_rm_m (vbool32_t mask, vuint32m1_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_xu_w_f16m1_rm_m (vbool16_t mask, vuint32m2_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_xu_w_f16m2_rm_m (vbool8_t mask, vuint32m4_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_xu_w_f16m4_rm_m (vbool4_t mask, vuint32m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_xu_w_f32mf2_rm_m (vbool64_t mask, vuint64m1_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_xu_w_f32m1_rm_m (vbool32_t mask, vuint64m2_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_xu_w_f32m2_rm_m (vbool16_t mask, vuint64m4_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_xu_w_f32m4_rm_m (vbool8_t mask, vuint64m8_t src, unsigned int frm, size_t vl);
23.107. vfncvt.f.x.w
- Mnemonic
vfncvt.f.x.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width signed integer to single-width float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_f_x_w.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_x_w_f32mf2 (vint64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_x_w_f32m1 (vint64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_x_w_f32m2 (vint64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_x_w_f32m4 (vint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_x_w_f16mf4_m (vbool64_t mask, vint32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_x_w_f16mf2_m (vbool32_t mask, vint32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_x_w_f16m1_m (vbool16_t mask, vint32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_x_w_f16m2_m (vbool8_t mask, vint32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_x_w_f16m4_m (vbool4_t mask, vint32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_x_w_f32mf2_m (vbool64_t mask, vint64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_x_w_f32m1_m (vbool32_t mask, vint64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_x_w_f32m2_m (vbool16_t mask, vint64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_x_w_f32m4_m (vbool8_t mask, vint64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_x_w_f16mf4_rm (vint32mf2_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_x_w_f16mf2_rm (vint32m1_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_x_w_f16m1_rm (vint32m2_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_x_w_f16m2_rm (vint32m4_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_x_w_f16m4_rm (vint32m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_x_w_f32mf2_rm (vint64m1_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_x_w_f32m1_rm (vint64m2_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_x_w_f32m2_rm (vint64m4_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_x_w_f32m4_rm (vint64m8_t src, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_x_w_f16mf4_rm_m (vbool64_t mask, vint32mf2_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_x_w_f16mf2_rm_m (vbool32_t mask, vint32m1_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_x_w_f16m1_rm_m (vbool16_t mask, vint32m2_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_x_w_f16m2_rm_m (vbool8_t mask, vint32m4_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_x_w_f16m4_rm_m (vbool4_t mask, vint32m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_x_w_f32mf2_rm_m (vbool64_t mask, vint64m1_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_x_w_f32m1_rm_m (vbool32_t mask, vint64m2_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_x_w_f32m2_rm_m (vbool16_t mask, vint64m4_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_x_w_f32m4_rm_m (vbool8_t mask, vint64m8_t src, unsigned int frm, size_t vl);
23.108. vfncvt.f.f.w
- Mnemonic
vfncvt.f.f.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width float to single-width float.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_f_f_w.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_f_w_f32mf2 (vfloat64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_f_w_f32m1 (vfloat64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_f_w_f32m2 (vfloat64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_f_w_f32m4 (vfloat64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_f_w_f16mf4_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_f_w_f16mf2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_f_w_f16m1_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_f_w_f16m2_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_f_w_f16m4_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_f_w_f32mf2_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_f_w_f32m1_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_f_w_f32m2_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_f_w_f32m4_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_f_w_f16mf4_rm (vfloat32mf2_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_f_w_f16mf2_rm (vfloat32m1_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_f_w_f16m1_rm (vfloat32m2_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_f_w_f16m2_rm (vfloat32m4_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_f_w_f16m4_rm (vfloat32m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_f_w_f32mf2_rm (vfloat64m1_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_f_w_f32m1_rm (vfloat64m2_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_f_w_f32m2_rm (vfloat64m4_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_f_w_f32m4_rm (vfloat64m8_t src, unsigned int frm, size_t vl);
vfloat16mf4_t __riscv_vfncvt_f_f_w_f16mf4_rm_m (vbool64_t mask, vfloat32mf2_t src, unsigned int frm, size_t vl);
vfloat16mf2_t __riscv_vfncvt_f_f_w_f16mf2_rm_m (vbool32_t mask, vfloat32m1_t src, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfncvt_f_f_w_f16m1_rm_m (vbool16_t mask, vfloat32m2_t src, unsigned int frm, size_t vl);
vfloat16m2_t __riscv_vfncvt_f_f_w_f16m2_rm_m (vbool8_t mask, vfloat32m4_t src, unsigned int frm, size_t vl);
vfloat16m4_t __riscv_vfncvt_f_f_w_f16m4_rm_m (vbool4_t mask, vfloat32m8_t src, unsigned int frm, size_t vl);
vfloat32mf2_t __riscv_vfncvt_f_f_w_f32mf2_rm_m (vbool64_t mask, vfloat64m1_t src, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfncvt_f_f_w_f32m1_rm_m (vbool32_t mask, vfloat64m2_t src, unsigned int frm, size_t vl);
vfloat32m2_t __riscv_vfncvt_f_f_w_f32m2_rm_m (vbool16_t mask, vfloat64m4_t src, unsigned int frm, size_t vl);
vfloat32m4_t __riscv_vfncvt_f_f_w_f32m4_rm_m (vbool8_t mask, vfloat64m8_t src, unsigned int frm, size_t vl);
23.109. vfncvt.rod.f.f.w
- Mnemonic
vfncvt.rod.f.f.w vd, vs2, vm
- Encoding
- Description
-
Convert double-width float to single-width float, rounding towards odd.
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfncvt_rod_f_f_w.h
- Intrinsic Functions
Details
vfloat16mf4_t __riscv_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_rod_f_f_w_f32mf2 (vfloat64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_rod_f_f_w_f32m1 (vfloat64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_rod_f_f_w_f32m2 (vfloat64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_rod_f_f_w_f32m4 (vfloat64m8_t src, size_t vl);
vfloat16mf4_t __riscv_vfncvt_rod_f_f_w_f16mf4_m (vbool64_t mask, vfloat32mf2_t src, size_t vl);
vfloat16mf2_t __riscv_vfncvt_rod_f_f_w_f16mf2_m (vbool32_t mask, vfloat32m1_t src, size_t vl);
vfloat16m1_t __riscv_vfncvt_rod_f_f_w_f16m1_m (vbool16_t mask, vfloat32m2_t src, size_t vl);
vfloat16m2_t __riscv_vfncvt_rod_f_f_w_f16m2_m (vbool8_t mask, vfloat32m4_t src, size_t vl);
vfloat16m4_t __riscv_vfncvt_rod_f_f_w_f16m4_m (vbool4_t mask, vfloat32m8_t src, size_t vl);
vfloat32mf2_t __riscv_vfncvt_rod_f_f_w_f32mf2_m (vbool64_t mask, vfloat64m1_t src, size_t vl);
vfloat32m1_t __riscv_vfncvt_rod_f_f_w_f32m1_m (vbool32_t mask, vfloat64m2_t src, size_t vl);
vfloat32m2_t __riscv_vfncvt_rod_f_f_w_f32m2_m (vbool16_t mask, vfloat64m4_t src, size_t vl);
vfloat32m4_t __riscv_vfncvt_rod_f_f_w_f32m4_m (vbool8_t mask, vfloat64m8_t src, size_t vl);
24. Vector Reduction Operations
25. Vector Single-Width Integer Reduction Instructions
25.1. vredsum.vs
- Mnemonic
vredsum.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredsum_vs.h
- Description
-
vd[0] = sum( vs1[0] , vs2[*] )
- Intrinsic Functions
Details
vint8m1_t __riscv_vredsum_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf8_i8m1_m (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf4_i8m1_m (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8mf2_i8m1_m (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m2_i8m1_m (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m4_i8m1_m (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredsum_vs_i8m8_i8m1_m (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf4_i16m1_m (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16mf2_i16m1_m (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m2_i16m1_m (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m4_i16m1_m (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredsum_vs_i16m8_i16m1_m (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32mf2_i32m1_m (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m2_i32m1_m (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m4_i32m1_m (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredsum_vs_i32m8_i32m1_m (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m2_i64m1_m (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m4_i64m1_m (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredsum_vs_i64m8_i64m1_m (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredsum_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredsum_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredsum_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredsum_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
25.2. vredmaxu.vs
- Mnemonic
vredmaxu.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredmaxu_vs.h
- Description
-
vd[0] = maxu( vs1[0] , vs2[*] )
- Intrinsic Functions
Details
vuint8m1_t __riscv_vredmaxu_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredmaxu_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredmaxu_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredmaxu_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredmaxu_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
25.3. vredmax.vs
- Mnemonic
vredmax.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredmax_vs.h
- Description
-
vd[0] = max( vs1[0] , vs2[*] )
- Intrinsic Functions
Details
vint8m1_t __riscv_vredmax_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf8_i8m1_m (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf4_i8m1_m (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8mf2_i8m1_m (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m2_i8m1_m (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m4_i8m1_m (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmax_vs_i8m8_i8m1_m (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf4_i16m1_m (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16mf2_i16m1_m (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m2_i16m1_m (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m4_i16m1_m (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmax_vs_i16m8_i16m1_m (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32mf2_i32m1_m (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m2_i32m1_m (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m4_i32m1_m (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmax_vs_i32m8_i32m1_m (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m2_i64m1_m (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m4_i64m1_m (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmax_vs_i64m8_i64m1_m (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl);
25.4. vredminu.vs
- Mnemonic
vredminu.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredminu_vs.h
- Description
-
vd[0] = minu( vs1[0] , vs2[*] ) — unsigned minimum of vs1[0] and all active elements of vs2
- Intrinsic Functions
Details
vuint8m1_t __riscv_vredminu_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredminu_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredminu_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredminu_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredminu_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
25.5. vredmin.vs
- Mnemonic
vredmin.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredmin_vs.h
- Description
-
vd[0] = min( vs1[0] , vs2[*] ) — signed minimum of vs1[0] and all active elements of vs2
- Intrinsic Functions
Details
vint8m1_t __riscv_vredmin_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf8_i8m1_m (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf4_i8m1_m (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8mf2_i8m1_m (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m2_i8m1_m (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m4_i8m1_m (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredmin_vs_i8m8_i8m1_m (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf4_i16m1_m (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16mf2_i16m1_m (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m2_i16m1_m (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m4_i16m1_m (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredmin_vs_i16m8_i16m1_m (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32mf2_i32m1_m (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m2_i32m1_m (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m4_i32m1_m (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredmin_vs_i32m8_i32m1_m (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m2_i64m1_m (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m4_i64m1_m (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredmin_vs_i64m8_i64m1_m (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl);
25.6. vredand.vs
- Mnemonic
vredand.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredand_vs.h
- Description
-
vd[0] = and( vs1[0] , vs2[*] )
- Intrinsic Functions
Details
vint8m1_t __riscv_vredand_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf8_i8m1_m (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf4_i8m1_m (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8mf2_i8m1_m (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m2_i8m1_m (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m4_i8m1_m (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredand_vs_i8m8_i8m1_m (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf4_i16m1_m (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16mf2_i16m1_m (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m2_i16m1_m (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m4_i16m1_m (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredand_vs_i16m8_i16m1_m (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32mf2_i32m1_m (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m2_i32m1_m (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m4_i32m1_m (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredand_vs_i32m8_i32m1_m (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m2_i64m1_m (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m4_i64m1_m (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredand_vs_i64m8_i64m1_m (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredand_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredand_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredand_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredand_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
25.7. vredor.vs
- Mnemonic
vredor.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredor_vs.h
- Description
-
vd[0] = or( vs1[0] , vs2[*] )
- Intrinsic Functions
Details
vint8m1_t __riscv_vredor_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf8_i8m1_m (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf4_i8m1_m (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8mf2_i8m1_m (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m2_i8m1_m (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m4_i8m1_m (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredor_vs_i8m8_i8m1_m (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf4_i16m1_m (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16mf2_i16m1_m (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m2_i16m1_m (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m4_i16m1_m (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredor_vs_i16m8_i16m1_m (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32mf2_i32m1_m (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m2_i32m1_m (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m4_i32m1_m (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredor_vs_i32m8_i32m1_m (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m2_i64m1_m (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m4_i64m1_m (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredor_vs_i64m8_i64m1_m (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredor_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredor_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredor_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredor_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
25.8. vredxor.vs
- Mnemonic
vredxor.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vredxor_vs.h
- Description
-
vd[0] = xor( vs1[0] , vs2[*] )
26. Vector Widening Integer Reduction Instructions
(Note: this heading appears out of place due to document extraction — the intrinsic functions listed below belong to Section 25.8, vredxor.vs, not to Section 26.)
- Intrinsic Functions
Details
vint8m1_t __riscv_vredxor_vs_i8mf8_i8m1 (vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf4_i8m1 (vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf2_i8m1 (vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m1_i8m1 (vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m2_i8m1 (vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m4_i8m1 (vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m8_i8m1 (vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf4_i16m1 (vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf2_i16m1 (vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m1_i16m1 (vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m2_i16m1 (vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m4_i16m1 (vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m8_i16m1 (vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32mf2_i32m1 (vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m1_i32m1 (vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m2_i32m1 (vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m4_i32m1 (vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m8_i32m1 (vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m1_i64m1 (vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m2_i64m1 (vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m4_i64m1 (vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m8_i64m1 (vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf8_u8m1 (vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf4_u8m1 (vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf2_u8m1 (vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m1_u8m1 (vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m2_u8m1 (vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m4_u8m1 (vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m8_u8m1 (vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf4_u16m1 (vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf2_u16m1 (vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m1_u16m1 (vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m2_u16m1 (vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m4_u16m1 (vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m8_u16m1 (vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32mf2_u32m1 (vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m1_u32m1 (vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m2_u32m1 (vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m4_u32m1 (vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m8_u32m1 (vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m1_u64m1 (vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m2_u64m1 (vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m4_u64m1 (vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m8_u64m1 (vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf8_i8m1_m (vbool64_t mask, vint8mf8_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf4_i8m1_m (vbool32_t mask, vint8mf4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8mf2_i8m1_m (vbool16_t mask, vint8mf2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m1_i8m1_m (vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m2_i8m1_m (vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m4_i8m1_m (vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl);
vint8m1_t __riscv_vredxor_vs_i8m8_i8m1_m (vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf4_i16m1_m (vbool64_t mask, vint16mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16mf2_i16m1_m (vbool32_t mask, vint16mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m1_i16m1_m (vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m2_i16m1_m (vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m4_i16m1_m (vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vredxor_vs_i16m8_i16m1_m (vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32mf2_i32m1_m (vbool64_t mask, vint32mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m1_i32m1_m (vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m2_i32m1_m (vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m4_i32m1_m (vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vredxor_vs_i32m8_i32m1_m (vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m1_i64m1_m (vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m2_i64m1_m (vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m4_i64m1_m (vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vredxor_vs_i64m8_i64m1_m (vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf8_u8m1_m (vbool64_t mask, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf4_u8m1_m (vbool32_t mask, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8mf2_u8m1_m (vbool16_t mask, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m1_u8m1_m (vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m2_u8m1_m (vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m4_u8m1_m (vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl);
vuint8m1_t __riscv_vredxor_vs_u8m8_u8m1_m (vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf4_u16m1_m (vbool64_t mask, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16mf2_u16m1_m (vbool32_t mask, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m1_u16m1_m (vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m2_u16m1_m (vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m4_u16m1_m (vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vredxor_vs_u16m8_u16m1_m (vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32mf2_u32m1_m (vbool64_t mask, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m1_u32m1_m (vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m2_u32m1_m (vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m4_u32m1_m (vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vredxor_vs_u32m8_u32m1_m (vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m1_u64m1_m (vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m2_u64m1_m (vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m4_u64m1_m (vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vredxor_vs_u64m8_u64m1_m (vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl);
26.1. vwredsumu.vs
- Mnemonic
vwredsumu.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwredsumu_vs.h
- Description
-
Unsigned sum reduction into a double-width accumulator:
vd[0] (2*SEW) = vs1[0] (2*SEW) + sum(zero-extend(vs2[*] (SEW)))
- Intrinsic Functions
Details
vuint16m1_t __riscv_vwredsumu_vs_u8mf8_u16m1 (vuint8mf8_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf4_u16m1 (vuint8mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf2_u16m1 (vuint8mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m1_u16m1 (vuint8m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m2_u16m1 (vuint8m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m4_u16m1 (vuint8m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m8_u16m1 (vuint8m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf4_u32m1 (vuint16mf4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf2_u32m1 (vuint16mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m1_u32m1 (vuint16m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m2_u32m1 (vuint16m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m4_u32m1 (vuint16m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m8_u32m1 (vuint16m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32mf2_u64m1 (vuint32mf2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m1_u64m1 (vuint32m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m2_u64m1 (vuint32m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m4_u64m1 (vuint32m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m8_u64m1 (vuint32m8_t vector, vuint64m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf8_u16m1_m (vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf4_u16m1_m (vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8mf2_u16m1_m (vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m1_u16m1_m (vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m2_u16m1_m (vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m4_u16m1_m (vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl);
vuint16m1_t __riscv_vwredsumu_vs_u8m8_u16m1_m (vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf4_u32m1_m (vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16mf2_u32m1_m (vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m1_u32m1_m (vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m2_u32m1_m (vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m4_u32m1_m (vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl);
vuint32m1_t __riscv_vwredsumu_vs_u16m8_u32m1_m (vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32mf2_u64m1_m (vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m1_u64m1_m (vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m2_u64m1_m (vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m4_u64m1_m (vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl);
vuint64m1_t __riscv_vwredsumu_vs_u32m8_u64m1_m (vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t vl);
26.2. vwredsum.vs
- Mnemonic
vwredsum.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vwredsum_vs.h
- Description
-
Signed sum reduction into a double-width accumulator:
vd[0] (2*SEW) = vs1[0] (2*SEW) + sum(sign-extend(vs2[*] (SEW)))
- Intrinsic Functions
Details
vint16m1_t __riscv_vwredsum_vs_i8mf8_i16m1 (vint8mf8_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf4_i16m1 (vint8mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf2_i16m1 (vint8mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m1_i16m1 (vint8m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m2_i16m1 (vint8m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m4_i16m1 (vint8m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m8_i16m1 (vint8m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf4_i32m1 (vint16mf4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf2_i32m1 (vint16mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m1_i32m1 (vint16m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m2_i32m1 (vint16m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m4_i32m1 (vint16m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m8_i32m1 (vint16m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32mf2_i64m1 (vint32mf2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m1_i64m1 (vint32m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m2_i64m1 (vint32m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m4_i64m1 (vint32m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m8_i64m1 (vint32m8_t vector, vint64m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf8_i16m1_m (vbool64_t mask, vint8mf8_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf4_i16m1_m (vbool32_t mask, vint8mf4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8mf2_i16m1_m (vbool16_t mask, vint8mf2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m1_i16m1_m (vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m2_i16m1_m (vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m4_i16m1_m (vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl);
vint16m1_t __riscv_vwredsum_vs_i8m8_i16m1_m (vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf4_i32m1_m (vbool64_t mask, vint16mf4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16mf2_i32m1_m (vbool32_t mask, vint16mf2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m1_i32m1_m (vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m2_i32m1_m (vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m4_i32m1_m (vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl);
vint32m1_t __riscv_vwredsum_vs_i16m8_i32m1_m (vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32mf2_i64m1_m (vbool64_t mask, vint32mf2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m1_i64m1_m (vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m2_i64m1_m (vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m4_i64m1_m (vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl);
vint64m1_t __riscv_vwredsum_vs_i32m8_i64m1_m (vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl);
27. Vector Single-Width Floating-Point Reduction Instructions
27.1. vfredosum.vs
- Mnemonic
vfredosum.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfredosum_vs.h
- Description
-
Ordered sum reduction: vd[0] = sum(vs1[0], vs2[*]), with the additions performed in element order
- Intrinsic Functions
Details
vfloat16m1_t __riscv_vfredosum_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf4_f16m1_rm (vfloat16mf4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf2_f16m1_rm (vfloat16mf2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m1_f16m1_rm (vfloat16m1_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m2_f16m1_rm (vfloat16m2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m4_f16m1_rm (vfloat16m4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m8_f16m1_rm (vfloat16m8_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32mf2_f32m1_rm (vfloat32mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m1_f32m1_rm (vfloat32m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m2_f32m1_rm (vfloat32m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m4_f32m1_rm (vfloat32m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m8_f32m1_rm (vfloat32m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m1_f64m1_rm (vfloat64m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m2_f64m1_rm (vfloat64m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m4_f64m1_rm (vfloat64m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m8_f64m1_rm (vfloat64m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf4_f16m1_rm_m (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16mf2_f16m1_rm_m (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m1_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m2_f16m1_rm_m (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m4_f16m1_rm_m (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredosum_vs_f16m8_f16m1_rm_m (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32mf2_f32m1_rm_m (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m1_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m2_f32m1_rm_m (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m4_f32m1_rm_m (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredosum_vs_f32m8_f32m1_rm_m (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m1_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m2_f64m1_rm_m (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m4_f64m1_rm_m (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredosum_vs_f64m8_f64m1_rm_m (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
27.2. vfredusum.vs
- Mnemonic
vfredusum.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfredusum_vs.h
- Description
-
Unordered sum reduction: vd[0] = sum(vs1[0], vs2[*]); the order in which the additions are performed is unspecified
- Intrinsic Functions
Details
vfloat16m1_t __riscv_vfredusum_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf4_f16m1_rm (vfloat16mf4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf2_f16m1_rm (vfloat16mf2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m1_f16m1_rm (vfloat16m1_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m2_f16m1_rm (vfloat16m2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m4_f16m1_rm (vfloat16m4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m8_f16m1_rm (vfloat16m8_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32mf2_f32m1_rm (vfloat32mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m1_f32m1_rm (vfloat32m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m2_f32m1_rm (vfloat32m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m4_f32m1_rm (vfloat32m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m8_f32m1_rm (vfloat32m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m1_f64m1_rm (vfloat64m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m2_f64m1_rm (vfloat64m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m4_f64m1_rm (vfloat64m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m8_f64m1_rm (vfloat64m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf4_f16m1_rm_m (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16mf2_f16m1_rm_m (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m1_f16m1_rm_m (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m2_f16m1_rm_m (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m4_f16m1_rm_m (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat16m1_t __riscv_vfredusum_vs_f16m8_f16m1_rm_m (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32mf2_f32m1_rm_m (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m1_f32m1_rm_m (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m2_f32m1_rm_m (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m4_f32m1_rm_m (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfredusum_vs_f32m8_f32m1_rm_m (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m1_f64m1_rm_m (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m2_f64m1_rm_m (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m4_f64m1_rm_m (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfredusum_vs_f64m8_f64m1_rm_m (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
27.3. vfredmax.vs
- Mnemonic
vfredmax.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfredmax_vs.h
- Description
-
Maximum-value reduction: vd[0] = max(vs1[0], vs2[*])
- Intrinsic Functions
Details
vfloat16m1_t __riscv_vfredmax_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmax_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmax_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmax_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
27.4. vfredmin.vs
- Mnemonic
vfredmin.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfredmin_vs.h
- Description
-
Minimum-value reduction: vd[0] = min(vs1[0], vs2[*])
- Intrinsic Functions
Details
vfloat16m1_t __riscv_vfredmin_vs_f16mf4_f16m1 (vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16mf2_f16m1 (vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m1_f16m1 (vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m2_f16m1 (vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m4_f16m1 (vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m8_f16m1 (vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32mf2_f32m1 (vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m1_f32m1 (vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m2_f32m1 (vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m4_f32m1 (vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m8_f32m1 (vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m1_f64m1 (vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m2_f64m1 (vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m4_f64m1 (vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m8_f64m1 (vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl);
vfloat16m1_t __riscv_vfredmin_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32mf2_f32m1_m (vbool64_t mask, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m1_f32m1_m (vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m2_f32m1_m (vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m4_f32m1_m (vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfredmin_vs_f32m8_f32m1_m (vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m1_f64m1_m (vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m2_f64m1_m (vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m4_f64m1_m (vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfredmin_vs_f64m8_f64m1_m (vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl);
28. Vector Widening Floating-Point Reduction Instructions
28.1. vfwredosum.vs
- Mnemonic
vfwredosum.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwredosum_vs.h
- Description
-
Ordered sum
- Intrinsic Functions
Details
vfloat32m1_t __riscv_vfwredosum_vs_f16mf4_f32m1 (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf2_f32m1 (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m1_f32m1 (vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m2_f32m1 (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m4_f32m1 (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m8_f32m1 (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32mf2_f64m1 (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m1_f64m1 (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m2_f64m1 (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m4_f64m1 (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m8_f64m1 (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32mf2_f64m1_m (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m1_f64m1_m (vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m2_f64m1_m (vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m4_f64m1_m (vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m8_f64m1_m (vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf4_f32m1_rm (vfloat16mf4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf2_f32m1_rm (vfloat16mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m1_f32m1_rm (vfloat16m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m2_f32m1_rm (vfloat16m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m4_f32m1_rm (vfloat16m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m8_f32m1_rm (vfloat16m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32mf2_f64m1_rm (vfloat32mf2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m1_f64m1_rm (vfloat32m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m2_f64m1_rm (vfloat32m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m4_f64m1_rm (vfloat32m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m8_f64m1_rm (vfloat32m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf4_f32m1_rm_m (vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16mf2_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m1_f32m1_rm_m (vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m2_f32m1_rm_m (vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m4_f32m1_rm_m (vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredosum_vs_f16m8_f32m1_rm_m (vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32mf2_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m1_f64m1_rm_m (vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m2_f64m1_rm_m (vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m4_f64m1_rm_m (vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredosum_vs_f32m8_f64m1_rm_m (vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
28.2. vfwredusum.vs
- Mnemonic
vfwredusum.vs vd, vs2, vs1, vm
- Encoding
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfwredusum_vs.h
- Description
-
Unordered sum
- Intrinsic Functions
Details
vfloat32m1_t __riscv_vfwredusum_vs_f16mf4_f32m1 (vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf2_f32m1 (vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m1_f32m1 (vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m2_f32m1 (vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m4_f32m1 (vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m8_f32m1 (vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32mf2_f64m1 (vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m1_f64m1 (vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m2_f64m1 (vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m4_f64m1 (vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m8_f64m1 (vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32mf2_f64m1_m (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m1_f64m1_m (vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m2_f64m1_m (vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m4_f64m1_m (vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m8_f64m1_m (vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf4_f32m1_rm (vfloat16mf4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf2_f32m1_rm (vfloat16mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m1_f32m1_rm (vfloat16m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m2_f32m1_rm (vfloat16m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m4_f32m1_rm (vfloat16m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m8_f32m1_rm (vfloat16m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32mf2_f64m1_rm (vfloat32mf2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m1_f64m1_rm (vfloat32m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m2_f64m1_rm (vfloat32m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m4_f64m1_rm (vfloat32m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m8_f64m1_rm (vfloat32m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf4_f32m1_rm_m (vbool64_t mask, vfloat16mf4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16mf2_f32m1_rm_m (vbool32_t mask, vfloat16mf2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m1_f32m1_rm_m (vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m2_f32m1_rm_m (vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m4_f32m1_rm_m (vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat32m1_t __riscv_vfwredusum_vs_f16m8_f32m1_rm_m (vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32mf2_f64m1_rm_m (vbool64_t mask, vfloat32mf2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m1_f64m1_rm_m (vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m2_f64m1_rm_m (vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m4_f64m1_rm_m (vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
vfloat64m1_t __riscv_vfwredusum_vs_f32m8_f64m1_rm_m (vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, unsigned int frm, size_t vl);
29. Vector Mask Instructions
29.1. Vector Mask-Register Logical Instructions
29.2. vmand.mm
- Mnemonic
vmand.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = vs2.mask[i] && vs1.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmand_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmand_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmand_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmand_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmand_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmand_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmand_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmand_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.3. vmnand.mm
- Mnemonic
vmnand.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = !(vs2.mask[i] && vs1.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmnand_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmnand_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmnand_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmnand_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmnand_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmnand_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmnand_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmnand_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.4. vmandn.mm
- Mnemonic
vmandn.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = vs2.mask[i] && !vs1.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmandn_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmandn_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmandn_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmandn_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmandn_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmandn_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmandn_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmandn_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.5. vmxor.mm
- Mnemonic
vmxor.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = vs2.mask[i] ^^ vs1.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmxor_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmxor_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmxor_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmxor_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmxor_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmxor_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmxor_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmxor_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.6. vmor.mm
- Mnemonic
vmor.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = vs2.mask[i] || vs1.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmor_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmor_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmor_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmor_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmor_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmor_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmor_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmor_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.7. vmnor.mm
- Mnemonic
vmnor.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = !(vs2.mask[i] || vs1.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmnor_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmnor_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmnor_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmnor_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmnor_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmnor_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmnor_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmnor_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.8. vmorn.mm
- Mnemonic
vmorn.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = vs2.mask[i] || !vs1.mask[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmorn_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmorn_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmorn_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmorn_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmorn_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmorn_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmorn_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmorn_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.9. vmxnor.mm
- Mnemonic
vmxnor.mm vd, vs2, vs1
- Encoding
- Description
vd.mask[i] = !(vs2.mask[i] ^^ vs1.mask[i])
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmxnor_mm.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmxnor_mm_b1 (vbool1_t op1, vbool1_t op2, size_t vl);
vbool2_t __riscv_vmxnor_mm_b2 (vbool2_t op1, vbool2_t op2, size_t vl);
vbool4_t __riscv_vmxnor_mm_b4 (vbool4_t op1, vbool4_t op2, size_t vl);
vbool8_t __riscv_vmxnor_mm_b8 (vbool8_t op1, vbool8_t op2, size_t vl);
vbool16_t __riscv_vmxnor_mm_b16 (vbool16_t op1, vbool16_t op2, size_t vl);
vbool32_t __riscv_vmxnor_mm_b32 (vbool32_t op1, vbool32_t op2, size_t vl);
vbool64_t __riscv_vmxnor_mm_b64 (vbool64_t op1, vbool64_t op2, size_t vl);
29.10. vcpop.m
- Mnemonic
vcpop.m rd, vs2, vm
- Encoding
- Description
-
Vector count population in mask
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vcpop_m.h
- Intrinsic Functions
Details
unsigned long __riscv_vcpop_m_b1 (vbool1_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b2 (vbool2_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b4 (vbool4_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b8 (vbool8_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b16 (vbool16_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b32 (vbool32_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b64 (vbool64_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b1_m (vbool1_t mask, vbool1_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b2_m (vbool2_t mask, vbool2_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b4_m (vbool4_t mask, vbool4_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b8_m (vbool8_t mask, vbool8_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b16_m (vbool16_t mask, vbool16_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b32_m (vbool32_t mask, vbool32_t op1, size_t vl);
unsigned long __riscv_vcpop_m_b64_m (vbool64_t mask, vbool64_t op1, size_t vl);
29.11. vfirst.m
- Mnemonic
vfirst.m rd, vs2, vm
- Encoding
- Description
-
find-first-set mask bit
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfirst_m.h
- Intrinsic Functions
Details
long __riscv_vfirst_m_b1 (vbool1_t op1, size_t vl);
long __riscv_vfirst_m_b2 (vbool2_t op1, size_t vl);
long __riscv_vfirst_m_b4 (vbool4_t op1, size_t vl);
long __riscv_vfirst_m_b8 (vbool8_t op1, size_t vl);
long __riscv_vfirst_m_b16 (vbool16_t op1, size_t vl);
long __riscv_vfirst_m_b32 (vbool32_t op1, size_t vl);
long __riscv_vfirst_m_b64 (vbool64_t op1, size_t vl);
long __riscv_vfirst_m_b1_m (vbool1_t mask, vbool1_t op1, size_t vl);
long __riscv_vfirst_m_b2_m (vbool2_t mask, vbool2_t op1, size_t vl);
long __riscv_vfirst_m_b4_m (vbool4_t mask, vbool4_t op1, size_t vl);
long __riscv_vfirst_m_b8_m (vbool8_t mask, vbool8_t op1, size_t vl);
long __riscv_vfirst_m_b16_m (vbool16_t mask, vbool16_t op1, size_t vl);
long __riscv_vfirst_m_b32_m (vbool32_t mask, vbool32_t op1, size_t vl);
long __riscv_vfirst_m_b64_m (vbool64_t mask, vbool64_t op1, size_t vl);
29.12. vmsbf.m
- Mnemonic
vmsbf.m vd, vs2, vm
- Encoding
- Description
-
set-before-first mask bit
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsbf_m.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmsbf_m_b1 (vbool1_t op1, size_t vl);
vbool2_t __riscv_vmsbf_m_b2 (vbool2_t op1, size_t vl);
vbool4_t __riscv_vmsbf_m_b4 (vbool4_t op1, size_t vl);
vbool8_t __riscv_vmsbf_m_b8 (vbool8_t op1, size_t vl);
vbool16_t __riscv_vmsbf_m_b16 (vbool16_t op1, size_t vl);
vbool32_t __riscv_vmsbf_m_b32 (vbool32_t op1, size_t vl);
vbool64_t __riscv_vmsbf_m_b64 (vbool64_t op1, size_t vl);
vbool1_t __riscv_vmsbf_m_b1_m (vbool1_t mask, vbool1_t op1, size_t vl);
vbool2_t __riscv_vmsbf_m_b2_m (vbool2_t mask, vbool2_t op1, size_t vl);
vbool4_t __riscv_vmsbf_m_b4_m (vbool4_t mask, vbool4_t op1, size_t vl);
vbool8_t __riscv_vmsbf_m_b8_m (vbool8_t mask, vbool8_t op1, size_t vl);
vbool16_t __riscv_vmsbf_m_b16_m (vbool16_t mask, vbool16_t op1, size_t vl);
vbool32_t __riscv_vmsbf_m_b32_m (vbool32_t mask, vbool32_t op1, size_t vl);
vbool64_t __riscv_vmsbf_m_b64_m (vbool64_t mask, vbool64_t op1, size_t vl);
29.13. vmsif.m
- Mnemonic
vmsif.m vd, vs2, vm
- Encoding
- Description
-
set-including-first mask bit
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsif_m.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmsif_m_b1 (vbool1_t op1, size_t vl);
vbool2_t __riscv_vmsif_m_b2 (vbool2_t op1, size_t vl);
vbool4_t __riscv_vmsif_m_b4 (vbool4_t op1, size_t vl);
vbool8_t __riscv_vmsif_m_b8 (vbool8_t op1, size_t vl);
vbool16_t __riscv_vmsif_m_b16 (vbool16_t op1, size_t vl);
vbool32_t __riscv_vmsif_m_b32 (vbool32_t op1, size_t vl);
vbool64_t __riscv_vmsif_m_b64 (vbool64_t op1, size_t vl);
vbool1_t __riscv_vmsif_m_b1_m (vbool1_t mask, vbool1_t op1, size_t vl);
vbool2_t __riscv_vmsif_m_b2_m (vbool2_t mask, vbool2_t op1, size_t vl);
vbool4_t __riscv_vmsif_m_b4_m (vbool4_t mask, vbool4_t op1, size_t vl);
vbool8_t __riscv_vmsif_m_b8_m (vbool8_t mask, vbool8_t op1, size_t vl);
vbool16_t __riscv_vmsif_m_b16_m (vbool16_t mask, vbool16_t op1, size_t vl);
vbool32_t __riscv_vmsif_m_b32_m (vbool32_t mask, vbool32_t op1, size_t vl);
vbool64_t __riscv_vmsif_m_b64_m (vbool64_t mask, vbool64_t op1, size_t vl);
29.14. vmsof.m
- Mnemonic
vmsof.m vd, vs2, vm
- Encoding
- Description
-
set-only-first mask bit
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmsof_m.h
- Intrinsic Functions
Details
vbool1_t __riscv_vmsof_m_b1 (vbool1_t op1, size_t vl);
vbool2_t __riscv_vmsof_m_b2 (vbool2_t op1, size_t vl);
vbool4_t __riscv_vmsof_m_b4 (vbool4_t op1, size_t vl);
vbool8_t __riscv_vmsof_m_b8 (vbool8_t op1, size_t vl);
vbool16_t __riscv_vmsof_m_b16 (vbool16_t op1, size_t vl);
vbool32_t __riscv_vmsof_m_b32 (vbool32_t op1, size_t vl);
vbool64_t __riscv_vmsof_m_b64 (vbool64_t op1, size_t vl);
vbool1_t __riscv_vmsof_m_b1_m (vbool1_t mask, vbool1_t op1, size_t vl);
vbool2_t __riscv_vmsof_m_b2_m (vbool2_t mask, vbool2_t op1, size_t vl);
vbool4_t __riscv_vmsof_m_b4_m (vbool4_t mask, vbool4_t op1, size_t vl);
vbool8_t __riscv_vmsof_m_b8_m (vbool8_t mask, vbool8_t op1, size_t vl);
vbool16_t __riscv_vmsof_m_b16_m (vbool16_t mask, vbool16_t op1, size_t vl);
vbool32_t __riscv_vmsof_m_b32_m (vbool32_t mask, vbool32_t op1, size_t vl);
vbool64_t __riscv_vmsof_m_b64_m (vbool64_t mask, vbool64_t op1, size_t vl);
29.15. viota.m
- Mnemonic
viota.m vd, vs2, vm
- Encoding
- Description
-
Vector Iota Instruction
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/viota_m.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_viota_m_u8mf8 (vbool64_t op1, size_t vl);
vuint8mf4_t __riscv_viota_m_u8mf4 (vbool32_t op1, size_t vl);
vuint8mf2_t __riscv_viota_m_u8mf2 (vbool16_t op1, size_t vl);
vuint8m1_t __riscv_viota_m_u8m1 (vbool8_t op1, size_t vl);
vuint8m2_t __riscv_viota_m_u8m2 (vbool4_t op1, size_t vl);
vuint8m4_t __riscv_viota_m_u8m4 (vbool2_t op1, size_t vl);
vuint8m8_t __riscv_viota_m_u8m8 (vbool1_t op1, size_t vl);
vuint16mf4_t __riscv_viota_m_u16mf4 (vbool64_t op1, size_t vl);
vuint16mf2_t __riscv_viota_m_u16mf2 (vbool32_t op1, size_t vl);
vuint16m1_t __riscv_viota_m_u16m1 (vbool16_t op1, size_t vl);
vuint16m2_t __riscv_viota_m_u16m2 (vbool8_t op1, size_t vl);
vuint16m4_t __riscv_viota_m_u16m4 (vbool4_t op1, size_t vl);
vuint16m8_t __riscv_viota_m_u16m8 (vbool2_t op1, size_t vl);
vuint32mf2_t __riscv_viota_m_u32mf2 (vbool64_t op1, size_t vl);
vuint32m1_t __riscv_viota_m_u32m1 (vbool32_t op1, size_t vl);
vuint32m2_t __riscv_viota_m_u32m2 (vbool16_t op1, size_t vl);
vuint32m4_t __riscv_viota_m_u32m4 (vbool8_t op1, size_t vl);
vuint32m8_t __riscv_viota_m_u32m8 (vbool4_t op1, size_t vl);
vuint64m1_t __riscv_viota_m_u64m1 (vbool64_t op1, size_t vl);
vuint64m2_t __riscv_viota_m_u64m2 (vbool32_t op1, size_t vl);
vuint64m4_t __riscv_viota_m_u64m4 (vbool16_t op1, size_t vl);
vuint64m8_t __riscv_viota_m_u64m8 (vbool8_t op1, size_t vl);
vuint8mf8_t __riscv_viota_m_u8mf8_m (vbool64_t mask, vbool64_t op1, size_t vl);
vuint8mf4_t __riscv_viota_m_u8mf4_m (vbool32_t mask, vbool32_t op1, size_t vl);
vuint8mf2_t __riscv_viota_m_u8mf2_m (vbool16_t mask, vbool16_t op1, size_t vl);
vuint8m1_t __riscv_viota_m_u8m1_m (vbool8_t mask, vbool8_t op1, size_t vl);
vuint8m2_t __riscv_viota_m_u8m2_m (vbool4_t mask, vbool4_t op1, size_t vl);
vuint8m4_t __riscv_viota_m_u8m4_m (vbool2_t mask, vbool2_t op1, size_t vl);
vuint8m8_t __riscv_viota_m_u8m8_m (vbool1_t mask, vbool1_t op1, size_t vl);
vuint16mf4_t __riscv_viota_m_u16mf4_m (vbool64_t mask, vbool64_t op1, size_t vl);
vuint16mf2_t __riscv_viota_m_u16mf2_m (vbool32_t mask, vbool32_t op1, size_t vl);
vuint16m1_t __riscv_viota_m_u16m1_m (vbool16_t mask, vbool16_t op1, size_t vl);
vuint16m2_t __riscv_viota_m_u16m2_m (vbool8_t mask, vbool8_t op1, size_t vl);
vuint16m4_t __riscv_viota_m_u16m4_m (vbool4_t mask, vbool4_t op1, size_t vl);
vuint16m8_t __riscv_viota_m_u16m8_m (vbool2_t mask, vbool2_t op1, size_t vl);
vuint32mf2_t __riscv_viota_m_u32mf2_m (vbool64_t mask, vbool64_t op1, size_t vl);
vuint32m1_t __riscv_viota_m_u32m1_m (vbool32_t mask, vbool32_t op1, size_t vl);
vuint32m2_t __riscv_viota_m_u32m2_m (vbool16_t mask, vbool16_t op1, size_t vl);
vuint32m4_t __riscv_viota_m_u32m4_m (vbool8_t mask, vbool8_t op1, size_t vl);
vuint32m8_t __riscv_viota_m_u32m8_m (vbool4_t mask, vbool4_t op1, size_t vl);
vuint64m1_t __riscv_viota_m_u64m1_m (vbool64_t mask, vbool64_t op1, size_t vl);
vuint64m2_t __riscv_viota_m_u64m2_m (vbool32_t mask, vbool32_t op1, size_t vl);
vuint64m4_t __riscv_viota_m_u64m4_m (vbool16_t mask, vbool16_t op1, size_t vl);
vuint64m8_t __riscv_viota_m_u64m8_m (vbool8_t mask, vbool8_t op1, size_t vl);
29.16. vid.v
- Mnemonic
vid.v vd, vm # Write element ID to destination.
- Encoding
- Description
-
Vector Element Index
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vid_v.h
- Intrinsic Functions
Details
vuint8mf8_t __riscv_vid_v_u8mf8 (size_t vl);
vuint8mf4_t __riscv_vid_v_u8mf4 (size_t vl);
vuint8mf2_t __riscv_vid_v_u8mf2 (size_t vl);
vuint8m1_t __riscv_vid_v_u8m1 (size_t vl);
vuint8m2_t __riscv_vid_v_u8m2 (size_t vl);
vuint8m4_t __riscv_vid_v_u8m4 (size_t vl);
vuint8m8_t __riscv_vid_v_u8m8 (size_t vl);
vuint16mf4_t __riscv_vid_v_u16mf4 (size_t vl);
vuint16mf2_t __riscv_vid_v_u16mf2 (size_t vl);
vuint16m1_t __riscv_vid_v_u16m1 (size_t vl);
vuint16m2_t __riscv_vid_v_u16m2 (size_t vl);
vuint16m4_t __riscv_vid_v_u16m4 (size_t vl);
vuint16m8_t __riscv_vid_v_u16m8 (size_t vl);
vuint32mf2_t __riscv_vid_v_u32mf2 (size_t vl);
vuint32m1_t __riscv_vid_v_u32m1 (size_t vl);
vuint32m2_t __riscv_vid_v_u32m2 (size_t vl);
vuint32m4_t __riscv_vid_v_u32m4 (size_t vl);
vuint32m8_t __riscv_vid_v_u32m8 (size_t vl);
vuint64m1_t __riscv_vid_v_u64m1 (size_t vl);
vuint64m2_t __riscv_vid_v_u64m2 (size_t vl);
vuint64m4_t __riscv_vid_v_u64m4 (size_t vl);
vuint64m8_t __riscv_vid_v_u64m8 (size_t vl);
vuint8mf8_t __riscv_vid_v_u8mf8_m (vbool64_t mask, size_t vl);
vuint8mf4_t __riscv_vid_v_u8mf4_m (vbool32_t mask, size_t vl);
vuint8mf2_t __riscv_vid_v_u8mf2_m (vbool16_t mask, size_t vl);
vuint8m1_t __riscv_vid_v_u8m1_m (vbool8_t mask, size_t vl);
vuint8m2_t __riscv_vid_v_u8m2_m (vbool4_t mask, size_t vl);
vuint8m4_t __riscv_vid_v_u8m4_m (vbool2_t mask, size_t vl);
vuint8m8_t __riscv_vid_v_u8m8_m (vbool1_t mask, size_t vl);
vuint16mf4_t __riscv_vid_v_u16mf4_m (vbool64_t mask, size_t vl);
vuint16mf2_t __riscv_vid_v_u16mf2_m (vbool32_t mask, size_t vl);
vuint16m1_t __riscv_vid_v_u16m1_m (vbool16_t mask, size_t vl);
vuint16m2_t __riscv_vid_v_u16m2_m (vbool8_t mask, size_t vl);
vuint16m4_t __riscv_vid_v_u16m4_m (vbool4_t mask, size_t vl);
vuint16m8_t __riscv_vid_v_u16m8_m (vbool2_t mask, size_t vl);
vuint32mf2_t __riscv_vid_v_u32mf2_m (vbool64_t mask, size_t vl);
vuint32m1_t __riscv_vid_v_u32m1_m (vbool32_t mask, size_t vl);
vuint32m2_t __riscv_vid_v_u32m2_m (vbool16_t mask, size_t vl);
vuint32m4_t __riscv_vid_v_u32m4_m (vbool8_t mask, size_t vl);
vuint32m8_t __riscv_vid_v_u32m8_m (vbool4_t mask, size_t vl);
vuint64m1_t __riscv_vid_v_u64m1_m (vbool64_t mask, size_t vl);
vuint64m2_t __riscv_vid_v_u64m2_m (vbool32_t mask, size_t vl);
vuint64m4_t __riscv_vid_v_u64m4_m (vbool16_t mask, size_t vl);
vuint64m8_t __riscv_vid_v_u64m8_m (vbool8_t mask, size_t vl);
30. Vector Permutation Instructions
31. Integer Scalar Move Instructions
31.1. vmv.x.s
- Mnemonic
vmv.x.s rd, vs2
- Encoding
- Description
-
x[rd] = vs2[0] (vs1=0)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv_x_s.h
- Intrinsics
int8_t __riscv_vmv_x_s_i8m1_i8 (vint8m1_t vs2);
int8_t __riscv_vmv_x_s_i8m2_i8 (vint8m2_t vs2);
int8_t __riscv_vmv_x_s_i8m4_i8 (vint8m4_t vs2);
int8_t __riscv_vmv_x_s_i8m8_i8 (vint8m8_t vs2);
31.2. vmv.s.x
- Mnemonic
vmv.s.x vd, rs1
- Encoding
- Description
-
vd[0] = x[rs1] (vs2=0)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv_s_x.h
- Intrinsics
Details
vint8mf8_t __riscv_vmv_s_x_i8mf8 (int8_t src, size_t vl);
vint8mf4_t __riscv_vmv_s_x_i8mf4 (int8_t src, size_t vl);
vint8mf2_t __riscv_vmv_s_x_i8mf2 (int8_t src, size_t vl);
vint8m1_t __riscv_vmv_s_x_i8m1 (int8_t src, size_t vl);
vint8m2_t __riscv_vmv_s_x_i8m2 (int8_t src, size_t vl);
vint8m4_t __riscv_vmv_s_x_i8m4 (int8_t src, size_t vl);
vint8m8_t __riscv_vmv_s_x_i8m8 (int8_t src, size_t vl);
vint16mf4_t __riscv_vmv_s_x_i16mf4 (int16_t src, size_t vl);
vint16mf2_t __riscv_vmv_s_x_i16mf2 (int16_t src, size_t vl);
vint16m1_t __riscv_vmv_s_x_i16m1 (int16_t src, size_t vl);
vint16m2_t __riscv_vmv_s_x_i16m2 (int16_t src, size_t vl);
vint16m4_t __riscv_vmv_s_x_i16m4 (int16_t src, size_t vl);
vint16m8_t __riscv_vmv_s_x_i16m8 (int16_t src, size_t vl);
vint32mf2_t __riscv_vmv_s_x_i32mf2 (int32_t src, size_t vl);
vint32m1_t __riscv_vmv_s_x_i32m1 (int32_t src, size_t vl);
vint32m2_t __riscv_vmv_s_x_i32m2 (int32_t src, size_t vl);
vint32m4_t __riscv_vmv_s_x_i32m4 (int32_t src, size_t vl);
vint32m8_t __riscv_vmv_s_x_i32m8 (int32_t src, size_t vl);
vint64m1_t __riscv_vmv_s_x_i64m1 (int64_t src, size_t vl);
vint64m2_t __riscv_vmv_s_x_i64m2 (int64_t src, size_t vl);
vint64m4_t __riscv_vmv_s_x_i64m4 (int64_t src, size_t vl);
vint64m8_t __riscv_vmv_s_x_i64m8 (int64_t src, size_t vl);
vuint8mf8_t __riscv_vmv_s_x_u8mf8 (uint8_t src, size_t vl);
vuint8mf4_t __riscv_vmv_s_x_u8mf4 (uint8_t src, size_t vl);
vuint8mf2_t __riscv_vmv_s_x_u8mf2 (uint8_t src, size_t vl);
vuint8m1_t __riscv_vmv_s_x_u8m1 (uint8_t src, size_t vl);
vuint8m2_t __riscv_vmv_s_x_u8m2 (uint8_t src, size_t vl);
vuint8m4_t __riscv_vmv_s_x_u8m4 (uint8_t src, size_t vl);
vuint8m8_t __riscv_vmv_s_x_u8m8 (uint8_t src, size_t vl);
vuint16mf4_t __riscv_vmv_s_x_u16mf4 (uint16_t src, size_t vl);
vuint16mf2_t __riscv_vmv_s_x_u16mf2 (uint16_t src, size_t vl);
vuint16m1_t __riscv_vmv_s_x_u16m1 (uint16_t src, size_t vl);
vuint16m2_t __riscv_vmv_s_x_u16m2 (uint16_t src, size_t vl);
vuint16m4_t __riscv_vmv_s_x_u16m4 (uint16_t src, size_t vl);
vuint16m8_t __riscv_vmv_s_x_u16m8 (uint16_t src, size_t vl);
vuint32mf2_t __riscv_vmv_s_x_u32mf2 (uint32_t src, size_t vl);
vuint32m1_t __riscv_vmv_s_x_u32m1 (uint32_t src, size_t vl);
vuint32m2_t __riscv_vmv_s_x_u32m2 (uint32_t src, size_t vl);
vuint32m4_t __riscv_vmv_s_x_u32m4 (uint32_t src, size_t vl);
vuint32m8_t __riscv_vmv_s_x_u32m8 (uint32_t src, size_t vl);
vuint64m1_t __riscv_vmv_s_x_u64m1 (uint64_t src, size_t vl);
vuint64m2_t __riscv_vmv_s_x_u64m2 (uint64_t src, size_t vl);
vuint64m4_t __riscv_vmv_s_x_u64m4 (uint64_t src, size_t vl);
vuint64m8_t __riscv_vmv_s_x_u64m8 (uint64_t src, size_t vl);
32. Floating-Point Scalar Move Instructions
32.1. vfmv.f.s
- Mnemonic
vfmv.f.s rd, vs2
- Encoding
- Description
-
f[rd] = vs2[0] (rs1=0)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmv_f_s.h
- Intrinsics
float16_t __riscv_vfmv_f_s_f16mf4_f16 (vfloat16mf4_t src);
float16_t __riscv_vfmv_f_s_f16mf2_f16 (vfloat16mf2_t src);
float16_t __riscv_vfmv_f_s_f16m1_f16 (vfloat16m1_t src);
float16_t __riscv_vfmv_f_s_f16m2_f16 (vfloat16m2_t src);
float16_t __riscv_vfmv_f_s_f16m4_f16 (vfloat16m4_t src);
float16_t __riscv_vfmv_f_s_f16m8_f16 (vfloat16m8_t src);
float32_t __riscv_vfmv_f_s_f32mf2_f32 (vfloat32mf2_t src);
float32_t __riscv_vfmv_f_s_f32m1_f32 (vfloat32m1_t src);
float32_t __riscv_vfmv_f_s_f32m2_f32 (vfloat32m2_t src);
float32_t __riscv_vfmv_f_s_f32m4_f32 (vfloat32m4_t src);
float32_t __riscv_vfmv_f_s_f32m8_f32 (vfloat32m8_t src);
float64_t __riscv_vfmv_f_s_f64m1_f64 (vfloat64m1_t src);
float64_t __riscv_vfmv_f_s_f64m2_f64 (vfloat64m2_t src);
float64_t __riscv_vfmv_f_s_f64m4_f64 (vfloat64m4_t src);
float64_t __riscv_vfmv_f_s_f64m8_f64 (vfloat64m8_t src);
32.2. vfmv.s.f
- Mnemonic
vfmv.s.f vd, rs1
- Encoding
- Description
-
vd[0] = f[rs1] (vs2=0)
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfmv_s_f.h
- Intrinsics
vfloat16mf4_t __riscv_vfmv_s_f_f16mf4 (float16_t src, size_t vl);
vfloat16mf2_t __riscv_vfmv_s_f_f16mf2 (float16_t src, size_t vl);
vfloat16m1_t __riscv_vfmv_s_f_f16m1 (float16_t src, size_t vl);
vfloat16m2_t __riscv_vfmv_s_f_f16m2 (float16_t src, size_t vl);
vfloat16m4_t __riscv_vfmv_s_f_f16m4 (float16_t src, size_t vl);
vfloat16m8_t __riscv_vfmv_s_f_f16m8 (float16_t src, size_t vl);
vfloat32mf2_t __riscv_vfmv_s_f_f32mf2 (float32_t src, size_t vl);
vfloat32m1_t __riscv_vfmv_s_f_f32m1 (float32_t src, size_t vl);
vfloat32m2_t __riscv_vfmv_s_f_f32m2 (float32_t src, size_t vl);
vfloat32m4_t __riscv_vfmv_s_f_f32m4 (float32_t src, size_t vl);
vfloat32m8_t __riscv_vfmv_s_f_f32m8 (float32_t src, size_t vl);
vfloat64m1_t __riscv_vfmv_s_f_f64m1 (float64_t src, size_t vl);
vfloat64m2_t __riscv_vfmv_s_f_f64m2 (float64_t src, size_t vl);
vfloat64m4_t __riscv_vfmv_s_f_f64m4 (float64_t src, size_t vl);
vfloat64m8_t __riscv_vfmv_s_f_f64m8 (float64_t src, size_t vl);
33. Vector Slide Instructions
33.1. vslideup.vx
- Mnemonic
vslideup.vx vd, vs2, rs1, vm
- Encoding
- Description
-
vd[i+x[rs1]] = vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vslideup_vx.h
- Intrinsics
33.2. vslideup.vi
- Mnemonic
vslideup.vi vd, vs2, uimm, vm
- Encoding
- Description
-
vd[i+uimm] = vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vslideup_vi.h
- Intrinsics
33.3. vslidedown.vx
- Mnemonic
vslidedown.vx vd, vs2, rs1, vm
- Encoding
- Description
-
vd[i] = vs2[i+x[rs1]]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vslidedown_vx.h
- Intrinsics
33.4. vslidedown.vi
- Mnemonic
vslidedown.vi vd, vs2, uimm, vm
- Encoding
- Description
-
vd[i] = vs2[i+uimm]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vslidedown_vi.h
- Intrinsics
33.5. vslide1up.vx
- Mnemonic
vslide1up.vx vd, vs2, rs1, vm
- Encoding
- Description
-
vd[0]=x[rs1], vd[i+1] = vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vslide1up_vx.h
- Intrinsics
33.6. vfslide1up.vf
- Mnemonic
vfslide1up.vf vd, vs2, rs1, vm
- Encoding
- Description
-
vd[0]=f[rs1], vd[i+1] = vs2[i]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfslide1up_vf.h
- Intrinsics
33.7. vslide1down.vx
- Mnemonic
vslide1down.vx vd, vs2, rs1, vm
- Encoding
- Description
-
vd[i] = vs2[i+1], vd[vl-1]=x[rs1]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vslide1down_vx.h
- Intrinsics
33.8. vfslide1down.vf
- Mnemonic
vfslide1down.vf vd, vs2, rs1, vm
- Encoding
- Description
-
vd[i] = vs2[i+1], vd[vl-1]=f[rs1]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vfslide1down_vf.h
- Intrinsics
34. Vector Register Gather Instructions
34.1. vrgather.vv
- Mnemonic
vrgather.vv vd, vs2, vs1, vm
- Encoding
- Description
-
vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]];
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrgather_vv.h
- Intrinsics
34.2. vrgatherei16.vv
- Mnemonic
vrgatherei16.vv vd, vs2, vs1, vm
- Encoding
- Description
-
vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]];
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrgatherei16_vv.h
- Intrinsics
34.3. vrgather.vx
- Mnemonic
vrgather.vx vd, vs2, rs1, vm
- Encoding
- Description
-
vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrgather_vx.h
- Intrinsics
34.4. vrgather.vi
- Mnemonic
vrgather.vi vd, vs2, uimm, vm
- Encoding
- Description
-
vd[i] = (uimm >= VLMAX) ? 0 : vs2[uimm]
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vrgather_vi.h
- Intrinsics
35. Vector Compress Instruction
35.1. vcompress.vm
- Mnemonic
vcompress.vm vd, vs2, vs1
- Encoding
- Description
-
Compress into vd elements of vs2 where vs1 is enabled
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vcompress_vm.h
36. Whole Vector Register Move
36.1. vmv<nr>r.v
- Mnemonic
vmv1r.v vd, vs2
vmv2r.v vd, vs2
vmv4r.v vd, vs2
vmv8r.v vd, vs2
- Encoding
- Description
- Spike Implementation
-
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv1r.h
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv2r.h
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv4r.h
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/insns/vmv8r.h
- Intrinsics