
 #pragma clang riscv intrinsic sifive_vector

+#define __riscv_sf_vc_x_se_u8mf4(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 6, vl)
+#define __riscv_sf_vc_x_se_u8mf2(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 7, vl)
+#define __riscv_sf_vc_x_se_u8m1(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 0, vl)
+#define __riscv_sf_vc_x_se_u8m2(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 1, vl)
+#define __riscv_sf_vc_x_se_u8m4(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 2, vl)
+#define __riscv_sf_vc_x_se_u8m8(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 3, vl)
+
+#define __riscv_sf_vc_x_se_u16mf2(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 7, vl)
+#define __riscv_sf_vc_x_se_u16m1(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 0, vl)
+#define __riscv_sf_vc_x_se_u16m2(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 1, vl)
+#define __riscv_sf_vc_x_se_u16m4(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 2, vl)
+#define __riscv_sf_vc_x_se_u16m8(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 3, vl)
+
+#define __riscv_sf_vc_x_se_u32m1(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 0, vl)
+#define __riscv_sf_vc_x_se_u32m2(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 1, vl)
+#define __riscv_sf_vc_x_se_u32m4(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 2, vl)
+#define __riscv_sf_vc_x_se_u32m8(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 3, vl)
+
+#define __riscv_sf_vc_i_se_u8mf4(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 6, vl)
+#define __riscv_sf_vc_i_se_u8mf2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 7, vl)
+#define __riscv_sf_vc_i_se_u8m1(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 0, vl)
+#define __riscv_sf_vc_i_se_u8m2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 1, vl)
+#define __riscv_sf_vc_i_se_u8m4(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 2, vl)
+#define __riscv_sf_vc_i_se_u8m8(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 3, vl)
+
+#define __riscv_sf_vc_i_se_u16mf2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 7, vl)
+#define __riscv_sf_vc_i_se_u16m1(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 0, vl)
+#define __riscv_sf_vc_i_se_u16m2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 1, vl)
+#define __riscv_sf_vc_i_se_u16m4(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 2, vl)
+#define __riscv_sf_vc_i_se_u16m8(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 3, vl)
+
+#define __riscv_sf_vc_i_se_u32m1(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 0, vl)
+#define __riscv_sf_vc_i_se_u32m2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 1, vl)
+#define __riscv_sf_vc_i_se_u32m4(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 2, vl)
+#define __riscv_sf_vc_i_se_u32m8(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 3, vl)
+
+#if __riscv_v_elen >= 64
+#define __riscv_sf_vc_x_se_u8mf8(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 5, vl)
+#define __riscv_sf_vc_x_se_u16mf4(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 6, vl)
+#define __riscv_sf_vc_x_se_u32mf2(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 7, vl)
+
+#define __riscv_sf_vc_i_se_u8mf8(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 5, vl)
+#define __riscv_sf_vc_i_se_u16mf4(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 6, vl)
+#define __riscv_sf_vc_i_se_u32mf2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 7, vl)
+
+#define __riscv_sf_vc_i_se_u64m1(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 0, vl)
+#define __riscv_sf_vc_i_se_u64m2(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 1, vl)
+#define __riscv_sf_vc_i_se_u64m4(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 2, vl)
+#define __riscv_sf_vc_i_se_u64m8(p27_26, p24_20, p11_7, simm5, vl) \
+  __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 3, vl)
+
+#if __riscv_xlen >= 64
+#define __riscv_sf_vc_x_se_u64m1(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 0, vl)
+#define __riscv_sf_vc_x_se_u64m2(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 1, vl)
+#define __riscv_sf_vc_x_se_u64m4(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 2, vl)
+#define __riscv_sf_vc_x_se_u64m8(p27_26, p24_20, p11_7, rs1, vl) \
+  __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 3, vl)
+#endif
+#endif
+
 #endif //_SIFIVE_VECTOR_H_
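
For reference, a minimal usage sketch (not part of the diff) showing how the SEW/LMUL-suffixed wrappers above expand into the generic __riscv_sf_vc_x_se/__riscv_sf_vc_i_se forms. The opcode field values (1, 2, 3), the immediate 4, and the function name send_to_vcix are illustrative placeholders; the sketch assumes a core implementing the Xsfvcp extension and compilation with a suitable march string such as rv64gcv_xsfvcp.

// Illustrative sketch only: exercises the u8m1 wrappers defined above.
// The opcode fields (p27_26, p24_20, p11_7) are placeholders whose meaning
// is defined by the attached VCIX coprocessor.
#include <stddef.h>
#include <stdint.h>
#include <sifive_vector.h>

void send_to_vcix(uint8_t x, size_t vl) {
  // Scalar form: expands to __riscv_sf_vc_x_se(1, 2, 3, (uint8_t)x, 8, 0, vl),
  // i.e. SEW=8 and the vtype LMUL encoding for m1 (0).
  __riscv_sf_vc_x_se_u8m1(1, 2, 3, x, vl);

  // Immediate form: sends the 5-bit immediate 4 instead of a scalar register.
  __riscv_sf_vc_i_se_u8m1(1, 2, 3, 4, vl);
}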