; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-NOZBB,CHECK-ZVKB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-NOZBB,CHECK-ZVKB64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-ZBB,CHECK-ZVKB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb,+zbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB-ZBB,CHECK-ZVKB64

define <vscale x 1 x i8> @vandn_vv_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y) {
; CHECK-LABEL: vandn_vv_nxv1i8:
@@ -1931,3 +1933,177 @@ define <vscale x 8 x i64> @vandn_vx_swapped_nxv8i64(i64 %x, <vscale x 8 x i64> %
  %b = and <vscale x 8 x i64> %splat, %y
  ret <vscale x 8 x i64> %b
}
+
+define <vscale x 1 x i16> @vandn_vx_imm16(<vscale x 1 x i16> %x) {
+; CHECK-LABEL: vandn_vx_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 8
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 8
+; CHECK-ZVKB-NEXT: addi a0, a0, -1
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vand.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: ret
+  %a = and <vscale x 1 x i16> splat (i16 32767), %x
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @vandn_vx_swapped_imm16(<vscale x 1 x i16> %x) {
+; CHECK-LABEL: vandn_vx_swapped_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 8
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_swapped_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 8
+; CHECK-ZVKB-NEXT: addi a0, a0, -1
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vand.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: ret
+  %a = and <vscale x 1 x i16> %x, splat (i16 32767)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i64> @vandn_vx_imm64(<vscale x 1 x i64> %x) {
+; CHECK-RV32-LABEL: vandn_vx_imm64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: addi sp, sp, -16
+; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT: lui a0, 1044480
+; CHECK-RV32-NEXT: li a1, 255
+; CHECK-RV32-NEXT: sw a1, 8(sp)
+; CHECK-RV32-NEXT: sw a0, 12(sp)
+; CHECK-RV32-NEXT: addi a0, sp, 8
+; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT: vand.vv v8, v8, v9
+; CHECK-RV32-NEXT: addi sp, sp, 16
+; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: vandn_vx_imm64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: li a0, -1
+; CHECK-RV64-NEXT: slli a0, a0, 56
+; CHECK-RV64-NEXT: addi a0, a0, 255
+; CHECK-RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV64-NEXT: vand.vx v8, v8, a0
+; CHECK-RV64-NEXT: ret
+;
+; CHECK-ZVKB32-LABEL: vandn_vx_imm64:
+; CHECK-ZVKB32: # %bb.0:
+; CHECK-ZVKB32-NEXT: addi sp, sp, -16
+; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
+; CHECK-ZVKB32-NEXT: lui a0, 1044480
+; CHECK-ZVKB32-NEXT: li a1, 255
+; CHECK-ZVKB32-NEXT: sw a1, 8(sp)
+; CHECK-ZVKB32-NEXT: sw a0, 12(sp)
+; CHECK-ZVKB32-NEXT: addi a0, sp, 8
+; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
+; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v9
+; CHECK-ZVKB32-NEXT: addi sp, sp, 16
+; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
+; CHECK-ZVKB32-NEXT: ret
+;
+; CHECK-ZVKB64-LABEL: vandn_vx_imm64:
+; CHECK-ZVKB64: # %bb.0:
+; CHECK-ZVKB64-NEXT: li a0, -1
+; CHECK-ZVKB64-NEXT: slli a0, a0, 56
+; CHECK-ZVKB64-NEXT: addi a0, a0, 255
+; CHECK-ZVKB64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVKB64-NEXT: vand.vx v8, v8, a0
+; CHECK-ZVKB64-NEXT: ret
+  %a = and <vscale x 1 x i64> %x, splat (i64 -72057594037927681)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i16> @vandn_vx_multi_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y) {
+; CHECK-LABEL: vandn_vx_multi_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 4
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_multi_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 4
+; CHECK-ZVKB-NEXT: addi a0, a0, -1
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vand.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: vand.vx v9, v9, a0
+; CHECK-ZVKB-NEXT: vadd.vv v8, v8, v9
+; CHECK-ZVKB-NEXT: ret
+  %a = and <vscale x 1 x i16> %x, splat (i16 16383)
+  %b = and <vscale x 1 x i16> %y, splat (i16 16383)
+  %c = add <vscale x 1 x i16> %a, %b
+  ret <vscale x 1 x i16> %c
+}
+
+define <vscale x 1 x i16> @vandn_vx_multi_scalar_imm16(<vscale x 1 x i16> %x, i16 %y) {
+; CHECK-LABEL: vandn_vx_multi_scalar_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: or a0, a0, a1
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vandn_vx_multi_scalar_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a1, 8
+; CHECK-ZVKB-NEXT: addi a1, a1, -1
+; CHECK-ZVKB-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vand.vx v8, v8, a1
+; CHECK-ZVKB-NEXT: or a0, a0, a1
+; CHECK-ZVKB-NEXT: vadd.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: ret
+  %a = and <vscale x 1 x i16> %x, splat (i16 32767)
+  %b = or i16 %y, 32767
+  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %c = add <vscale x 1 x i16> %a, %splat
+  ret <vscale x 1 x i16> %c
+}
+
+define <vscale x 1 x i16> @vand_vadd_vx_imm16(<vscale x 1 x i16> %x) {
+; CHECK-LABEL: vand_vadd_vx_imm16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, 8
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+;
+; CHECK-ZVKB-LABEL: vand_vadd_vx_imm16:
+; CHECK-ZVKB: # %bb.0:
+; CHECK-ZVKB-NEXT: lui a0, 8
+; CHECK-ZVKB-NEXT: addi a0, a0, -1
+; CHECK-ZVKB-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-ZVKB-NEXT: vand.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: vadd.vx v8, v8, a0
+; CHECK-ZVKB-NEXT: ret
+  %a = and <vscale x 1 x i16> %x, splat (i16 32767)
+  %b = add <vscale x 1 x i16> %a, splat (i16 32767)
+  ret <vscale x 1 x i16> %b
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-ZVKB-NOZBB: {{.*}}
+; CHECK-ZVKB-ZBB: {{.*}}