@@ -62,6 +62,55 @@ for.exit:
ret void
}
+ define void @trunc_minimal_bitwidths_shufflevector(i8* %p, i32 %arg1, i64 %len) {
+ ; CHECK-LABEL: @trunc_minimal_bitwidths_shufflevector(
+ ; CHECK-NEXT: entry:
+ ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+ ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[LEN:%.*]], [[TMP1]]
+ ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+ ; CHECK: vector.ph:
+ ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+ ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[LEN]], [[TMP3]]
+ ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[LEN]], [[N_MOD_VF]]
+ ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[ARG1:%.*]], i32 0
+ ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+ ; CHECK: vector.body:
+ ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+ ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[P:%.*]], i64 [[INDEX]]
+ ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to <vscale x 4 x i8>*
+ ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, <vscale x 4 x i8>* [[TMP5]], align 1
+ ; CHECK-NEXT: [[TMP6:%.*]] = trunc <vscale x 4 x i32> [[BROADCAST_SPLAT]] to <vscale x 4 x i8>
+ ; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 4 x i8> [[WIDE_LOAD]], [[TMP6]]
+ ; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i8> [[TMP7]], [[WIDE_LOAD]]
+ ; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP4]] to <vscale x 4 x i8>*
+ ; CHECK-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[TMP9]], align 1
+ ; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+ ; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
+ ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+ ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+ ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+ entry:
+   br label %for.body
+
+ for.body: ; preds = %entry
+   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+   %arrayidx = getelementptr inbounds i8, i8* %p, i64 %indvars.iv
+   %0 = load i8, i8* %arrayidx
+   %conv = zext i8 %0 to i32
+   %conv17 = xor i32 %conv, %arg1
+   %mul18 = mul nuw nsw i32 %conv17, %conv
+   %conv19 = trunc i32 %mul18 to i8
+   store i8 %conv19, i8* %arrayidx
+   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+   %exitcond = icmp eq i64 %indvars.iv.next, %len
+   br i1 %exitcond, label %for.exit, label %for.body, !llvm.loop !0
+
+ for.exit: ; preds = %for.body
+   ret void
+ }
!0 = !{!0, !1, !2}
!1 = !{!"llvm.loop.vectorize.width", i32 4}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}