@@ -1394,3 +1394,77 @@ define <2 x double> @vid_step2_v2f64() {
1394
1394
; CHECK-NEXT: ret
1395
1395
ret <2 x double > <double 0 .0 , double 2 .0 >
1396
1396
}
1397
+
1398
+
1399
+ define <8 x float > @buildvec_v8f32_zvl256 (float %e0 , float %e1 , float %e2 , float %e3 , float %e4 , float %e5 , float %e6 , float %e7 ) vscale_range(4 , 128 ) {
1400
+ ; CHECK-LABEL: buildvec_v8f32_zvl256:
1401
+ ; CHECK: # %bb.0:
1402
+ ; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
1403
+ ; CHECK-NEXT: vfmv.v.f v8, fa0
1404
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa1
1405
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa2
1406
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa3
1407
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa4
1408
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
1409
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
1410
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
1411
+ ; CHECK-NEXT: ret
1412
+ %v0 = insertelement <8 x float > poison, float %e0 , i64 0
1413
+ %v1 = insertelement <8 x float > %v0 , float %e1 , i64 1
1414
+ %v2 = insertelement <8 x float > %v1 , float %e2 , i64 2
1415
+ %v3 = insertelement <8 x float > %v2 , float %e3 , i64 3
1416
+ %v4 = insertelement <8 x float > %v3 , float %e4 , i64 4
1417
+ %v5 = insertelement <8 x float > %v4 , float %e5 , i64 5
1418
+ %v6 = insertelement <8 x float > %v5 , float %e6 , i64 6
1419
+ %v7 = insertelement <8 x float > %v6 , float %e7 , i64 7
1420
+ ret <8 x float > %v7
1421
+ }
1422
+
1423
+
1424
+ define <8 x double > @buildvec_v8f64_zvl256 (double %e0 , double %e1 , double %e2 , double %e3 , double %e4 , double %e5 , double %e6 , double %e7 ) vscale_range(4 , 128 ) {
1425
+ ; CHECK-LABEL: buildvec_v8f64_zvl256:
1426
+ ; CHECK: # %bb.0:
1427
+ ; CHECK-NEXT: vsetivli zero, 8, e64, m2, ta, ma
1428
+ ; CHECK-NEXT: vfmv.v.f v8, fa0
1429
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa1
1430
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa2
1431
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa3
1432
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa4
1433
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
1434
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
1435
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
1436
+ ; CHECK-NEXT: ret
1437
+ %v0 = insertelement <8 x double > poison, double %e0 , i64 0
1438
+ %v1 = insertelement <8 x double > %v0 , double %e1 , i64 1
1439
+ %v2 = insertelement <8 x double > %v1 , double %e2 , i64 2
1440
+ %v3 = insertelement <8 x double > %v2 , double %e3 , i64 3
1441
+ %v4 = insertelement <8 x double > %v3 , double %e4 , i64 4
1442
+ %v5 = insertelement <8 x double > %v4 , double %e5 , i64 5
1443
+ %v6 = insertelement <8 x double > %v5 , double %e6 , i64 6
1444
+ %v7 = insertelement <8 x double > %v6 , double %e7 , i64 7
1445
+ ret <8 x double > %v7
1446
+ }
1447
+
1448
+ define <8 x double > @buildvec_v8f64_zvl512 (double %e0 , double %e1 , double %e2 , double %e3 , double %e4 , double %e5 , double %e6 , double %e7 ) vscale_range(8 , 128 ) {
1449
+ ; CHECK-LABEL: buildvec_v8f64_zvl512:
1450
+ ; CHECK: # %bb.0:
1451
+ ; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
1452
+ ; CHECK-NEXT: vfmv.v.f v8, fa0
1453
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa1
1454
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa2
1455
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa3
1456
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa4
1457
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
1458
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
1459
+ ; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
1460
+ ; CHECK-NEXT: ret
1461
+ %v0 = insertelement <8 x double > poison, double %e0 , i64 0
1462
+ %v1 = insertelement <8 x double > %v0 , double %e1 , i64 1
1463
+ %v2 = insertelement <8 x double > %v1 , double %e2 , i64 2
1464
+ %v3 = insertelement <8 x double > %v2 , double %e3 , i64 3
1465
+ %v4 = insertelement <8 x double > %v3 , double %e4 , i64 4
1466
+ %v5 = insertelement <8 x double > %v4 , double %e5 , i64 5
1467
+ %v6 = insertelement <8 x double > %v5 , double %e6 , i64 6
1468
+ %v7 = insertelement <8 x double > %v6 , double %e7 , i64 7
1469
+ ret <8 x double > %v7
1470
+ }
; NOTE(review): trailing GitHub UI residue ("0 commit comments") from the diff
; paste neutralized into an IR comment so the file parses.