
Commit 4a8de03

[SLP] A new test for horizontal vectorization for non-power-of-2 instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@292626 91177308-0d34-0410-b5e6-96231b3b80d8
1 parent 37d2d7a commit 4a8de03
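
For context, the new function sums 48 consecutive floats, and 48 is not a power of two (48 = 32 + 16), which is the case the SLP vectorizer's horizontal-reduction matching has to handle here. A scalar source along the following lines would lower to the `fadd fast` chain in the test; this is an illustrative reconstruction, not code from the commit (the -ffast-math/full-unrolling setup is an assumption, only the function shape is taken from the IR):

/* Hypothetical source for @f: full unrolling of this 48-iteration,
 * reassociable (-ffast-math) sum produces the chain of `fadd fast`
 * instructions that the test feeds to the SLP vectorizer. */
float f(const float *x) {
  float sum = x[0];
  for (int i = 1; i < 48; ++i) /* 47 tail additions, x[1]..x[47] */
    sum += x[i];
  return sum;
}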

File tree

1 file changed: +290 -0 lines changed

test/Transforms/SLPVectorizer/X86/horizontal-list.ll

Lines changed: 290 additions & 0 deletions
@@ -280,3 +280,293 @@ entry:
 ret float %max.0.mul3.2
 }
 
+define float @f(float* nocapture readonly %x) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[TMP1]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[TMP2]], [[ADD_1]]
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[TMP3]], [[ADD_2]]
+; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, float* [[ARRAYIDX_4]], align 4
+; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float [[TMP4]], [[ADD_3]]
+; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT: [[TMP5:%.*]] = load float, float* [[ARRAYIDX_5]], align 4
+; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float [[TMP5]], [[ADD_4]]
+; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT: [[TMP6:%.*]] = load float, float* [[ARRAYIDX_6]], align 4
+; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float [[TMP6]], [[ADD_5]]
+; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[ARRAYIDX_7]], align 4
+; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float [[TMP7]], [[ADD_6]]
+; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT: [[TMP8:%.*]] = load float, float* [[ARRAYIDX_8]], align 4
+; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float [[TMP8]], [[ADD_7]]
+; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT: [[TMP9:%.*]] = load float, float* [[ARRAYIDX_9]], align 4
+; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float [[TMP9]], [[ADD_8]]
+; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT: [[TMP10:%.*]] = load float, float* [[ARRAYIDX_10]], align 4
+; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float [[TMP10]], [[ADD_9]]
+; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX_11]], align 4
+; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float [[TMP11]], [[ADD_10]]
+; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT: [[TMP12:%.*]] = load float, float* [[ARRAYIDX_12]], align 4
+; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float [[TMP12]], [[ADD_11]]
+; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT: [[TMP13:%.*]] = load float, float* [[ARRAYIDX_13]], align 4
+; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float [[TMP13]], [[ADD_12]]
+; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX_14]], align 4
+; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float [[TMP14]], [[ADD_13]]
+; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT: [[TMP15:%.*]] = load float, float* [[ARRAYIDX_15]], align 4
+; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float [[TMP15]], [[ADD_14]]
+; CHECK-NEXT: [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT: [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT: [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT: [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT: [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT: [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT: [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT: [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT: [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT: [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT: [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT: [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT: [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT: [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT: [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT: [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; CHECK-NEXT: [[ARRAYIDX_32:%.*]] = getelementptr inbounds float, float* [[X]], i64 32
+; CHECK-NEXT: [[ARRAYIDX_33:%.*]] = getelementptr inbounds float, float* [[X]], i64 33
+; CHECK-NEXT: [[ARRAYIDX_34:%.*]] = getelementptr inbounds float, float* [[X]], i64 34
+; CHECK-NEXT: [[ARRAYIDX_35:%.*]] = getelementptr inbounds float, float* [[X]], i64 35
+; CHECK-NEXT: [[ARRAYIDX_36:%.*]] = getelementptr inbounds float, float* [[X]], i64 36
+; CHECK-NEXT: [[ARRAYIDX_37:%.*]] = getelementptr inbounds float, float* [[X]], i64 37
+; CHECK-NEXT: [[ARRAYIDX_38:%.*]] = getelementptr inbounds float, float* [[X]], i64 38
+; CHECK-NEXT: [[ARRAYIDX_39:%.*]] = getelementptr inbounds float, float* [[X]], i64 39
+; CHECK-NEXT: [[ARRAYIDX_40:%.*]] = getelementptr inbounds float, float* [[X]], i64 40
+; CHECK-NEXT: [[ARRAYIDX_41:%.*]] = getelementptr inbounds float, float* [[X]], i64 41
+; CHECK-NEXT: [[ARRAYIDX_42:%.*]] = getelementptr inbounds float, float* [[X]], i64 42
+; CHECK-NEXT: [[ARRAYIDX_43:%.*]] = getelementptr inbounds float, float* [[X]], i64 43
+; CHECK-NEXT: [[ARRAYIDX_44:%.*]] = getelementptr inbounds float, float* [[X]], i64 44
+; CHECK-NEXT: [[ARRAYIDX_45:%.*]] = getelementptr inbounds float, float* [[X]], i64 45
+; CHECK-NEXT: [[ARRAYIDX_46:%.*]] = getelementptr inbounds float, float* [[X]], i64 46
+; CHECK-NEXT: [[ARRAYIDX_47:%.*]] = getelementptr inbounds float, float* [[X]], i64 47
+; CHECK-NEXT: [[TMP16:%.*]] = bitcast float* [[ARRAYIDX_16]] to <32 x float>*
+; CHECK-NEXT: [[TMP17:%.*]] = load <32 x float>, <32 x float>* [[TMP16]], align 4
+; CHECK-NEXT: [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT: [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT: [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT: [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT: [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT: [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT: [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT: [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT: [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT: [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT: [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT: [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT: [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT: [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT: [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; CHECK-NEXT: [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; CHECK-NEXT: [[ADD_32:%.*]] = fadd fast float undef, [[ADD_31]]
+; CHECK-NEXT: [[ADD_33:%.*]] = fadd fast float undef, [[ADD_32]]
+; CHECK-NEXT: [[ADD_34:%.*]] = fadd fast float undef, [[ADD_33]]
+; CHECK-NEXT: [[ADD_35:%.*]] = fadd fast float undef, [[ADD_34]]
+; CHECK-NEXT: [[ADD_36:%.*]] = fadd fast float undef, [[ADD_35]]
+; CHECK-NEXT: [[ADD_37:%.*]] = fadd fast float undef, [[ADD_36]]
+; CHECK-NEXT: [[ADD_38:%.*]] = fadd fast float undef, [[ADD_37]]
+; CHECK-NEXT: [[ADD_39:%.*]] = fadd fast float undef, [[ADD_38]]
+; CHECK-NEXT: [[ADD_40:%.*]] = fadd fast float undef, [[ADD_39]]
+; CHECK-NEXT: [[ADD_41:%.*]] = fadd fast float undef, [[ADD_40]]
+; CHECK-NEXT: [[ADD_42:%.*]] = fadd fast float undef, [[ADD_41]]
+; CHECK-NEXT: [[ADD_43:%.*]] = fadd fast float undef, [[ADD_42]]
+; CHECK-NEXT: [[ADD_44:%.*]] = fadd fast float undef, [[ADD_43]]
+; CHECK-NEXT: [[ADD_45:%.*]] = fadd fast float undef, [[ADD_44]]
+; CHECK-NEXT: [[ADD_46:%.*]] = fadd fast float undef, [[ADD_45]]
+; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP17]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP17]], [[RDX_SHUF]]
+; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT: [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT: [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT: [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; CHECK-NEXT: [[TMP19:%.*]] = fadd fast float [[TMP18]], [[TMP15]]
+; CHECK-NEXT: [[TMP20:%.*]] = fadd fast float [[TMP19]], [[TMP14]]
+; CHECK-NEXT: [[TMP21:%.*]] = fadd fast float [[TMP20]], [[TMP13]]
+; CHECK-NEXT: [[TMP22:%.*]] = fadd fast float [[TMP21]], [[TMP12]]
+; CHECK-NEXT: [[TMP23:%.*]] = fadd fast float [[TMP22]], [[TMP11]]
+; CHECK-NEXT: [[TMP24:%.*]] = fadd fast float [[TMP23]], [[TMP10]]
+; CHECK-NEXT: [[TMP25:%.*]] = fadd fast float [[TMP24]], [[TMP9]]
+; CHECK-NEXT: [[TMP26:%.*]] = fadd fast float [[TMP25]], [[TMP8]]
+; CHECK-NEXT: [[TMP27:%.*]] = fadd fast float [[TMP26]], [[TMP7]]
+; CHECK-NEXT: [[TMP28:%.*]] = fadd fast float [[TMP27]], [[TMP6]]
+; CHECK-NEXT: [[TMP29:%.*]] = fadd fast float [[TMP28]], [[TMP5]]
+; CHECK-NEXT: [[TMP30:%.*]] = fadd fast float [[TMP29]], [[TMP4]]
+; CHECK-NEXT: [[TMP31:%.*]] = fadd fast float [[TMP30]], [[TMP3]]
+; CHECK-NEXT: [[TMP32:%.*]] = fadd fast float [[TMP31]], [[TMP2]]
+; CHECK-NEXT: [[TMP33:%.*]] = fadd fast float [[TMP32]], [[TMP1]]
+; CHECK-NEXT: [[TMP34:%.*]] = fadd fast float [[TMP33]], [[TMP0]]
+; CHECK-NEXT: [[ADD_47:%.*]] = fadd fast float undef, [[ADD_46]]
+; CHECK-NEXT: ret float [[TMP34]]
+;
+entry:
+ %0 = load float, float* %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, float* %x, i64 1
+ %1 = load float, float* %arrayidx.1, align 4
+ %add.1 = fadd fast float %1, %0
+ %arrayidx.2 = getelementptr inbounds float, float* %x, i64 2
+ %2 = load float, float* %arrayidx.2, align 4
+ %add.2 = fadd fast float %2, %add.1
+ %arrayidx.3 = getelementptr inbounds float, float* %x, i64 3
+ %3 = load float, float* %arrayidx.3, align 4
+ %add.3 = fadd fast float %3, %add.2
+ %arrayidx.4 = getelementptr inbounds float, float* %x, i64 4
+ %4 = load float, float* %arrayidx.4, align 4
+ %add.4 = fadd fast float %4, %add.3
+ %arrayidx.5 = getelementptr inbounds float, float* %x, i64 5
+ %5 = load float, float* %arrayidx.5, align 4
+ %add.5 = fadd fast float %5, %add.4
+ %arrayidx.6 = getelementptr inbounds float, float* %x, i64 6
+ %6 = load float, float* %arrayidx.6, align 4
+ %add.6 = fadd fast float %6, %add.5
+ %arrayidx.7 = getelementptr inbounds float, float* %x, i64 7
+ %7 = load float, float* %arrayidx.7, align 4
+ %add.7 = fadd fast float %7, %add.6
+ %arrayidx.8 = getelementptr inbounds float, float* %x, i64 8
+ %8 = load float, float* %arrayidx.8, align 4
+ %add.8 = fadd fast float %8, %add.7
+ %arrayidx.9 = getelementptr inbounds float, float* %x, i64 9
+ %9 = load float, float* %arrayidx.9, align 4
+ %add.9 = fadd fast float %9, %add.8
+ %arrayidx.10 = getelementptr inbounds float, float* %x, i64 10
+ %10 = load float, float* %arrayidx.10, align 4
+ %add.10 = fadd fast float %10, %add.9
+ %arrayidx.11 = getelementptr inbounds float, float* %x, i64 11
+ %11 = load float, float* %arrayidx.11, align 4
+ %add.11 = fadd fast float %11, %add.10
+ %arrayidx.12 = getelementptr inbounds float, float* %x, i64 12
+ %12 = load float, float* %arrayidx.12, align 4
+ %add.12 = fadd fast float %12, %add.11
+ %arrayidx.13 = getelementptr inbounds float, float* %x, i64 13
+ %13 = load float, float* %arrayidx.13, align 4
+ %add.13 = fadd fast float %13, %add.12
+ %arrayidx.14 = getelementptr inbounds float, float* %x, i64 14
+ %14 = load float, float* %arrayidx.14, align 4
+ %add.14 = fadd fast float %14, %add.13
+ %arrayidx.15 = getelementptr inbounds float, float* %x, i64 15
+ %15 = load float, float* %arrayidx.15, align 4
+ %add.15 = fadd fast float %15, %add.14
+ %arrayidx.16 = getelementptr inbounds float, float* %x, i64 16
+ %16 = load float, float* %arrayidx.16, align 4
+ %add.16 = fadd fast float %16, %add.15
+ %arrayidx.17 = getelementptr inbounds float, float* %x, i64 17
+ %17 = load float, float* %arrayidx.17, align 4
+ %add.17 = fadd fast float %17, %add.16
+ %arrayidx.18 = getelementptr inbounds float, float* %x, i64 18
+ %18 = load float, float* %arrayidx.18, align 4
+ %add.18 = fadd fast float %18, %add.17
+ %arrayidx.19 = getelementptr inbounds float, float* %x, i64 19
+ %19 = load float, float* %arrayidx.19, align 4
+ %add.19 = fadd fast float %19, %add.18
+ %arrayidx.20 = getelementptr inbounds float, float* %x, i64 20
+ %20 = load float, float* %arrayidx.20, align 4
+ %add.20 = fadd fast float %20, %add.19
+ %arrayidx.21 = getelementptr inbounds float, float* %x, i64 21
+ %21 = load float, float* %arrayidx.21, align 4
+ %add.21 = fadd fast float %21, %add.20
+ %arrayidx.22 = getelementptr inbounds float, float* %x, i64 22
+ %22 = load float, float* %arrayidx.22, align 4
+ %add.22 = fadd fast float %22, %add.21
+ %arrayidx.23 = getelementptr inbounds float, float* %x, i64 23
+ %23 = load float, float* %arrayidx.23, align 4
+ %add.23 = fadd fast float %23, %add.22
+ %arrayidx.24 = getelementptr inbounds float, float* %x, i64 24
+ %24 = load float, float* %arrayidx.24, align 4
+ %add.24 = fadd fast float %24, %add.23
+ %arrayidx.25 = getelementptr inbounds float, float* %x, i64 25
+ %25 = load float, float* %arrayidx.25, align 4
+ %add.25 = fadd fast float %25, %add.24
+ %arrayidx.26 = getelementptr inbounds float, float* %x, i64 26
+ %26 = load float, float* %arrayidx.26, align 4
+ %add.26 = fadd fast float %26, %add.25
+ %arrayidx.27 = getelementptr inbounds float, float* %x, i64 27
+ %27 = load float, float* %arrayidx.27, align 4
+ %add.27 = fadd fast float %27, %add.26
+ %arrayidx.28 = getelementptr inbounds float, float* %x, i64 28
+ %28 = load float, float* %arrayidx.28, align 4
+ %add.28 = fadd fast float %28, %add.27
+ %arrayidx.29 = getelementptr inbounds float, float* %x, i64 29
+ %29 = load float, float* %arrayidx.29, align 4
+ %add.29 = fadd fast float %29, %add.28
+ %arrayidx.30 = getelementptr inbounds float, float* %x, i64 30
+ %30 = load float, float* %arrayidx.30, align 4
+ %add.30 = fadd fast float %30, %add.29
+ %arrayidx.31 = getelementptr inbounds float, float* %x, i64 31
+ %31 = load float, float* %arrayidx.31, align 4
+ %add.31 = fadd fast float %31, %add.30
+ %arrayidx.32 = getelementptr inbounds float, float* %x, i64 32
+ %32 = load float, float* %arrayidx.32, align 4
+ %add.32 = fadd fast float %32, %add.31
+ %arrayidx.33 = getelementptr inbounds float, float* %x, i64 33
+ %33 = load float, float* %arrayidx.33, align 4
+ %add.33 = fadd fast float %33, %add.32
+ %arrayidx.34 = getelementptr inbounds float, float* %x, i64 34
+ %34 = load float, float* %arrayidx.34, align 4
+ %add.34 = fadd fast float %34, %add.33
+ %arrayidx.35 = getelementptr inbounds float, float* %x, i64 35
+ %35 = load float, float* %arrayidx.35, align 4
+ %add.35 = fadd fast float %35, %add.34
+ %arrayidx.36 = getelementptr inbounds float, float* %x, i64 36
+ %36 = load float, float* %arrayidx.36, align 4
+ %add.36 = fadd fast float %36, %add.35
+ %arrayidx.37 = getelementptr inbounds float, float* %x, i64 37
+ %37 = load float, float* %arrayidx.37, align 4
+ %add.37 = fadd fast float %37, %add.36
+ %arrayidx.38 = getelementptr inbounds float, float* %x, i64 38
+ %38 = load float, float* %arrayidx.38, align 4
+ %add.38 = fadd fast float %38, %add.37
+ %arrayidx.39 = getelementptr inbounds float, float* %x, i64 39
+ %39 = load float, float* %arrayidx.39, align 4
+ %add.39 = fadd fast float %39, %add.38
+ %arrayidx.40 = getelementptr inbounds float, float* %x, i64 40
+ %40 = load float, float* %arrayidx.40, align 4
+ %add.40 = fadd fast float %40, %add.39
+ %arrayidx.41 = getelementptr inbounds float, float* %x, i64 41
+ %41 = load float, float* %arrayidx.41, align 4
+ %add.41 = fadd fast float %41, %add.40
+ %arrayidx.42 = getelementptr inbounds float, float* %x, i64 42
+ %42 = load float, float* %arrayidx.42, align 4
+ %add.42 = fadd fast float %42, %add.41
+ %arrayidx.43 = getelementptr inbounds float, float* %x, i64 43
+ %43 = load float, float* %arrayidx.43, align 4
+ %add.43 = fadd fast float %43, %add.42
+ %arrayidx.44 = getelementptr inbounds float, float* %x, i64 44
+ %44 = load float, float* %arrayidx.44, align 4
+ %add.44 = fadd fast float %44, %add.43
+ %arrayidx.45 = getelementptr inbounds float, float* %x, i64 45
+ %45 = load float, float* %arrayidx.45, align 4
+ %add.45 = fadd fast float %45, %add.44
+ %arrayidx.46 = getelementptr inbounds float, float* %x, i64 46
+ %46 = load float, float* %arrayidx.46, align 4
+ %add.46 = fadd fast float %46, %add.45
+ %arrayidx.47 = getelementptr inbounds float, float* %x, i64 47
+ %47 = load float, float* %arrayidx.47, align 4
+ %add.47 = fadd fast float %47, %add.46
+ ret float %add.47
+}
+
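The CHECK lines above record the current behavior for this non-power-of-2 chain: the vectorizer takes the largest power-of-two slice, loading x[16]..x[47] as a single <32 x float> and reducing it with five shufflevector/fadd fast stages (log2 of 32), while x[0]..x[15] stay a scalar add chain folded into the reduced value; the leftover scalar instructions feeding only [[ADD_47]] are dead and presumably removed by later passes. The file's RUN line sits outside this hunk; a test like this is typically driven by something like `opt -slp-vectorizer -S | FileCheck` (an assumption about this particular file, not shown in the diff).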