
Commit 330e1b8

Author: Evandro Menezes
Parent: 51818c1

[AArch64] Consider all vector types for FeatureSlowMisaligned128Store

The original code considered only v2i64 as slow for this feature. This patch
considers all 128-bit vector types as slow candidates. In internal tests,
extending this feature to all 128-bit vector types resulted in an overall
improvement of 1% on Exynos M1.

Differential revision: https://reviews.llvm.org/D27998

llvm-svn: 291616
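For a concrete feel of the numbers involved, below is a minimal standalone sketch (not LLVM code) of the store-cost rule this patch installs. The function name misalignedStoreCost and the parameters Is128BitVector, AlignmentBytes, and NumLegalParts are hypothetical stand-ins for ST->isMisaligned128StoreSlow(), LT.second.is128BitVector(), the store's alignment, and LT.first in the first hunk below; the fall-through cost is only approximate.

#include <iostream>

// Sketch of the penalty added in AArch64TTIImpl::getMemoryOpCost: any
// misaligned store whose legalized type is a 128-bit vector is made
// expensive enough to be amortized over 6 other vectorized instructions.
int misalignedStoreCost(bool SlowMisaligned128Store, bool Is128BitVector,
                        unsigned AlignmentBytes, int NumLegalParts) {
  const int AmortizationCost = 6; // same constant as in the patch
  if (SlowMisaligned128Store && Is128BitVector && AlignmentBytes < 16)
    return NumLegalParts * 2 * AmortizationCost;
  return NumLegalParts; // assumption: roughly one store per legalized part
}

int main() {
  // <2 x i64> legalizes to one 128-bit part: 1 * 2 * 6 = 12.
  std::cout << misalignedStoreCost(true, true, 8, 1) << '\n';
  // <4 x i64> legalizes to two 128-bit parts: 2 * 2 * 6 = 24.
  std::cout << misalignedStoreCost(true, true, 8, 2) << '\n';
}

These are the same costs (12 and 24) that the updated test below checks under the SLOW_MISALIGNED_128_STORE prefix.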

2 files changed, 61 insertions(+), 20 deletions(-)

llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp

Lines changed: 11 additions & 12 deletions
@@ -466,28 +466,27 @@ int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
 }
 
-int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                     unsigned Alignment, unsigned AddressSpace) {
-  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
+  auto LT = TLI->getTypeLegalizationCost(DL, Ty);
 
   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
-      Src->isVectorTy() && Alignment != 16 &&
-      Src->getVectorElementType()->isIntegerTy(64)) {
-    // Unaligned stores are extremely inefficient. We don't split
-    // unaligned v2i64 stores because the negative impact that has shown in
-    // practice on inlined memcpy code.
-    // We make v2i64 stores expensive so that we will only vectorize if there
+      LT.second.is128BitVector() && Alignment < 16) {
+    // Unaligned stores are extremely inefficient. We don't split all
+    // unaligned 128-bit stores because the negative impact that has shown in
+    // practice on inlined block copy code.
+    // We make such stores expensive so that we will only vectorize if there
     // are 6 other instructions getting vectorized.
-    int AmortizationCost = 6;
+    const int AmortizationCost = 6;
 
     return LT.first * 2 * AmortizationCost;
   }
 
-  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
-      Src->getVectorNumElements() < 8) {
+  if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
+      Ty->getVectorNumElements() < 8) {
     // We scalarize the loads/stores because there is not v.4b register and we
     // have to promote the elements to v.4h.
-    unsigned NumVecElts = Src->getVectorNumElements();
+    unsigned NumVecElts = Ty->getVectorNumElements();
     unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
     // We generate 2 instructions per vector element.
     return NumVectorizableInstsToAmortize * NumVecElts * 2;
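Worked numbers for the hunk above: under the feature, the cost becomes LT.first * 2 * AmortizationCost. A misaligned store of a legal 128-bit type such as <2 x i64> has LT.first == 1, giving 1 * 2 * 6 = 12; a 256-bit store such as <4 x i64> is legalized into two 128-bit stores (LT.first == 2), giving 2 * 2 * 6 = 24. These match the SLOW_MISALIGNED_128_STORE expectations in the test below.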

llvm/test/Analysis/CostModel/AArch64/store.ll

Lines changed: 50 additions & 8 deletions
@@ -1,17 +1,59 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios | FileCheck %s
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
 ; CHECK-LABEL: getMemoryOpCost
 ; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
 define void @getMemoryOpCost() {
-  ; If FeatureSlowMisaligned128Store is set, we penalize <2 x i64> stores. On
-  ; Cyclone, for example, such stores should be expensive because we don't
-  ; split them and misaligned 16b stores have bad performance.
-  ;
-  ; CHECK: cost of 1 {{.*}} store
-  ; SLOW_MISALIGNED_128_STORE: cost of 12 {{.*}} store
+  ; If FeatureSlowMisaligned128Store is set, we penalize 128-bit stores.
+  ; The unlegalized 256-bit stores are further penalized when legalized down
+  ; to 128-bit stores.
+
+  ; CHECK: cost of 2 for {{.*}} store <4 x i64>
+  ; SLOW_MISALIGNED_128_STORE: cost of 24 for {{.*}} store <4 x i64>
+  store <4 x i64> undef, <4 x i64> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x i32>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x i32>
+  store <8 x i32> undef, <8 x i32> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x i16>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x i16>
+  store <16 x i16> undef, <16 x i16> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <32 x i8>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <32 x i8>
+  store <32 x i8> undef, <32 x i8> * undef
+
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <4 x double>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <4 x double>
+  store <4 x double> undef, <4 x double> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x float>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x float>
+  store <8 x float> undef, <8 x float> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x half>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x half>
+  store <16 x half> undef, <16 x half> * undef
+
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x i64>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x i64>
   store <2 x i64> undef, <2 x i64> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x i32>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x i32>
+  store <4 x i32> undef, <4 x i32> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x i16>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x i16>
+  store <8 x i16> undef, <8 x i16> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <16 x i8>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <16 x i8>
+  store <16 x i8> undef, <16 x i8> * undef
+
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x double>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x double>
+  store <2 x double> undef, <2 x double> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x float>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x float>
+  store <4 x float> undef, <4 x float> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x half>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x half>
+  store <8 x half> undef, <8 x half> * undef
 
   ; We scalarize the loads/stores because there is no vector register name for
   ; these types (they get extended to v.4h/v.2s).
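The expectations above can be reproduced from the RUN lines directly; with a built opt and FileCheck on the path, opt < llvm/test/Analysis/CostModel/AArch64/store.ll -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store prints the per-instruction costs matched by the SLOW_MISALIGNED_128_STORE prefix.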
