Commit 4ec4b80

Remove volatile bitfield separation heuristic
1 parent 1f9b452 commit 4ec4b80

File tree

3 files changed: +62, -83 lines


clang/lib/CodeGen/CGRecordLayoutBuilder.cpp

Lines changed: 1 addition & 20 deletions
@@ -499,7 +499,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
 
   // The accumulation is also prevented when:
   // *) it would cross a zero-width bitfield (ABI-dependent), or
-  // *) one of the candidate access units contains a volatile bitfield, or
   // *) fine-grained bitfield access option is in effect.
 
   CharUnits RegSize =
@@ -523,10 +522,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   RecordDecl::field_iterator BestEnd = Begin;
   CharUnits BestEndOffset;
 
-  bool Volatile; // True iff the initial span or post-BestEnd span contains a
-                 // volatile bitfield. We do not want to merge spans containing
-                 // a volatile bitfield.
-
   for (;;) {
     // AtAlignedBoundary is true iff Field is the (potential) start of a new
     // span (or the end of the bitfields). When true, LimitOffset is the
@@ -546,7 +541,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
         assert((BitOffset % CharBits) == 0 && "Not at start of char");
         BeginOffset = bitsToCharUnits(BitOffset);
         BitSizeSinceBegin = 0;
-        Volatile = false;
       } else if ((BitOffset % CharBits) != 0) {
         // Bitfield occupies the same char as previous, it must be part of the
         // same span.
@@ -604,10 +598,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
         // A zero-sized initial span -- this will install nothing and reset
         // for another.
         InstallBest = true;
-      } else if (Volatile) {
-        // We've encountered a volatile bitfield in the just-seen non-initial
-        // span. It should not be merged into the current accumulation.
-        InstallBest = true;
       } else if (AccessSize > RegSize)
         // Accumulating the just-seen span would create a multi-register access
         // unit, which would increase register pressure.
@@ -645,12 +635,6 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       if (Barrier)
         // The next field is a barrier that we cannot merge-across.
        InstallBest = true;
-      else if (Volatile)
-        // The initial span contains a volatile bitfield, do not merge any
-        // subsequent spans. (We can only get here for the initial span, any
-        // subsequent potential volatile span will have already bailed
-        // above.)
-        InstallBest = true;
       else
         // LimitOffset is the offset of the (aligned) next bitfield in this
         // case.
@@ -681,13 +665,10 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
     } else {
       assert(Field != FieldEnd && Field->isBitField() &&
              "Accumulating past end of bitfields");
-      assert(((getFieldBitOffset(*Field) % CharBits) != 0 ||
-              (!Volatile && !Barrier)) &&
+      assert(((getFieldBitOffset(*Field) % CharBits) != 0 || !Barrier) &&
              "Accumulating across volatile or barrier");
       // Accumulate this bitfield into the current (potential) span.
       BitSizeSinceBegin += Field->getBitWidthValue(Context);
-      if (Field->getType().isVolatileQualified())
-        Volatile = true;
       ++Field;
     }
   }
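
With the heuristic gone, a span containing a volatile bitfield is no longer forced into its own access unit and can be merged with adjacent bitfields into one wider load/store. A minimal sketch of the effect, assuming a struct shaped like the st5 test case below (the int type of the first 12-bit field is an assumption; only its width is visible in the checks):

/* Sketch, not part of the patch: with the volatile heuristic removed,
 * both bitfields land in a single 32-bit access unit, so the LLVM
 * struct type becomes { i32 } instead of { i16, i8 } (see the
 * aapcs-bitfield-access-unit.c checks below). */
struct example {            /* assumed shape, mirroring the st5 test   */
  int a : 12;               /* previously its own 16-bit access unit   */
  volatile char c : 5;      /* previously a separate 8-bit access unit */
};

/* Reading c now compiles to one volatile 32-bit load plus shift/trunc,
 * rather than a narrow volatile 8-bit load. */
int read_c(struct example *p) { return p->c; }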

clang/test/CodeGen/aapcs-bitfield-access-unit.c

Lines changed: 5 additions & 5 deletions
@@ -68,12 +68,12 @@ struct st5 {
   volatile char c : 5;
 } st5;
 // LAYOUT-LABEL: LLVMType:%struct.st5 =
-// LAYOUT-SAME: type { i16, i8 }
+// LAYOUT-SAME: type { i32 }
 // LAYOUT: BitFields:[
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:1 StorageSize:8 StorageOffset:2
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:4 Size:12 IsSigned:1 StorageSize:16 StorageOffset:0
-// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:3 Size:5 IsSigned:1 StorageSize:8 StorageOffset:2
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:0 Size:12 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_LE-NEXT: <CGBitFieldInfo Offset:16 Size:5 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:20 Size:12 IsSigned:1 StorageSize:32 StorageOffset:0
+// LAYOUT_BE-NEXT: <CGBitFieldInfo Offset:11 Size:5 IsSigned:1 StorageSize:32 StorageOffset:0
 // LAYOUT-NEXT: ]>
 
 struct st6 {
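
As a cross-check on the new CGBitFieldInfo lines, the big-endian offsets are the little-endian ones mirrored within the 32-bit storage unit. A small C11 sketch of that arithmetic (field sizes and LE offsets are taken from the checks above; the mirroring rule itself is an assumption about how clang reports big-endian bitfield offsets):

/* LE offsets/sizes as printed in the LAYOUT_LE lines above. */
enum { UnitBits = 32, LE_a = 0, SizeA = 12, LE_c = 16, SizeC = 5 };

/* Mirroring within the unit: BE offset = UnitBits - LE offset - size. */
_Static_assert(UnitBits - LE_a - SizeA == 20, "matches LAYOUT_BE Offset:20");
_Static_assert(UnitBits - LE_c - SizeC == 11, "matches LAYOUT_BE Offset:11");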

clang/test/CodeGen/aapcs-bitfield.c

Lines changed: 56 additions & 58 deletions
@@ -809,68 +809,70 @@ struct st5 {
 
 // LE-LABEL: @st5_check_load(
 // LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LE-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LE-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // LE-NEXT: ret i32 [[CONV]]
 //
 // BE-LABEL: @st5_check_load(
 // BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BE-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BE-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BE-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // BE-NEXT: ret i32 [[CONV]]
 //
 // LENUMLOADS-LABEL: @st5_check_load(
 // LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
-// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
-// LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 11
+// LENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// LENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// LENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // LENUMLOADS-NEXT: ret i32 [[CONV]]
 //
 // BENUMLOADS-LABEL: @st5_check_load(
 // BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
-// BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT: [[BF_SHL:%.*]] = shl i32 [[BF_LOAD]], 16
+// BENUMLOADS-NEXT: [[BF_ASHR:%.*]] = ashr i32 [[BF_SHL]], 27
+// BENUMLOADS-NEXT: [[BF_CAST:%.*]] = trunc i32 [[BF_ASHR]] to i8
+// BENUMLOADS-NEXT: [[CONV:%.*]] = sext i8 [[BF_CAST]] to i32
 // BENUMLOADS-NEXT: ret i32 [[CONV]]
 //
 // LEWIDTH-LABEL: @st5_check_load(
 // LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTH-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
 // LEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // LEWIDTH-NEXT: ret i32 [[CONV]]
 //
 // BEWIDTH-LABEL: @st5_check_load(
 // BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTH-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
 // BEWIDTH-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // BEWIDTH-NEXT: ret i32 [[CONV]]
 //
 // LEWIDTHNUM-LABEL: @st5_check_load(
 // LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3
 // LEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_SHL]], 3
 // LEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // LEWIDTHNUM-NEXT: ret i32 [[CONV]]
 //
 // BEWIDTHNUM-LABEL: @st5_check_load(
 // BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3
 // BEWIDTHNUM-NEXT: [[CONV:%.*]] = sext i8 [[BF_ASHR]] to i32
 // BEWIDTHNUM-NEXT: ret i32 [[CONV]]
@@ -881,74 +883,70 @@ int st5_check_load(struct st5 *m) {
 
 // LE-LABEL: @st5_check_store(
 // LE-NEXT: entry:
-// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LE-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // LE-NEXT: ret void
 //
 // BE-LABEL: @st5_check_store(
 // BE-NEXT: entry:
-// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BE-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BE-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BE-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BE-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // BE-NEXT: ret void
 //
 // LENUMLOADS-LABEL: @st5_check_store(
 // LENUMLOADS-NEXT: entry:
-// LENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
-// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// LENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -2031617
+// LENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 65536
+// LENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // LENUMLOADS-NEXT: ret void
 //
 // BENUMLOADS-LABEL: @st5_check_store(
 // BENUMLOADS-NEXT: entry:
-// BENUMLOADS-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
-// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
-// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BENUMLOADS-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i32, ptr [[M:%.*]], align 4
+// BENUMLOADS-NEXT: [[BF_CLEAR:%.*]] = and i32 [[BF_LOAD]], -63489
+// BENUMLOADS-NEXT: [[BF_SET:%.*]] = or i32 [[BF_CLEAR]], 2048
+// BENUMLOADS-NEXT: store volatile i32 [[BF_SET]], ptr [[M]], align 4
 // BENUMLOADS-NEXT: ret void
 //
 // LEWIDTH-LABEL: @st5_check_store(
 // LEWIDTH-NEXT: entry:
-// LEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
 // LEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // LEWIDTH-NEXT: ret void
 //
 // BEWIDTH-LABEL: @st5_check_store(
 // BEWIDTH-NEXT: entry:
-// BEWIDTH-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTH-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTH-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTH-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
 // BEWIDTH-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTH-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // BEWIDTH-NEXT: ret void
 //
 // LEWIDTHNUM-LABEL: @st5_check_store(
 // LEWIDTHNUM-NEXT: entry:
-// LEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// LEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// LEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32
 // LEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 1
-// LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// LEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // LEWIDTHNUM-NEXT: ret void
 //
 // BEWIDTHNUM-LABEL: @st5_check_store(
 // BEWIDTHNUM-NEXT: entry:
-// BEWIDTHNUM-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST5:%.*]], ptr [[M:%.*]], i32 0, i32 1
-// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[C]], align 2
+// BEWIDTHNUM-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[M:%.*]], i32 2
+// BEWIDTHNUM-NEXT: [[BF_LOAD:%.*]] = load volatile i8, ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7
 // BEWIDTHNUM-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 8
-// BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[C]], align 2
+// BEWIDTHNUM-NEXT: store volatile i8 [[BF_SET]], ptr [[TMP0]], align 2
 // BEWIDTHNUM-NEXT: ret void
 //
 void st5_check_store(struct st5 *m) {
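
The new 32-bit constants decode as a mask-and-insert of the 5-bit field at bit 16 (LE) or bit 11 (BE) of the access unit. A hedged C sketch of what the LE check lines compute, assuming the test bodies are roughly `return m->c;` and `m->c = 1;` (the C source is not shown in this hunk) and that signed right shift is arithmetic, as on AAPCS targets:

#include <stdint.h>

/* Equivalent of the LE @st5_check_load lines: the 5-bit field sits at
 * bits 16..20 of the 32-bit unit, so shl 11 moves its sign bit to bit 31
 * and ashr 27 sign-extends the 5-bit value. */
static int le_load_c(volatile uint32_t *unit) {
  uint32_t bf_load = *unit;                     /* load volatile i32        */
  int32_t bf_shl = (int32_t)(bf_load << 11);    /* shl i32 %bf.load, 11     */
  int32_t bf_ashr = bf_shl >> 27;               /* ashr i32 %bf.shl, 27     */
  return (int8_t)bf_ashr;                       /* trunc to i8, sext to i32 */
}

/* Equivalent of the LE @st5_check_store lines: -2031617 == ~(0x1F << 16)
 * clears the field, 65536 == 1 << 16 inserts the value 1. */
static void le_store_c_1(volatile uint32_t *unit) {
  uint32_t bf_load = *unit;                     /* load volatile i32        */
  uint32_t bf_clear = bf_load & ~(0x1Fu << 16); /* and i32 ..., -2031617    */
  uint32_t bf_set = bf_clear | (1u << 16);      /* or i32 ..., 65536        */
  *unit = bf_set;                               /* store volatile i32       */
}

The BE variants follow the same pattern with the field at bits 11..15: shl 16, mask -63489 == ~(0x1F << 11), insert 2048 == 1 << 11.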

0 commit comments
