@@ -93,10 +93,6 @@ struct CGRecordLowering {
     // MemberInfos are sorted so we define a < operator.
     bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
   };
-  // BitFieldAccessUnit is helper structure for accumulateBitFields. It
-  // represents a set of bitfields in the same load/store unit.
-  class BitFieldAccessUnit;
-
   // The constructor.
   CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
   // Short helper routines.
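
MemberInfo's `<` compares Offset alone, so the lowering can push a storage
member first and its bitfields after it at the same offset, then rely on a
stable sort to keep that order. A minimal standalone sketch (simplified
MemberInfo with a hypothetical Tag payload -- not Clang's actual struct):

  #include <algorithm>
  #include <cassert>
  #include <vector>

  // Simplified stand-in for CGRecordLowering's MemberInfo.
  struct MemberInfo {
    unsigned Offset;
    int Tag; // Hypothetical marker so we can observe the ordering.
    bool operator<(const MemberInfo &a) const { return Offset < a.Offset; }
  };

  int main() {
    // Storage pushed first (Tag 0), its bitfields after (Tags 1, 2), all at
    // offset 4; an unrelated member at offset 0 pushed last.
    std::vector<MemberInfo> Members = {{4, 0}, {4, 1}, {4, 2}, {0, 3}};
    std::stable_sort(Members.begin(), Members.end());
    // Offset 0 now leads; the equal-offset entries keep insertion order, so
    // storage still precedes its bitfields.
    assert(Members[0].Tag == 3 && Members[1].Tag == 0 && Members[2].Tag == 1);
  }
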
@@ -193,10 +189,6 @@ struct CGRecordLowering {
   RecordDecl::field_iterator
   accumulateBitFields(RecordDecl::field_iterator Field,
                       RecordDecl::field_iterator FieldEnd);
-  BitFieldAccessUnit
-  gatherBitFieldAccessUnit(RecordDecl::field_iterator Field,
-                           RecordDecl::field_iterator FieldEnd) const;
-  void installBitFieldAccessUnit(const BitFieldAccessUnit &);
   void computeVolatileBitfields();
   void accumulateBases();
   void accumulateVPtrs();
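
For context, a sketch of how the caller drives accumulateBitFields (simplified
from the surrounding accumulateFields loop, not verbatim): a run of bitfields
is handed off wholesale and iteration resumes at the first non-bitfield the
function returns.

  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
       Field != FieldEnd;) {
    if (Field->isBitField())
      Field = accumulateBitFields(Field, FieldEnd);
    else {
      // ... lay out an ordinary member, then advance ...
      ++Field;
    }
  }
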
@@ -411,114 +403,6 @@ void CGRecordLowering::accumulateFields() {
   }
 }
 
-// A run of bitfields assigned to the same access unit -- the size of memory
-// loads & stores.
-class CGRecordLowering::BitFieldAccessUnit {
-  RecordDecl::field_iterator Begin; // Field at start of this access unit.
-  RecordDecl::field_iterator End;   // Field just after this access unit.
-
-  CharUnits StartOffset; // Starting offset in the containing record.
-  CharUnits EndOffset;   // Finish offset (exclusive) in the containing record.
-
-  bool IsBarrier;        // Is a barrier between access units.
-  bool ContainsVolatile; // This access unit contains a volatile bitfield.
-
-public:
-  BitFieldAccessUnit(RecordDecl::field_iterator B, RecordDecl::field_iterator E,
-                     CharUnits SO, CharUnits EO, bool Barrier = false,
-                     bool Volatile = false)
-      : Begin(B), End(E), StartOffset(SO), EndOffset(EO), IsBarrier(Barrier),
-        ContainsVolatile(Volatile) {}
-
-  // Re-set the end of this unit if there is space before Probe starts.
-  void enlargeIfSpace(const BitFieldAccessUnit &Probe, CharUnits Offset) {
-    if (Probe.getStartOffset() >= Offset) {
-      End = Probe.begin();
-      EndOffset = Offset;
-    }
-  }
-
-public:
-  RecordDecl::field_iterator begin() const { return Begin; }
-  RecordDecl::field_iterator end() const { return End; }
-
-public:
-  // Accessors
-  CharUnits getSize() const { return EndOffset - StartOffset; }
-  CharUnits getStartOffset() const { return StartOffset; }
-  CharUnits getEndOffset() const { return EndOffset; }
-
-  // Predicates
-  bool isBarrier() const { return IsBarrier; }
-  bool hasVolatile() const { return ContainsVolatile; }
-  bool isEnd() const { return begin() == end(); }
-};
-
-CGRecordLowering::BitFieldAccessUnit CGRecordLowering::gatherBitFieldAccessUnit(
-    RecordDecl::field_iterator Field,
-    RecordDecl::field_iterator FieldEnd) const {
-  if (Field == FieldEnd || !Field->isBitField()) {
-    // Skip over any empty fields to find the next used offset.
-    auto Probe = Field;
-    while (Probe != FieldEnd && Probe->isZeroSize(Context))
-      ++Probe;
-    // We can't necessarily use tail padding in C++ structs, so the NonVirtual
-    // size is what we must use there.
-    CharUnits Limit = Probe != FieldEnd
-                          ? bitsToCharUnits(getFieldBitOffset(*Probe))
-                          : RD ? Layout.getNonVirtualSize()
-                               : Layout.getDataSize();
-    return BitFieldAccessUnit(Field, Field, Limit, Limit, true);
-  }
-
-  auto Begin = Field;
-  uint64_t StartBit = getFieldBitOffset(*Field);
-  uint64_t BitSize = Field->getBitWidthValue(Context);
-  unsigned CharBits = Context.getCharWidth();
-  bool Volatile = Field->getType().isVolatileQualified();
-
-  assert(!(StartBit % CharBits) && "Not at start of char");
-
-  // Gather bitfields until we have one starting at a byte boundary.
-  for (++Field; Field != FieldEnd && Field->isBitField(); ++Field) {
-    uint64_t BitOffset = getFieldBitOffset(*Field);
-    if (!(BitOffset % CharBits))
-      // This BitField starts at a byte boundary. It belongs in the next access
-      // unit (initially).
-      break;
-    assert(BitOffset == StartBit + BitSize &&
-           "Concatenating non-contiguous bitfields");
-    BitSize += Field->getBitWidthValue(Context);
-    if (Field->getType().isVolatileQualified())
-      Volatile = true;
-  }
-
-  // A zero-sized access unit is only a barrier under some conditions.
-  bool Barrier =
-      !BitSize && (Context.getTargetInfo().useZeroLengthBitfieldAlignment() ||
-                   Context.getTargetInfo().useBitFieldTypeAlignment());
-
-  return BitFieldAccessUnit(Begin, Field, bitsToCharUnits(StartBit),
-                            bitsToCharUnits(StartBit + BitSize + CharBits - 1),
-                            Barrier, Volatile);
-}
-
-// Create the containing access unit and install the bitfields.
-void CGRecordLowering::installBitFieldAccessUnit(const BitFieldAccessUnit &U) {
-  if (U.getSize().isZero())
-    return;
-
-  // Add the storage member for the access unit to the record. The
-  // bitfields get the offset of their storage but come afterward and remain
-  // there after a stable sort.
-  llvm::Type *Type = getIntNType(Context.toBits(U.getSize()));
-  Members.push_back(StorageInfo(U.getStartOffset(), Type));
-  for (auto F : U)
-    if (!F->isZeroLengthBitField(Context))
-      Members.push_back(
-          MemberInfo(U.getStartOffset(), MemberInfo::Field, nullptr, F));
-}
-
 // Create members for bitfields. Field is a bitfield, and FieldEnd is the end
 // iterator of the record. Return the first non-bitfield encountered.
 RecordDecl::field_iterator
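
Both the removed helper and the loop that replaces it (next hunk) implement
the same idea: consecutive bitfields are packed into "access units", the spans
of storage a bitfield load or store touches. A hypothetical illustration --
the actual grouping depends on the target's alignment rules, register width,
and whether unaligned access is cheap:

  struct S {
    unsigned a : 4;  // Starts at bit 0 of byte 0.
    unsigned b : 4;  // Same char as 'a': must share its access unit.
    unsigned c : 16; // Starts at a char boundary; may merge into [a,b,c].
    unsigned : 0;    // Zero-length bitfield: typically a barrier between units.
    unsigned d : 3;  // Begins a fresh access unit after the barrier.
  };
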
@@ -621,66 +505,133 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
 
   CharUnits RegSize =
       bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
-  // The natural size of an access of Size, unless poorly aligned, in which case
-  // CharUnits::Zero is returned.
-  auto NaturalSize = [&](CharUnits StartOffset, CharUnits Size) {
-    if (Size.isZero())
-      return Size;
-    llvm::Type *Type = getIntNType(Context.toBits(Size));
-    if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
-      // This alignment is that of the storage used -- for instance
-      // (usually) 4 bytes for a 24-bit type.
-      CharUnits Align = getAlignment(Type);
-      if (Align > Layout.getAlignment() || !StartOffset.isMultipleOf(Align))
-        return CharUnits::Zero();
+  unsigned CharBits = Context.getCharWidth();
+
+  RecordDecl::field_iterator Begin = FieldEnd;
+  CharUnits StartOffset;
+  uint64_t BitSize;
+  CharUnits BestEndOffset;
+  RecordDecl::field_iterator BestEnd = Begin;
+  bool Volatile;
+
+  for (;;) {
+    CharUnits Limit;
+    bool Barrier;
+    bool Install = false;
+
+    if (Field != FieldEnd && Field->isBitField()) {
+      uint64_t BitOffset = getFieldBitOffset(*Field);
+      if (Begin == FieldEnd) {
+        // Beginning a new access unit.
+        Begin = Field;
+        BestEnd = Begin;
+
+        assert(!(BitOffset % CharBits) && "Not at start of char");
+        StartOffset = bitsToCharUnits(BitOffset);
+        BitSize = 0;
+        Volatile = false;
+      } else if (BitOffset % CharBits) {
+        // Bitfield occupies the same char as previous.
+        assert(BitOffset == Context.toBits(StartOffset) + BitSize &&
+               "Concatenating non-contiguous bitfields");
+      } else {
+        // Bitfield begins a new access unit.
+        Limit = bitsToCharUnits(BitOffset);
+        Barrier = false;
+        if (Field->isZeroLengthBitField(Context) &&
+            (Context.getTargetInfo().useZeroLengthBitfieldAlignment() ||
+             Context.getTargetInfo().useBitFieldTypeAlignment()))
+          Barrier = true;
+        Install = true;
+      }
+    } else if (Begin == FieldEnd) {
+      // Completed the bitfields.
+      break;
+    } else {
+      // End of the bitfield span, with active access unit.
+      auto Probe = Field;
+      while (Probe != FieldEnd && Probe->isZeroSize(Context))
+        ++Probe;
+      // We can't necessarily use tail padding in C++ structs, so the NonVirtual
+      // size is what we must use there.
+      Limit = Probe != FieldEnd ? bitsToCharUnits(getFieldBitOffset(*Probe))
+              : RD              ? Layout.getNonVirtualSize()
+                                : Layout.getDataSize();
+      Barrier = true;
+      Install = true;
     }
-    // This is the storage-size of Type -- for instance a 24-bit type will
-    // require 4 bytes.
-    return getSize(Type);
-  };
 
-  BitFieldAccessUnit Current = gatherBitFieldAccessUnit(Field, FieldEnd);
-  do {
-    BitFieldAccessUnit Probe =
-        gatherBitFieldAccessUnit(Current.end(), FieldEnd);
-    if (!Current.getSize().isZero() &&
-        !Types.getCodeGenOpts().FineGrainedBitfieldAccesses) {
-      CharUnits Size = NaturalSize(Current.getStartOffset(), Current.getSize());
-      if (!Size.isZero()) {
-        CharUnits EndOffset = Current.getStartOffset() + Size;
-
-        Current.enlargeIfSpace(Probe, EndOffset);
-
-        if (!Current.hasVolatile())
-          // Look for beneficial merging with subsequent access units.
-          while (!Probe.isBarrier() && !Probe.hasVolatile()) {
-            CharUnits NewSize = Probe.getEndOffset() - Current.getStartOffset();
-            if (NewSize > Size) {
-              if (NewSize > RegSize)
-                break;
-
-              Size = NaturalSize(Current.getStartOffset(), NewSize);
-              if (Size.isZero())
-                break;
-              assert(Size <= RegSize && "Unexpectedly large storage");
-              EndOffset = Current.getStartOffset() + Size;
-            }
-            Probe = gatherBitFieldAccessUnit(Probe.end(), FieldEnd);
-            Current.enlargeIfSpace(Probe, EndOffset);
+    if (Install) {
+      // Found the start of a new access unit. Determine if that completes the
+      // current one, or potentially extends it.
+      Install = false;
+      CharUnits Size = bitsToCharUnits(BitSize + CharBits - 1);
+      if (BestEnd == Begin) {
+        // This is the initial access unit.
+        BestEnd = Field;
+        BestEndOffset = StartOffset + Size;
+        if (!BitSize || Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
+          // A barrier, or we're fine grained.
+          Install = true;
+      } else if (Size > RegSize || Volatile)
+        // Too big to accumulate, or just-seen access unit contains a volatile.
+        Install = true;
+
+      if (!Install) {
+        llvm::Type *Type = getIntNType(Context.toBits(Size));
+        if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
+          // This alignment is that of the storage used -- for instance
+          // (usually) 4 bytes for a 24-bit type.
+          CharUnits Align = getAlignment(Type);
+          if (Align > Layout.getAlignment() || !StartOffset.isMultipleOf(Align))
+            // Not naturally aligned.
+            Install = true;
+        }
+
+        if (!Install) {
+          Size = getSize(Type);
+          if (StartOffset + Size <= Limit) {
+            // The next unit starts later, extend the current access unit to
+            // include the just-gathered access unit.
+            BestEndOffset = StartOffset + Size;
+            BestEnd = Field;
           }
+
+          if (Volatile || Barrier)
+            // Contained a volatile, or next access unit is a barrier.
+            Install = true;
+          else
+            BitSize = Context.toBits(Limit - StartOffset);
+        }
       }
     }
 
-    installBitFieldAccessUnit(Current);
-
-    // If Probe is the next access unit, we can use that, otherwise (we scanned
-    // ahead and found nothing good), we have to recompute the next access unit.
-    Current = Current.end() == Probe.begin()
-                  ? Probe
-                  : gatherBitFieldAccessUnit(Current.end(), FieldEnd);
-  } while (!Current.isEnd());
+    if (Install) {
+      CharUnits Size = BestEndOffset - StartOffset;
+      if (!Size.isZero()) {
+        // Add the storage member for the access unit to the record. The
+        // bitfields get the offset of their storage but come afterward and
+        // remain there after a stable sort.
+        llvm::Type *Type = getIntNType(Context.toBits(Size));
+        Members.push_back(StorageInfo(StartOffset, Type));
+        for (; Begin != BestEnd; ++Begin)
+          if (!Begin->isZeroLengthBitField(Context))
+            Members.push_back(
+                MemberInfo(StartOffset, MemberInfo::Field, nullptr, *Begin));
+      }
+      // Reset to start a new Access Unit.
+      Field = BestEnd;
+      Begin = FieldEnd;
+    } else {
+      // Accumulate this bitfield into the (potentially) current access unit.
+      BitSize += Field->getBitWidthValue(Context);
+      if (Field->getType().isVolatileQualified())
+        Volatile = true;
+      ++Field;
+    }
+  }
 
-  return Current.begin();
+  return Field;
 }
 
 void CGRecordLowering::accumulateBases() {
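
One detail worth calling out in the new code: bitsToCharUnits truncates, so
bitsToCharUnits(BitSize + CharBits - 1) computes the ceiling -- the number of
whole chars needed to cover the accumulated bits. A self-contained sketch of
that rounding (local helper names, not Clang API):

  #include <cstdint>

  // bitsToCharUnits is truncating division by the char width; adding
  // CharBits - 1 first turns it into a ceiling.
  constexpr uint64_t bitsToCharUnitsTrunc(uint64_t Bits, uint64_t CharBits) {
    return Bits / CharBits;
  }
  constexpr uint64_t charUnitsCeil(uint64_t BitSize, uint64_t CharBits = 8) {
    return bitsToCharUnitsTrunc(BitSize + CharBits - 1, CharBits);
  }

  static_assert(charUnitsCeil(0) == 0);  // An empty unit occupies no chars.
  static_assert(charUnitsCeil(1) == 1);  // Any partial char rounds up.
  static_assert(charUnitsCeil(9) == 2);
  static_assert(charUnitsCeil(24) == 3); // getIntNType may still widen this.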