Skip to content

Commit 8c5dbbd

Browse files
authored
[SYCL] Update spec constant handling for struct hierarchies (#17204)
After #16976, CTS failed to compile due to an assertion being raised in `sycl-post-link`, more specifically while processing a `vec` spec constant at https://github.com/intel/llvm/blob/35fba198274c4a9f00c3f9de21cbd564242850c3/llvm/lib/SYCLLowerIR/SpecConstants.cpp#L593 However, the problem was more general and could be attributed to struct hierarchies. `getElemDefaultValue` was essentially assuming that the return type for the spec constant instruction and the type used to initialize the default value for the spec constant had the same "structure" (modulo some padding fields). However, in general, this assumption does not seem to hold. The initializer will essentially have a flattened-out structure, while the return type will retain the nested structure. Because of this, `emitSpecConstantRecursiveImpl` has been updated to account for it. First, before recursion starts, we recurse over the default value initializer in `collectDefinedElements`, collecting information on the child elements of the structure and noting what offsets they exist at. Then, we recurse over the return type in `emitSpecConstantRecursiveImpl`. When we reach a child element, we determine its default value by using the information we collected earlier. There is some consideration needed for padding - before recursing, we check to see if any child element exists at the offset we are trying to recurse into. If there is none, then we must be looking at padding fields.
1 parent 120667d commit 8c5dbbd

File tree

4 files changed

+290
-63
lines changed

4 files changed

+290
-63
lines changed

llvm/lib/SYCLLowerIR/SpecConstants.cpp

Lines changed: 70 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -568,51 +568,6 @@ Instruction *emitSpecConstantComposite(Type *Ty, ArrayRef<Value *> Elements,
568568
return emitCall(Ty, SPIRV_GET_SPEC_CONST_COMPOSITE, Elements, InsertBefore);
569569
}
570570

571-
// Select corresponding element of the default value. For a
572-
// struct, we getting the corresponding default value is a little
573-
// tricky. There are potentially distinct two types: the type of
574-
// the default value, which comes from the initializer of the
575-
// global spec constant value, and the return type of the call to
576-
// getComposite2020SpecConstValue. The return type can be a
577-
// version of the default value type, with padding fields
578-
// potentially inserted at the top level and within nested
579-
// structs.
580-
581-
// Examples: (RT = Return Type, DVT = Default Value Type)
582-
// RT: { i8, [3 x i8], i32 }, DVT = { i8, i32 }
583-
// RT: { { i32, i8, [3 x i8] }, i32 } DVT = { { i32, i8 }, i32 }
584-
585-
// For a given element of the default value type we are
586-
// trying to initialize, we will initialize that element with
587-
// the element of the default value type that has the same offset
588-
// as the element we are trying to initialize. If no such element
589-
// exists, we used undef as the initializer.
590-
Constant *getElemDefaultValue(Type *Ty, Type *ElTy, Constant *DefaultValue,
591-
size_t ElemIndex, const DataLayout &DL) {
592-
if (auto *StructTy = dyn_cast<StructType>(Ty)) {
593-
auto *DefaultValueType = cast<StructType>(DefaultValue->getType());
594-
const auto &DefaultValueTypeSL = DL.getStructLayout(DefaultValueType);
595-
// The struct has padding, so we have to adjust ElemIndex
596-
if (DefaultValueTypeSL->hasPadding()) {
597-
const auto &ReturnTypeSL = DL.getStructLayout(StructTy);
598-
ArrayRef<TypeSize> DefaultValueOffsets =
599-
DefaultValueTypeSL->getMemberOffsets();
600-
TypeSize CurrentIterationOffset =
601-
ReturnTypeSL->getElementOffset(ElemIndex);
602-
const auto It =
603-
std::find(DefaultValueOffsets.begin(), DefaultValueOffsets.end(),
604-
CurrentIterationOffset);
605-
606-
// The element we are looking at is a padding field
607-
if (It == DefaultValueOffsets.end())
608-
return UndefValue::get(ElTy);
609-
// Select the index with the same offset
610-
ElemIndex = It - DefaultValueOffsets.begin();
611-
}
612-
}
613-
return DefaultValue->getAggregateElement(ElemIndex);
614-
}
615-
616571
/// For specified specialization constant type emits LLVM IR which is required
617572
/// in order to correctly handle it later during LLVM IR -> SPIR-V translation.
618573
///
@@ -636,19 +591,26 @@ Constant *getElemDefaultValue(Type *Ty, Type *ElTy, Constant *DefaultValue,
636591
/// __spirvSpecConstantComposite calls for each composite member of the
637592
/// composite (plus for the top-level composite). Also enumerates all
638593
/// encountered scalars and assigns them IDs (or re-uses existing ones).
639-
Instruction *emitSpecConstantRecursiveImpl(Type *Ty, Instruction *InsertBefore,
640-
SmallVectorImpl<ID> &IDs,
641-
unsigned &Index,
642-
Constant *DefaultValue) {
594+
Instruction *emitSpecConstantRecursiveImpl(
595+
Type *Ty, Instruction *InsertBefore, SmallVectorImpl<ID> &IDs,
596+
unsigned &Index, unsigned CurrentOffset,
597+
const SmallVectorImpl<std::pair<uint64_t, Constant *>> &DefinedElements) {
643598
const Module &M = *InsertBefore->getModule();
644599
if (!Ty->isArrayTy() && !Ty->isStructTy() && !Ty->isVectorTy()) { // Scalar
600+
auto It = llvm::lower_bound(DefinedElements, CurrentOffset,
601+
[](const std::pair<uint64_t, Constant *> &LHS,
602+
uint64_t RHS) { return LHS.first < RHS; });
603+
assert(It != DefinedElements.end() && It->first == CurrentOffset);
604+
Constant *DefaultValue = It->second;
605+
645606
if (Index >= IDs.size()) {
646607
// If it is a new specialization constant, we need to generate IDs for
647608
// scalar elements, starting with the second one.
648609
assert(!isa<UndefValue>(DefaultValue) &&
649610
"All scalar values should be defined");
650611
IDs.push_back({IDs.back().ID + 1, false});
651612
}
613+
652614
return emitSpecConstant(IDs[Index++].ID, Ty, InsertBefore, DefaultValue);
653615
}
654616

@@ -662,44 +624,89 @@ Instruction *emitSpecConstantRecursiveImpl(Type *Ty, Instruction *InsertBefore,
662624
Elements.push_back(Def);
663625
Index++;
664626
};
665-
auto LoopIteration = [&](Type *ElTy, unsigned LocalIndex) {
666-
const auto ElemDefaultValue = getElemDefaultValue(
667-
Ty, ElTy, DefaultValue, LocalIndex, M.getDataLayout());
668-
627+
auto LoopIteration = [&](Type *ElTy, unsigned LocalOffset) {
628+
auto ElOffset = CurrentOffset + LocalOffset;
629+
auto It = llvm::lower_bound(DefinedElements, ElOffset,
630+
[](const std::pair<uint64_t, Constant *> &LHS,
631+
uint64_t RHS) { return LHS.first < RHS; });
669632
// If the default value is a composite and has the value 'undef', we should
670633
// not generate a bunch of __spirv_SpecConstant for its elements but
671634
// pass it into __spirv_SpecConstantComposite as is.
672-
if (isa<UndefValue>(ElemDefaultValue))
673-
HandleUndef(ElemDefaultValue);
635+
if (It == DefinedElements.end() || It->first != ElOffset)
636+
HandleUndef(UndefValue::get(ElTy));
674637
else
675638
Elements.push_back(emitSpecConstantRecursiveImpl(
676-
ElTy, InsertBefore, IDs, Index, ElemDefaultValue));
639+
ElTy, InsertBefore, IDs, Index, ElOffset, DefinedElements));
677640
};
678641

642+
auto DL = M.getDataLayout();
679643
if (auto *ArrTy = dyn_cast<ArrayType>(Ty)) {
644+
uint64_t ElSize = DL.getTypeAllocSize(ArrTy->getElementType());
680645
for (size_t I = 0; I < ArrTy->getNumElements(); ++I)
681-
LoopIteration(ArrTy->getElementType(), I);
646+
LoopIteration(ArrTy->getElementType(), I * ElSize);
682647
} else if (auto *StructTy = dyn_cast<StructType>(Ty)) {
683-
size_t I = 0;
684-
for (Type *ElTy : StructTy->elements())
685-
LoopIteration(ElTy, I++);
648+
const StructLayout *SL = M.getDataLayout().getStructLayout(StructTy);
649+
for (auto [ElTy, Offset] :
650+
zip_equal(StructTy->elements(), SL->getMemberOffsets()))
651+
LoopIteration(ElTy, Offset);
686652
} else if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
653+
uint64_t ElSize = DL.getTypeAllocSize(VecTy->getElementType());
687654
for (size_t I = 0; I < VecTy->getNumElements(); ++I)
688-
LoopIteration(VecTy->getElementType(), I);
655+
LoopIteration(VecTy->getElementType(), I * ElSize);
689656
} else {
690657
llvm_unreachable("Unexpected spec constant type");
691658
}
692659

693660
return emitSpecConstantComposite(Ty, Elements, InsertBefore);
694661
}
695662

663+
/// Recursively iterates over a composite type in order to collect information
664+
/// about the offsets of its scalar elements.
665+
void collectDefinedElements(
666+
Constant *C, const DataLayout &DL,
667+
SmallVectorImpl<std::pair<uint64_t, Constant *>> &Result,
668+
uint64_t CurrentOffset) {
669+
if (isa<UndefValue>(C)) {
670+
return;
671+
}
672+
673+
if (auto *StructTy = dyn_cast<StructType>(C->getType())) {
674+
const StructLayout *SL = DL.getStructLayout(StructTy);
675+
for (auto [I, MemberOffset] : enumerate(SL->getMemberOffsets()))
676+
collectDefinedElements(C->getAggregateElement(I), DL, Result,
677+
CurrentOffset + MemberOffset);
678+
}
679+
680+
else if (auto *ArrTy = dyn_cast<ArrayType>(C->getType())) {
681+
uint64_t ElSize = DL.getTypeAllocSize(ArrTy->getElementType());
682+
for (size_t I = 0; I < ArrTy->getNumElements(); ++I)
683+
collectDefinedElements(C->getAggregateElement(I), DL, Result,
684+
CurrentOffset + I * ElSize);
685+
}
686+
687+
else if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
688+
uint64_t ElSize = DL.getTypeAllocSize(VecTy->getElementType());
689+
for (size_t I = 0; I < VecTy->getNumElements(); ++I)
690+
collectDefinedElements(C->getAggregateElement(I), DL, Result,
691+
CurrentOffset + I * ElSize);
692+
}
693+
694+
else {
695+
Result.push_back({CurrentOffset, C});
696+
}
697+
}
698+
696699
/// Wrapper intended to hide IsFirstElement argument from the caller
697700
Instruction *emitSpecConstantRecursive(Type *Ty, Instruction *InsertBefore,
698701
SmallVectorImpl<ID> &IDs,
699702
Constant *DefaultValue) {
700703
unsigned Index = 0;
701-
return emitSpecConstantRecursiveImpl(Ty, InsertBefore, IDs, Index,
702-
DefaultValue);
704+
SmallVector<std::pair<uint64_t, Constant *>, 32> DefinedElements;
705+
collectDefinedElements(DefaultValue,
706+
InsertBefore->getModule()->getDataLayout(),
707+
DefinedElements, 0);
708+
return emitSpecConstantRecursiveImpl(Ty, InsertBefore, IDs, Index, 0,
709+
DefinedElements);
703710
}
704711

705712
/// Function creates load instruction from the given Buffer by the given Offset.
Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
1+
; For C++ types that come from nested class hierarchy, the LLVM type corresponding
2+
; to that type seems to match the nested structure. However, it also seems that
3+
; when defining a constant for that type, the LLVM value defining the constant has a type
4+
; that is different, and is essentially a flattened out version of the C++ type.
5+
; For example, this test is IR generated from getting the value of a spec constant
6+
; of a struct `scary` that has a deeply nested hierarchy, but the specialization_id holding
7+
; the default value of `scary` is a flat struct with all the fields of `scary` flattened out.
8+
; (compare %struct.scary and @_ZL16scary_spec_const)
9+
; This test makes sure that the spec constant pass can handle such cases.
10+
; (note: IR generated from sycl/test-e2e/SpecConstants/2020/hierarchy.cpp)
11+
; RUN: sycl-post-link -properties --spec-const=native -S %s -o %t.table
12+
; RUN: FileCheck %s -input-file=%t_0.ll
13+
14+
target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64-G1"
15+
target triple = "spir64-unknown-unknown"
16+
17+
%struct.anon = type { i32, i32 }
18+
%struct.anon.0 = type { i32 }
19+
%struct.scary = type { %struct.layer4.base, [15 x i8] }
20+
%struct.layer4.base = type { %struct.layer3.base }
21+
%struct.layer3.base = type <{ %struct.layer2, [4 x i8], %struct.foo.base }>
22+
%struct.layer2 = type { %struct.layer1 }
23+
%struct.layer1 = type { %struct.base }
24+
%struct.base = type { float, i8, i32, %struct.anon }
25+
%struct.foo.base = type <{ i32, [4 x i8], [5 x i64], [5 x %struct.anon.0], [5 x i8] }>
26+
27+
@__usid_str = private unnamed_addr constant [44 x i8] c"uid52dfb70f8b72bae7____ZL16scary_spec_const\00", align 1
28+
@_ZL16scary_spec_const = internal addrspace(1) constant { { float, i8, i32, %struct.anon, [4 x i8], i32, [5 x i64], [5 x %struct.anon.0], [5 x i8], [15 x i8] } } { { float, i8, i32, %struct.anon, [4 x i8], i32, [5 x i64], [5 x %struct.anon.0], [5 x i8], [15 x i8] } { float 0.000000e+00, i8 98, i32 0, %struct.anon zeroinitializer, [4 x i8] undef, i32 3, [5 x i64] [i64 5, i64 0, i64 0, i64 0, i64 0], [5 x %struct.anon.0] [%struct.anon.0 { i32 1 }, %struct.anon.0 { i32 2 }, %struct.anon.0 zeroinitializer, %struct.anon.0 zeroinitializer, %struct.anon.0 zeroinitializer], [5 x i8] c"abc\00\00", [15 x i8] undef } }, align 16
29+
30+
define weak_odr dso_local spir_kernel void @_ZTSZ4mainEUlN4sycl3_V114kernel_handlerEE_(ptr addrspace(1) noundef align 16 %_arg_p) {
31+
entry:
32+
%ref.tmp.i = alloca %struct.scary, align 16
33+
%ref.tmp.ascast.i = addrspacecast ptr %ref.tmp.i to ptr addrspace(4)
34+
call spir_func void @_Z40__sycl_getComposite2020SpecConstantValueI5scaryET_PKcPKvS5_(ptr addrspace(4) dead_on_unwind writable sret(%struct.scary) align 16 %ref.tmp.ascast.i, ptr addrspace(4) noundef addrspacecast (ptr @__usid_str to ptr addrspace(4)), ptr addrspace(4) noundef addrspacecast (ptr addrspace(1) @_ZL16scary_spec_const to ptr addrspace(4)), ptr addrspace(4) noundef null)
35+
call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) align 16 %_arg_p, ptr align 16 %ref.tmp.i, i64 97, i1 false)
36+
ret void
37+
}
38+
39+
declare dso_local spir_func void @_Z40__sycl_getComposite2020SpecConstantValueI5scaryET_PKcPKvS5_(ptr addrspace(4) dead_on_unwind writable sret(%struct.scary) align 16, ptr addrspace(4) noundef, ptr addrspace(4) noundef, ptr addrspace(4) noundef)
40+
41+
42+
; CHECK: %[[#SCV0:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 0, float 0.000000e+00)
43+
; CHECK: %[[#SCV1:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 1, i8 98)
44+
; CHECK: %[[#SCV2:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 2, i32 0)
45+
; CHECK: %[[#SCV3:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 3, i32 0)
46+
; CHECK: %[[#SCV4:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 4, i32 0)
47+
; CHECK: %[[#SCV5:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i32 %[[#SCV3]], i32 %[[#SCV4]])
48+
; CHECK: %[[#SCV6:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(float %[[#SCV0]], i8 %[[#SCV1]], i32 %[[#SCV2]], %struct.anon %[[#SCV5]])
49+
; CHECK: %[[#SCV7:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(%struct.base %[[#SCV6]])
50+
; CHECK: %[[#SCV8:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(%struct.layer1 %[[#SCV7]])
51+
; CHECK: %[[#SCV9:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 6, i32 3)
52+
; CHECK: %[[#SCV10:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 8, i64 5)
53+
; CHECK: %[[#SCV11:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 9, i64 0)
54+
; CHECK: %[[#SCV12:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 10, i64 0)
55+
; CHECK: %[[#SCV13:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 11, i64 0)
56+
; CHECK: %[[#SCV14:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 12, i64 0)
57+
; CHECK: %[[#SCV15:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i64 %[[#SCV10]], i64 %[[#SCV11]], i64 %[[#SCV12]], i64 %[[#SCV13]], i64 %[[#SCV14]])
58+
; CHECK: %[[#SCV16:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 13, i32 1)
59+
; CHECK: %[[#SCV17:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i32 %[[#SCV16]])
60+
; CHECK: %[[#SCV18:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 14, i32 2)
61+
; CHECK: %[[#SCV19:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i32 %[[#SCV18]])
62+
; CHECK: %[[#SCV20:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 15, i32 0)
63+
; CHECK: %[[#SCV21:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i32 %[[#SCV20]])
64+
; CHECK: %[[#SCV22:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 16, i32 0)
65+
; CHECK: %[[#SCV23:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i32 %[[#SCV22]])
66+
; CHECK: %[[#SCV24:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 17, i32 0)
67+
; CHECK: %[[#SCV25:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i32 %[[#SCV24]])
68+
; CHECK: %[[#SCV26:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(%struct.anon.0 %[[#SCV17]], %struct.anon.0 %[[#SCV19]], %struct.anon.0 %[[#SCV21]], %struct.anon.0 %[[#SCV23]], %struct.anon.0 %[[#SCV25]])
69+
; CHECK: %[[#SCV27:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 18, i8 97)
70+
; CHECK: %[[#SCV28:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 19, i8 98)
71+
; CHECK: %[[#SCV29:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 20, i8 99)
72+
; CHECK: %[[#SCV30:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 21, i8 0)
73+
; CHECK: %[[#SCV31:]] = {{.*}}@{{.*}}SpecConstant{{.*}}(i32 22, i8 0)
74+
; CHECK: %[[#SCV32:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i8 %[[#SCV27]], i8 %[[#SCV28]], i8 %[[#SCV29]], i8 %[[#SCV30]], i8 %[[#SCV31]])
75+
; CHECK: %[[#SCV33:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(i32 %[[#SCV9]], [4 x i8] undef, [5 x i64] %[[#SCV15]], [5 x %struct.anon.0] %[[#SCV26]], [5 x i8] %[[#SCV32]])
76+
; CHECK: %[[#SCV34:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(%struct.layer2 %[[#SCV8]], [4 x i8] undef, %struct.foo.base %[[#SCV33]])
77+
; CHECK: %[[#SCV35:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(%struct.layer3.base %[[#SCV34]])
78+
; CHECK: %[[#SCV36:]] = {{.*}}@{{.*}}SpecConstantComposite{{.*}}(%struct.layer4.base %[[#SCV35]], [15 x i8] undef)
79+
80+
; Function Attrs: nocallback nofree nounwind willreturn memory(argmem: readwrite)
81+
declare void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
// RUN: %{build} -o %t.out
2+
// RUN: %{run} %t.out
3+
#include <sycl/detail/core.hpp>
4+
#include <sycl/specialization_id.hpp>
5+
#include <sycl/usm.hpp>
6+
7+
using namespace sycl;
8+
struct base {
9+
float a;
10+
char b = 'b';
11+
int c;
12+
struct {
13+
int x;
14+
int y;
15+
} d;
16+
};
17+
struct layer1 : base {};
18+
struct layer2 : layer1 {};
19+
struct foo {
20+
int e = 3;
21+
long long f[5] = {5};
22+
struct {
23+
int value;
24+
} g[5] = {1, 2};
25+
char h[5] = {'a', 'b', 'c'};
26+
};
27+
struct alignas(16) layer3 : layer2, foo {};
28+
struct layer4 : layer3 {};
29+
struct scary : layer4 {};
30+
31+
constexpr scary default_scary{};
32+
constexpr scary zero_scary{base{0, 0, 0, {0, 0}}, foo{0, {}, {}, {}}};
33+
constexpr specialization_id<scary> scary_spec_const(default_scary);
34+
35+
int main() {
36+
queue Q;
37+
auto *p = malloc_shared<scary>(1, Q);
38+
new (p) scary{zero_scary};
39+
40+
Q.single_task([=](kernel_handler h) {
41+
*p = h.get_specialization_constant<scary_spec_const>();
42+
}).wait();
43+
44+
int nfails = 0;
45+
#define EXPECT_EQ(a, b, ...) \
46+
if (a != b) { \
47+
nfails++; \
48+
std::cout << "FAIL: " << #a << " != " << #b << " (" << (int)a \
49+
<< " != " << (int)b << ")\n"; \
50+
}
51+
52+
// base
53+
EXPECT_EQ(p->a, 0, );
54+
EXPECT_EQ(p->b, 'b');
55+
EXPECT_EQ(p->c, 0);
56+
EXPECT_EQ(p->d.x, 0);
57+
EXPECT_EQ(p->d.y, 0);
58+
59+
// foo
60+
EXPECT_EQ(p->e, 3);
61+
62+
EXPECT_EQ(p->f[0], 5);
63+
EXPECT_EQ(p->f[1], 0);
64+
EXPECT_EQ(p->f[2], 0);
65+
EXPECT_EQ(p->f[3], 0);
66+
EXPECT_EQ(p->f[4], 0);
67+
68+
EXPECT_EQ(p->g[0].value, 1);
69+
EXPECT_EQ(p->g[1].value, 2);
70+
EXPECT_EQ(p->g[2].value, 0);
71+
EXPECT_EQ(p->g[3].value, 0);
72+
EXPECT_EQ(p->g[4].value, 0);
73+
74+
EXPECT_EQ(p->h[0], 'a');
75+
EXPECT_EQ(p->h[1], 'b');
76+
EXPECT_EQ(p->h[2], 'c');
77+
EXPECT_EQ(p->h[3], 0);
78+
EXPECT_EQ(p->h[4], 0);
79+
80+
if (nfails == 0) {
81+
std::cout << "PASS\n";
82+
} else {
83+
std::cout << "FAIL\n";
84+
}
85+
86+
free(p, Q);
87+
return nfails;
88+
}

0 commit comments

Comments
 (0)