Skip to content

Commit ec34699

Browse files
authored
[GlobalISel] convergence control tokens and intrinsics (#67006)
[GlobalISel] Implement convergence control tokens and intrinsics in GMIR. In the IR translator, convert the LLVM token type to LLT::token(), which is an alias for the s0 type. These show up as implicit uses on convergent operations. Differential Revision: https://reviews.llvm.org/D158147
1 parent 55a02d1 commit ec34699

File tree

11 files changed

+155
-36
lines changed

11 files changed

+155
-36
lines changed

llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,9 @@ class CallLowering {
117117
/// vreg that the swifterror should be copied into after the call.
118118
Register SwiftErrorVReg;
119119

120+
/// Valid if the call is a controlled convergent operation.
121+
Register ConvergenceCtrlToken;
122+
120123
/// Original IR callsite corresponding to this call, if available.
121124
const CallBase *CB = nullptr;
122125

@@ -584,6 +587,7 @@ class CallLowering {
584587
bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
585588
ArrayRef<Register> ResRegs,
586589
ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
590+
Register ConvergenceCtrlToken,
587591
std::function<unsigned()> GetCalleeReg) const;
588592

589593
/// For targets which want to use big-endian can enable it with

llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -587,6 +587,10 @@ class IRTranslator : public MachineFunctionPass {
587587
return false;
588588
}
589589

590+
bool translateConvergenceControlIntrinsic(const CallInst &CI,
591+
Intrinsic::ID ID,
592+
MachineIRBuilder &MIRBuilder);
593+
590594
/// @}
591595

592596
// Builder for machine instruction a la IRBuilder.
@@ -705,6 +709,23 @@ class IRTranslator : public MachineFunctionPass {
705709
return Regs[0];
706710
}
707711

712+
Register getOrCreateConvergenceTokenVReg(const Value &Token) {
713+
assert(Token.getType()->isTokenTy());
714+
auto &Regs = *VMap.getVRegs(Token);
715+
if (!Regs.empty()) {
716+
assert(Regs.size() == 1 &&
717+
"Expected a single register for convergence tokens.");
718+
return Regs[0];
719+
}
720+
721+
auto Reg = MRI->createGenericVirtualRegister(LLT::token());
722+
Regs.push_back(Reg);
723+
auto &Offsets = *VMap.getOffsets(Token);
724+
if (Offsets.empty())
725+
Offsets.push_back(0);
726+
return Reg;
727+
}
728+
708729
/// Allocate some vregs and offsets in the VMap. Then populate just the
709730
/// offsets while leaving the vregs empty.
710731
ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

llvm/lib/CodeGen/GlobalISel/CallLowering.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include "llvm/CodeGen/MachineRegisterInfo.h"
2222
#include "llvm/CodeGen/TargetLowering.h"
2323
#include "llvm/IR/DataLayout.h"
24+
#include "llvm/IR/IntrinsicInst.h"
2425
#include "llvm/IR/LLVMContext.h"
2526
#include "llvm/IR/Module.h"
2627
#include "llvm/Target/TargetMachine.h"
@@ -91,6 +92,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
9192
ArrayRef<Register> ResRegs,
9293
ArrayRef<ArrayRef<Register>> ArgRegs,
9394
Register SwiftErrorVReg,
95+
Register ConvergenceCtrlToken,
9496
std::function<unsigned()> GetCalleeReg) const {
9597
CallLoweringInfo Info;
9698
const DataLayout &DL = MIRBuilder.getDataLayout();
@@ -121,7 +123,6 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
121123
CanBeTailCalled = false;
122124
}
123125

124-
125126
// First step is to marshall all the function's parameters into the correct
126127
// physregs and memory locations. Gather the sequence of argument types that
127128
// we'll pass to the assigner function.
@@ -187,6 +188,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
187188
Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
188189
Info.CallConv = CallConv;
189190
Info.SwiftErrorVReg = SwiftErrorVReg;
191+
Info.ConvergenceCtrlToken = ConvergenceCtrlToken;
190192
Info.IsMustTailCall = CB.isMustTailCall();
191193
Info.IsTailCall = CanBeTailCalled;
192194
Info.IsVarArg = IsVarArg;

llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp

Lines changed: 54 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -214,8 +214,9 @@ ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
214214
auto *VRegs = VMap.getVRegs(Val);
215215
auto *Offsets = VMap.getOffsets(Val);
216216

217-
assert(Val.getType()->isSized() &&
218-
"Don't know how to create an empty vreg");
217+
if (!Val.getType()->isTokenTy())
218+
assert(Val.getType()->isSized() &&
219+
"Don't know how to create an empty vreg");
219220

220221
SmallVector<LLT, 4> SplitTys;
221222
computeValueLLTs(*DL, *Val.getType(), SplitTys,
@@ -2074,6 +2075,36 @@ bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
20742075
return true;
20752076
}
20762077

2078+
static unsigned getConvOpcode(Intrinsic::ID ID) {
2079+
switch (ID) {
2080+
default:
2081+
llvm_unreachable("Unexpected intrinsic");
2082+
case Intrinsic::experimental_convergence_anchor:
2083+
return TargetOpcode::CONVERGENCECTRL_ANCHOR;
2084+
case Intrinsic::experimental_convergence_entry:
2085+
return TargetOpcode::CONVERGENCECTRL_ENTRY;
2086+
case Intrinsic::experimental_convergence_loop:
2087+
return TargetOpcode::CONVERGENCECTRL_LOOP;
2088+
}
2089+
}
2090+
2091+
bool IRTranslator::translateConvergenceControlIntrinsic(
2092+
const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
2093+
MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
2094+
Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
2095+
MIB.addDef(OutputReg);
2096+
2097+
if (ID == Intrinsic::experimental_convergence_loop) {
2098+
auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
2099+
assert(Bundle && "Expected a convergence control token.");
2100+
Register InputReg =
2101+
getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
2102+
MIB.addUse(InputReg);
2103+
}
2104+
2105+
return true;
2106+
}
2107+
20772108
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
20782109
MachineIRBuilder &MIRBuilder) {
20792110
if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
@@ -2530,7 +2561,10 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
25302561
#include "llvm/IR/ConstrainedOps.def"
25312562
return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
25322563
MIRBuilder);
2533-
2564+
case Intrinsic::experimental_convergence_anchor:
2565+
case Intrinsic::experimental_convergence_entry:
2566+
case Intrinsic::experimental_convergence_loop:
2567+
return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
25342568
}
25352569
return false;
25362570
}
@@ -2581,12 +2615,18 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
25812615
}
25822616
}
25832617

2618+
Register ConvergenceCtrlToken = 0;
2619+
if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2620+
const auto &Token = *Bundle->Inputs[0].get();
2621+
ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
2622+
}
2623+
25842624
// We don't set HasCalls on MFI here yet because call lowering may decide to
25852625
// optimize into tail calls. Instead, we defer that to selection where a final
25862626
// scan is done to check if any instructions are calls.
2587-
bool Success =
2588-
CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
2589-
[&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2627+
bool Success = CLI->lowerCall(
2628+
MIRBuilder, CB, Res, Args, SwiftErrorVReg, ConvergenceCtrlToken,
2629+
[&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
25902630

25912631
// Check if we just inserted a tail call.
25922632
if (Success) {
@@ -2700,6 +2740,14 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
27002740
MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
27012741
}
27022742

2743+
if (CI.isConvergent()) {
2744+
if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
2745+
auto *Token = Bundle->Inputs[0].get();
2746+
Register TokenReg = getOrCreateVReg(*Token);
2747+
MIB.addUse(TokenReg, RegState::Implicit);
2748+
}
2749+
}
2750+
27032751
return true;
27042752
}
27052753

llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -538,6 +538,14 @@ bool InlineAsmLowering::lowerInlineAsm(
538538
}
539539
}
540540

541+
if (auto Bundle = Call.getOperandBundle(LLVMContext::OB_convergencectrl)) {
542+
auto *Token = Bundle->Inputs[0].get();
543+
ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*Token);
544+
assert(SourceRegs.size() == 1 &&
545+
"Expected the control token to fit into a single virtual register");
546+
Inst.addUse(SourceRegs[0], RegState::Implicit);
547+
}
548+
541549
if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
542550
Inst.addMetadata(SrcLoc);
543551

llvm/lib/CodeGen/MIRParser/MIParser.cpp

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1919,10 +1919,13 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
19191919

19201920
if (Token.range().front() == 's') {
19211921
auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
1922-
if (!verifyScalarSize(ScalarSize))
1923-
return error("invalid size for scalar type");
1924-
1925-
Ty = LLT::scalar(ScalarSize);
1922+
if (ScalarSize) {
1923+
if (!verifyScalarSize(ScalarSize))
1924+
return error("invalid size for scalar type");
1925+
Ty = LLT::scalar(ScalarSize);
1926+
} else {
1927+
Ty = LLT::token();
1928+
}
19261929
lex();
19271930
return false;
19281931
} else if (Token.range().front() == 'p') {
@@ -1980,7 +1983,7 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
19801983
if (Token.range().front() == 's') {
19811984
auto ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
19821985
if (!verifyScalarSize(ScalarSize))
1983-
return error("invalid size for scalar type");
1986+
return error("invalid size for scalar element in vector");
19841987
Ty = LLT::scalar(ScalarSize);
19851988
} else if (Token.range().front() == 'p') {
19861989
const DataLayout &DL = MF.getDataLayout();

llvm/lib/IR/ConvergenceVerifier.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -75,14 +75,14 @@ GenericConvergenceVerifier<SSAContext>::findAndCheckConvergenceTokenUsed(
7575

7676
template <>
7777
bool GenericConvergenceVerifier<SSAContext>::isInsideConvergentFunction(
78-
const InstructionT &I) {
78+
const Instruction &I) {
7979
auto *F = I.getFunction();
8080
return F->isConvergent();
8181
}
8282

8383
template <>
8484
bool GenericConvergenceVerifier<SSAContext>::isConvergent(
85-
const InstructionT &I) {
85+
const Instruction &I) {
8686
if (auto *CB = dyn_cast<CallBase>(&I)) {
8787
return CB->isConvergent();
8888
}

llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1301,6 +1301,9 @@ bool AMDGPUCallLowering::lowerTailCall(
13011301
if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
13021302
return false;
13031303

1304+
if (Info.ConvergenceCtrlToken) {
1305+
MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
1306+
}
13041307
handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, CalleeCC,
13051308
ImplicitArgRegs);
13061309

@@ -1483,6 +1486,9 @@ bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
14831486

14841487
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
14851488

1489+
if (Info.ConvergenceCtrlToken) {
1490+
MIB.addUse(Info.ConvergenceCtrlToken, RegState::Implicit);
1491+
}
14861492
handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, Info.CallConv,
14871493
ImplicitArgRegs);
14881494

llvm/test/CodeGen/AMDGPU/convergence-tokens.ll

Lines changed: 48 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,21 +1,24 @@
11
; RUN: llc --amdgpu-disable-structurizer -stop-after=amdgpu-isel -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,ISEL %s
22
; RUN: llc --amdgpu-disable-structurizer -stop-after=dead-mi-elimination -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck --check-prefixes=CHECK,DEADMI %s
3+
; RUN: llc --amdgpu-disable-structurizer -global-isel -stop-after=irtranslator -mtriple=amdgcn-- -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s --check-prefixes=CHECK,GISEL
34

45
; CHECK-LABEL: name: basic_call
5-
; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ENTRY
6+
; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
67
; ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @foo, [[TOKEN]], csr_amdgpu, {{.*}}
78
; DEADMI: {{.*}} SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
9+
; GISEL: {{.*}} G_SI_CALL {{.*}}, @foo, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
810
define i32 @basic_call(i32 %src) #0 {
911
%t = call token @llvm.experimental.convergence.entry()
1012
%r = call i32 @foo(i32 %src) [ "convergencectrl"(token %t) ]
1113
ret i32 %r
1214
}
1315

1416
; CHECK-LABEL: name: basic_intrinsic
15-
; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
17+
; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
1618
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
1719
; DEADMI-NOT: CONVERGENCECTRL_GLUE
18-
; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
20+
; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
21+
; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
1922
define i32 @basic_intrinsic(i32 %src) #0 {
2023
%t = call token @llvm.experimental.convergence.anchor()
2124
%r = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t) ]
@@ -30,12 +33,13 @@ define i32 @uncontrolled_call(i32 %src) #0 {
3033
}
3134

3235
; CHECK-LABEL: name: basic_branch
33-
; CHECK: bb.0.entry:
34-
; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
35-
; CHECK: bb.1.then:
36+
; CHECK: bb.[[#]].entry:
37+
; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
38+
; CHECK: bb.[[#]].then:
3639
; ISEL: CONVERGENCECTRL_GLUE [[TOKEN]]
3740
; DEADMI-NOT: CONVERGENCECTRL_GLUE
38-
; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
41+
; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[TOKEN]]
42+
; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[TOKEN]]
3943
define i32 @basic_branch(i32 %src, i1 %cond) #0 {
4044
entry:
4145
%t = call token @llvm.experimental.convergence.anchor()
@@ -52,12 +56,13 @@ else:
5256
}
5357

5458
; CHECK-LABEL: name: basic_loop
55-
; CHECK: [[TOKEN:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_ANCHOR
56-
; CHECK: bb.1.loop:
57-
; CHECK: [[LOOP:%[0-9]+]]:sreg_64 = CONVERGENCECTRL_LOOP [[TOKEN]]
59+
; CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
60+
; CHECK: bb.[[#]].loop:
61+
; CHECK: [[LOOP:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_LOOP [[TOKEN]]
5862
; ISEL: CONVERGENCECTRL_GLUE [[LOOP]]
5963
; DEADMI-NOT: CONVERGENCECTRL_GLUE
60-
; CHECK: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
64+
; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[LOOP]]
65+
; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[LOOP]]
6166
define i32 @basic_loop(i32 %src, i1 %cond) #0 {
6267
%t1 = call token @llvm.experimental.convergence.anchor()
6368
br label %loop
@@ -71,6 +76,38 @@ end:
7176
ret i32 %r
7277
}
7378

79+
; CHECK-LABEL: name: nested
80+
; CHECK: [[ENTRY:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
81+
; CHECK: [[ANCHOR:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ANCHOR
82+
; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ANCHOR]]
83+
; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ANCHOR]]
84+
; ISEL: {{.*}} = V_READFIRSTLANE_B32 {{.*}}, implicit [[ENTRY]]
85+
; GISEL: {{.*}} = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane){{.*}}, implicit [[ENTRY]]
86+
define i32 @nested(i32 %src) #0 {
87+
%t1 = call token @llvm.experimental.convergence.entry()
88+
%t2 = call token @llvm.experimental.convergence.anchor()
89+
%r2 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t2) ]
90+
%r1 = call i32 @llvm.amdgcn.readfirstlane(i32 %src) [ "convergencectrl"(token %t1) ]
91+
%sum = add i32 %r1, %r2
92+
ret i32 %sum
93+
}
94+
95+
; COM: FIXME: Tokens on tail-call have not been implemented for SelectionDAG
96+
; COM: yet; the corresponding checks have been commented out.
97+
;
98+
; CHECK-LABEL: name: tail_call_void_func_void
99+
; GISEL: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
100+
; COM: CHECK: [[TOKEN:%[0-9]+]]{{[^ ]*}} = CONVERGENCECTRL_ENTRY
101+
; COM: ISEL: {{.*}} SI_CALL_ISEL {{.*}}, @external_void_func_void, [[TOKEN]], csr_amdgpu, {{.*}}
102+
; COM: DEADMI: {{.*}} SI_CALL {{.*}}, @external_void_func_void, csr_amdgpu, {{.*}}, implicit [[TOKEN]]
103+
; GISEL: {{.*}} SI_TCRETURN {{.*}}, @external_void_func_void, 0, csr_amdgpu, implicit [[TOKEN]]
104+
define void @tail_call_void_func_void() #0 {
105+
%t1 = call token @llvm.experimental.convergence.entry()
106+
tail call void @external_void_func_void() [ "convergencectrl"(token %t1) ]
107+
ret void
108+
}
109+
110+
declare hidden void @external_void_func_void() #0
74111
declare i32 @foo(i32 %x) #0
75112

76113
declare i32 @llvm.amdgcn.readfirstlane(i32) #0

llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid4.mir

Lines changed: 0 additions & 10 deletions
This file was deleted.

llvm/test/CodeGen/MIR/AArch64/parse-low-level-type-invalid6.mir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,6 @@ name: test_vector_element_size_0
55
body: |
66
bb.0:
77
liveins: $x0
8-
; CHECK: [[@LINE+1]]:15: invalid size for scalar type
8+
; CHECK: [[@LINE+1]]:15: invalid size for scalar element in vector
99
%0:_(<2 x s0>) = G_IMPLICIT_DEF
1010
...

0 commit comments

Comments
 (0)