Skip to content

[msan] Add 32-bit platforms support #109284

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 32 commits into from
Nov 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
a9bbb7e
[msan] Add 32-bit platforms support
Sep 19, 2024
8c8ca4a
removed MemoryMapParams, removed EagerCheck shadow copy, f fixed alig…
Sep 21, 2024
d183648
* Added MemorySanitizer tests for i386, mips32, riscv, arm32, ppc32.
Sep 30, 2024
58eb2ac
Deleted accidentally added log file
Sep 30, 2024
b832f46
MSan: removed unnecessary brackets, AllSupportedArchDefs.cmake: remov…
Oct 11, 2024
2776315
fixed tests with msan patch applied
Nov 7, 2024
fc61594
Merge remote-tracking branch 'origin/main' into kashapov-va_args_32bit
vitalybuka Nov 11, 2024
715df4b
update tests
vitalybuka Nov 12, 2024
94ba759
Merge remote-tracking branch 'origin/main' into kashapov-va_args_32bit
vitalybuka Nov 12, 2024
a069e4c
update tests
vitalybuka Nov 12, 2024
140f33d
Revert "[nfc][msan] Clang-format MemorySanitizer.cpp (#115828)"
vitalybuka Nov 12, 2024
7b7fddb
Merge commit '140f33de5591e588b03dc3233f8ac1f968ee7d53' into kashapov…
vitalybuka Nov 12, 2024
7fc981c
UndoForMerge
vitalybuka Nov 12, 2024
9fdb85e
Merge commit '7fc981c265e749b3bd510b33d92e86a5bb3d398f' into kashapov…
vitalybuka Nov 12, 2024
1c3fcab
Revert "[nfc][msan] Move VarArgGenericHelper"
vitalybuka Nov 12, 2024
a2bd2ad
Merge commit '1c3fcab921c903fdd052bfb3e3a2aadca1228dc8' into kashapov…
vitalybuka Nov 12, 2024
e4ebee1
MSan: Moved DL and Intptrsize to local scope, changed for-loop to use…
Nov 12, 2024
83763f3
nfc MSan: add reminders to remove msan-origin-base in tests for 32-bi…
Nov 12, 2024
c15c29f
Merge remote-tracking branch 'origin/main' into kashapov-va_args_32bit
vitalybuka Nov 14, 2024
ef030a6
Return DL back
vitalybuka Nov 14, 2024
18b6d5e
Simplify VarArgPowerPCHelper::finalizeInstrumentation
vitalybuka Nov 14, 2024
1fe3a4e
Remove empty line
vitalybuka Nov 14, 2024
f71ef90
range loop
vitalybuka Nov 14, 2024
95c8c2b
CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS)
vitalybuka Nov 14, 2024
7c425cf
remove empty line
vitalybuka Nov 14, 2024
4ad58c8
[msan] Remove unnecacary zero increment
vitalybuka Nov 14, 2024
3415aa5
Revert "[msan] Remove unnecacary zero increment"
vitalybuka Nov 14, 2024
c8ec352
remove add 0
vitalybuka Nov 14, 2024
9e108b0
Merge commit '3415aa534076' into kashapov-va_args_32bit
vitalybuka Nov 14, 2024
92c419c
Merge remote-tracking branch 'origin/main' into kashapov-va_args_32bit
vitalybuka Nov 14, 2024
7b03354
updatetests
vitalybuka Nov 14, 2024
fda60eb
Fix type of VAArgOverflowSizeTLS
vitalybuka Nov 14, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
213 changes: 192 additions & 21 deletions llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -411,6 +411,10 @@ static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
0x100000000000, // OriginBase
};

// mips32 Linux
// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
// after picking good constants

// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
0, // AndMask (not used)
Expand All @@ -419,6 +423,10 @@ static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
0x002000000000, // OriginBase
};

// ppc32 Linux
// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
// after picking good constants

// ppc64 Linux
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
0xE00000000000, // AndMask
Expand All @@ -435,6 +443,10 @@ static const MemoryMapParams Linux_S390X_MemoryMapParams = {
0x1C0000000000, // OriginBase
};

// arm32 Linux
// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
// after picking good constants

// aarch64 Linux
static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
0, // AndMask (not used)
Expand All @@ -451,6 +463,10 @@ static const MemoryMapParams Linux_LoongArch64_MemoryMapParams = {
0x100000000000, // OriginBase
};

// riscv32 Linux
// FIXME: Remove -msan-origin-base -msan-and-mask added by PR #109284 to tests
// after picking good constants

// aarch64 FreeBSD
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams = {
0x1800000000000, // AndMask
Expand Down Expand Up @@ -559,6 +575,7 @@ class MemorySanitizer {
friend struct VarArgAArch64Helper;
friend struct VarArgPowerPCHelper;
friend struct VarArgSystemZHelper;
friend struct VarArgI386Helper;
friend struct VarArgGenericHelper;

void initializeModule(Module &M);
Expand Down Expand Up @@ -5606,10 +5623,15 @@ struct VarArgPowerPCHelper : public VarArgHelperBase {
// Parameter save area starts at 48 bytes from frame pointer for ABIv1,
// and 32 bytes for ABIv2. This is usually determined by target
// endianness, but in theory could be overridden by function attribute.
if (TargetTriple.getArch() == Triple::ppc64)
VAArgBase = 48;
else
VAArgBase = 32;
if (TargetTriple.isPPC64()) {
if (TargetTriple.isPPC64ELFv2ABI())
VAArgBase = 32;
else
VAArgBase = 48;
} else {
// Parameter save area is 8 bytes from frame pointer in PPC32
VAArgBase = 8;
}
unsigned VAArgOffset = VAArgBase;
const DataLayout &DL = F.getDataLayout();
for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
Expand Down Expand Up @@ -5674,7 +5696,7 @@ struct VarArgPowerPCHelper : public VarArgHelperBase {
}

Constant *TotalVAArgSize =
ConstantInt::get(IRB.getInt64Ty(), VAArgOffset - VAArgBase);
ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
// Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
// a new class member i.e. it is the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
Expand Down Expand Up @@ -5705,14 +5727,24 @@ struct VarArgPowerPCHelper : public VarArgHelperBase {

// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
Triple TargetTriple(F.getParent()->getTargetTriple());
for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), MS.PtrTy);
Value *RegSaveAreaPtrPtr = IRB.CreatePtrToInt(VAListTag, MS.IntptrTy);

// In PPC32 va_list_tag is a struct, whereas in PPC64 it's a pointer
if (!TargetTriple.isPPC64()) {
RegSaveAreaPtrPtr =
IRB.CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
}
RegSaveAreaPtrPtr = IRB.CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);

Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
const Align Alignment = Align(8);
const DataLayout &DL = F.getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
const Align Alignment = Align(IntptrSize);
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
Expand Down Expand Up @@ -6003,8 +6035,118 @@ struct VarArgSystemZHelper : public VarArgHelperBase {
}
};

/// MIPS-specific implementation of VarArgHelper.
/// NOTE: This is also used for LoongArch64.
/// i386-specific implementation of VarArgHelper.
struct VarArgI386Helper : public VarArgHelperBase {
AllocaInst *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;

VarArgI386Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
: VarArgHelperBase(F, MS, MSV, /*VAListTagSize=*/4) {}

void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
const DataLayout &DL = F.getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
unsigned VAArgOffset = 0;
for (const auto &[ArgNo, A] : llvm::enumerate(CB.args())) {
bool IsFixed = ArgNo < CB.getFunctionType()->getNumParams();
bool IsByVal = CB.paramHasAttr(ArgNo, Attribute::ByVal);
if (IsByVal) {
assert(A->getType()->isPointerTy());
Type *RealTy = CB.getParamByValType(ArgNo);
uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
Align ArgAlign = CB.getParamAlign(ArgNo).value_or(Align(IntptrSize));
if (ArgAlign < IntptrSize)
ArgAlign = Align(IntptrSize);
VAArgOffset = alignTo(VAArgOffset, ArgAlign);
if (!IsFixed) {
Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
if (Base) {
Value *AShadowPtr, *AOriginPtr;
std::tie(AShadowPtr, AOriginPtr) =
MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
kShadowTLSAlignment, /*isStore*/ false);

IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
kShadowTLSAlignment, ArgSize);
}
VAArgOffset += alignTo(ArgSize, Align(IntptrSize));
}
} else {
Value *Base;
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
Align ArgAlign = Align(IntptrSize);
VAArgOffset = alignTo(VAArgOffset, ArgAlign);
if (DL.isBigEndian()) {
// Adjusting the shadow for argument with size < IntptrSize to match
// the placement of bits in big endian system
if (ArgSize < IntptrSize)
VAArgOffset += (IntptrSize - ArgSize);
}
if (!IsFixed) {
Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
if (Base)
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
VAArgOffset += ArgSize;
VAArgOffset = alignTo(VAArgOffset, Align(IntptrSize));
}
}
}

Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
// Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
// a new class member i.e. it is the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
}

void finalizeInstrumentation() override {
assert(!VAArgSize && !VAArgTLSCopy &&
"finalizeInstrumentation called twice");
IRBuilder<> IRB(MSV.FnPrologueEnd);
VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
Value *CopySize = VAArgSize;

if (!VAStartInstrumentationList.empty()) {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
VAArgTLSCopy->setAlignment(kShadowTLSAlignment);
IRB.CreateMemSet(VAArgTLSCopy, Constant::getNullValue(IRB.getInt8Ty()),
CopySize, kShadowTLSAlignment, false);

Value *SrcSize = IRB.CreateBinaryIntrinsic(
Intrinsic::umin, CopySize,
ConstantInt::get(IRB.getInt64Ty(), kParamTLSSize));
IRB.CreateMemCpy(VAArgTLSCopy, kShadowTLSAlignment, MS.VAArgTLS,
kShadowTLSAlignment, SrcSize);
}

// Instrument va_start.
// Copy va_list shadow from the backup copy of the TLS contents.
for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(RegSaveAreaPtrTy, 0));
Value *RegSaveAreaPtr =
IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
const DataLayout &DL = F.getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
const Align Alignment = Align(IntptrSize);
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
CopySize);
}
}
};

/// Implementation of VarArgHelper that is used for ARM32, MIPS, RISCV,
/// LoongArch64.
struct VarArgGenericHelper : public VarArgHelperBase {
AllocaInst *VAArgTLSCopy = nullptr;
Value *VAArgSize = nullptr;
Expand All @@ -6016,24 +6158,25 @@ struct VarArgGenericHelper : public VarArgHelperBase {
void visitCallBase(CallBase &CB, IRBuilder<> &IRB) override {
unsigned VAArgOffset = 0;
const DataLayout &DL = F.getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
for (Value *A :
llvm::drop_begin(CB.args(), CB.getFunctionType()->getNumParams())) {
uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
if (DL.isBigEndian()) {
// Adjusting the shadow for argument with size < 8 to match the
// Adjusting the shadow for argument with size < IntptrSize to match the
// placement of bits in big endian system
if (ArgSize < 8)
VAArgOffset += (8 - ArgSize);
if (ArgSize < IntptrSize)
VAArgOffset += (IntptrSize - ArgSize);
}
Value *Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
VAArgOffset += ArgSize;
VAArgOffset = alignTo(VAArgOffset, 8);
VAArgOffset = alignTo(VAArgOffset, IntptrSize);
if (!Base)
continue;
IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
}

Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
// Here using VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creation of
// a new class member i.e. it is the total size of all VarArgs.
IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
Expand Down Expand Up @@ -6066,11 +6209,16 @@ struct VarArgGenericHelper : public VarArgHelperBase {
for (CallInst *OrigInst : VAStartInstrumentationList) {
NextNodeIRBuilder IRB(OrigInst);
Value *VAListTag = OrigInst->getArgOperand(0);
Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), MS.PtrTy);
Value *RegSaveAreaPtr = IRB.CreateLoad(MS.PtrTy, RegSaveAreaPtrPtr);
Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
Value *RegSaveAreaPtrPtr =
IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
PointerType::get(RegSaveAreaPtrTy, 0));
Value *RegSaveAreaPtr =
IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
const Align Alignment = Align(8);
const DataLayout &DL = F.getDataLayout();
unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
const Align Alignment = Align(IntptrSize);
std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
Alignment, /*isStore*/ true);
Expand All @@ -6080,8 +6228,10 @@ struct VarArgGenericHelper : public VarArgHelperBase {
}
};

// Loongarch64 is not a MIPS, but the current vargs calling convention matches
// the MIPS.
// ARM32, Loongarch64, MIPS and RISCV share the same calling conventions
// regarding VAArgs.
using VarArgARM32Helper = VarArgGenericHelper;
using VarArgRISCVHelper = VarArgGenericHelper;
using VarArgMIPSHelper = VarArgGenericHelper;
using VarArgLoongArch64Helper = VarArgGenericHelper;

Expand All @@ -6106,18 +6256,39 @@ static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
// VarArg handling is implemented only for the platforms dispatched below.
// False positives are possible on unsupported platforms.
Triple TargetTriple(Func.getParent()->getTargetTriple());

if (TargetTriple.getArch() == Triple::x86)
return new VarArgI386Helper(Func, Msan, Visitor);

if (TargetTriple.getArch() == Triple::x86_64)
return new VarArgAMD64Helper(Func, Msan, Visitor);

if (TargetTriple.isARM())
return new VarArgARM32Helper(Func, Msan, Visitor, /*VAListTagSize=*/4);

if (TargetTriple.isAArch64())
return new VarArgAArch64Helper(Func, Msan, Visitor);

if (TargetTriple.isSystemZ())
return new VarArgSystemZHelper(Func, Msan, Visitor);

// On PowerPC32 VAListTag is a struct
// {char, char, i16 padding, char *, char *}
if (TargetTriple.isPPC32())
return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/12);

if (TargetTriple.isPPC64())
return new VarArgPowerPCHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

if (TargetTriple.isRISCV32())
return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

if (TargetTriple.isRISCV64())
return new VarArgRISCVHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

if (TargetTriple.isMIPS32())
return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/4);

if (TargetTriple.isMIPS64())
return new VarArgMIPSHelper(Func, Msan, Visitor, /*VAListTagSize=*/8);

Expand Down
12 changes: 6 additions & 6 deletions llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
; RUN: opt < %s -S -passes=msan -msan-origin-base=0x40000000 -msan-and-mask=0x80000000 2>&1 | FileCheck %s
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we remove those and add proper consts into msan pass?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can, but this will require some research on where to put shadow and origin. Currently, I cannot say what their proper addresses will be.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Then I'm afraid that whoever eventually fixes them will forget to update the tests.
Can you put a comment in MemoryMapParams for the corresponding platforms: // FIXME: Remove -msan-origin-base -msan-and-mask added by #109284 after picking good constants.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Will do, thanks


target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
target triple = "mips64--linux"
target triple = "arm--linux"

define i32 @foo(i32 %guard, ...) {
; CHECK-LABEL: define i32 @foo(
Expand All @@ -15,20 +15,20 @@ define i32 @foo(i32 %guard, ...) {
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[VL:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP5:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[TMP5]], 549755813888
; CHECK-NEXT: [[TMP6:%.*]] = and i64 [[TMP5]], -2147483649
; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP7]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[VL]])
; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[TMP8]], 549755813888
; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], -2147483649
; CHECK-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 4, i1 false)
; CHECK-NEXT: call void @llvm.va_start.p0(ptr [[VL]])
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[VL]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8
; CHECK-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = xor i64 [[TMP14]], 549755813888
; CHECK-NEXT: [[TMP15:%.*]] = and i64 [[TMP14]], -2147483649
; CHECK-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[TMP3]], i64 [[TMP2]], i1 false)
; CHECK-NEXT: call void @llvm.va_end.p0(ptr [[VL]])
Expand Down
Loading