@@ -197,3 +197,195 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorCall(
   assert(!cir::MissingFeatures::opCallMustTail());
   return emitCall(fnInfo, callee, returnValue, args, nullptr, loc);
 }
+
+static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
+                                       unsigned minElements,
+                                       mlir::Value &numElements,
+                                       mlir::Value &sizeWithoutCookie) {
+  QualType type = e->getAllocatedType();
+  mlir::Location loc = cgf.getLoc(e->getSourceRange());
+
+  if (!e->isArray()) {
+    CharUnits typeSize = cgf.getContext().getTypeSizeInChars(type);
+    sizeWithoutCookie = cgf.getBuilder().getConstant(
+        loc, cir::IntAttr::get(cgf.SizeTy, typeSize.getQuantity()));
+    return sizeWithoutCookie;
+  }
+
+  cgf.cgm.errorNYI(e->getSourceRange(), "emitCXXNewAllocSize: array");
+  return {};
+}
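+
+// For example, for a non-array allocation such as `new int` the helper above
+// simply emits `sizeof(int)` as a `size_t` constant; array new-expressions
+// still fall through to errorNYI.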
+
+static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
+                                    QualType allocType, Address newPtr,
+                                    AggValueSlot::Overlap_t mayOverlap) {
+  // FIXME: Refactor with emitExprAsInit.
+  switch (cgf.getEvaluationKind(allocType)) {
+  case cir::TEK_Scalar:
+    cgf.emitScalarInit(init, cgf.getLoc(init->getSourceRange()),
+                       cgf.makeAddrLValue(newPtr, allocType), false);
+    return;
+  case cir::TEK_Complex:
+    cgf.cgm.errorNYI(init->getSourceRange(),
+                     "storeAnyExprIntoOneUnit: complex");
+    return;
+  case cir::TEK_Aggregate: {
+    assert(!cir::MissingFeatures::aggValueSlotGC());
+    assert(!cir::MissingFeatures::sanitizers());
+    AggValueSlot slot = AggValueSlot::forAddr(
+        newPtr, allocType.getQualifiers(), AggValueSlot::IsDestructed,
+        AggValueSlot::IsNotAliased, mayOverlap, AggValueSlot::IsNotZeroed);
+    cgf.emitAggExpr(init, slot);
+    return;
+  }
+  }
+  llvm_unreachable("bad evaluation kind");
+}
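+
+// For example, `new int(5)` is initialized through the TEK_Scalar case above,
+// while `new S{1, 2}` for an aggregate S goes through the TEK_Aggregate slot;
+// _Complex initializers are still NYI.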
+
+static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
+                               QualType elementType, mlir::Type elementTy,
+                               Address newPtr, mlir::Value numElements,
+                               mlir::Value allocSizeWithoutCookie) {
+  assert(!cir::MissingFeatures::generateDebugInfo());
+  if (e->isArray()) {
+    cgf.cgm.errorNYI(e->getSourceRange(), "emitNewInitializer: array");
+  } else if (const Expr *init = e->getInitializer()) {
+    storeAnyExprIntoOneUnit(cgf, init, e->getAllocatedType(), newPtr,
+                            AggValueSlot::DoesNotOverlap);
+  }
+}
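+
+// Note that a non-array `new T` with no initializer emits nothing here,
+// matching default-initialization of a trivial type, which performs no
+// stores; the array branch remains NYI.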
+
+/// Emit a call to an operator new or operator delete function, as implicitly
+/// created by new-expressions and delete-expressions.
+static RValue emitNewDeleteCall(CIRGenFunction &cgf,
+                                const FunctionDecl *calleeDecl,
+                                const FunctionProtoType *calleeType,
+                                const CallArgList &args) {
+  cir::CIRCallOpInterface callOrTryCall;
+  cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
+  CIRGenCallee callee =
+      CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
+  RValue rv =
+      cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, calleeType),
+                   callee, ReturnValueSlot(), args, &callOrTryCall);
+
+  /// C++1y [expr.new]p10:
+  ///   [In a new-expression,] an implementation is allowed to omit a call
+  ///   to a replaceable global allocation function.
+  ///
+  /// We model such elidable calls with the 'builtin' attribute.
+  assert(!cir::MissingFeatures::attributeBuiltin());
+  return rv;
+}
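+
+// For example, [expr.new]p10 permits eliding the allocation in code like
+// `delete new int;`; in classic codegen the 'builtin' attribute on the call
+// is what licenses that elision in LLVM, and here it is still missing (see
+// the assert above).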
+
+mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
+  // The element type being allocated.
+  QualType allocType = getContext().getBaseElementType(e->getAllocatedType());
+
+  // 1. Build a call to the allocation function.
+  FunctionDecl *allocator = e->getOperatorNew();
+
+  // If there is a brace-initializer, we cannot allocate fewer elements than
+  // there are initializers.
+  unsigned minElements = 0;
+  if (e->isArray() && e->hasInitializer()) {
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array initializer");
+  }
+
+  mlir::Value numElements = nullptr;
+  mlir::Value allocSizeWithoutCookie = nullptr;
+  mlir::Value allocSize = emitCXXNewAllocSize(
+      *this, e, minElements, numElements, allocSizeWithoutCookie);
+  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
+
+  // Emit the allocation call.
+  Address allocation = Address::invalid();
+  CallArgList allocatorArgs;
+  if (allocator->isReservedGlobalPlacementOperator()) {
+    cgm.errorNYI(e->getSourceRange(),
+                 "emitCXXNewExpr: reserved global placement operator");
+  } else {
+    const FunctionProtoType *allocatorType =
+        allocator->getType()->castAs<FunctionProtoType>();
+    unsigned paramsToSkip = 0;
+
+    // The allocation size is the first argument.
+    QualType sizeType = getContext().getSizeType();
+    allocatorArgs.add(RValue::get(allocSize), sizeType);
+    ++paramsToSkip;
+
+    if (allocSize != allocSizeWithoutCookie) {
+      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
+      allocAlign = std::max(allocAlign, cookieAlign);
+    }
+
+    // The allocation alignment may be passed as the second argument.
+    if (e->passAlignment()) {
+      cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: pass alignment");
+    }
+
+    // FIXME: Why do we not pass a CalleeDecl here?
+    emitCallArgs(allocatorArgs, allocatorType, e->placement_arguments(),
+                 AbstractCallee(), paramsToSkip);
+    RValue rv =
+        emitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+
+    // Set !heapallocsite metadata on the call to operator new.
+    assert(!cir::MissingFeatures::generateDebugInfo());
+
+    // If this was a call to a global replaceable allocation function that
+    // does not take an alignment argument, the allocator is known to produce
+    // storage that's suitably aligned for any object that fits, up to a known
+    // threshold. Otherwise assume it's suitably aligned for the allocated
+    // type.
+    CharUnits allocationAlign = allocAlign;
+    if (!e->passAlignment() &&
+        allocator->isReplaceableGlobalAllocationFunction()) {
+      const TargetInfo &target = cgm.getASTContext().getTargetInfo();
+      unsigned allocatorAlign = llvm::bit_floor(std::min<uint64_t>(
+          target.getNewAlign(), getContext().getTypeSize(allocType)));
+      allocationAlign = std::max(
+          allocationAlign, getContext().toCharUnitsFromBits(allocatorAlign));
+    }
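+    // For example, on a typical x86-64 target getNewAlign() is 128 bits, so
+    // for `new int` the assumed alignment is min(128, 32) bits = 4 bytes.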
+
+    mlir::Value allocPtr = rv.getValue();
+    allocation = Address(
+        allocPtr, mlir::cast<cir::PointerType>(allocPtr.getType()).getPointee(),
+        allocationAlign);
+  }
+
+  // Emit a null check on the allocation result if the allocation function is
+  // allowed to return null (because it has a non-throwing exception spec or
+  // is the reserved placement new) and we have an interesting initializer or
+  // will be running sanitizers on the initialization.
+  bool nullCheck = e->shouldNullCheckAllocation() &&
+                   (!allocType.isPODType(getContext()) || e->hasInitializer());
+  assert(!cir::MissingFeatures::exprNewNullCheck());
+  if (nullCheck)
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
+
+  // If there's an operator delete, enter a cleanup to call it if an
+  // exception is thrown.
+  if (e->getOperatorDelete() &&
+      !e->getOperatorDelete()->isReservedGlobalPlacementOperator())
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
+
+  if (allocSize != allocSizeWithoutCookie)
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array with cookies");
+
+  mlir::Type elementTy = convertTypeForMem(allocType);
+  Address result = builder.createElementBitCast(getLoc(e->getSourceRange()),
+                                                allocation, elementTy);
+
+  // Pass the pointer through launder.invariant.group to avoid propagating
+  // vptr information that may be embedded in the previous type. To avoid
+  // breaking LTO across different optimization levels, do this regardless of
+  // the optimization level.
+  if (cgm.getCodeGenOpts().StrictVTablePointers &&
+      allocator->isReservedGlobalPlacementOperator())
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: strict vtable pointers");
+
+  assert(!cir::MissingFeatures::sanitizers());
+
+  emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
+                     allocSizeWithoutCookie);
+  return result.getPointer();
+}
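
For context, a minimal example of the kind of source this new path can now lower; a sketch for illustration only, the resulting CIR is not reproduced here:

    // Exercises the non-array path added above: operator new is called with
    // sizeof(int), the result is cast to `int *`, and the initializer `42`
    // is stored via the TEK_Scalar case of storeAnyExprIntoOneUnit.
    int *make() { return new int(42); }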