@@ -58,8 +58,6 @@ type glue_fns = rec(ValueRef activate_glue,
                     vec[ValueRef] native_glues_pure_rust,
                     vec[ValueRef] native_glues_cdecl,
                     ValueRef no_op_type_glue,
-                    ValueRef memcpy_glue,
-                    ValueRef bzero_glue,
                     ValueRef vec_append_glue);
 
 type tydesc_info = rec(ValueRef tydesc,
@@ -2921,31 +2919,58 @@ fn drop_ty(@block_ctxt cx,
 fn call_memcpy(@block_ctxt cx,
                ValueRef dst,
                ValueRef src,
-               ValueRef n_bytes) -> result {
+               ValueRef n_bytes,
+               ValueRef align_bytes) -> result {
+    // FIXME: switch to the 64-bit variant when on such a platform.
+    auto i = cx.fcx.lcx.ccx.intrinsics;
+    assert (i.contains_key("llvm.memcpy.p0i8.p0i8.i32"));
+    auto memcpy = i.get("llvm.memcpy.p0i8.p0i8.i32");
     auto src_ptr = cx.build.PointerCast(src, T_ptr(T_i8()));
     auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
-    auto size = cx.build.IntCast(n_bytes, T_int());
-    ret res(cx, cx.build.FastCall(cx.fcx.lcx.ccx.glues.memcpy_glue,
-                                  vec(dst_ptr, src_ptr, size)));
+    auto size = cx.build.IntCast(n_bytes, T_i32());
+    auto align =
+        if (lib.llvm.llvm.LLVMIsConstant(align_bytes) == True)
+            { cx.build.IntCast(align_bytes, T_i32()) }
+        else
+            { cx.build.IntCast(C_int(0), T_i32()) };
+
+    auto volatile = C_bool(false);
+    ret res(cx, cx.build.Call(memcpy,
+                              vec(dst_ptr, src_ptr,
+                                  size, align, volatile)));
 }
 
 fn call_bzero(@block_ctxt cx,
               ValueRef dst,
-              ValueRef n_bytes) -> result {
+              ValueRef n_bytes,
+              ValueRef align_bytes) -> result {
+
+    // FIXME: switch to the 64-bit variant when on such a platform.
+    auto i = cx.fcx.lcx.ccx.intrinsics;
+    assert (i.contains_key("llvm.memset.p0i8.i32"));
+    auto memset = i.get("llvm.memset.p0i8.i32");
     auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
-    auto size = cx.build.IntCast(n_bytes, T_int());
-    ret res(cx, cx.build.FastCall(cx.fcx.lcx.ccx.glues.bzero_glue,
-                                  vec(dst_ptr, size)));
+    auto size = cx.build.IntCast(n_bytes, T_i32());
+    auto align =
+        if (lib.llvm.llvm.LLVMIsConstant(align_bytes) == True)
+            { cx.build.IntCast(align_bytes, T_i32()) }
+        else
+            { cx.build.IntCast(C_int(0), T_i32()) };
+
+    auto volatile = C_bool(false);
+    ret res(cx, cx.build.Call(memset,
+                              vec(dst_ptr, C_u8(0u),
+                                  size, align, volatile)));
 }
 
 fn memcpy_ty(@block_ctxt cx,
              ValueRef dst,
             ValueRef src,
             ty.t t) -> result {
     if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
-        auto llszptr = field_of_tydesc(cx, t, false, abi.tydesc_field_size);
-        auto llsz = llszptr.bcx.build.Load(llszptr.val);
-        ret call_memcpy(llszptr.bcx, dst, src, llsz);
+        auto llsz = size_of(cx, t);
+        auto llalign = align_of(llsz.bcx, t);
+        ret call_memcpy(llalign.bcx, dst, src, llsz.val, llalign.val);
 
     } else {
         ret res(cx, cx.build.Store(cx.build.Load(src), dst));
@@ -5553,7 +5578,8 @@ fn trans_break_cont(@block_ctxt cx, bool to_end) -> result {
                         }
                     }
                 }
-                ret res(new_sub_block_ctxt(bcx, "unreachable"), C_nil());
+                ret res(new_sub_block_ctxt(bcx, "break_cont.unreachable"),
+                        C_nil());
             }
             case (_) {
                 alt (cleanup_cx.parent) {
@@ -5609,7 +5635,7 @@ fn trans_ret(@block_ctxt cx, &Option.t[@ast.expr] e) -> result {
     }
 
     bcx.build.RetVoid();
-    ret res(new_sub_block_ctxt(bcx, "unreachable"), C_nil());
+    ret res(new_sub_block_ctxt(bcx, "ret.unreachable"), C_nil());
 }
 
 fn trans_be(@block_ctxt cx, @ast.expr e) -> result {
@@ -5773,7 +5799,9 @@ fn zero_alloca(@block_ctxt cx, ValueRef llptr, ty.t t) -> result {
     auto bcx = cx;
     if (ty.type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
         auto llsz = size_of(bcx, t);
-        bcx = call_bzero(llsz.bcx, llptr, llsz.val).bcx;
+        auto llalign = align_of(llsz.bcx, t);
+        bcx = call_bzero(llalign.bcx, llptr,
+                         llsz.val, llalign.val).bcx;
     } else {
         auto llty = type_of(bcx.fcx.lcx.ccx, t);
         auto null = lib.llvm.llvm.LLVMConstNull(llty);
@@ -7285,11 +7313,36 @@ fn trans_main_fn(@local_ctxt cx, ValueRef llcrate, ValueRef crate_map) {
 
 fn declare_intrinsics(ModuleRef llmod) -> hashmap[str, ValueRef] {
 
+    let vec[TypeRef] T_memcpy32_args = vec(T_ptr(T_i8()), T_ptr(T_i8()),
+                                           T_i32(), T_i32(), T_i1());
+    let vec[TypeRef] T_memcpy64_args = vec(T_ptr(T_i8()), T_ptr(T_i8()),
+                                           T_i64(), T_i32(), T_i1());
+
+    let vec[TypeRef] T_memset32_args = vec(T_ptr(T_i8()), T_i8(),
+                                           T_i32(), T_i32(), T_i1());
+    let vec[TypeRef] T_memset64_args = vec(T_ptr(T_i8()), T_i8(),
+                                           T_i64(), T_i32(), T_i1());
+
     let vec[TypeRef] T_trap_args = vec();
+
+    auto memcpy32 = decl_cdecl_fn(llmod, "llvm.memcpy.p0i8.p0i8.i32",
+                                  T_fn(T_memcpy32_args, T_void()));
+    auto memcpy64 = decl_cdecl_fn(llmod, "llvm.memcpy.p0i8.p0i8.i64",
+                                  T_fn(T_memcpy64_args, T_void()));
+
+    auto memset32 = decl_cdecl_fn(llmod, "llvm.memset.p0i8.i32",
+                                  T_fn(T_memset32_args, T_void()));
+    auto memset64 = decl_cdecl_fn(llmod, "llvm.memset.p0i8.i64",
+                                  T_fn(T_memset64_args, T_void()));
+
     auto trap = decl_cdecl_fn(llmod, "llvm.trap",
                               T_fn(T_trap_args, T_void()));
 
     auto intrinsics = new_str_hash[ValueRef]();
+    intrinsics.insert("llvm.memcpy.p0i8.p0i8.i32", memcpy32);
+    intrinsics.insert("llvm.memcpy.p0i8.p0i8.i64", memcpy64);
+    intrinsics.insert("llvm.memset.p0i8.i32", memset32);
+    intrinsics.insert("llvm.memset.p0i8.i64", memset64);
     intrinsics.insert("llvm.trap", trap);
     ret intrinsics;
 }
@@ -7324,91 +7377,6 @@ fn make_no_op_type_glue(ValueRef fun) {
     new_builder(llbb).RetVoid();
 }
 
-fn decl_memcpy_glue(ModuleRef llmod) -> ValueRef {
-    auto p8 = T_ptr(T_i8());
-
-    auto ty = T_fn(vec(p8, p8, T_int()), T_void());
-    ret decl_fastcall_fn(llmod, abi.memcpy_glue_name(), ty);
-}
-
-fn make_memcpy_glue(ValueRef fun) {
-    // We're not using the LLVM memcpy intrinsic. It appears to call through
-    // to the platform memcpy in some cases, which is not terribly safe to run
-    // on a rust stack.
-    auto initbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("init"));
-    auto hdrbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("hdr"));
-    auto loopbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("loop"));
-    auto endbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("end"));
-
-    auto dst = llvm.LLVMGetParam(fun, 0u);
-    auto src = llvm.LLVMGetParam(fun, 1u);
-    auto count = llvm.LLVMGetParam(fun, 2u);
-
-    // Init block.
-    auto ib = new_builder(initbb);
-    auto ip = ib.Alloca(T_int());
-    ib.Store(C_int(0), ip);
-    ib.Br(hdrbb);
-
-    // Loop-header block
-    auto hb = new_builder(hdrbb);
-    auto i = hb.Load(ip);
-    hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);
-
-    // Loop-body block
-    auto lb = new_builder(loopbb);
-    i = lb.Load(ip);
-    lb.Store(lb.Load(lb.GEP(src, vec(i))),
-             lb.GEP(dst, vec(i)));
-    lb.Store(lb.Add(i, C_int(1)), ip);
-    lb.Br(hdrbb);
-
-    // End block
-    auto eb = new_builder(endbb);
-    eb.RetVoid();
-}
-
-fn decl_bzero_glue(ModuleRef llmod) -> ValueRef {
-    auto p8 = T_ptr(T_i8());
-
-    auto ty = T_fn(vec(p8, T_int()), T_void());
-    ret decl_fastcall_fn(llmod, abi.bzero_glue_name(), ty);
-}
-
-fn make_bzero_glue(ValueRef fun) -> ValueRef {
-    // We're not using the LLVM memset intrinsic. Same as with memcpy.
-    auto initbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("init"));
-    auto hdrbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("hdr"));
-    auto loopbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("loop"));
-    auto endbb = llvm.LLVMAppendBasicBlock(fun, Str.buf("end"));
-
-    auto dst = llvm.LLVMGetParam(fun, 0u);
-    auto count = llvm.LLVMGetParam(fun, 1u);
-
-    // Init block.
-    auto ib = new_builder(initbb);
-    auto ip = ib.Alloca(T_int());
-    ib.Store(C_int(0), ip);
-    ib.Br(hdrbb);
-
-    // Loop-header block
-    auto hb = new_builder(hdrbb);
-    auto i = hb.Load(ip);
-    hb.CondBr(hb.ICmp(lib.llvm.LLVMIntEQ, count, i), endbb, loopbb);
-
-    // Loop-body block
-    auto lb = new_builder(loopbb);
-    i = lb.Load(ip);
-    lb.Store(C_u8(0u), lb.GEP(dst, vec(i)));
-    lb.Store(lb.Add(i, C_int(1)), ip);
-    lb.Br(hdrbb);
-
-    // End block
-    auto eb = new_builder(endbb);
-    eb.RetVoid();
-    ret fun;
-}
-
 fn make_vec_append_glue(ModuleRef llmod, type_names tn) -> ValueRef {
     /*
      * Args to vec_append_glue:
@@ -7559,6 +7527,13 @@ fn trans_vec_append_glue(@local_ctxt cx) {
                                        C_int(abi.tydesc_field_size))));
     llvm.LLVMSetValueName(elt_llsz, Str.buf("elt_llsz"));
 
+    auto elt_llalign =
+        cx.build.Load(cx.build.GEP(elt_tydesc,
+                                   vec(C_int(0),
+                                       C_int(abi.tydesc_field_align))));
+    llvm.LLVMSetValueName(elt_llalign, Str.buf("elt_llalign"));
+
+
     fn take_one(ValueRef elt_tydesc,
                 @block_ctxt cx,
                 ValueRef dst, ValueRef src) -> result {
@@ -7572,7 +7547,7 @@ fn trans_vec_append_glue(@local_ctxt cx) {
                          elt_llsz, bind take_one(elt_tydesc,
                                                  _, _, _)).bcx;
 
-    ret call_memcpy(bcx, dst, src, n_bytes);
+    ret call_memcpy(bcx, dst, src, n_bytes, elt_llalign);
 }
 
     // Copy any dst elements in, omitting null if doing str.
@@ -7637,8 +7612,6 @@ fn make_glues(ModuleRef llmod, type_names tn) -> @glue_fns {
                 Vec.init_fn[ValueRef](bind decl_native_glue(llmod, tn,
                     abi.ngt_cdecl, _), abi.n_native_glues + 1 as uint),
             no_op_type_glue = decl_no_op_type_glue(llmod, tn),
-            memcpy_glue = decl_memcpy_glue(llmod),
-            bzero_glue = decl_bzero_glue(llmod),
             vec_append_glue = make_vec_append_glue(llmod, tn));
 }
 
@@ -7663,8 +7636,6 @@ fn make_common_glue(session.session sess, str output) {
 
     auto glues = make_glues(llmod, tn);
     create_crate_constant(crate_ptr, glues);
-    make_memcpy_glue(glues.memcpy_glue);
-    make_bzero_glue(glues.bzero_glue);
 
     trans.trans_exit_task_glue(glues, new_str_hash[ValueRef](), tn,
                                llmod);