@@ -16,7 +16,7 @@ pub use self::ExprOrMethodCall::*;
 
 use session::Session;
 use llvm;
-use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef};
+use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
 use llvm::{True, False, Bool};
 use middle::cfg;
 use middle::def;
@@ -40,6 +40,7 @@ use middle::traits;
 use middle::ty::{self, HasTypeFlags, Ty};
 use middle::ty::fold::{TypeFolder, TypeFoldable};
 use rustc_front::hir;
+use rustc_mir::repr::Mir;
 use util::nodemap::{FnvHashMap, NodeMap};
 
 use arena::TypedArena;
@@ -328,6 +329,11 @@ impl<'tcx> DropFlagHintsMap<'tcx> {
 // Function context. Every LLVM function we create will have one of
 // these.
 pub struct FunctionContext<'a, 'tcx: 'a> {
+    // The MIR for this function. At present, this is optional because
+    // we only have MIR available for things that are local to the
+    // crate.
+    pub mir: Option<&'a Mir<'tcx>>,
+
     // The ValueRef returned from a call to llvm::LLVMAddFunction; the
     // address of the first instruction in the sequence of
     // instructions for this function that will go in the .text
@@ -407,6 +413,10 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
 }
 
 impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
+    pub fn mir(&self) -> &'a Mir<'tcx> {
+        self.mir.unwrap()
+    }
+
     pub fn arg_offset(&self) -> usize {
         self.env_arg_pos() + if self.llenv.is_some() { 1 } else { 0 }
     }
@@ -644,6 +654,10 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
     }
     pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
 
+    pub fn mir(&self) -> &'blk Mir<'tcx> {
+        self.fcx.mir()
+    }
+
     pub fn name(&self, name: ast::Name) -> String {
         name.to_string()
     }
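As a side note, here is a minimal, self-contained sketch of the accessor pattern these two hunks introduce, using stand-in types rather than the real rustc_mir::repr::Mir, FunctionContext, and Block definitions: the function context optionally borrows the MIR (only items local to the crate have one), its mir() method unwraps that borrow, and the block context simply forwards to it.

    // Stand-in types; `Mir`, `FunctionContext`, and `BlockCtx` here are
    // illustrative only, not the actual rustc definitions.
    struct Mir;

    struct FunctionContext<'a> {
        // Populated only for items local to the crate being translated.
        mir: Option<&'a Mir>,
    }

    impl<'a> FunctionContext<'a> {
        // Panics if translation asks for MIR that was never built.
        // Note the return borrow has the context lifetime 'a, not the
        // lifetime of &self.
        fn mir(&self) -> &'a Mir {
            self.mir.unwrap()
        }
    }

    struct BlockCtx<'a> {
        fcx: &'a FunctionContext<'a>,
    }

    impl<'a> BlockCtx<'a> {
        // Blocks just delegate to their enclosing function context.
        fn mir(&self) -> &'a Mir {
            self.fcx.mir()
        }
    }

    fn main() {
        let mir = Mir;
        let fcx = FunctionContext { mir: Some(&mir) };
        let bcx = BlockCtx { fcx: &fcx };
        let _mir = bcx.mir(); // would panic here if `mir` were `None`
    }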
@@ -1132,3 +1146,65 @@ pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
         ccx.sess().bug(&format!("no variant for {:?}::{}", adt_def, inlined_vid))
     })
 }
+
+// To avoid UB from LLVM, these two functions mask RHS with an
+// appropriate mask unconditionally (i.e. the fallback behavior for
+// all shifts). For 32- and 64-bit types, this matches the semantics
+// of Java. (See related discussion on #1877 and #10183.)
+
+pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                          lhs: ValueRef,
+                                          rhs: ValueRef,
+                                          binop_debug_loc: DebugLoc) -> ValueRef {
+    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
+    // #1877, #10183: Ensure that input is always valid
+    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
+    build::Shl(bcx, lhs, rhs, binop_debug_loc)
+}
+
+pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                          lhs_t: Ty<'tcx>,
+                                          lhs: ValueRef,
+                                          rhs: ValueRef,
+                                          binop_debug_loc: DebugLoc) -> ValueRef {
+    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
+    // #1877, #10183: Ensure that input is always valid
+    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
+    let is_signed = lhs_t.is_signed();
+    if is_signed {
+        build::AShr(bcx, lhs, rhs, binop_debug_loc)
+    } else {
+        build::LShr(bcx, lhs, rhs, binop_debug_loc)
+    }
+}
+
+fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                              rhs: ValueRef,
+                              debug_loc: DebugLoc) -> ValueRef {
+    let rhs_llty = val_ty(rhs);
+    build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
+}
+
+pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                  llty: Type,
+                                  mask_llty: Type,
+                                  invert: bool) -> ValueRef {
+    let kind = llty.kind();
+    match kind {
+        TypeKind::Integer => {
+            // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
+            let val = llty.int_width() - 1;
+            if invert {
+                C_integral(mask_llty, !val, true)
+            } else {
+                C_integral(mask_llty, val, false)
+            }
+        },
+        TypeKind::Vector => {
+            let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
+            build::VectorSplat(bcx, mask_llty.vector_length(), mask)
+        },
+        _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
+    }
+}
+
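For readers outside the trans code, here is a rough sketch of the masking rule the helpers above emit, written as ordinary Rust rather than LLVM builder calls (the function names below are illustrative only, not part of the patch): the shift amount is ANDed with bit width minus one, so it can never reach or exceed the width of the type, which is what LLVM's shl/lshr/ashr require to avoid undefined behaviour; for 32- and 64-bit integers this matches Java's shift semantics.

    // Illustrative only: plain-Rust equivalents of the masked shifts that
    // build_unchecked_lshift/build_unchecked_rshift emit via the builder.
    fn masked_shl_u32(lhs: u32, rhs: u32) -> u32 {
        let mask = 32 - 1;        // width of the LHS type, minus one
        lhs << (rhs & mask)       // shift amount is always in 0..=31
    }

    fn masked_shr_i32(lhs: i32, rhs: u32) -> i32 {
        let mask = 32 - 1;
        lhs >> (rhs & mask)       // arithmetic shift, like LLVM `ashr`
    }

    fn main() {
        // An oversized shift amount wraps around instead of being UB:
        // 33 & 31 == 1, so this behaves like 1 << 1, as it would in Java.
        assert_eq!(masked_shl_u32(1, 33), 2);
        assert_eq!(masked_shr_i32(-8, 34), -2);
    }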