@@ -1765,7 +1765,6 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1765
1765
} ;
1766
1766
let is_float = ty:: type_is_fp ( intype) ;
1767
1767
let is_signed = ty:: type_is_signed ( intype) ;
1768
- let rhs = base:: cast_shift_expr_rhs ( bcx, op, lhs, rhs) ;
1769
1768
let info = expr_info ( binop_expr) ;
1770
1769
1771
1770
let binop_debug_loc = binop_expr. debug_loc ( ) ;
@@ -1838,13 +1837,17 @@ fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1838
1837
ast:: BiBitOr => Or ( bcx, lhs, rhs, binop_debug_loc) ,
1839
1838
ast:: BiBitAnd => And ( bcx, lhs, rhs, binop_debug_loc) ,
1840
1839
ast:: BiBitXor => Xor ( bcx, lhs, rhs, binop_debug_loc) ,
1841
- ast:: BiShl => Shl ( bcx, lhs, rhs, binop_debug_loc) ,
1840
+ ast:: BiShl => {
1841
+ let ( newbcx, res) = with_overflow_check (
1842
+ bcx, OverflowOp :: Shl , info, lhs_t, lhs, rhs, binop_debug_loc) ;
1843
+ bcx = newbcx;
1844
+ res
1845
+ }
1842
1846
ast:: BiShr => {
1843
- if is_signed {
1844
- AShr ( bcx, lhs, rhs, binop_debug_loc)
1845
- } else {
1846
- LShr ( bcx, lhs, rhs, binop_debug_loc)
1847
- }
1847
+ let ( newbcx, res) = with_overflow_check (
1848
+ bcx, OverflowOp :: Shr , info, lhs_t, lhs, rhs, binop_debug_loc) ;
1849
+ bcx = newbcx;
1850
+ res
1848
1851
}
1849
1852
ast:: BiEq | ast:: BiNe | ast:: BiLt | ast:: BiGe | ast:: BiLe | ast:: BiGt => {
1850
1853
if is_simd {
/// Binary operations for which `with_overflow_check` can emit an
/// overflow-checked translation.
enum OverflowOp {
    Add,
    Sub,
    Mul,
    Shl,
    Shr,
}
2388
2393
2389
2394
impl OverflowOp {
2395
+ fn codegen_strategy ( & self ) -> OverflowCodegen {
2396
+ use self :: OverflowCodegen :: { ViaIntrinsic , ViaInputCheck } ;
2397
+ match * self {
2398
+ OverflowOp :: Add => ViaIntrinsic ( OverflowOpViaIntrinsic :: Add ) ,
2399
+ OverflowOp :: Sub => ViaIntrinsic ( OverflowOpViaIntrinsic :: Sub ) ,
2400
+ OverflowOp :: Mul => ViaIntrinsic ( OverflowOpViaIntrinsic :: Mul ) ,
2401
+
2402
+ OverflowOp :: Shl => ViaInputCheck ( OverflowOpViaInputCheck :: Shl ) ,
2403
+ OverflowOp :: Shr => ViaInputCheck ( OverflowOpViaInputCheck :: Shr ) ,
2404
+ }
2405
+ }
2406
+ }
2407
+
2408
+ enum OverflowCodegen {
2409
+ ViaIntrinsic ( OverflowOpViaIntrinsic ) ,
2410
+ ViaInputCheck ( OverflowOpViaInputCheck ) ,
2411
+ }
2412
+
2413
/// Shift operations whose overflow check is performed on the input
/// (the RHS shift amount) rather than via an LLVM intrinsic.
enum OverflowOpViaInputCheck { Shl, Shr, }
2414
+
2415
/// Arithmetic operations checked via the LLVM `*.with.overflow` intrinsics.
enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
2416
+
2417
+ impl OverflowOpViaIntrinsic {
2418
+ fn to_intrinsic < ' blk , ' tcx > ( & self , bcx : Block < ' blk , ' tcx > , lhs_ty : Ty ) -> ValueRef {
2419
+ let name = self . to_intrinsic_name ( bcx. tcx ( ) , lhs_ty) ;
2420
+ bcx. ccx ( ) . get_intrinsic ( & name)
2421
+ }
2390
2422
fn to_intrinsic_name ( & self , tcx : & ty:: ctxt , ty : Ty ) -> & ' static str {
2391
2423
use syntax:: ast:: IntTy :: * ;
2392
2424
use syntax:: ast:: UintTy :: * ;
@@ -2408,7 +2440,7 @@ impl OverflowOp {
2408
2440
} ;
2409
2441
2410
2442
match * self {
2411
- OverflowOp :: Add => match new_sty {
2443
+ OverflowOpViaIntrinsic :: Add => match new_sty {
2412
2444
ty_int( TyI8 ) => "llvm.sadd.with.overflow.i8" ,
2413
2445
ty_int( TyI16 ) => "llvm.sadd.with.overflow.i16" ,
2414
2446
ty_int( TyI32 ) => "llvm.sadd.with.overflow.i32" ,
@@ -2421,7 +2453,7 @@ impl OverflowOp {
2421
2453
2422
2454
_ => unreachable ! ( ) ,
2423
2455
} ,
2424
- OverflowOp :: Sub => match new_sty {
2456
+ OverflowOpViaIntrinsic :: Sub => match new_sty {
2425
2457
ty_int( TyI8 ) => "llvm.ssub.with.overflow.i8" ,
2426
2458
ty_int( TyI16 ) => "llvm.ssub.with.overflow.i16" ,
2427
2459
ty_int( TyI32 ) => "llvm.ssub.with.overflow.i32" ,
@@ -2434,7 +2466,7 @@ impl OverflowOp {
2434
2466
2435
2467
_ => unreachable ! ( ) ,
2436
2468
} ,
2437
- OverflowOp :: Mul => match new_sty {
2469
+ OverflowOpViaIntrinsic :: Mul => match new_sty {
2438
2470
ty_int( TyI8 ) => "llvm.smul.with.overflow.i8" ,
2439
2471
ty_int( TyI16 ) => "llvm.smul.with.overflow.i16" ,
2440
2472
ty_int( TyI32 ) => "llvm.smul.with.overflow.i32" ,
@@ -2449,16 +2481,14 @@ impl OverflowOp {
2449
2481
} ,
2450
2482
}
2451
2483
}
2452
- }
2453
2484
2454
-
2455
- fn with_overflow_check < ' a , ' b > ( bcx : Block < ' a , ' b > , oop : OverflowOp , info : NodeIdAndSpan ,
2456
- lhs_t : Ty , lhs : ValueRef , rhs : ValueRef , binop_debug_loc : DebugLoc )
2457
- -> ( Block < ' a , ' b > , ValueRef ) {
2458
- if bcx. unreachable . get ( ) { return ( bcx, _Undef ( lhs) ) ; }
2459
- if bcx. ccx ( ) . check_overflow ( ) {
2460
- let name = oop. to_intrinsic_name ( bcx. tcx ( ) , lhs_t) ;
2461
- let llfn = bcx. ccx ( ) . get_intrinsic ( & name) ;
2485
+ fn build_intrinsic_call < ' blk , ' tcx > ( & self , bcx : Block < ' blk , ' tcx > ,
2486
+ info : NodeIdAndSpan ,
2487
+ lhs_t : Ty < ' tcx > , lhs : ValueRef ,
2488
+ rhs : ValueRef ,
2489
+ binop_debug_loc : DebugLoc )
2490
+ -> ( Block < ' blk , ' tcx > , ValueRef ) {
2491
+ let llfn = self . to_intrinsic ( bcx, lhs_t) ;
2462
2492
2463
2493
let val = Call ( bcx, llfn, & [ lhs, rhs] , None , binop_debug_loc) ;
2464
2494
let result = ExtractValue ( bcx, val, 0 ) ; // iN operation result
@@ -2477,11 +2507,118 @@ fn with_overflow_check<'a, 'b>(bcx: Block<'a, 'b>, oop: OverflowOp, info: NodeId
2477
2507
InternedString :: new ( "arithmetic operation overflowed" ) ) ) ;
2478
2508
2479
2509
( bcx, result)
2510
+ }
2511
+ }
2512
+
2513
+ impl OverflowOpViaInputCheck {
2514
+ fn build_with_input_check < ' blk , ' tcx > ( & self ,
2515
+ bcx : Block < ' blk , ' tcx > ,
2516
+ info : NodeIdAndSpan ,
2517
+ lhs_t : Ty < ' tcx > ,
2518
+ lhs : ValueRef ,
2519
+ rhs : ValueRef ,
2520
+ binop_debug_loc : DebugLoc )
2521
+ -> ( Block < ' blk , ' tcx > , ValueRef )
2522
+ {
2523
+ let lhs_llty = val_ty ( lhs) ;
2524
+ let rhs_llty = val_ty ( rhs) ;
2525
+
2526
+ // Panic if any bits are set outside of bits that we always
2527
+ // mask in.
2528
+ //
2529
+ // Note that the mask's value is derived from the LHS type
2530
+ // (since that is where the 32/64 distinction is relevant) but
2531
+ // the mask's type must match the RHS type (since they will
2532
+ // both be fed into a and-binop)
2533
+ let invert_mask = !shift_mask_val ( lhs_llty) ;
2534
+ let invert_mask = C_integral ( rhs_llty, invert_mask, true ) ;
2535
+
2536
+ let outer_bits = And ( bcx, rhs, invert_mask, binop_debug_loc) ;
2537
+ let cond = ICmp ( bcx, llvm:: IntNE , outer_bits,
2538
+ C_integral ( rhs_llty, 0 , false ) , binop_debug_loc) ;
2539
+ let result = match * self {
2540
+ OverflowOpViaInputCheck :: Shl =>
2541
+ build_unchecked_lshift ( bcx, lhs, rhs, binop_debug_loc) ,
2542
+ OverflowOpViaInputCheck :: Shr =>
2543
+ build_unchecked_rshift ( bcx, lhs_t, lhs, rhs, binop_debug_loc) ,
2544
+ } ;
2545
+ let bcx =
2546
+ base:: with_cond ( bcx, cond, |bcx|
2547
+ controlflow:: trans_fail ( bcx, info,
2548
+ InternedString :: new ( "shift operation overflowed" ) ) ) ;
2549
+
2550
+ ( bcx, result)
2551
+ }
2552
+ }
2553
+
2554
+ fn shift_mask_val ( llty : Type ) -> u64 {
2555
+ // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
2556
+ llty. int_width ( ) - 1
2557
+ }
2558
+
2559
+ // To avoid UB from LLVM, these two functions mask RHS with an
2560
+ // appropriate mask unconditionally (i.e. the fallback behavior for
2561
+ // all shifts). For 32- and 64-bit types, this matches the semantics
2562
+ // of Java. (See related discussion on #1877 and #10183.)
2563
+
2564
+ fn build_unchecked_lshift < ' blk , ' tcx > ( bcx : Block < ' blk , ' tcx > ,
2565
+ lhs : ValueRef ,
2566
+ rhs : ValueRef ,
2567
+ binop_debug_loc : DebugLoc ) -> ValueRef {
2568
+ let rhs = base:: cast_shift_expr_rhs ( bcx, ast:: BinOp_ :: BiShl , lhs, rhs) ;
2569
+ // #1877, #10183: Ensure that input is always valid
2570
+ let rhs = shift_mask_rhs ( bcx, rhs, binop_debug_loc) ;
2571
+ Shl ( bcx, lhs, rhs, binop_debug_loc)
2572
+ }
2573
+
2574
+ fn build_unchecked_rshift < ' blk , ' tcx > ( bcx : Block < ' blk , ' tcx > ,
2575
+ lhs_t : Ty < ' tcx > ,
2576
+ lhs : ValueRef ,
2577
+ rhs : ValueRef ,
2578
+ binop_debug_loc : DebugLoc ) -> ValueRef {
2579
+ let rhs = base:: cast_shift_expr_rhs ( bcx, ast:: BinOp_ :: BiShr , lhs, rhs) ;
2580
+ // #1877, #10183: Ensure that input is always valid
2581
+ let rhs = shift_mask_rhs ( bcx, rhs, binop_debug_loc) ;
2582
+ let is_signed = ty:: type_is_signed ( lhs_t) ;
2583
+ if is_signed {
2584
+ AShr ( bcx, lhs, rhs, binop_debug_loc)
2585
+ } else {
2586
+ LShr ( bcx, lhs, rhs, binop_debug_loc)
2587
+ }
2588
+ }
2589
+
2590
+ fn shift_mask_rhs < ' blk , ' tcx > ( bcx : Block < ' blk , ' tcx > ,
2591
+ rhs : ValueRef ,
2592
+ debug_loc : DebugLoc ) -> ValueRef {
2593
+ let rhs_llty = val_ty ( rhs) ;
2594
+ let mask = shift_mask_val ( rhs_llty) ;
2595
+ And ( bcx, rhs, C_integral ( rhs_llty, mask, false ) , debug_loc)
2596
+ }
2597
+
2598
+ fn with_overflow_check < ' blk , ' tcx > ( bcx : Block < ' blk , ' tcx > , oop : OverflowOp , info : NodeIdAndSpan ,
2599
+ lhs_t : Ty < ' tcx > , lhs : ValueRef ,
2600
+ rhs : ValueRef ,
2601
+ binop_debug_loc : DebugLoc )
2602
+ -> ( Block < ' blk , ' tcx > , ValueRef ) {
2603
+ if bcx. unreachable . get ( ) { return ( bcx, _Undef ( lhs) ) ; }
2604
+ if bcx. ccx ( ) . check_overflow ( ) {
2605
+
2606
+ match oop. codegen_strategy ( ) {
2607
+ OverflowCodegen :: ViaIntrinsic ( oop) =>
2608
+ oop. build_intrinsic_call ( bcx, info, lhs_t, lhs, rhs, binop_debug_loc) ,
2609
+ OverflowCodegen :: ViaInputCheck ( oop) =>
2610
+ oop. build_with_input_check ( bcx, info, lhs_t, lhs, rhs, binop_debug_loc) ,
2611
+ }
2480
2612
} else {
2481
2613
let res = match oop {
2482
2614
OverflowOp :: Add => Add ( bcx, lhs, rhs, binop_debug_loc) ,
2483
2615
OverflowOp :: Sub => Sub ( bcx, lhs, rhs, binop_debug_loc) ,
2484
2616
OverflowOp :: Mul => Mul ( bcx, lhs, rhs, binop_debug_loc) ,
2617
+
2618
+ OverflowOp :: Shl =>
2619
+ build_unchecked_lshift ( bcx, lhs, rhs, binop_debug_loc) ,
2620
+ OverflowOp :: Shr =>
2621
+ build_unchecked_rshift ( bcx, lhs_t, lhs, rhs, binop_debug_loc) ,
2485
2622
} ;
2486
2623
( bcx, res)
2487
2624
}
0 commit comments