[EscapeAnalysis] Handle atomic instructions in escape analysis #71997

Merged
merged 1 commit into from Mar 5, 2024
@@ -420,6 +420,41 @@ fileprivate struct EscapeWalker<V: EscapeVisitor> : ValueDefUseWalker,
return .continueWalk
}
return isEscaping

case .AtomicLoad:
// Treat atomic loads as regular loads and just walk down their uses.
if !followLoads(at: path) {
return .continueWalk
}

// Even when analyzing atomics, a loaded trivial value can be ignored.
if !hasRelevantType(bi, at: path.projectionPath) {
return .continueWalk
}

return walkDownUses(ofValue: bi, path: path.with(knownType: nil))

case .AtomicStore, .AtomicRMW:
// If we shouldn't follow the store, then we can keep walking.
if !path.followStores {
return .continueWalk
}

// Be conservative and just say the store is escaping.
return isEscaping

case .CmpXChg:
// If we have to follow loads or stores of a cmpxchg, then just bail.
if followLoads(at: path) || path.followStores {
return isEscaping
}

return .continueWalk

case .Fence:
// Fences do not affect escape analysis.
return .continueWalk

default:
return isEscaping
}
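
For orientation, the decision table encoded by the new cases can be summarized in a small, self-contained Swift sketch. The names AtomicBuiltin, Verdict, and classify below are hypothetical and not part of the compiler; the real logic is the EscapeWalker switch above, which additionally walks down the uses of a loaded value rather than returning a verdict.

// Hypothetical, self-contained summary of the rules encoded by the new cases.
// AtomicBuiltin, Verdict, and classify are illustrative names only.
enum AtomicBuiltin { case atomicLoad, atomicStore, atomicRMW, cmpXChg, fence }

enum Verdict {
  case continueWalk     // the builtin itself lets nothing escape here
  case walkLoadedValue  // keep walking the uses of the loaded value
  case escaping         // be conservative and treat the value as escaping
}

func classify(_ builtin: AtomicBuiltin,
              followLoads: Bool,
              followStores: Bool,
              hasRelevantType: Bool) -> Verdict {
  switch builtin {
  case .atomicLoad:
    // Like a regular load: only interesting if loads are followed,
    // and a loaded trivial value can be ignored.
    if !followLoads { return .continueWalk }
    if !hasRelevantType { return .continueWalk }
    return .walkLoadedValue
  case .atomicStore, .atomicRMW:
    // Only matters when stores are followed; then give up conservatively.
    return followStores ? .escaping : .continueWalk
  case .cmpXChg:
    // Acts as both a load and a store: bail if either direction is followed.
    return (followLoads || followStores) ? .escaping : .continueWalk
  case .fence:
    // Fences do not affect escape analysis.
    return .continueWalk
  }
}
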
125 changes: 125 additions & 0 deletions test/SILOptimizer/mem-behavior.sil
@@ -1100,6 +1100,131 @@ bb0:
return %4 : $()
}

// CHECK-LABEL: @test_builtin_zeroInitializer_atomicload
// CHECK: PAIR #0.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %0 = alloc_stack
// CHECK-NEXT: r=0,w=0
// CHECK: PAIR #1.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %1 = alloc_stack
// CHECK-NEXT: r=0,w=1
sil @test_builtin_zeroInitializer_atomicload : $@convention(thin) () -> Builtin.Int64 {
bb0:
%0 = alloc_stack $C
%1 = alloc_stack $C
%2 = builtin "zeroInitializer"<C>(%1 : $*C) : $()
%3 = apply undef<C>(%1) : $@convention(thin) <C> () -> @out C
copy_addr [take] %1 to [init] %0 : $*C
dealloc_stack %1 : $*C
%4 = address_to_pointer %0 : $*C to $Builtin.RawPointer
%5 = builtin "atomicload_monotonic_Int64"(%4 : $Builtin.RawPointer) : $Builtin.Int64
destroy_addr %0 : $*C
dealloc_stack %0 : $*C
return %5 : $Builtin.Int64
}

// CHECK-LABEL: @test_builtin_zeroInitializer_atomicstore
// CHECK: PAIR #0.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %0 = alloc_stack
// CHECK-NEXT: r=0,w=0
// CHECK: PAIR #1.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %1 = alloc_stack
// CHECK-NEXT: r=0,w=1
sil @test_builtin_zeroInitializer_atomicstore : $@convention(thin) () -> () {
bb0:
%0 = alloc_stack $C
%1 = alloc_stack $C
%2 = builtin "zeroInitializer"<C>(%1 : $*C) : $()
%3 = apply undef<C>(%1) : $@convention(thin) <C> () -> @out C
copy_addr [take] %1 to [init] %0 : $*C
dealloc_stack %1 : $*C
%4 = address_to_pointer %0 : $*C to $Builtin.RawPointer
%5 = integer_literal $Builtin.Int64, 1
%6 = builtin "atomicstore_monotonic_Int64"(%4 : $Builtin.RawPointer, %5 : $Builtin.Int64) : $()
destroy_addr %0 : $*C
dealloc_stack %0 : $*C
%7 = tuple ()
return %7 : $()
}

// CHECK-LABEL: @test_builtin_zeroInitializer_atomicrmw
// CHECK: PAIR #0.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %0 = alloc_stack
// CHECK-NEXT: r=0,w=0
// CHECK: PAIR #1.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %1 = alloc_stack
// CHECK-NEXT: r=0,w=1
sil @test_builtin_zeroInitializer_atomicrmw : $@convention(thin) () -> (Builtin.Int64, Builtin.Int64) {
bb0:
%0 = alloc_stack $C
%1 = alloc_stack $C
%2 = builtin "zeroInitializer"<C>(%1 : $*C) : $()
%3 = apply undef<C>(%1) : $@convention(thin) <C> () -> @out C
copy_addr [take] %1 to [init] %0 : $*C
dealloc_stack %1 : $*C
%4 = address_to_pointer %0 : $*C to $Builtin.RawPointer
%5 = integer_literal $Builtin.Int64, 1
%6 = builtin "atomicrmw_xchg_monotonic_Int64"(%4 : $Builtin.RawPointer, %5 : $Builtin.Int64) : $Builtin.Int64
%7 = builtin "atomicrmw_add_monotonic_Int64"(%4 : $Builtin.RawPointer, %5 : $Builtin.Int64) : $Builtin.Int64
destroy_addr %0 : $*C
dealloc_stack %0 : $*C
%8 = tuple (%6 : $Builtin.Int64, %7 : $Builtin.Int64)
return %8 : $(Builtin.Int64, Builtin.Int64)
}

// CHECK-LABEL: @test_builtin_zeroInitializer_cmpxchg
// CHECK: PAIR #0.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %0 = alloc_stack
// CHECK-NEXT: r=0,w=0
// CHECK: PAIR #1.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %1 = alloc_stack
// CHECK-NEXT: r=0,w=1
sil @test_builtin_zeroInitializer_cmpxchg : $@convention(thin) () -> (Builtin.Int64, Builtin.Int1) {
bb0:
%0 = alloc_stack $C
%1 = alloc_stack $C
%2 = builtin "zeroInitializer"<C>(%1 : $*C) : $()
%3 = apply undef<C>(%1) : $@convention(thin) <C> () -> @out C
copy_addr [take] %1 to [init] %0 : $*C
dealloc_stack %1 : $*C
%4 = address_to_pointer %0 : $*C to $Builtin.RawPointer
%5 = integer_literal $Builtin.Int64, 1
%6 = builtin "cmpxchg_monotonic_monotonic_Int64"(%4 : $Builtin.RawPointer, %5 : $Builtin.Int64) : $(Builtin.Int64, Builtin.Int1)
destroy_addr %0 : $*C
dealloc_stack %0 : $*C
return %6 : $(Builtin.Int64, Builtin.Int1)
}

// CHECK-LABEL: @test_builtin_zeroInitializer_fence
// CHECK: PAIR #0.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %0 = alloc_stack
// CHECK-NEXT: r=0,w=0
// CHECK: PAIR #1.
// CHECK-NEXT: %2 = builtin "zeroInitializer"<C>(%1 : $*C)
// CHECK-NEXT: %1 = alloc_stack
// CHECK-NEXT: r=0,w=1
sil @test_builtin_zeroInitializer_fence : $@convention(thin) () -> () {
bb0:
%0 = alloc_stack $C
%1 = alloc_stack $C
%2 = builtin "zeroInitializer"<C>(%1 : $*C) : $()
%3 = apply undef<C>(%1) : $@convention(thin) <C> () -> @out C
copy_addr [take] %1 to [init] %0 : $*C
dealloc_stack %1 : $*C
%5 = builtin "fence_release"() : $()
destroy_addr %0 : $*C
dealloc_stack %0 : $*C
return %5 : $()
}

// CHECK-LABEL: @test_stored_pointer
// CHECK: PAIR #3.
// CHECK-NEXT: %5 = apply %4(%2) : $@convention(thin) (@in Builtin.RawPointer) -> ()
10 changes: 10 additions & 0 deletions test/SILOptimizer/templvalueopt.sil
@@ -264,6 +264,16 @@ bb0(%ret_addr : $*T):
apply undef<T>(%temporary) : $@convention(thin) <T> () -> @out T
copy_addr [take] %temporary to [init] %ret_addr : $*T
dealloc_stack %temporary : $*T

// Ensure that the following builtin instructions don't prevent
// TempLValueOpt from getting rid of the temporary.
%empty = builtin "fence_release"() : $()
%ptr = address_to_pointer %ret_addr : $*T to $Builtin.RawPointer
%load = builtin "atomicload_monotonic_Int64"(%ptr : $Builtin.RawPointer) : $Builtin.Int64
%onetwoeight = integer_literal $Builtin.Int64, 128
%empty2 = builtin "atomicstore_monotonic_Int64"(%ptr : $Builtin.RawPointer, %onetwoeight : $Builtin.Int64) : $()
%add = builtin "atomicrmw_add_monotonic_Int64"(%ptr : $Builtin.RawPointer, %onetwoeight : $Builtin.Int64) : $Builtin.Int64
%cmpxchg = builtin "cmpxchg_monotonic_monotonic_Int64"(%ptr : $Builtin.RawPointer, %onetwoeight : $Builtin.Int64, %onetwoeight : $Builtin.Int64) : $(Builtin.Int64, Builtin.Int1)
%15 = tuple ()
return %15 : $()
}
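
For reference, here is a minimal sketch (an assumed shape, not the test's actual CHECK lines) of the body TempLValueOpt is expected to produce for the block above: the temporary, its alloc_stack/dealloc_stack, and the copy_addr are folded away, the apply initializes %ret_addr directly, and the atomic builtins keep operating on the caller-provided address.

bb0(%ret_addr : $*T):
  apply undef<T>(%ret_addr) : $@convention(thin) <T> () -> @out T
  %empty = builtin "fence_release"() : $()
  %ptr = address_to_pointer %ret_addr : $*T to $Builtin.RawPointer
  %load = builtin "atomicload_monotonic_Int64"(%ptr : $Builtin.RawPointer) : $Builtin.Int64
  %onetwoeight = integer_literal $Builtin.Int64, 128
  %empty2 = builtin "atomicstore_monotonic_Int64"(%ptr : $Builtin.RawPointer, %onetwoeight : $Builtin.Int64) : $()
  %add = builtin "atomicrmw_add_monotonic_Int64"(%ptr : $Builtin.RawPointer, %onetwoeight : $Builtin.Int64) : $Builtin.Int64
  %cmpxchg = builtin "cmpxchg_monotonic_monotonic_Int64"(%ptr : $Builtin.RawPointer, %onetwoeight : $Builtin.Int64, %onetwoeight : $Builtin.Int64) : $(Builtin.Int64, Builtin.Int1)
  %15 = tuple ()
  return %15 : $()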