@@ -17,7 +17,7 @@ use option::{Option, Some, None};
 use prelude::*;
 use rt::task::Task;
 use to_bytes::IterBytes;
-use unstable::atomics::{AtomicUint, Acquire, SeqCst};
+use unstable::atomics::{AtomicUint, Relaxed};
 use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
 use util;
 
@@ -95,7 +95,7 @@ impl Drop for KillFlag {
     // Letting a KillFlag with a task inside get dropped would leak the task.
     // We could free it here, but the task should get awoken by hand somehow.
     fn drop(&self) {
-        match self.load(Acquire) {
+        match self.load(Relaxed) {
             KILL_RUNNING | KILL_KILLED => { },
             _ => rtabort!("can't drop kill flag with a blocked task inside!"),
         }
@@ -124,7 +124,7 @@ impl BlockedTask {
             Unkillable(task) => Some(task),
             Killable(flag_arc) => {
                 let flag = unsafe { &mut **flag_arc.get() };
-                match flag.swap(KILL_RUNNING, SeqCst) {
+                match flag.swap(KILL_RUNNING, Relaxed) {
                     KILL_RUNNING => None, // woken from select(), perhaps
                     KILL_KILLED => None,  // a killer stole it already
                     task_ptr =>
@@ -159,7 +159,7 @@ impl BlockedTask {
                 let flag = &mut **flag_arc.get();
                 let task_ptr = cast::transmute(task);
                 // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
-                match flag.compare_and_swap(KILL_RUNNING, task_ptr, SeqCst) {
+                match flag.compare_and_swap(KILL_RUNNING, task_ptr, Relaxed) {
                     KILL_RUNNING => Right(Killable(flag_arc)),
                     KILL_KILLED => Left(revive_task_ptr(task_ptr, Some(flag_arc))),
                     x => rtabort!("can't block task! kill flag = %?", x),
@@ -257,7 +257,7 @@ impl KillHandle {
         let inner = unsafe { &mut *self.get() };
         // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
         // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-        match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, SeqCst) {
+        match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, Relaxed) {
             KILL_RUNNING => { }, // normal case
             KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
             _ => rtabort!("inhibit_kill: task already unkillable"),
@@ -270,7 +270,7 @@ impl KillHandle {
         let inner = unsafe { &mut *self.get() };
         // Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
         // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-        match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, SeqCst) {
+        match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, Relaxed) {
             KILL_UNKILLABLE => { }, // normal case
             KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
             _ => rtabort!("allow_kill: task already killable"),
@@ -281,10 +281,10 @@ impl KillHandle {
     // if it was blocked and needs punted awake. To be called by other tasks.
     pub fn kill(&mut self) -> Option<~Task> {
         let inner = unsafe { &mut *self.get() };
-        if inner.unkillable.swap(KILL_KILLED, SeqCst) == KILL_RUNNING {
+        if inner.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING {
             // Got in. Allowed to try to punt the task awake.
             let flag = unsafe { &mut *inner.killed.get() };
-            match flag.swap(KILL_KILLED, SeqCst) {
+            match flag.swap(KILL_KILLED, Relaxed) {
                 // Task either not blocked or already taken care of.
                 KILL_RUNNING | KILL_KILLED => None,
                 // Got ownership of the blocked task.
@@ -306,8 +306,11 @@ impl KillHandle {
         // is unkillable with a kill signal pending.
         let inner = unsafe { &*self.get() };
         let flag = unsafe { &*inner.killed.get() };
-        // FIXME(#6598): can use relaxed ordering (i think)
-        flag.load(Acquire) == KILL_KILLED
+        // A barrier-related concern here is that a task that gets killed
+        // awake needs to see the killer's write of KILLED to this flag. This
+        // is analogous to receiving a pipe payload; the appropriate barrier
+        // should happen when enqueueing the task.
+        flag.load(Relaxed) == KILL_KILLED
     }
 
     pub fn notify_immediate_failure(&mut self) {
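The new comment in killed() argues that a Relaxed load is enough because the synchronizing barrier comes from the queue that hands the woken task back to a scheduler, not from the kill flag itself. Below is a minimal sketch of that reasoning using today's std::sync::atomic API; the AtomicUsize flag and the mpsc channel stand in for the 2013 unstable::atomics flag and the scheduler enqueue, and the constants mirror the diff, but the wiring is hypothetical rather than the runtime's actual code.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{mpsc, Arc};
use std::thread;

const KILL_RUNNING: usize = 0;
const KILL_KILLED: usize = 1;

fn main() {
    let kill_flag = Arc::new(AtomicUsize::new(KILL_RUNNING));
    let (tx, rx) = mpsc::channel::<()>();

    let killer_flag = kill_flag.clone();
    let killer = thread::spawn(move || {
        // The killer only needs the swap to be atomic, not ordered: Relaxed.
        killer_flag.swap(KILL_KILLED, Ordering::Relaxed);
        // "Enqueueing the task": the channel send/recv pair supplies the
        // happens-before edge that publishes the flag write to the waker.
        tx.send(()).unwrap();
    });

    // The woken side first receives the wakeup (which synchronizes with the
    // send), so a Relaxed load afterwards already observes KILL_KILLED.
    rx.recv().unwrap();
    assert_eq!(kill_flag.load(Ordering::Relaxed), KILL_KILLED);

    killer.join().unwrap();
}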