Skip to content

Commit 8802466

Browse files
committed
Relax some atomic barriers. Loosen up all that tension. There, doesn't that feel good?
1 parent 479809a commit 8802466

File tree

3 files changed

+23
-22
lines changed

3 files changed

+23
-22
lines changed

src/libstd/rt/comm.rs

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@ use kinds::Send;
1818
use rt::sched::Scheduler;
1919
use rt::local::Local;
2020
use rt::select::{Select, SelectPort};
21-
use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Release, SeqCst};
21+
use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Relaxed, SeqCst};
2222
use unstable::sync::UnsafeAtomicRcBox;
2323
use util::Void;
2424
use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
@@ -216,15 +216,15 @@ impl<T> Select for PortOne<T> {
216216
}
217217
STATE_ONE => {
218218
// Re-record that we are the only owner of the packet.
219-
// Release barrier needed in case the task gets reawoken
220-
// on a different core (this is analogous to writing a
221-
// payload; a barrier in enqueueing the task protects it).
219+
// No barrier needed, even if the task gets reawoken
220+
// on a different core -- this is analogous to writing a
221+
// payload; a barrier in enqueueing the task protects it.
222222
// NB(#8132). This *must* occur before the enqueue below.
223223
// FIXME(#6842, #8130) This is usually only needed for the
224224
// assertion in recv_ready, except in the case of select().
225225
// This won't actually ever have cacheline contention, but
226226
// maybe should be optimized out with a cfg(test) anyway?
227-
(*self.packet()).state.store(STATE_ONE, Release);
227+
(*self.packet()).state.store(STATE_ONE, Relaxed);
228228

229229
rtdebug!("rendezvous recv");
230230
sched.metrics.rendezvous_recvs += 1;
@@ -299,7 +299,7 @@ impl<T> SelectPort<T> for PortOne<T> {
299299
unsafe {
300300
// See corresponding store() above in block_on for rationale.
301301
// FIXME(#8130) This can happen only in test builds.
302-
assert!((*packet).state.load(Acquire) == STATE_ONE);
302+
assert!((*packet).state.load(Relaxed) == STATE_ONE);
303303

304304
let payload = (*packet).payload.take();
305305

src/libstd/rt/kill.rs

Lines changed: 13 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -17,7 +17,7 @@ use option::{Option, Some, None};
1717
use prelude::*;
1818
use rt::task::Task;
1919
use to_bytes::IterBytes;
20-
use unstable::atomics::{AtomicUint, Acquire, SeqCst};
20+
use unstable::atomics::{AtomicUint, Relaxed};
2121
use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
2222
use util;
2323

@@ -95,7 +95,7 @@ impl Drop for KillFlag {
9595
// Letting a KillFlag with a task inside get dropped would leak the task.
9696
// We could free it here, but the task should get awoken by hand somehow.
9797
fn drop(&self) {
98-
match self.load(Acquire) {
98+
match self.load(Relaxed) {
9999
KILL_RUNNING | KILL_KILLED => { },
100100
_ => rtabort!("can't drop kill flag with a blocked task inside!"),
101101
}
@@ -124,7 +124,7 @@ impl BlockedTask {
124124
Unkillable(task) => Some(task),
125125
Killable(flag_arc) => {
126126
let flag = unsafe { &mut **flag_arc.get() };
127-
match flag.swap(KILL_RUNNING, SeqCst) {
127+
match flag.swap(KILL_RUNNING, Relaxed) {
128128
KILL_RUNNING => None, // woken from select(), perhaps
129129
KILL_KILLED => None, // a killer stole it already
130130
task_ptr =>
@@ -159,7 +159,7 @@ impl BlockedTask {
159159
let flag = &mut **flag_arc.get();
160160
let task_ptr = cast::transmute(task);
161161
// Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
162-
match flag.compare_and_swap(KILL_RUNNING, task_ptr, SeqCst) {
162+
match flag.compare_and_swap(KILL_RUNNING, task_ptr, Relaxed) {
163163
KILL_RUNNING => Right(Killable(flag_arc)),
164164
KILL_KILLED => Left(revive_task_ptr(task_ptr, Some(flag_arc))),
165165
x => rtabort!("can't block task! kill flag = %?", x),
@@ -257,7 +257,7 @@ impl KillHandle {
257257
let inner = unsafe { &mut *self.get() };
258258
// Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
259259
// FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
260-
match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, SeqCst) {
260+
match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, Relaxed) {
261261
KILL_RUNNING => { }, // normal case
262262
KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
263263
_ => rtabort!("inhibit_kill: task already unkillable"),
@@ -270,7 +270,7 @@ impl KillHandle {
270270
let inner = unsafe { &mut *self.get() };
271271
// Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
272272
// FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
273-
match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, SeqCst) {
273+
match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, Relaxed) {
274274
KILL_UNKILLABLE => { }, // normal case
275275
KILL_KILLED => if !already_failing { fail!(KILLED_MSG) },
276276
_ => rtabort!("allow_kill: task already killable"),
@@ -281,10 +281,10 @@ impl KillHandle {
281281
// if it was blocked and needs punted awake. To be called by other tasks.
282282
pub fn kill(&mut self) -> Option<~Task> {
283283
let inner = unsafe { &mut *self.get() };
284-
if inner.unkillable.swap(KILL_KILLED, SeqCst) == KILL_RUNNING {
284+
if inner.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING {
285285
// Got in. Allowed to try to punt the task awake.
286286
let flag = unsafe { &mut *inner.killed.get() };
287-
match flag.swap(KILL_KILLED, SeqCst) {
287+
match flag.swap(KILL_KILLED, Relaxed) {
288288
// Task either not blocked or already taken care of.
289289
KILL_RUNNING | KILL_KILLED => None,
290290
// Got ownership of the blocked task.
@@ -306,8 +306,11 @@ impl KillHandle {
306306
// is unkillable with a kill signal pending.
307307
let inner = unsafe { &*self.get() };
308308
let flag = unsafe { &*inner.killed.get() };
309-
// FIXME(#6598): can use relaxed ordering (i think)
310-
flag.load(Acquire) == KILL_KILLED
309+
// A barrier-related concern here is that a task that gets killed
310+
// awake needs to see the killer's write of KILLED to this flag. This
311+
// is analogous to receiving a pipe payload; the appropriate barrier
312+
// should happen when enqueueing the task.
313+
flag.load(Relaxed) == KILL_KILLED
311314
}
312315

313316
pub fn notify_immediate_failure(&mut self) {

src/libstd/unstable/sync.rs

Lines changed: 4 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -16,7 +16,7 @@ use ptr;
1616
use option::*;
1717
use either::{Either, Left, Right};
1818
use task;
19-
use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst};
19+
use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst};
2020
use unstable::finally::Finally;
2121
use ops::Drop;
2222
use clone::Clone;
@@ -95,8 +95,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
9595
pub fn get(&self) -> *mut T {
9696
unsafe {
9797
let mut data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
98-
// FIXME(#6598) Change Acquire to Relaxed.
99-
assert!(data.count.load(Acquire) > 0);
98+
assert!(data.count.load(Relaxed) > 0);
10099
let r: *mut T = data.data.get_mut_ref();
101100
cast::forget(data);
102101
return r;
@@ -107,7 +106,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
107106
pub fn get_immut(&self) -> *T {
108107
unsafe {
109108
let data: ~AtomicRcBoxData<T> = cast::transmute(self.data);
110-
assert!(data.count.load(Acquire) > 0); // no barrier is really needed
109+
assert!(data.count.load(Relaxed) > 0);
111110
let r: *T = data.data.get_ref();
112111
cast::forget(data);
113112
return r;
@@ -130,8 +129,7 @@ impl<T: Send> UnsafeAtomicRcBox<T> {
130129
// Try to put our server end in the unwrapper slot.
131130
// This needs no barrier -- it's protected by the release barrier on
132131
// the xadd, and the acquire+release barrier in the destructor's xadd.
133-
// FIXME(#6598) Change Acquire to Relaxed.
134-
if data.unwrapper.fill(~(c1,p2), Acquire).is_none() {
132+
if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() {
135133
// Got in. Tell this handle's destructor not to run (we are now it).
136134
this.data = ptr::mut_null();
137135
// Drop our own reference.

0 commit comments

Comments (0)