/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2005
 *
 * STM implementation.
 *
 * Overview
 * --------
 *
 * See the PPoPP 2005 paper "Composable memory transactions".  In summary,
 * each transaction has a TRec (transaction record) holding entries for each of the
 * TVars (transactional variables) that it has accessed.  Each entry records
 * (a) the TVar, (b) the expected value seen in the TVar, (c) the new value that
 * the transaction wants to write to the TVar, (d) during commit, the identity of
 * the TRec that wrote the expected value.
 *
 * Separate TRecs are used for each level in a nest of transactions.  This allows
 * a nested transaction to be aborted without condemning its enclosing transactions.
 * This is needed in the implementation of catchRetry.  Note that the "expected value"
 * in a nested transaction's TRec is the value expected to be *held in memory* if
 * the transaction commits -- not the "new value" stored in one of the enclosing
 * transactions.  This means that validation can be done without searching through
 * a nest of TRecs.
 *
 * Concurrency control
 * -------------------
 *
 * Three different concurrency control schemes can be built according to the settings
 * in STM.h:
 *
 * STM_UNIPROC assumes that the caller serialises invocations on the STM interface.
 * In the Haskell RTS this means it is suitable only for non-THREADED_RTS builds.
 *
 * STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired during
 * an invocation on the STM interface.  Note that this does not mean that
 * transactions are simply serialized -- the lock is only held *within* the
 * implementation of stmCommitTransaction, stmWait etc.
 *
 * STM_FG_LOCKS uses fine-grained locking -- locking is done on a per-TVar basis
 * and, when committing a transaction, no locks are acquired for TVars that have
 * been read but not updated.
 *
 * Concurrency control is implemented in the functions:
 *
 *    lock_stm
 *    unlock_stm
 *    lock_tvar / cond_lock_tvar
 *    unlock_tvar
 *
 * The choice between STM_UNIPROC / STM_CG_LOCK / STM_FG_LOCKS affects the
 * implementation of these functions.
 *
 * lock_stm & unlock_stm are straightforward : they acquire a simple spin-lock
 * using STM_CG_LOCK, and otherwise they are no-ops.
 *
 * lock_tvar / cond_lock_tvar and unlock_tvar are more complex because they
 * have other effects (present in STM_UNIPROC and STM_CG_LOCK builds) as well
 * as the actual business of manipulating a lock (present only in STM_FG_LOCKS
 * builds).  This is because locking a TVar is implemented by writing the lock
 * holder's TRec into the TVar's current_value field:
 *
 *   lock_tvar - lock a specified TVar (STM_FG_LOCKS only), returning the value
 *               it contained.
 *
 *   cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
 *               contains a specified value.  Return TRUE if this succeeds,
 *               FALSE otherwise.
 *
 *   unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
 *               storing a specified value in place of the lock entry.
 *
 * Using these operations, the typical pattern of a commit/validate/wait operation
 * is to (a) lock the STM, (b) lock all the TVars being updated, (c) check that
 * the TVars that were only read from still contain their expected values,
 * (d) release the locks on the TVars, writing updates to them in the case of a
 * commit, (e) unlock the STM.
 *
 * Queues of waiting threads hang off the first_watch_queue_entry
 * field of each TVar.  This may only be manipulated when holding that
 * TVar's lock.  In particular, when a thread is putting itself to
 * sleep, it mustn't release the TVar's lock until it has added itself
 * to the wait queue and marked its TSO as BlockedOnSTM -- this makes
 * sure that other threads will know to wake it.
 *
 * ---------------------------------------------------------------------------*/
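
/* As an illustrative sketch (not part of the RTS proper): under STM_FG_LOCKS
 * the commit protocol described above composes the primitives defined below
 * roughly as follows.  Control flow is simplified from
 * stmCommitTransaction(); error paths and invariants are omitted:
 *
 *   lock_stm(trec);                               // (a) -- a no-op here
 *   FOR_EACH_ENTRY(trec, e, {                     // (b) lock updated TVars
 *       if (entry_is_update(e) &&
 *           !cond_lock_tvar(trec, e -> tvar, e -> expected_value)) {
 *           // validation failed: revert_ownership() and retry
 *       }
 *   });
 *   ok = check_read_only(trec);                   // (c) validate read set
 *   FOR_EACH_ENTRY(trec, e, {                     // (d) write back + unlock
 *       if (entry_is_update(e)) {
 *           unlock_tvar(cap, trec, e -> tvar, e -> new_value, TRUE);
 *       }
 *   });
 *   unlock_stm(trec);                             // (e)
 */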

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Schedule.h"
#include "STM.h"
#include "Trace.h"
#include "Threads.h"
#include "sm/Storage.h"

#include <stdio.h>

#define TRUE 1
#define FALSE 0

// ACQ_ASSERT is used for assertions which are only required for
// THREADED_RTS builds with fine-grained locking.

#if defined(STM_FG_LOCKS)
#define ACQ_ASSERT(_X) ASSERT(_X)
#define NACQ_ASSERT(_X) /*Nothing*/
#else
#define ACQ_ASSERT(_X) /*Nothing*/
#define NACQ_ASSERT(_X) ASSERT(_X)
#endif

/*......................................................................*/

// If SHAKE is defined then validation will sometimes spuriously fail.  This
// helps test unusual code paths when genuine contention is rare.

#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)

#ifdef SHAKE
static const int do_shake = TRUE;
#else
static const int do_shake = FALSE;
#endif
static int shake_ctr = 0;
static int shake_lim = 1;

static int shake(void) {
  if (do_shake) {
    if (((shake_ctr++) % shake_lim) == 0) {
      shake_ctr = 1;
      shake_lim ++;
      return TRUE;
    }
    return FALSE;
  } else {
    return FALSE;
  }
}

/*......................................................................*/

// Helper macros for iterating over entries within a transaction
// record

#define FOR_EACH_ENTRY(_t,_x,CODE) do {                                        \
  StgTRecHeader *__t = (_t);                                                   \
  StgTRecChunk *__c = __t -> current_chunk;                                    \
  StgWord __limit = __c -> next_entry_idx;                                     \
  TRACE("%p : FOR_EACH_ENTRY, current_chunk=%p limit=%ld", __t, __c, __limit); \
  while (__c != END_STM_CHUNK_LIST) {                                          \
    StgWord __i;                                                               \
    for (__i = 0; __i < __limit; __i ++) {                                     \
      TRecEntry *_x = &(__c -> entries[__i]);                                  \
      do { CODE } while (0);                                                   \
    }                                                                          \
    __c = __c -> prev_chunk;                                                   \
    __limit = TREC_CHUNK_NUM_ENTRIES;                                          \
  }                                                                            \
 exit_for_each:                                                                \
  if (FALSE) goto exit_for_each;                                               \
} while (0)

#define BREAK_FOR_EACH goto exit_for_each
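
// Example use of the iteration macros (this is the pattern used by
// merge_update_into() and friends below): visit each entry of trec "t",
// binding it to "e", and stop early once a match is found:
//
//   FOR_EACH_ENTRY(t, e, {
//     if (e -> tvar == tvar) {
//       found = TRUE;
//       BREAK_FOR_EACH;
//     }
//   });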

/*......................................................................*/

// if REUSE_MEMORY is defined then attempt to re-use descriptors, log chunks,
// and wait queue entries without GC

#define REUSE_MEMORY

/*......................................................................*/

#define IF_STM_UNIPROC(__X)  do { } while (0)
#define IF_STM_CG_LOCK(__X)  do { } while (0)
#define IF_STM_FG_LOCKS(__X) do { } while (0)

#if defined(STM_UNIPROC)
#undef IF_STM_UNIPROC
#define IF_STM_UNIPROC(__X) do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p)", trec, s);
  if (force_update) {
    s -> current_value = c;
    dirty_TVAR(cap,s);
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
}
#endif

#if defined(STM_CG_LOCK) /*........................................*/

#undef IF_STM_CG_LOCK
#define IF_STM_CG_LOCK(__X) do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;
static volatile StgTRecHeader *smp_locked = NULL;

static void lock_stm(StgTRecHeader *trec) {
  while (cas(&smp_locked, NULL, trec) != NULL) { }
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
  ASSERT (smp_locked == trec);
  smp_locked = 0;
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  ASSERT (smp_locked == trec);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT (smp_locked == trec);
  if (force_update) {
    s -> current_value = c;
    dirty_TVAR(cap,s);
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  ASSERT (smp_locked == trec);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
}
#endif

#if defined(STM_FG_LOCKS) /*...................................*/

#undef IF_STM_FG_LOCKS
#define IF_STM_FG_LOCKS(__X) do { __X } while (0)
static const StgBool config_use_read_phase = TRUE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  do {
    do {
      result = s -> current_value;
    } while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info);
  } while (cas((void *)&(s -> current_value),
               (StgWord)result, (StgWord)trec) != (StgWord)result);
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update STG_UNUSED) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT(s -> current_value == (StgClosure *)trec);
  s -> current_value = c;
  dirty_TVAR(cap,s);
}

static StgBool cond_lock_tvar(StgTRecHeader *trec,
                              StgTVar *s,
                              StgClosure *expected) {
  StgClosure *result;
  StgWord w;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
  result = (StgClosure *)w;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv) {
  return (cas(&(inv -> lock), 0, 1) == 0);
}

static void unlock_inv(StgAtomicInvariant *inv) {
  ASSERT(inv -> lock == 1);
  inv -> lock = 0;
}
#endif
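
// Note on the STM_FG_LOCKS representation: a TVar is "locked" when its
// current_value field points at the locking StgTRecHeader instead of at an
// ordinary value, so the test
//
//   GET_INFO(UNTAG_CLOSURE(s -> current_value)) == &stg_TREC_HEADER_info
//
// distinguishes a lock word from real contents.  lock_tvar() above spins on
// exactly this test before attempting its cas(), and read_current_value()
// below uses it to wait out a committing transaction.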

/*......................................................................*/

static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  StgInfoTable *info = get_itbl(c);
  return (info -> type) == TSO;
}

static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  return (c->header.info == &stg_ATOMIC_INVARIANT_info);
}

/*......................................................................*/

// Helper functions for thread blocking and unblocking

static void park_tso(StgTSO *tso) {
  ASSERT(tso -> why_blocked == NotBlocked);
  tso -> why_blocked = BlockedOnSTM;
  tso -> block_info.closure = (StgClosure *) END_TSO_QUEUE;
  TRACE("park_tso on tso=%p", tso);
}

static void unpark_tso(Capability *cap, StgTSO *tso) {
  // We will continue unparking threads while they remain on one of the wait
  // queues: it's up to the thread itself to remove itself from the wait
  // queues if it decides to do so when it is scheduled.

  // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
  // to avoid multiple CPUs unblocking the same TSO, and also to
  // synchronise with throwTo().  The first time the TSO is unblocked
  // we mark this fact by setting block_info.closure == STM_AWOKEN.
  // This way we can avoid sending further wakeup messages in the
  // future.
  lockTSO(tso);
  if (tso->why_blocked == BlockedOnSTM &&
      tso->block_info.closure == &stg_STM_AWOKEN_closure) {
    TRACE("unpark_tso already woken up tso=%p", tso);
  } else if (tso -> why_blocked == BlockedOnSTM) {
    TRACE("unpark_tso on tso=%p", tso);
    tso->block_info.closure = &stg_STM_AWOKEN_closure;
    tryWakeupThread(cap,tso);
  } else {
    TRACE("spurious unpark_tso on tso=%p", tso);
  }
  unlockTSO(tso);
}

static void unpark_waiters_on(Capability *cap, StgTVar *s) {
  StgTVarWatchQueue *q;
  StgTVarWatchQueue *trail;
  TRACE("unpark_waiters_on tvar=%p", s);
  // unblock TSOs in reverse order, to be a bit fairer (#2319)
  for (q = s -> first_watch_queue_entry, trail = q;
       q != END_STM_WATCH_QUEUE;
       q = q -> next_queue_entry) {
    trail = q;
  }
  q = trail;
  for (;
       q != END_STM_WATCH_QUEUE;
       q = q -> prev_queue_entry) {
    if (watcher_is_tso(q)) {
      unpark_tso(cap, (StgTSO *)(q -> closure));
    }
  }
}
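
// The watch queue hanging off a TVar is a doubly-linked list of
// StgTVarWatchQueue nodes, newest entry at the head:
//
//   s -> first_watch_queue_entry -> [q0] <-> [q1] <-> ... <-> [qn]
//                                                             (tail)
//
// unpark_waiters_on() above first walks next_queue_entry links to find the
// tail and then wakes watchers via prev_queue_entry links, so the
// longest-waiting watcher is woken first (#2319).  Entries are added at the
// head by build_watch_queue_entries_for_trec() below.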

/*......................................................................*/

// Helper functions for downstream allocation and initialization

static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
                                                             StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result;
  result = (StgInvariantCheckQueue *)allocate(cap, sizeofW(StgInvariantCheckQueue));
  SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
  result -> invariant = invariant;
  result -> my_execution = NO_TREC;
  return result;
}

static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
                                                   StgClosure *closure) {
  StgTVarWatchQueue *result;
  result = (StgTVarWatchQueue *)allocate(cap, sizeofW(StgTVarWatchQueue));
  SET_HDR (result, &stg_TVAR_WATCH_QUEUE_info, CCS_SYSTEM);
  result -> closure = closure;
  return result;
}

static StgTRecChunk *new_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result;
  result = (StgTRecChunk *)allocate(cap, sizeofW(StgTRecChunk));
  SET_HDR (result, &stg_TREC_CHUNK_info, CCS_SYSTEM);
  result -> prev_chunk = END_STM_CHUNK_LIST;
  result -> next_entry_idx = 0;
  return result;
}

static StgTRecHeader *new_stg_trec_header(Capability *cap,
                                          StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result;
  result = (StgTRecHeader *) allocate(cap, sizeofW(StgTRecHeader));
  SET_HDR (result, &stg_TREC_HEADER_info, CCS_SYSTEM);

  result -> enclosing_trec = enclosing_trec;
  result -> current_chunk = new_stg_trec_chunk(cap);
  result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;

  if (enclosing_trec == NO_TREC) {
    result -> state = TREC_ACTIVE;
  } else {
    ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
           enclosing_trec -> state == TREC_CONDEMNED);
    result -> state = enclosing_trec -> state;
  }

  return result;
}

/*......................................................................*/

// Allocation / deallocation functions that retain per-capability lists
// of closures that can be re-used

static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
                                                               StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result = NULL;
  if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
    result = new_stg_invariant_check_queue(cap, invariant);
  } else {
    result = cap -> free_invariant_check_queues;
    result -> invariant = invariant;
    result -> my_execution = NO_TREC;
    cap -> free_invariant_check_queues = result -> next_queue_entry;
  }
  return result;
}

static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
                                                     StgClosure *closure) {
  StgTVarWatchQueue *result = NULL;
  if (cap -> free_tvar_watch_queues == END_STM_WATCH_QUEUE) {
    result = new_stg_tvar_watch_queue(cap, closure);
  } else {
    result = cap -> free_tvar_watch_queues;
    result -> closure = closure;
    cap -> free_tvar_watch_queues = result -> next_queue_entry;
  }
  return result;
}

static void free_stg_tvar_watch_queue(Capability *cap,
                                      StgTVarWatchQueue *wq) {
#if defined(REUSE_MEMORY)
  wq -> next_queue_entry = cap -> free_tvar_watch_queues;
  cap -> free_tvar_watch_queues = wq;
#endif
}

static StgTRecChunk *alloc_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result = NULL;
  if (cap -> free_trec_chunks == END_STM_CHUNK_LIST) {
    result = new_stg_trec_chunk(cap);
  } else {
    result = cap -> free_trec_chunks;
    cap -> free_trec_chunks = result -> prev_chunk;
    result -> prev_chunk = END_STM_CHUNK_LIST;
    result -> next_entry_idx = 0;
  }
  return result;
}

static void free_stg_trec_chunk(Capability *cap,
                                StgTRecChunk *c) {
#if defined(REUSE_MEMORY)
  c -> prev_chunk = cap -> free_trec_chunks;
  cap -> free_trec_chunks = c;
#endif
}

static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
                                            StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result = NULL;
  if (cap -> free_trec_headers == NO_TREC) {
    result = new_stg_trec_header(cap, enclosing_trec);
  } else {
    result = cap -> free_trec_headers;
    cap -> free_trec_headers = result -> enclosing_trec;
    result -> enclosing_trec = enclosing_trec;
    result -> current_chunk -> next_entry_idx = 0;
    result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
    if (enclosing_trec == NO_TREC) {
      result -> state = TREC_ACTIVE;
    } else {
      ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
             enclosing_trec -> state == TREC_CONDEMNED);
      result -> state = enclosing_trec -> state;
    }
  }
  return result;
}

static void free_stg_trec_header(Capability *cap,
                                 StgTRecHeader *trec) {
#if defined(REUSE_MEMORY)
  StgTRecChunk *chunk = trec -> current_chunk -> prev_chunk;
  while (chunk != END_STM_CHUNK_LIST) {
    StgTRecChunk *prev_chunk = chunk -> prev_chunk;
    free_stg_trec_chunk(cap, chunk);
    chunk = prev_chunk;
  }
  trec -> current_chunk -> prev_chunk = END_STM_CHUNK_LIST;
  trec -> enclosing_trec = cap -> free_trec_headers;
  cap -> free_trec_headers = trec;
#endif
}
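
// Putting the REUSE_MEMORY scheme together, the intended lifecycle of a
// TRec is roughly (a sketch, using the functions defined above):
//
//   StgTRecHeader *t = alloc_stg_trec_header(cap, NO_TREC); // pop free list,
//                                                           // or allocate
//   // ... run the transaction, growing t via get_new_entry() ...
//   free_stg_trec_header(cap, t); // push t and its spare chunks back onto
//                                 // cap's free lists for the next trec
//
// stmPreGCHook() below simply empties these per-capability free lists so
// that garbage collection never has to trace through them.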

/*......................................................................*/

// Helper functions for managing waiting lists

static void build_watch_queue_entries_for_trec(Capability *cap,
                                               StgTSO *tso,
                                               StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE);

  TRACE("%p : build_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *q;
    StgTVarWatchQueue *fq;
    s = e -> tvar;
    TRACE("%p : adding tso=%p to watch queue for tvar=%p", trec, tso, s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    NACQ_ASSERT(s -> current_value == e -> expected_value);
    fq = s -> first_watch_queue_entry;
    q = alloc_stg_tvar_watch_queue(cap, (StgClosure*) tso);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
    e -> new_value = (StgClosure *) q;
    dirty_TVAR(cap,s); // we modified first_watch_queue_entry
  });
}

static void remove_watch_queue_entries_for_trec(Capability *cap,
                                                StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_WAITING ||
         trec -> state == TREC_CONDEMNED);

  TRACE("%p : remove_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *pq;
    StgTVarWatchQueue *nq;
    StgTVarWatchQueue *q;
    StgClosure *saw;
    s = e -> tvar;
    saw = lock_tvar(trec, s);
    q = (StgTVarWatchQueue *) (e -> new_value);
    TRACE("%p : removing tso=%p from watch queue for tvar=%p",
          trec,
          q -> closure,
          s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    nq = q -> next_queue_entry;
    pq = q -> prev_queue_entry;
    if (nq != END_STM_WATCH_QUEUE) {
      nq -> prev_queue_entry = pq;
    }
    if (pq != END_STM_WATCH_QUEUE) {
      pq -> next_queue_entry = nq;
    } else {
      ASSERT (s -> first_watch_queue_entry == q);
      s -> first_watch_queue_entry = nq;
      dirty_TVAR(cap,s); // we modified first_watch_queue_entry
    }
    free_stg_tvar_watch_queue(cap, q);
    unlock_tvar(cap, trec, s, saw, FALSE);
  });
}

/*......................................................................*/

static TRecEntry *get_new_entry(Capability *cap,
                                StgTRecHeader *t) {
  TRecEntry *result;
  StgTRecChunk *c;
  int i;

  c = t -> current_chunk;
  i = c -> next_entry_idx;
  ASSERT(c != END_STM_CHUNK_LIST);

  if (i < TREC_CHUNK_NUM_ENTRIES) {
    // Continue to use current chunk
    result = &(c -> entries[i]);
    c -> next_entry_idx ++;
  } else {
    // Current chunk is full: allocate a fresh one
    StgTRecChunk *nc;
    nc = alloc_stg_trec_chunk(cap);
    nc -> prev_chunk = c;
    nc -> next_entry_idx = 1;
    t -> current_chunk = nc;
    result = &(nc -> entries[0]);
  }

  return result;
}
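
// For example, supposing TREC_CHUNK_NUM_ENTRIES were 16 (its actual value
// lives in STM.h): the first 16 calls to get_new_entry() fill entries 0..15
// of the initial chunk, and the 17th call finds the chunk full, pushes a
// fresh chunk on the front of the list (its prev_chunk pointing at the old
// one) and hands out that chunk's entry 0, leaving next_entry_idx == 1.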

/*......................................................................*/

static void merge_update_into(Capability *cap,
                              StgTRecHeader *t,
                              StgTVar *tvar,
                              StgClosure *expected_value,
                              StgClosure *new_value) {
  int found;

  // Look for an entry in this trec
  found = FALSE;
  FOR_EACH_ENTRY(t, e, {
    StgTVar *s;
    s = e -> tvar;
    if (s == tvar) {
      found = TRUE;
      if (e -> expected_value != expected_value) {
        // Must abort if the two entries start from different values
        TRACE("%p : update entries inconsistent at %p (%p vs %p)",
              t, tvar, e -> expected_value, expected_value);
        t -> state = TREC_CONDEMNED;
      }
      e -> new_value = new_value;
      BREAK_FOR_EACH;
    }
  });

  if (!found) {
    // No entry so far in this trec
    TRecEntry *ne;
    ne = get_new_entry(cap, t);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = new_value;
  }
}

/*......................................................................*/

static void merge_read_into(Capability *cap,
                            StgTRecHeader *trec,
                            StgTVar *tvar,
                            StgClosure *expected_value)
{
  int found;
  StgTRecHeader *t;

  found = FALSE;

  //
  // See #7493
  //
  // We need to look for an existing entry *anywhere* in the stack of
  // nested transactions.  Otherwise, in stmCommitNestedTransaction()
  // we can't tell the difference between
  //
  //   (1) a read-only entry
  //   (2) an entry that writes back the original value
  //
  // Since in both cases e->new_value == e->expected_value.  But in (1)
  // we want to do nothing, and in (2) we want to update e->new_value
  // in the outer transaction.
  //
  // Here we deal with the first possibility: we never create a
  // read-only entry in an inner transaction if there is an existing
  // outer entry; so we never have an inner read and an outer update.
  // So then in stmCommitNestedTransaction() we know we can always
  // write e->new_value over the outer entry, because the inner entry
  // is the most up to date.
  //
  for (t = trec; !found && t != NO_TREC; t = t -> enclosing_trec)
  {
    FOR_EACH_ENTRY(t, e, {
      if (e -> tvar == tvar) {
        found = TRUE;
        if (e -> expected_value != expected_value) {
          // Must abort if the two entries start from different values
          TRACE("%p : read entries inconsistent at %p (%p vs %p)",
                t, tvar, e -> expected_value, expected_value);
          t -> state = TREC_CONDEMNED;
        }
        BREAK_FOR_EACH;
      }
    });
  }

  if (!found) {
    // No entry found
    TRecEntry *ne;
    ne = get_new_entry(cap, trec);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = expected_value;
  }
}
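
// To make the two indistinguishable cases above concrete, suppose a TVar x
// holds v0.  Then, in some trec's entry e for x:
//
//   (1) read-only:  e -> expected_value == v0 and e -> new_value == v0
//                   because the transaction only read x;
//   (2) write-back: e -> expected_value == v0 and e -> new_value == v0
//                   because the transaction wrote v0 over v0.
//
// The entry alone cannot tell these apart.  Searching the whole
// enclosing_trec chain above guarantees that case (1) never coexists with
// an outer update entry for x, which is what lets
// stmCommitNestedTransaction() write e -> new_value over the outer entry
// unconditionally.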

/*......................................................................*/

static StgBool entry_is_update(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value != e -> new_value);
  return result;
}

#if defined(STM_FG_LOCKS)
static StgBool entry_is_read_only(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value == e -> new_value);
  return result;
}

static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
  StgClosure *c;
  StgBool result;
  c = s -> current_value;
  result = (c == (StgClosure *) h);
  return result;
}
#endif

// revert_ownership : release a lock on a TVar, storing back
// the value that it held when the lock was acquired.  "revert_all"
// is set in stmWait and stmReWait when we acquired locks on all of
// the TVars involved.  "revert_all" is not set in commit operations
// where we don't lock TVars that have been read from but not updated.

static void revert_ownership(Capability *cap STG_UNUSED,
                             StgTRecHeader *trec STG_UNUSED,
                             StgBool revert_all STG_UNUSED) {
#if defined(STM_FG_LOCKS)
  FOR_EACH_ENTRY(trec, e, {
    if (revert_all || entry_is_update(e)) {
      StgTVar *s;
      s = e -> tvar;
      if (tvar_is_locked(s, trec)) {
        unlock_tvar(cap, trec, s, e -> expected_value, TRUE);
      }
    }
  });
#endif
}

/*......................................................................*/

// validate_and_acquire_ownership : this performs the twin functions
// of checking that the TVars referred to by entries in trec hold the
// expected values and:
//
//   - locking the TVar (on updated TVars during commit, or all TVars
//     during wait)
//
//   - recording the identity of the TRec that wrote the value seen in the
//     TVar (on non-updated TVars during commit).  These values are
//     stashed in the TRec entries and are then checked in check_read_only
//     to ensure that an atomic snapshot of all of these locations has been
//     seen.

static StgBool validate_and_acquire_ownership (Capability *cap,
                                               StgTRecHeader *trec,
                                               int acquire_all,
                                               int retain_ownership) {
  StgBool result;

  if (shake()) {
    TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
    return FALSE;
  }

  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));
  result = !((trec -> state) == TREC_CONDEMNED);
  if (result) {
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (acquire_all || entry_is_update(e)) {
        TRACE("%p : trying to acquire %p", trec, s);
        if (!cond_lock_tvar(trec, s, e -> expected_value)) {
          TRACE("%p : failed to acquire %p", trec, s);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      } else {
        ASSERT(config_use_read_phase);
        IF_STM_FG_LOCKS({
          TRACE("%p : will need to check %p", trec, s);
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          }
          e -> num_updates = s -> num_updates;
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match (race)", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          } else {
            TRACE("%p : need to check version %ld", trec, e -> num_updates);
          }
        });
      }
    });
  }

  if ((!result) || (!retain_ownership)) {
    revert_ownership(cap, trec, acquire_all);
  }

  return result;
}
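
// Note the order of operations in the read-phase branch above: check the
// TVar's value, sample its num_updates field into the entry, then check the
// value again.  A committer bumps num_updates while it holds the TVar's
// lock (at which point current_value is its TRec, not the expected value),
// so if both checks see the expected value then the sampled version number
// belongs to that value, and check_read_only() can later detect any
// intervening commit by re-reading num_updates alone.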

// check_read_only : check that we've seen an atomic snapshot of the
// non-updated TVars accessed by a trec.  This checks that the last TRec to
// commit an update to the TVar is unchanged since the value was stashed in
// validate_and_acquire_ownership.  If no update is seen to any TVar then
// all of them contained their expected values at the start of the call to
// check_read_only.
//
// The paper "Concurrent programming without locks" (under submission), or
// Keir Fraser's PhD dissertation "Practical lock-free programming" discuss
// this kind of algorithm.

static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
  StgBool result = TRUE;

  ASSERT (config_use_read_phase);
  IF_STM_FG_LOCKS({
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (entry_is_read_only(e)) {
        TRACE("%p : check_read_only for TVar %p, saw %ld", trec, s, e -> num_updates);
        if (s -> num_updates != e -> num_updates) {
          // ||s -> current_value != e -> expected_value) {
          TRACE("%p : mismatch", trec);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      }
    });
  });

  return result;
}


/************************************************************************/

void stmPreGCHook (Capability *cap) {
  lock_stm(NO_TREC);
  TRACE("stmPreGCHook");
  cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
  cap->free_trec_chunks = END_STM_CHUNK_LIST;
  cap->free_trec_headers = NO_TREC;
  unlock_stm(NO_TREC);
}

/************************************************************************/

// check_read_only relies on version numbers held in TVars' "num_updates"
// fields not wrapping around while a transaction is committed.  The version
// number is incremented each time an update is committed to the TVar.
// This is unlikely to wrap around when 32-bit integers are used for the
// counts, but to ensure correctness we maintain a shared count of the
// maximum number of commit operations that may occur and check that this
// has not increased by more than 2^32 during a commit.

#define TOKEN_BATCH_SIZE 1024

static volatile StgInt64 max_commits = 0;

#if defined(THREADED_RTS)
static volatile StgBool token_locked = FALSE;

static void getTokenBatch(Capability *cap) {
  while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
  max_commits += TOKEN_BATCH_SIZE;
  TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
  cap -> transaction_tokens = TOKEN_BATCH_SIZE;
  token_locked = FALSE;
}

static void getToken(Capability *cap) {
  if (cap -> transaction_tokens == 0) {
    getTokenBatch(cap);
  }
  cap -> transaction_tokens --;
}
#else
static void getToken(Capability *cap STG_UNUSED) {
  // Nothing
}
#endif
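
// Worked example of the wrap-around guard used in stmCommitTransaction():
// max_commits is sampled at the start and the end of the commit, and each
// of the n_capabilities capabilities may hold at most TOKEN_BATCH_SIZE
// (1024) undrained tokens, so
//
//   max_concurrent_commits = (max_commits_at_end - max_commits_at_start)
//                          + (n_capabilities * TOKEN_BATCH_SIZE)
//
// bounds the number of commits that can have overlapped this one.  If
// (max_concurrent_commits >> 32) == 0 then fewer than 2^32 commits
// happened, a 32-bit num_updates counter cannot have wrapped all the way
// around, and the version check in check_read_only() is sound; otherwise
// the commit conservatively fails.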

/*......................................................................*/

StgTRecHeader *stmStartTransaction(Capability *cap,
                                   StgTRecHeader *outer) {
  StgTRecHeader *t;
  TRACE("%p : stmStartTransaction with %d tokens",
        outer,
        cap -> transaction_tokens);

  getToken(cap);

  t = alloc_stg_trec_header(cap, outer);
  TRACE("%p : stmStartTransaction()=%p", outer, t);
  return t;
}

/*......................................................................*/

void stmAbortTransaction(Capability *cap,
                         StgTRecHeader *trec) {
  StgTRecHeader *et;
  TRACE("%p : stmAbortTransaction", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  et = trec -> enclosing_trec;
  if (et == NO_TREC) {
    // We're a top-level transaction: remove any watch queue entries that
    // we may have.
    TRACE("%p : aborting top-level transaction", trec);

    if (trec -> state == TREC_WAITING) {
      ASSERT (trec -> enclosing_trec == NO_TREC);
      TRACE("%p : stmAbortTransaction aborting waiting transaction", trec);
      remove_watch_queue_entries_for_trec(cap, trec);
    }

  } else {
    // We're a nested transaction: merge our read set into our parent's
    TRACE("%p : retaining read-set into parent %p", trec, et);

    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s = e -> tvar;
      merge_read_into(cap, et, s, e -> expected_value);
    });
  }

  trec -> state = TREC_ABORTED;
  unlock_stm(trec);

  TRACE("%p : stmAbortTransaction done", trec);
}

/*......................................................................*/

void stmFreeAbortedTRec(Capability *cap,
                        StgTRecHeader *trec) {
  TRACE("%p : stmFreeAbortedTRec", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_CONDEMNED) ||
          (trec -> state == TREC_ABORTED));

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmFreeAbortedTRec done", trec);
}

/*......................................................................*/

void stmCondemnTransaction(Capability *cap,
                           StgTRecHeader *trec) {
  TRACE("%p : stmCondemnTransaction", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  if (trec -> state == TREC_WAITING) {
    ASSERT (trec -> enclosing_trec == NO_TREC);
    TRACE("%p : stmCondemnTransaction condemning waiting transaction", trec);
    remove_watch_queue_entries_for_trec(cap, trec);
  }
  trec -> state = TREC_CONDEMNED;
  unlock_stm(trec);

  TRACE("%p : stmCondemnTransaction done", trec);
}

/*......................................................................*/

StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
  StgTRecHeader *t;
  StgBool result;

  TRACE("%p : stmValidateNestOfTransactions", trec);
  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  t = trec;
  result = TRUE;
  while (t != NO_TREC) {
    result &= validate_and_acquire_ownership(cap, t, TRUE, FALSE);
    t = t -> enclosing_trec;
  }

  if (!result && trec -> state != TREC_WAITING) {
    trec -> state = TREC_CONDEMNED;
  }

  unlock_stm(trec);

  TRACE("%p : stmValidateNestOfTransactions()=%d", trec, result);
  return result;
}

/*......................................................................*/

static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeader **in) {
  TRecEntry *result = NULL;

  TRACE("%p : get_entry_for TVar %p", trec, tvar);
  ASSERT(trec != NO_TREC);

  do {
    FOR_EACH_ENTRY(trec, e, {
      if (e -> tvar == tvar) {
        result = e;
        if (in != NULL) {
          *in = trec;
        }
        BREAK_FOR_EACH;
      }
    });
    trec = trec -> enclosing_trec;
  } while (result == NULL && trec != NO_TREC);

  return result;
}

/*......................................................................*/

/*
 * Add/remove links between an invariant and its TVars.  The caller must
 * have locked the TVars involved and the invariant.
 */

static void disconnect_invariant(Capability *cap,
                                 StgAtomicInvariant *inv) {
  StgTRecHeader *last_execution = inv -> last_execution;

  TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);

  FOR_EACH_ENTRY(last_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = s -> first_watch_queue_entry;
    DEBUG_ONLY( StgBool found = FALSE );
    TRACE("  looking for trec on tvar=%p", s);
    for (q = s -> first_watch_queue_entry;
         q != END_STM_WATCH_QUEUE;
         q = q -> next_queue_entry) {
      if (q -> closure == (StgClosure*)inv) {
        StgTVarWatchQueue *pq;
        StgTVarWatchQueue *nq;
        nq = q -> next_queue_entry;
        pq = q -> prev_queue_entry;
        if (nq != END_STM_WATCH_QUEUE) {
          nq -> prev_queue_entry = pq;
        }
        if (pq != END_STM_WATCH_QUEUE) {
          pq -> next_queue_entry = nq;
        } else {
          ASSERT (s -> first_watch_queue_entry == q);
          s -> first_watch_queue_entry = nq;
          dirty_TVAR(cap,s); // we modified first_watch_queue_entry
        }
        TRACE("  found it in watch queue entry %p", q);
        free_stg_tvar_watch_queue(cap, q);
        DEBUG_ONLY( found = TRUE );
        break;
      }
    }
    ASSERT(found);
  });
  inv -> last_execution = NO_TREC;
}

static void connect_invariant_to_trec(Capability *cap,
                                      StgAtomicInvariant *inv,
                                      StgTRecHeader *my_execution) {
  TRACE("connecting execution inv=%p trec=%p", inv, my_execution);

  ASSERT(inv -> last_execution == NO_TREC);

  FOR_EACH_ENTRY(my_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
    StgTVarWatchQueue *fq = s -> first_watch_queue_entry;

    // We leave "last_execution" holding the values that will be
    // in the heap after the transaction we're in the process
    // of committing has finished.
    TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
    if (entry != NULL) {
      e -> expected_value = entry -> new_value;
      e -> new_value = entry -> new_value;
    }

    TRACE("  linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
    dirty_TVAR(cap,s); // we modified first_watch_queue_entry
  });

  inv -> last_execution = my_execution;
}

/*
 * Add a new invariant to the trec's list of invariants to check on commit
 */
void stmAddInvariantToCheck(Capability *cap,
                            StgTRecHeader *trec,
                            StgClosure *code) {
  StgAtomicInvariant *invariant;
  StgInvariantCheckQueue *q;
  TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE ||
         trec -> state == TREC_CONDEMNED);

  // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
  //    to signal that this is a new invariant in the current atomic block

  invariant = (StgAtomicInvariant *) allocate(cap, sizeofW(StgAtomicInvariant));
  TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
  SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
  invariant -> code = code;
  invariant -> last_execution = NO_TREC;
  invariant -> lock = 0;

  // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec

  q = alloc_stg_invariant_check_queue(cap, invariant);
  TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
  q -> invariant = invariant;
  q -> my_execution = NO_TREC;
  q -> next_queue_entry = trec -> invariants_to_check;
  trec -> invariants_to_check = q;

  TRACE("%p : stmAddInvariantToCheck done", trec);
}

/*
 * Fill in the trec's list of invariants that might be violated by the
 * current transaction.
 */

StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
  StgTRecChunk *c;
  TRACE("%p : stmGetInvariantsToCheck, head was %p",
        trec,
        trec -> invariants_to_check);

  ASSERT(trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));
  ASSERT(trec -> enclosing_trec == NO_TREC);

  lock_stm(trec);
  c = trec -> current_chunk;
  while (c != END_STM_CHUNK_LIST) {
    unsigned int i;
    for (i = 0; i < c -> next_entry_idx; i ++) {
      TRecEntry *e = &(c -> entries[i]);
      if (entry_is_update(e)) {
        StgTVar *s = e -> tvar;
        StgClosure *old = lock_tvar(trec, s);

        // Pick up any invariants on the TVar being updated
        // by entry "e"

        StgTVarWatchQueue *q;
        TRACE("%p : checking for invariants on %p", trec, s);
        for (q = s -> first_watch_queue_entry;
             q != END_STM_WATCH_QUEUE;
             q = q -> next_queue_entry) {
          if (watcher_is_invariant(q)) {
            StgBool found = FALSE;
            StgInvariantCheckQueue *q2;
            TRACE("%p : Touching invariant %p", trec, q -> closure);
            for (q2 = trec -> invariants_to_check;
                 q2 != END_INVARIANT_CHECK_QUEUE;
                 q2 = q2 -> next_queue_entry) {
              if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
                TRACE("%p : Already found %p", trec, q -> closure);
                found = TRUE;
                break;
              }
            }

            if (!found) {
              StgInvariantCheckQueue *q3;
              TRACE("%p : Not already found %p", trec, q -> closure);
              q3 = alloc_stg_invariant_check_queue(cap,
                                                   (StgAtomicInvariant*) q -> closure);
              q3 -> next_queue_entry = trec -> invariants_to_check;
              trec -> invariants_to_check = q3;
            }
          }
        }

        unlock_tvar(cap, trec, s, old, FALSE);
      }
    }
    c = c -> prev_chunk;
  }

  unlock_stm(trec);

  TRACE("%p : stmGetInvariantsToCheck, head now %p",
        trec,
        trec -> invariants_to_check);

  return (trec -> invariants_to_check);
}

/*......................................................................*/

StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
  int result;
  StgInt64 max_commits_at_start = max_commits;
  StgBool touched_invariants;
  StgBool use_read_phase;

  TRACE("%p : stmCommitTransaction()", trec);
  ASSERT (trec != NO_TREC);

  lock_stm(trec);

  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_CONDEMNED));

  // touched_invariants is true if we've written to a TVar with invariants
  // attached to it, or if we're trying to add a new invariant to the system.

  touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);

  // If we have touched invariants then (i) lock the invariant, and (ii) add
  // the invariant's read set to our own.  Step (i) is needed to serialize
  // concurrent transactions that attempt to make conflicting updates
  // to the invariant's trec (suppose it read from t1 and t2, and that one
  // concurrent transaction writes only to t1, and a second writes only to
  // t2).  Step (ii) is needed so that both transactions will lock t1 and t2
  // to gain access to their wait lists (and hence be able to unhook the
  // invariant from both tvars).

  if (touched_invariants) {
    StgInvariantCheckQueue *q = trec -> invariants_to_check;
    TRACE("%p : locking invariants", trec);
    while (q != END_INVARIANT_CHECK_QUEUE) {
      StgTRecHeader *inv_old_trec;
      StgAtomicInvariant *inv;
      TRACE("%p : locking invariant %p", trec, q -> invariant);
      inv = q -> invariant;
      if (!lock_inv(inv)) {
        TRACE("%p : failed to lock %p", trec, inv);
        trec -> state = TREC_CONDEMNED;
        break;
      }

      inv_old_trec = inv -> last_execution;
      if (inv_old_trec != NO_TREC) {
        StgTRecChunk *c = inv_old_trec -> current_chunk;
        while (c != END_STM_CHUNK_LIST) {
          unsigned int i;
          for (i = 0; i < c -> next_entry_idx; i ++) {
            TRecEntry *e = &(c -> entries[i]);
            TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
            merge_read_into (cap, trec, e -> tvar, e -> expected_value);
          }
          c = c -> prev_chunk;
        }
      }
      q = q -> next_queue_entry;
    }
    TRACE("%p : finished locking invariants", trec);
  }

  // Use a read-phase (i.e. don't lock TVars we've read but not updated) if
  // (i) the configuration lets us use a read phase, and (ii) we've not
  // touched or introduced any invariants.
  //
  // In principle we could extend the implementation to support a read-phase
  // and invariants, but it complicates the logic: the links between
  // invariants and TVars are managed by the TVar watch queues which are
  // protected by the TVar's locks.

  use_read_phase = ((config_use_read_phase) && (!touched_invariants));

  result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.
    ASSERT (trec -> state == TREC_ACTIVE);

    if (use_read_phase) {
      StgInt64 max_commits_at_end;
      StgInt64 max_concurrent_commits;
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
      TRACE("%p : read-check %s", trec, result ? "succeeded" : "failed");

      max_commits_at_end = max_commits;
      max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
                                (n_capabilities * TOKEN_BATCH_SIZE));
      if (((max_concurrent_commits >> 32) > 0) || shake()) {
        result = FALSE;
      }
    }

    if (result) {
      // We now know that all of the read-only locations held their expected
      // values at the end of the call to validate_and_acquire_ownership.
      // This forms the linearization point of the commit.

      // 1. If we have touched or introduced any invariants then unhook them
      //    from the TVars they depended on last time they were executed
      //    and hook them on the TVars that they now depend on.
      if (touched_invariants) {
        StgInvariantCheckQueue *q = trec -> invariants_to_check;
        while (q != END_INVARIANT_CHECK_QUEUE) {
          StgAtomicInvariant *inv = q -> invariant;
          if (inv -> last_execution != NO_TREC) {
            disconnect_invariant(cap, inv);
          }

          TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
          connect_invariant_to_trec(cap, inv, q -> my_execution);

          TRACE("%p : unlocking invariant %p", trec, inv);
          unlock_inv(inv);

          q = q -> next_queue_entry;
        }
      }

      // 2. Make the updates required by the transaction
      FOR_EACH_ENTRY(trec, e, {
        StgTVar *s;
        s = e -> tvar;
        if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
          // Either the entry is an update or we're not using a read phase:
          // write the value back to the TVar, unlocking it if necessary.

          ACQ_ASSERT(tvar_is_locked(s, trec));
          TRACE("%p : writing %p to %p, waking waiters", trec, e -> new_value, s);
          unpark_waiters_on(cap,s);
          IF_STM_FG_LOCKS({
            s -> num_updates ++;
          });
          unlock_tvar(cap, trec, s, e -> new_value, TRUE);
        }
        ACQ_ASSERT(!tvar_is_locked(s, trec));
      });
    } else {
      revert_ownership(cap, trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
  StgTRecHeader *et;
  int result;
  ASSERT (trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
  TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
  ASSERT ((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  et = trec -> enclosing_trec;
  result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.

    if (config_use_read_phase) {
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
    }
    if (result) {
      // We now know that all of the read-only locations held their expected
      // values at the end of the call to validate_and_acquire_ownership.
      // This forms the linearization point of the commit.

      TRACE("%p : read-check succeeded", trec);
      FOR_EACH_ENTRY(trec, e, {
        // Merge each entry into the enclosing transaction record, release all
        // locks.

        StgTVar *s;
        s = e -> tvar;
        if (entry_is_update(e)) {
          unlock_tvar(cap, trec, s, e -> expected_value, FALSE);
        }
        merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
        ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
      });
    } else {
      revert_ownership(cap, trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitNestedTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
  int result;
  TRACE("%p : stmWait(%p)", trec, tso);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
  if (result) {
    // The transaction is valid so far so we can actually start waiting.
    // (Otherwise the transaction was not valid and the thread will have to
    // retry it).

    // Put ourselves to sleep.  We retain locks on all the TVars involved
    // until we are sound asleep : (a) on the wait queues, (b) BlockedOnSTM
    // in the TSO, (c) TREC_WAITING in the Trec.
    build_watch_queue_entries_for_trec(cap, tso, trec);
    park_tso(tso);
    trec -> state = TREC_WAITING;

    // We haven't released ownership of the transaction yet.  The TSO
    // has been put on the wait queue for the TVars it is waiting for,
    // but we haven't yet tidied up the TSO's stack and made it safe
    // to wake up the TSO.  Therefore, we must wait until the TSO is
    // safe to wake up before we release ownership - when all is well,
    // the runtime will call stmWaitUnlock() below, with the same
    // TRec.

  } else {
    unlock_stm(trec);
    free_stg_trec_header(cap, trec);
  }

  TRACE("%p : stmWait(%p)=%d", trec, tso, result);
  return result;
}


void
stmWaitUnlock(Capability *cap, StgTRecHeader *trec) {
  revert_ownership(cap, trec, TRUE);
  unlock_stm(trec);
}

/*......................................................................*/

StgBool stmReWait(Capability *cap, StgTSO *tso) {
  int result;
  StgTRecHeader *trec = tso->trec;

  TRACE("%p : stmReWait", trec);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
  TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
  if (result) {
    // The transaction remains valid -- do nothing because it is already on
    // the wait queues
    ASSERT (trec -> state == TREC_WAITING);
    park_tso(tso);
    revert_ownership(cap, trec, TRUE);
  } else {
    // The transaction has become invalid.  We can now remove it from the
    // wait queues.
    if (trec -> state != TREC_CONDEMNED) {
      remove_watch_queue_entries_for_trec (cap, trec);
    }
    free_stg_trec_header(cap, trec);
  }
  unlock_stm(trec);

  TRACE("%p : stmReWait()=%d", trec, result);
  return result;
}

/*......................................................................*/

static StgClosure *read_current_value(StgTRecHeader *trec STG_UNUSED, StgTVar *tvar) {
  StgClosure *result;
  result = tvar -> current_value;

#if defined(STM_FG_LOCKS)
  while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info) {
    TRACE("%p : read_current_value(%p) saw %p", trec, tvar, result);
    result = tvar -> current_value;
  }
#endif

  TRACE("%p : read_current_value(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

StgClosure *stmReadTVar(Capability *cap,
                        StgTRecHeader *trec,
                        StgTVar *tvar) {
  StgTRecHeader *entry_in = NULL;
  StgClosure *result = NULL;
  TRecEntry *entry = NULL;
  TRACE("%p : stmReadTVar(%p)", trec, tvar);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> state == TREC_ACTIVE ||
          trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      result = entry -> new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = entry -> new_value;
      result = new_entry -> new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = current_value;
    result = current_value;
  }

  TRACE("%p : stmReadTVar(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

void stmWriteTVar(Capability *cap,
                  StgTRecHeader *trec,
                  StgTVar *tvar,
                  StgClosure *new_value) {

  StgTRecHeader *entry_in = NULL;
  TRecEntry *entry = NULL;
  TRACE("%p : stmWriteTVar(%p, %p)", trec, tvar, new_value);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> state == TREC_ACTIVE ||
          trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      entry -> new_value = new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = new_value;
  }

  TRACE("%p : stmWriteTVar done", trec);
}

/*......................................................................*/
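
/* ---------------------------------------------------------------------------
 * Illustrative sketch (not RTS code): the primops that implement
 * "atomically" drive the external entry points above roughly as follows;
 * exception handling and the retry/orElse machinery are omitted, and
 * "cap", "tso", "tv1" and "tv2" stand for the current capability, thread
 * and two TVars:
 *
 *   StgTRecHeader *t = stmStartTransaction(cap, NO_TREC);
 *   StgClosure *v = stmReadTVar(cap, t, tv1);   // log a read of tv1
 *   stmWriteTVar(cap, t, tv2, v);               // log a write to tv2
 *   if (!stmCommitTransaction(cap, t)) {
 *       // validation failed: the caller restarts the atomic block
 *   }
 *
 * A transaction that blocks on "retry" instead calls stmWait(cap, tso, t),
 * then stmWaitUnlock(cap, t) once the TSO is safely asleep; a wake-up
 * re-validates with stmReWait(cap, tso).
 * ------------------------------------------------------------------------- */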