Fix a bug in the handling of nested orElse
[ghc.git] / rts / STM.c
1 /* -----------------------------------------------------------------------------
2 * (c) The GHC Team 1998-2005
3 *
4 * STM implementation.
5 *
6 * Overview
7 * --------
8 *
9 * See the PPoPP 2005 paper "Composable memory transactions". In summary,
10 * each transaction has a TRec (transaction record) holding entries for each of the
11 * TVars (transactional variables) that it has accessed. Each entry records
12 * (a) the TVar, (b) the expected value seen in the TVar, (c) the new value that
13 * the transaction wants to write to the TVar, (d) during commit, the identity of
14 * the TRec that wrote the expected value.
15 *
16 * Separate TRecs are used for each level in a nest of transactions. This allows
17 * a nested transaction to be aborted without condemning its enclosing transactions.
18 * This is needed in the implementation of catchRetry. Note that the "expected value"
19 * in a nested transaction's TRec is the value expected to be *held in memory* if
20 * the transaction commits -- not the "new value" stored in one of the enclosing
21 * transactions. This means that validation can be done without searching through
22 * a nest of TRecs.
23 *
24 * Concurrency control
25 * -------------------
26 *
27 * Three different concurrency control schemes can be built according to the settings
28 * in STM.h:
29 *
30 * STM_UNIPROC assumes that the caller serialises invocations on the STM interface.
31 * In the Haskell RTS this means it is suitable only for non-THREADED_RTS builds.
32 *
33 * STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired during
34 * an invocation on the STM interface. Note that this does not mean that
35 * transactions are simply serialized -- the lock is only held *within* the
36 * implementation of stmCommitTransaction, stmWait etc.
37 *
38 * STM_FG_LOCKS uses fine-grained locking -- locking is done on a per-TVar basis
39 * and, when committing a transaction, no locks are acquired for TVars that have
40 * been read but not updated.
41 *
42 * Concurrency control is implemented in the functions:
43 *
44 * lock_stm
45 * unlock_stm
46 * lock_tvar / cond_lock_tvar
47 * unlock_tvar
48 *
49 * The choice between STM_UNIPROC / STM_CG_LOCK / STM_FG_LOCKS affects the
50 * implementation of these functions.
51 *
52 * lock_stm & unlock_stm are straightforward: they acquire a simple spin-lock
53 * in STM_CG_LOCK builds, and otherwise they are no-ops.
54 *
55 * lock_tvar / cond_lock_tvar and unlock_tvar are more complex because they
56 * have other effects (present in STM_UNIPROC and STM_CG_LOCK builds) as well
57 * as the actual business of manipulating a lock (present only in STM_FG_LOCKS
58 * builds). This is because locking a TVar is implemented by writing the lock
59 * holder's TRec into the TVar's current_value field:
60 *
61 * lock_tvar - lock a specified TVar (STM_FG_LOCKS only), returning the value
62 * it contained.
63 *
64 * cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
65 * contains a specified value. Return TRUE if this succeeds,
66 * FALSE otherwise.
67 *
68 * unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
69 * storing a specified value in place of the lock entry.
70 *
71 * Using these operations, the typical pattern of a commit/validate/wait operation
72 * is to (a) lock the STM, (b) lock all the TVars being updated, (c) check that
73 * the TVars that were only read from still contain their expected values,
74 * (d) release the locks on the TVars, writing updates to them in the case of a
75 * commit, (e) unlock the STM.
76 *
77 * Queues of waiting threads hang off the first_watch_queue_entry
78 * field of each TVar. This may only be manipulated when holding that
79 * TVar's lock. In particular, when a thread is putting itself to
80 * sleep, it mustn't release the TVar's lock until it has added itself
81 * to the wait queue and marked its TSO as BlockedOnSTM -- this makes
82 * sure that other threads will know to wake it.
83 *
84 * ---------------------------------------------------------------------------*/
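
/* Editor's sketch (illustrative, not part of the RTS): the commit pattern
 * (a)..(e) above, written against the helpers defined later in this file.
 * Error handling, the read-phase subtleties, and waking waiters are elided,
 * and "cap"/"trec" are assumed to be a valid Capability and top-level TRec.
 *
 *   lock_stm(trec);                                              // (a)
 *   FOR_EACH_ENTRY(trec, e, {                                    // (b)
 *     if (entry_is_update(e)) {
 *       cond_lock_tvar(trec, e -> tvar, e -> expected_value);
 *     }
 *   });
 *   ok = check_read_only(trec);                                  // (c)
 *   FOR_EACH_ENTRY(trec, e, {                                    // (d)
 *     if (entry_is_update(e)) {
 *       unlock_tvar(cap, trec, e -> tvar, e -> new_value, TRUE);
 *     }
 *   });
 *   unlock_stm(trec);                                            // (e)
 */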
85
86 #include "PosixSource.h"
87 #include "Rts.h"
88
89 #include "RtsUtils.h"
90 #include "Schedule.h"
91 #include "STM.h"
92 #include "Trace.h"
93 #include "Threads.h"
94 #include "sm/Storage.h"
95
96 #include <stdio.h>
97
98 #define TRUE 1
99 #define FALSE 0
100
101 // ACQ_ASSERT is used for assertions which are only required for
102 // THREADED_RTS builds with fine-grained locking.
103
104 #if defined(STM_FG_LOCKS)
105 #define ACQ_ASSERT(_X) ASSERT(_X)
106 #define NACQ_ASSERT(_X) /*Nothing*/
107 #else
108 #define ACQ_ASSERT(_X) /*Nothing*/
109 #define NACQ_ASSERT(_X) ASSERT(_X)
110 #endif
111
112 /*......................................................................*/
113
114 // If SHAKE is defined then validation will sometimes spuriously fail. This helps
115 // test unusual code paths if genuine contention is rare.
116
117 #define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)
118
119 #ifdef SHAKE
120 static const int do_shake = TRUE;
121 #else
122 static const int do_shake = FALSE;
123 #endif
124 static int shake_ctr = 0;
125 static int shake_lim = 1;
126
127 static int shake(void) {
128 if (do_shake) {
129 if (((shake_ctr++) % shake_lim) == 0) {
130 shake_ctr = 1;
131 shake_lim ++;
132 return TRUE;
133 }
134 return FALSE;
135 } else {
136 return FALSE;
137 }
138 }
139
140 /*......................................................................*/
141
142 // Helper macros for iterating over entries within a transaction
143 // record
144
145 #define FOR_EACH_ENTRY(_t,_x,CODE) do { \
146 StgTRecHeader *__t = (_t); \
147 StgTRecChunk *__c = __t -> current_chunk; \
148 StgWord __limit = __c -> next_entry_idx; \
149 TRACE("%p : FOR_EACH_ENTRY, current_chunk=%p limit=%ld", __t, __c, __limit); \
150 while (__c != END_STM_CHUNK_LIST) { \
151 StgWord __i; \
152 for (__i = 0; __i < __limit; __i ++) { \
153 TRecEntry *_x = &(__c -> entries[__i]); \
154 do { CODE } while (0); \
155 } \
156 __c = __c -> prev_chunk; \
157 __limit = TREC_CHUNK_NUM_ENTRIES; \
158 } \
159 exit_for_each: \
160 if (FALSE) goto exit_for_each; \
161 } while (0)
162
163 #define BREAK_FOR_EACH goto exit_for_each
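
/* Example use of these macros (editor's sketch): count the update entries
 * in a TRec. BREAK_FOR_EACH may be used to leave the loop early, but only
 * from lexically within the CODE argument.
 *
 *   int n_updates = 0;
 *   FOR_EACH_ENTRY(trec, e, {
 *     if (e -> expected_value != e -> new_value) {
 *       n_updates ++;
 *     }
 *   });
 */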
164
165 /*......................................................................*/
166
167 // if REUSE_MEMORY is defined then attempt to re-use descriptors, log chunks,
168 // and wait queue entries without GC
169
170 #define REUSE_MEMORY
171
172 /*......................................................................*/
173
174 #define IF_STM_UNIPROC(__X) do { } while (0)
175 #define IF_STM_CG_LOCK(__X) do { } while (0)
176 #define IF_STM_FG_LOCKS(__X) do { } while (0)
177
178 #if defined(STM_UNIPROC)
179 #undef IF_STM_UNIPROC
180 #define IF_STM_UNIPROC(__X) do { __X } while (0)
181 static const StgBool config_use_read_phase = FALSE;
182
183 static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
184 TRACE("%p : lock_stm()", trec);
185 }
186
187 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
188 TRACE("%p : unlock_stm()", trec);
189 }
190
191 static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
192 StgTVar *s STG_UNUSED) {
193 StgClosure *result;
194 TRACE("%p : lock_tvar(%p)", trec, s);
195 result = s -> current_value;
196 return result;
197 }
198
199 static void unlock_tvar(Capability *cap,
200 StgTRecHeader *trec STG_UNUSED,
201 StgTVar *s,
202 StgClosure *c,
203 StgBool force_update) {
204 TRACE("%p : unlock_tvar(%p)", trec, s);
205 if (force_update) {
206 s -> current_value = c;
207 dirty_TVAR(cap,s);
208 }
209 }
210
211 static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
212 StgTVar *s STG_UNUSED,
213 StgClosure *expected) {
214 StgClosure *result;
215 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
216 result = s -> current_value;
217 TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
218 return (result == expected);
219 }
220
221 static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
222 // Nothing -- uniproc
223 return TRUE;
224 }
225
226 static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
227 // Nothing -- uniproc
228 }
229 #endif
230
231 #if defined(STM_CG_LOCK) /*........................................*/
232
233 #undef IF_STM_CG_LOCK
234 #define IF_STM_CG_LOCK(__X) do { __X } while (0)
235 static const StgBool config_use_read_phase = FALSE;
236 static volatile StgTRecHeader *smp_locked = NULL;
237
238 static void lock_stm(StgTRecHeader *trec) {
239 while (cas(&smp_locked, NULL, trec) != NULL) { }
240 TRACE("%p : lock_stm()", trec);
241 }
242
243 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
244 TRACE("%p : unlock_stm()", trec);
245 ASSERT (smp_locked == trec);
246   smp_locked = NULL;
247 }
248
249 static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
250 StgTVar *s STG_UNUSED) {
251 StgClosure *result;
252 TRACE("%p : lock_tvar(%p)", trec, s);
253 ASSERT (smp_locked == trec);
254 result = s -> current_value;
255 return result;
256 }
257
258 static void unlock_tvar(Capability *cap,
259 StgTRecHeader *trec STG_UNUSED,
260 StgTVar *s,
261 StgClosure *c,
262 StgBool force_update) {
263 TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
264 ASSERT (smp_locked == trec);
265 if (force_update) {
266 s -> current_value = c;
267 dirty_TVAR(cap,s);
268 }
269 }
270
271 static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
272 StgTVar *s STG_UNUSED,
273 StgClosure *expected) {
274 StgClosure *result;
275 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
276 ASSERT (smp_locked == trec);
277 result = s -> current_value;
278 TRACE("%p : %d", result ? "success" : "failure");
279 return (result == expected);
280 }
281
282 static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
283 // Nothing -- protected by STM lock
284 return TRUE;
285 }
286
287 static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
288 // Nothing -- protected by STM lock
289 }
290 #endif
291
292 #if defined(STM_FG_LOCKS) /*...................................*/
293
294 #undef IF_STM_FG_LOCKS
295 #define IF_STM_FG_LOCKS(__X) do { __X } while (0)
296 static const StgBool config_use_read_phase = TRUE;
297
298 static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
299 TRACE("%p : lock_stm()", trec);
300 }
301
302 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
303 TRACE("%p : unlock_stm()", trec);
304 }
305
306 static StgClosure *lock_tvar(StgTRecHeader *trec,
307 StgTVar *s STG_UNUSED) {
308 StgClosure *result;
309 TRACE("%p : lock_tvar(%p)", trec, s);
310 do {
311 do {
312 result = s -> current_value;
313 } while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info);
314 } while (cas((void *)&(s -> current_value),
315 (StgWord)result, (StgWord)trec) != (StgWord)result);
316 return result;
317 }
318
319 static void unlock_tvar(Capability *cap,
320 StgTRecHeader *trec STG_UNUSED,
321 StgTVar *s,
322 StgClosure *c,
323 StgBool force_update STG_UNUSED) {
324 TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
325 ASSERT(s -> current_value == (StgClosure *)trec);
326 s -> current_value = c;
327 dirty_TVAR(cap,s);
328 }
329
330 static StgBool cond_lock_tvar(StgTRecHeader *trec,
331 StgTVar *s,
332 StgClosure *expected) {
333 StgClosure *result;
334 StgWord w;
335 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
336 w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
337 result = (StgClosure *)w;
338 TRACE("%p : %s", trec, result ? "success" : "failure");
339 return (result == expected);
340 }
341
342 static StgBool lock_inv(StgAtomicInvariant *inv) {
343 return (cas(&(inv -> lock), 0, 1) == 0);
344 }
345
346 static void unlock_inv(StgAtomicInvariant *inv) {
347 ASSERT(inv -> lock == 1);
348 inv -> lock = 0;
349 }
350 #endif
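
/* Editor's note: under STM_FG_LOCKS a TVar is locked by CAS-ing the owning
 * TRec's header into its current_value field, so a locked TVar is
 * recognisable by its value's info pointer. A sketch of a reader spinning
 * until it sees an unlocked value (read_current_value, later in this file,
 * does exactly this):
 *
 *   StgClosure *v;
 *   do {
 *     v = s -> current_value;
 *   } while (GET_INFO(UNTAG_CLOSURE(v)) == &stg_TREC_HEADER_info);
 */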
351
352 /*......................................................................*/
353
354 static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
355 StgClosure *c = q -> closure;
356 StgInfoTable *info = get_itbl(c);
357 return (info -> type) == TSO;
358 }
359
360 static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
361 StgClosure *c = q -> closure;
362 return (c->header.info == &stg_ATOMIC_INVARIANT_info);
363 }
364
365 /*......................................................................*/
366
367 // Helper functions for thread blocking and unblocking
368
369 static void park_tso(StgTSO *tso) {
370 ASSERT(tso -> why_blocked == NotBlocked);
371 tso -> why_blocked = BlockedOnSTM;
372 tso -> block_info.closure = (StgClosure *) END_TSO_QUEUE;
373 TRACE("park_tso on tso=%p", tso);
374 }
375
376 static void unpark_tso(Capability *cap, StgTSO *tso) {
377 // We will continue unparking threads while they remain on one of the wait
378     // queues: it's up to the thread itself to remove itself from the wait queues
379 // if it decides to do so when it is scheduled.
380
381 // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
382 // to avoid multiple CPUs unblocking the same TSO, and also to
383 // synchronise with throwTo().
384 lockTSO(tso);
385 if (tso -> why_blocked == BlockedOnSTM) {
386 TRACE("unpark_tso on tso=%p", tso);
387 tryWakeupThread(cap,tso);
388 } else {
389 TRACE("spurious unpark_tso on tso=%p", tso);
390 }
391 unlockTSO(tso);
392 }
393
394 static void unpark_waiters_on(Capability *cap, StgTVar *s) {
395 StgTVarWatchQueue *q;
396 StgTVarWatchQueue *trail;
397 TRACE("unpark_waiters_on tvar=%p", s);
398 // unblock TSOs in reverse order, to be a bit fairer (#2319)
399 for (q = s -> first_watch_queue_entry, trail = q;
400 q != END_STM_WATCH_QUEUE;
401 q = q -> next_queue_entry) {
402 trail = q;
403 }
404 q = trail;
405 for (;
406 q != END_STM_WATCH_QUEUE;
407 q = q -> prev_queue_entry) {
408 if (watcher_is_tso(q)) {
409 unpark_tso(cap, (StgTSO *)(q -> closure));
410 }
411 }
412 }
413
414 /*......................................................................*/
415
416 // Helper functions for downstream allocation and initialization
417
418 static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
419 StgAtomicInvariant *invariant) {
420 StgInvariantCheckQueue *result;
421 result = (StgInvariantCheckQueue *)allocate(cap, sizeofW(StgInvariantCheckQueue));
422 SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
423 result -> invariant = invariant;
424 result -> my_execution = NO_TREC;
425 return result;
426 }
427
428 static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
429 StgClosure *closure) {
430 StgTVarWatchQueue *result;
431 result = (StgTVarWatchQueue *)allocate(cap, sizeofW(StgTVarWatchQueue));
432 SET_HDR (result, &stg_TVAR_WATCH_QUEUE_info, CCS_SYSTEM);
433 result -> closure = closure;
434 return result;
435 }
436
437 static StgTRecChunk *new_stg_trec_chunk(Capability *cap) {
438 StgTRecChunk *result;
439 result = (StgTRecChunk *)allocate(cap, sizeofW(StgTRecChunk));
440 SET_HDR (result, &stg_TREC_CHUNK_info, CCS_SYSTEM);
441 result -> prev_chunk = END_STM_CHUNK_LIST;
442 result -> next_entry_idx = 0;
443 return result;
444 }
445
446 static StgTRecHeader *new_stg_trec_header(Capability *cap,
447 StgTRecHeader *enclosing_trec) {
448 StgTRecHeader *result;
449 result = (StgTRecHeader *) allocate(cap, sizeofW(StgTRecHeader));
450 SET_HDR (result, &stg_TREC_HEADER_info, CCS_SYSTEM);
451
452 result -> enclosing_trec = enclosing_trec;
453 result -> current_chunk = new_stg_trec_chunk(cap);
454 result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
455
456 if (enclosing_trec == NO_TREC) {
457 result -> state = TREC_ACTIVE;
458 } else {
459 ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
460 enclosing_trec -> state == TREC_CONDEMNED);
461 result -> state = enclosing_trec -> state;
462 }
463
464 return result;
465 }
466
467 /*......................................................................*/
468
469 // Allocation / deallocation functions that retain per-capability lists
470 // of closures that can be re-used
471
472 static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
473 StgAtomicInvariant *invariant) {
474 StgInvariantCheckQueue *result = NULL;
475 if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
476 result = new_stg_invariant_check_queue(cap, invariant);
477 } else {
478 result = cap -> free_invariant_check_queues;
479 result -> invariant = invariant;
480 result -> my_execution = NO_TREC;
481 cap -> free_invariant_check_queues = result -> next_queue_entry;
482 }
483 return result;
484 }
485
486 static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
487 StgClosure *closure) {
488 StgTVarWatchQueue *result = NULL;
489 if (cap -> free_tvar_watch_queues == END_STM_WATCH_QUEUE) {
490 result = new_stg_tvar_watch_queue(cap, closure);
491 } else {
492 result = cap -> free_tvar_watch_queues;
493 result -> closure = closure;
494 cap -> free_tvar_watch_queues = result -> next_queue_entry;
495 }
496 return result;
497 }
498
499 static void free_stg_tvar_watch_queue(Capability *cap,
500 StgTVarWatchQueue *wq) {
501 #if defined(REUSE_MEMORY)
502 wq -> next_queue_entry = cap -> free_tvar_watch_queues;
503 cap -> free_tvar_watch_queues = wq;
504 #endif
505 }
506
507 static StgTRecChunk *alloc_stg_trec_chunk(Capability *cap) {
508 StgTRecChunk *result = NULL;
509 if (cap -> free_trec_chunks == END_STM_CHUNK_LIST) {
510 result = new_stg_trec_chunk(cap);
511 } else {
512 result = cap -> free_trec_chunks;
513 cap -> free_trec_chunks = result -> prev_chunk;
514 result -> prev_chunk = END_STM_CHUNK_LIST;
515 result -> next_entry_idx = 0;
516 }
517 return result;
518 }
519
520 static void free_stg_trec_chunk(Capability *cap,
521 StgTRecChunk *c) {
522 #if defined(REUSE_MEMORY)
523 c -> prev_chunk = cap -> free_trec_chunks;
524 cap -> free_trec_chunks = c;
525 #endif
526 }
527
528 static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
529 StgTRecHeader *enclosing_trec) {
530 StgTRecHeader *result = NULL;
531 if (cap -> free_trec_headers == NO_TREC) {
532 result = new_stg_trec_header(cap, enclosing_trec);
533 } else {
534 result = cap -> free_trec_headers;
535 cap -> free_trec_headers = result -> enclosing_trec;
536 result -> enclosing_trec = enclosing_trec;
537 result -> current_chunk -> next_entry_idx = 0;
538 result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
539 if (enclosing_trec == NO_TREC) {
540 result -> state = TREC_ACTIVE;
541 } else {
542 ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
543 enclosing_trec -> state == TREC_CONDEMNED);
544 result -> state = enclosing_trec -> state;
545 }
546 }
547 return result;
548 }
549
550 static void free_stg_trec_header(Capability *cap,
551 StgTRecHeader *trec) {
552 #if defined(REUSE_MEMORY)
553 StgTRecChunk *chunk = trec -> current_chunk -> prev_chunk;
554 while (chunk != END_STM_CHUNK_LIST) {
555 StgTRecChunk *prev_chunk = chunk -> prev_chunk;
556 free_stg_trec_chunk(cap, chunk);
557 chunk = prev_chunk;
558 }
559 trec -> current_chunk -> prev_chunk = END_STM_CHUNK_LIST;
560 trec -> enclosing_trec = cap -> free_trec_headers;
561 cap -> free_trec_headers = trec;
562 #endif
563 }
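
/* Editor's note: the alloc_ and free_ helpers above maintain simple
 * per-capability free lists when REUSE_MEMORY is defined; a freed closure is
 * threaded onto the capability's list and handed out again by the next
 * matching alloc_ call. stmPreGCHook (later in this file) empties the lists
 * so the GC can reclaim them. The intended pairing, sketched:
 *
 *   StgTRecHeader *t = alloc_stg_trec_header(cap, NO_TREC);
 *   // ... use t ...
 *   free_stg_trec_header(cap, t);  // t goes onto cap -> free_trec_headers
 */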
564
565 /*......................................................................*/
566
567 // Helper functions for managing waiting lists
568
569 static void build_watch_queue_entries_for_trec(Capability *cap,
570 StgTSO *tso,
571 StgTRecHeader *trec) {
572 ASSERT(trec != NO_TREC);
573 ASSERT(trec -> enclosing_trec == NO_TREC);
574 ASSERT(trec -> state == TREC_ACTIVE);
575
576 TRACE("%p : build_watch_queue_entries_for_trec()", trec);
577
578 FOR_EACH_ENTRY(trec, e, {
579 StgTVar *s;
580 StgTVarWatchQueue *q;
581 StgTVarWatchQueue *fq;
582 s = e -> tvar;
583 TRACE("%p : adding tso=%p to watch queue for tvar=%p", trec, tso, s);
584 ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
585 NACQ_ASSERT(s -> current_value == e -> expected_value);
586 fq = s -> first_watch_queue_entry;
587 q = alloc_stg_tvar_watch_queue(cap, (StgClosure*) tso);
588 q -> next_queue_entry = fq;
589 q -> prev_queue_entry = END_STM_WATCH_QUEUE;
590 if (fq != END_STM_WATCH_QUEUE) {
591 fq -> prev_queue_entry = q;
592 }
593 s -> first_watch_queue_entry = q;
594 e -> new_value = (StgClosure *) q;
595 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
596 });
597 }
598
599 static void remove_watch_queue_entries_for_trec(Capability *cap,
600 StgTRecHeader *trec) {
601 ASSERT(trec != NO_TREC);
602 ASSERT(trec -> enclosing_trec == NO_TREC);
603 ASSERT(trec -> state == TREC_WAITING ||
604 trec -> state == TREC_CONDEMNED);
605
606 TRACE("%p : remove_watch_queue_entries_for_trec()", trec);
607
608 FOR_EACH_ENTRY(trec, e, {
609 StgTVar *s;
610 StgTVarWatchQueue *pq;
611 StgTVarWatchQueue *nq;
612 StgTVarWatchQueue *q;
613 StgClosure *saw;
614 s = e -> tvar;
615 saw = lock_tvar(trec, s);
616 q = (StgTVarWatchQueue *) (e -> new_value);
617 TRACE("%p : removing tso=%p from watch queue for tvar=%p",
618 trec,
619 q -> closure,
620 s);
621 ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
622 nq = q -> next_queue_entry;
623 pq = q -> prev_queue_entry;
624 if (nq != END_STM_WATCH_QUEUE) {
625 nq -> prev_queue_entry = pq;
626 }
627 if (pq != END_STM_WATCH_QUEUE) {
628 pq -> next_queue_entry = nq;
629 } else {
630 ASSERT (s -> first_watch_queue_entry == q);
631 s -> first_watch_queue_entry = nq;
632 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
633 }
634 free_stg_tvar_watch_queue(cap, q);
635 unlock_tvar(cap, trec, s, saw, FALSE);
636 });
637 }
638
639 /*......................................................................*/
640
641 static TRecEntry *get_new_entry(Capability *cap,
642 StgTRecHeader *t) {
643 TRecEntry *result;
644 StgTRecChunk *c;
645 int i;
646
647 c = t -> current_chunk;
648 i = c -> next_entry_idx;
649 ASSERT(c != END_STM_CHUNK_LIST);
650
651 if (i < TREC_CHUNK_NUM_ENTRIES) {
652 // Continue to use current chunk
653 result = &(c -> entries[i]);
654 c -> next_entry_idx ++;
655 } else {
656 // Current chunk is full: allocate a fresh one
657 StgTRecChunk *nc;
658 nc = alloc_stg_trec_chunk(cap);
659 nc -> prev_chunk = c;
660 nc -> next_entry_idx = 1;
661 t -> current_chunk = nc;
662 result = &(nc -> entries[0]);
663 }
664
665 return result;
666 }
667
668 /*......................................................................*/
669
670 static void merge_update_into(Capability *cap,
671 StgTRecHeader *t,
672 StgTVar *tvar,
673 StgClosure *expected_value,
674 StgClosure *new_value) {
675 int found;
676
677 // Look for an entry in this trec
678 found = FALSE;
679 FOR_EACH_ENTRY(t, e, {
680 StgTVar *s;
681 s = e -> tvar;
682 if (s == tvar) {
683 found = TRUE;
684 if (e -> expected_value != expected_value) {
685 // Must abort if the two entries start from different values
686 TRACE("%p : update entries inconsistent at %p (%p vs %p)",
687 t, tvar, e -> expected_value, expected_value);
688 t -> state = TREC_CONDEMNED;
689 }
690 e -> new_value = new_value;
691 BREAK_FOR_EACH;
692 }
693 });
694
695 if (!found) {
696 // No entry so far in this trec
697 TRecEntry *ne;
698 ne = get_new_entry(cap, t);
699 ne -> tvar = tvar;
700 ne -> expected_value = expected_value;
701 ne -> new_value = new_value;
702 }
703 }
704
705 /*......................................................................*/
706
707 static void merge_read_into(Capability *cap,
708 StgTRecHeader *t,
709 StgTVar *tvar,
710 StgClosure *expected_value) {
711 int found;
712
713 // Look for an entry in this trec
714 found = FALSE;
715 FOR_EACH_ENTRY(t, e, {
716 StgTVar *s;
717 s = e -> tvar;
718 if (s == tvar) {
719 found = TRUE;
720 if (e -> expected_value != expected_value) {
721 // Must abort if the two entries start from different values
722 TRACE("%p : read entries inconsistent at %p (%p vs %p)",
723 t, tvar, e -> expected_value, expected_value);
724 t -> state = TREC_CONDEMNED;
725 }
726 BREAK_FOR_EACH;
727 }
728 });
729
730 if (!found) {
731 // No entry so far in this trec
732 TRecEntry *ne;
733 ne = get_new_entry(cap, t);
734 ne -> tvar = tvar;
735 ne -> expected_value = expected_value;
736 ne -> new_value = expected_value;
737 }
738 }
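
/* Editor's sketch of the merge semantics above, for a parent trec "pt" and
 * TVar "tv" with hypothetical values v0, v1, v2 (v2 != v0):
 *
 *   merge_update_into(cap, pt, tv, v0, v1); // entry: expected=v0, new=v1
 *   merge_read_into(cap, pt, tv, v0);       // same entry found; unchanged
 *   merge_read_into(cap, pt, tv, v2);       // inconsistent expected values:
 *                                           // pt -> state = TREC_CONDEMNED
 */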
739
740 /*......................................................................*/
741
742 static StgBool entry_is_update(TRecEntry *e) {
743 StgBool result;
744 result = (e -> expected_value != e -> new_value);
745 return result;
746 }
747
748 #if defined(STM_FG_LOCKS)
749 static StgBool entry_is_read_only(TRecEntry *e) {
750 StgBool result;
751 result = (e -> expected_value == e -> new_value);
752 return result;
753 }
754
755 static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
756 StgClosure *c;
757 StgBool result;
758 c = s -> current_value;
759 result = (c == (StgClosure *) h);
760 return result;
761 }
762 #endif
763
764 // revert_ownership : release a lock on a TVar, storing back
765 // the value that it held when the lock was acquired. "revert_all"
766 // is set in stmWait and stmReWait when we acquired locks on all of
767 // the TVars involved. "revert_all" is not set in commit operations
768 // where we don't lock TVars that have been read from but not updated.
769
770 static void revert_ownership(Capability *cap STG_UNUSED,
771 StgTRecHeader *trec STG_UNUSED,
772 StgBool revert_all STG_UNUSED) {
773 #if defined(STM_FG_LOCKS)
774 FOR_EACH_ENTRY(trec, e, {
775 if (revert_all || entry_is_update(e)) {
776 StgTVar *s;
777 s = e -> tvar;
778 if (tvar_is_locked(s, trec)) {
779 unlock_tvar(cap, trec, s, e -> expected_value, TRUE);
780 }
781 }
782 });
783 #endif
784 }
785
786 /*......................................................................*/
787
788 // validate_and_acquire_ownership : this performs the twin functions
789 // of checking that the TVars referred to by entries in trec hold the
790 // expected values and:
791 //
792 // - locking the TVar (on updated TVars during commit, or all TVars
793 // during wait)
794 //
795 // - recording the identity of the TRec who wrote the value seen in the
796 // TVar (on non-updated TVars during commit). These values are
797 // stashed in the TRec entries and are then checked in check_read_only
798 // to ensure that an atomic snapshot of all of these locations has been
799 // seen.
800
801 static StgBool validate_and_acquire_ownership (Capability *cap,
802 StgTRecHeader *trec,
803 int acquire_all,
804 int retain_ownership) {
805 StgBool result;
806
807 if (shake()) {
808 TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
809 return FALSE;
810 }
811
812 ASSERT ((trec -> state == TREC_ACTIVE) ||
813 (trec -> state == TREC_WAITING) ||
814 (trec -> state == TREC_CONDEMNED));
815 result = !((trec -> state) == TREC_CONDEMNED);
816 if (result) {
817 FOR_EACH_ENTRY(trec, e, {
818 StgTVar *s;
819 s = e -> tvar;
820 if (acquire_all || entry_is_update(e)) {
821 TRACE("%p : trying to acquire %p", trec, s);
822 if (!cond_lock_tvar(trec, s, e -> expected_value)) {
823 TRACE("%p : failed to acquire %p", trec, s);
824 result = FALSE;
825 BREAK_FOR_EACH;
826 }
827 } else {
828 ASSERT(config_use_read_phase);
829 IF_STM_FG_LOCKS({
830 TRACE("%p : will need to check %p", trec, s);
831 if (s -> current_value != e -> expected_value) {
832 TRACE("%p : doesn't match", trec);
833 result = FALSE;
834 BREAK_FOR_EACH;
835 }
836 e -> num_updates = s -> num_updates;
837 if (s -> current_value != e -> expected_value) {
838 TRACE("%p : doesn't match (race)", trec);
839 result = FALSE;
840 BREAK_FOR_EACH;
841 } else {
842 TRACE("%p : need to check version %ld", trec, e -> num_updates);
843 }
844 });
845 }
846 });
847 }
848
849 if ((!result) || (!retain_ownership)) {
850 revert_ownership(cap, trec, acquire_all);
851 }
852
853 return result;
854 }
855
856 // check_read_only : check that we've seen an atomic snapshot of the
857 // non-updated TVars accessed by a trec. This checks that the last TRec to
858 // commit an update to the TVar is unchanged since the value was stashed in
859 // validate_and_acquire_ownership. If no update is seen to any TVar then
860 // all of them contained their expected values at the start of the call to
861 // check_read_only.
862 //
863 // The paper "Concurrent programming without locks" (under submission) and
864 // Keir Fraser's PhD dissertation "Practical lock-free programming" discuss
865 // this kind of algorithm.
866
867 static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
868 StgBool result = TRUE;
869
870 ASSERT (config_use_read_phase);
871 IF_STM_FG_LOCKS({
872 FOR_EACH_ENTRY(trec, e, {
873 StgTVar *s;
874 s = e -> tvar;
875 if (entry_is_read_only(e)) {
876 TRACE("%p : check_read_only for TVar %p, saw %ld", trec, s, e -> num_updates);
877 if (s -> num_updates != e -> num_updates) {
878 // ||s -> current_value != e -> expected_value) {
879 TRACE("%p : mismatch", trec);
880 result = FALSE;
881 BREAK_FOR_EACH;
882 }
883 }
884 });
885 });
886
887 return result;
888 }
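
/* Editor's note: the read-phase protocol, in outline. During
 * validate_and_acquire_ownership each read-only entry samples the TVar's
 * version, with the value re-checked on either side of the sample so that a
 * commit racing with the sample is detected; check_read_only then only needs
 * to compare versions. (fail() below is shorthand for marking the validation
 * as failed.)
 *
 *   if (s -> current_value != e -> expected_value) fail();  // value check
 *   e -> num_updates = s -> num_updates;                    // sample version
 *   if (s -> current_value != e -> expected_value) fail();  // re-check race
 *   // ... later, in check_read_only:
 *   if (s -> num_updates != e -> num_updates) fail();       // version check
 */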
889
890
891 /************************************************************************/
892
893 void stmPreGCHook (Capability *cap) {
894 lock_stm(NO_TREC);
895 TRACE("stmPreGCHook");
896 cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
897 cap->free_trec_chunks = END_STM_CHUNK_LIST;
898 cap->free_trec_headers = NO_TREC;
899 unlock_stm(NO_TREC);
900 }
901
902 /************************************************************************/
903
904 // check_read_only relies on version numbers held in TVars' "num_updates"
905 * fields not wrapping around while a transaction is being committed. The version
906 * number is incremented each time an update is committed to the TVar.
907 // This is unlikely to wrap around when 32-bit integers are used for the counts,
908 // but to ensure correctness we maintain a shared count on the maximum
909 // number of commit operations that may occur and check that this has
910 // not increased by more than 2^32 during a commit.
911
912 #define TOKEN_BATCH_SIZE 1024
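
/* Worked example (editor's note, with hypothetical numbers): suppose
 * n_capabilities = 8. A committing transaction that observes max_commits
 * grow by D during its read phase concludes that at most
 * D + 8 * TOKEN_BATCH_SIZE = D + 8192 commits ran concurrently; the commit
 * is failed (and retried by the caller) only if that bound reaches 2^32.
 */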
913
914 static volatile StgInt64 max_commits = 0;
915
916 #if defined(THREADED_RTS)
917 static volatile StgBool token_locked = FALSE;
918
919 static void getTokenBatch(Capability *cap) {
920 while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
921 max_commits += TOKEN_BATCH_SIZE;
922 TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
923 cap -> transaction_tokens = TOKEN_BATCH_SIZE;
924 token_locked = FALSE;
925 }
926
927 static void getToken(Capability *cap) {
928 if (cap -> transaction_tokens == 0) {
929 getTokenBatch(cap);
930 }
931 cap -> transaction_tokens --;
932 }
933 #else
934 static void getToken(Capability *cap STG_UNUSED) {
935 // Nothing
936 }
937 #endif
938
939 /*......................................................................*/
940
941 StgTRecHeader *stmStartTransaction(Capability *cap,
942 StgTRecHeader *outer) {
943 StgTRecHeader *t;
944 TRACE("%p : stmStartTransaction with %d tokens",
945 outer,
946 cap -> transaction_tokens);
947
948 getToken(cap);
949
950 t = alloc_stg_trec_header(cap, outer);
951 TRACE("%p : stmStartTransaction()=%p", outer, t);
952 return t;
953 }
954
955 /*......................................................................*/
956
957 void stmAbortTransaction(Capability *cap,
958 StgTRecHeader *trec) {
959 StgTRecHeader *et;
960 TRACE("%p : stmAbortTransaction", trec);
961 ASSERT (trec != NO_TREC);
962 ASSERT ((trec -> state == TREC_ACTIVE) ||
963 (trec -> state == TREC_WAITING) ||
964 (trec -> state == TREC_CONDEMNED));
965
966 lock_stm(trec);
967
968 et = trec -> enclosing_trec;
969 if (et == NO_TREC) {
970 // We're a top-level transaction: remove any watch queue entries that
971 // we may have.
972 TRACE("%p : aborting top-level transaction", trec);
973
974 if (trec -> state == TREC_WAITING) {
975 ASSERT (trec -> enclosing_trec == NO_TREC);
976 TRACE("%p : stmAbortTransaction aborting waiting transaction", trec);
977 remove_watch_queue_entries_for_trec(cap, trec);
978 }
979
980 } else {
981 // We're a nested transaction: merge our read set into our parent's
982 TRACE("%p : retaining read-set into parent %p", trec, et);
983
984 FOR_EACH_ENTRY(trec, e, {
985 StgTVar *s = e -> tvar;
986 merge_read_into(cap, et, s, e -> expected_value);
987 });
988 }
989
990 trec -> state = TREC_ABORTED;
991 unlock_stm(trec);
992
993 TRACE("%p : stmAbortTransaction done", trec);
994 }
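
/* Editor's note: merging the read set on abort is what keeps nested orElse
 * correct. In, say,
 *
 *   (readTVar t >> retry) `orElse` return ()
 *
 * the left branch aborts, but the choice of the right branch depended on the
 * value read from t; merging the aborted branch's expected values into the
 * parent ensures they are still validated when the parent commits (see also
 * the example in stmCommitNestedTransaction below).
 */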
995
996 /*......................................................................*/
997
998 void stmFreeAbortedTRec(Capability *cap,
999 StgTRecHeader *trec) {
1000 TRACE("%p : stmFreeAbortedTRec", trec);
1001 ASSERT (trec != NO_TREC);
1002 ASSERT ((trec -> state == TREC_CONDEMNED) ||
1003 (trec -> state == TREC_ABORTED));
1004
1005 free_stg_trec_header(cap, trec);
1006
1007 TRACE("%p : stmFreeAbortedTRec done", trec);
1008 }
1009
1010 /*......................................................................*/
1011
1012 void stmCondemnTransaction(Capability *cap,
1013 StgTRecHeader *trec) {
1014 TRACE("%p : stmCondemnTransaction", trec);
1015 ASSERT (trec != NO_TREC);
1016 ASSERT ((trec -> state == TREC_ACTIVE) ||
1017 (trec -> state == TREC_WAITING) ||
1018 (trec -> state == TREC_CONDEMNED));
1019
1020 lock_stm(trec);
1021 if (trec -> state == TREC_WAITING) {
1022 ASSERT (trec -> enclosing_trec == NO_TREC);
1023 TRACE("%p : stmCondemnTransaction condemning waiting transaction", trec);
1024 remove_watch_queue_entries_for_trec(cap, trec);
1025 }
1026 trec -> state = TREC_CONDEMNED;
1027 unlock_stm(trec);
1028
1029 TRACE("%p : stmCondemnTransaction done", trec);
1030 }
1031
1032 /*......................................................................*/
1033
1034 StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
1035 StgTRecHeader *t;
1036 StgBool result;
1037
1038 TRACE("%p : stmValidateNestOfTransactions", trec);
1039 ASSERT(trec != NO_TREC);
1040 ASSERT((trec -> state == TREC_ACTIVE) ||
1041 (trec -> state == TREC_WAITING) ||
1042 (trec -> state == TREC_CONDEMNED));
1043
1044 lock_stm(trec);
1045
1046 t = trec;
1047 result = TRUE;
1048 while (t != NO_TREC) {
1049 result &= validate_and_acquire_ownership(cap, t, TRUE, FALSE);
1050 t = t -> enclosing_trec;
1051 }
1052
1053 if (!result && trec -> state != TREC_WAITING) {
1054 trec -> state = TREC_CONDEMNED;
1055 }
1056
1057 unlock_stm(trec);
1058
1059 TRACE("%p : stmValidateNestOfTransactions()=%d", trec, result);
1060 return result;
1061 }
1062
1063 /*......................................................................*/
1064
1065 static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeader **in) {
1066 TRecEntry *result = NULL;
1067
1068 TRACE("%p : get_entry_for TVar %p", trec, tvar);
1069 ASSERT(trec != NO_TREC);
1070
1071 do {
1072 FOR_EACH_ENTRY(trec, e, {
1073 if (e -> tvar == tvar) {
1074 result = e;
1075 if (in != NULL) {
1076 *in = trec;
1077 }
1078 BREAK_FOR_EACH;
1079 }
1080 });
1081 trec = trec -> enclosing_trec;
1082 } while (result == NULL && trec != NO_TREC);
1083
1084 return result;
1085 }
1086
1087 /*......................................................................*/
1088
1089 /*
1090  * Add/remove links between an invariant and its TVars. The caller must have
1091 * locked the TVars involved and the invariant.
1092 */
1093
1094 static void disconnect_invariant(Capability *cap,
1095 StgAtomicInvariant *inv) {
1096 StgTRecHeader *last_execution = inv -> last_execution;
1097
1098 TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);
1099
1100 FOR_EACH_ENTRY(last_execution, e, {
1101 StgTVar *s = e -> tvar;
1102 StgTVarWatchQueue *q = s -> first_watch_queue_entry;
1103 DEBUG_ONLY( StgBool found = FALSE );
1104 TRACE(" looking for trec on tvar=%p", s);
1105 for (q = s -> first_watch_queue_entry;
1106 q != END_STM_WATCH_QUEUE;
1107 q = q -> next_queue_entry) {
1108 if (q -> closure == (StgClosure*)inv) {
1109 StgTVarWatchQueue *pq;
1110 StgTVarWatchQueue *nq;
1111 nq = q -> next_queue_entry;
1112 pq = q -> prev_queue_entry;
1113 if (nq != END_STM_WATCH_QUEUE) {
1114 nq -> prev_queue_entry = pq;
1115 }
1116 if (pq != END_STM_WATCH_QUEUE) {
1117 pq -> next_queue_entry = nq;
1118 } else {
1119 ASSERT (s -> first_watch_queue_entry == q);
1120 s -> first_watch_queue_entry = nq;
1121 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
1122 }
1123 TRACE(" found it in watch queue entry %p", q);
1124 free_stg_tvar_watch_queue(cap, q);
1125 DEBUG_ONLY( found = TRUE );
1126 break;
1127 }
1128 }
1129 ASSERT(found);
1130 });
1131 inv -> last_execution = NO_TREC;
1132 }
1133
1134 static void connect_invariant_to_trec(Capability *cap,
1135 StgAtomicInvariant *inv,
1136 StgTRecHeader *my_execution) {
1137 TRACE("connecting execution inv=%p trec=%p", inv, my_execution);
1138
1139 ASSERT(inv -> last_execution == NO_TREC);
1140
1141 FOR_EACH_ENTRY(my_execution, e, {
1142 StgTVar *s = e -> tvar;
1143 StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
1144 StgTVarWatchQueue *fq = s -> first_watch_queue_entry;
1145
1146 // We leave "last_execution" holding the values that will be
1147 // in the heap after the transaction we're in the process
1148 // of committing has finished.
1149 TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
1150 if (entry != NULL) {
1151 e -> expected_value = entry -> new_value;
1152 e -> new_value = entry -> new_value;
1153 }
1154
1155 TRACE(" linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
1156 q -> next_queue_entry = fq;
1157 q -> prev_queue_entry = END_STM_WATCH_QUEUE;
1158 if (fq != END_STM_WATCH_QUEUE) {
1159 fq -> prev_queue_entry = q;
1160 }
1161 s -> first_watch_queue_entry = q;
1162 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
1163 });
1164
1165 inv -> last_execution = my_execution;
1166 }
1167
1168 /*
1169 * Add a new invariant to the trec's list of invariants to check on commit
1170 */
1171 void stmAddInvariantToCheck(Capability *cap,
1172 StgTRecHeader *trec,
1173 StgClosure *code) {
1174 StgAtomicInvariant *invariant;
1175 StgInvariantCheckQueue *q;
1176 TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
1177 ASSERT(trec != NO_TREC);
1178 ASSERT(trec -> state == TREC_ACTIVE ||
1179 trec -> state == TREC_CONDEMNED);
1180
1181
1182 // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
1183 // to signal that this is a new invariant in the current atomic block
1184
1185 invariant = (StgAtomicInvariant *) allocate(cap, sizeofW(StgAtomicInvariant));
1186 TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
1187 SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
1188 invariant -> code = code;
1189 invariant -> last_execution = NO_TREC;
1190 invariant -> lock = 0;
1191
1192 // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec
1193
1194 q = alloc_stg_invariant_check_queue(cap, invariant);
1195 TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
1196 q -> invariant = invariant;
1197 q -> my_execution = NO_TREC;
1198 q -> next_queue_entry = trec -> invariants_to_check;
1199 trec -> invariants_to_check = q;
1200
1201 TRACE("%p : stmAddInvariantToCheck done", trec);
1202 }
1203
1204 /*
1205 * Fill in the trec's list of invariants that might be violated by the
1206 * current transaction.
1207 */
1208
1209 StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
1210 StgTRecChunk *c;
1211 TRACE("%p : stmGetInvariantsToCheck, head was %p",
1212 trec,
1213 trec -> invariants_to_check);
1214
1215 ASSERT(trec != NO_TREC);
1216 ASSERT ((trec -> state == TREC_ACTIVE) ||
1217 (trec -> state == TREC_WAITING) ||
1218 (trec -> state == TREC_CONDEMNED));
1219 ASSERT(trec -> enclosing_trec == NO_TREC);
1220
1221 lock_stm(trec);
1222 c = trec -> current_chunk;
1223 while (c != END_STM_CHUNK_LIST) {
1224 unsigned int i;
1225 for (i = 0; i < c -> next_entry_idx; i ++) {
1226 TRecEntry *e = &(c -> entries[i]);
1227 if (entry_is_update(e)) {
1228 StgTVar *s = e -> tvar;
1229 StgClosure *old = lock_tvar(trec, s);
1230
1231 // Pick up any invariants on the TVar being updated
1232 // by entry "e"
1233
1234 StgTVarWatchQueue *q;
1235 TRACE("%p : checking for invariants on %p", trec, s);
1236 for (q = s -> first_watch_queue_entry;
1237 q != END_STM_WATCH_QUEUE;
1238 q = q -> next_queue_entry) {
1239 if (watcher_is_invariant(q)) {
1240 StgBool found = FALSE;
1241 StgInvariantCheckQueue *q2;
1242 TRACE("%p : Touching invariant %p", trec, q -> closure);
1243 for (q2 = trec -> invariants_to_check;
1244 q2 != END_INVARIANT_CHECK_QUEUE;
1245 q2 = q2 -> next_queue_entry) {
1246 if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
1247 TRACE("%p : Already found %p", trec, q -> closure);
1248 found = TRUE;
1249 break;
1250 }
1251 }
1252
1253 if (!found) {
1254 StgInvariantCheckQueue *q3;
1255 TRACE("%p : Not already found %p", trec, q -> closure);
1256 q3 = alloc_stg_invariant_check_queue(cap,
1257 (StgAtomicInvariant*) q -> closure);
1258 q3 -> next_queue_entry = trec -> invariants_to_check;
1259 trec -> invariants_to_check = q3;
1260 }
1261 }
1262 }
1263
1264 unlock_tvar(cap, trec, s, old, FALSE);
1265 }
1266 }
1267 c = c -> prev_chunk;
1268 }
1269
1270 unlock_stm(trec);
1271
1272 TRACE("%p : stmGetInvariantsToCheck, head now %p",
1273 trec,
1274 trec -> invariants_to_check);
1275
1276 return (trec -> invariants_to_check);
1277 }
1278
1279 /*......................................................................*/
1280
1281 StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
1282 int result;
1283 StgInt64 max_commits_at_start = max_commits;
1284 StgBool touched_invariants;
1285 StgBool use_read_phase;
1286
1287 TRACE("%p : stmCommitTransaction()", trec);
1288 ASSERT (trec != NO_TREC);
1289
1290 lock_stm(trec);
1291
1292 ASSERT (trec -> enclosing_trec == NO_TREC);
1293 ASSERT ((trec -> state == TREC_ACTIVE) ||
1294 (trec -> state == TREC_CONDEMNED));
1295
1296 // touched_invariants is true if we've written to a TVar with invariants
1297 // attached to it, or if we're trying to add a new invariant to the system.
1298
1299 touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);
1300
1301 // If we have touched invariants then (i) lock the invariant, and (ii) add
1302 // the invariant's read set to our own. Step (i) is needed to serialize
1303 // concurrent transactions that attempt to make conflicting updates
1304 // to the invariant's trec (suppose it read from t1 and t2, and that one
1305   // concurrent transaction writes only to t1, and a second writes only to
1306 // t2). Step (ii) is needed so that both transactions will lock t1 and t2
1307 // to gain access to their wait lists (and hence be able to unhook the
1308 // invariant from both tvars).
1309
1310 if (touched_invariants) {
1311 StgInvariantCheckQueue *q = trec -> invariants_to_check;
1312 TRACE("%p : locking invariants", trec);
1313 while (q != END_INVARIANT_CHECK_QUEUE) {
1314 StgTRecHeader *inv_old_trec;
1315 StgAtomicInvariant *inv;
1316 TRACE("%p : locking invariant %p", trec, q -> invariant);
1317 inv = q -> invariant;
1318 if (!lock_inv(inv)) {
1319 TRACE("%p : failed to lock %p", trec, inv);
1320 trec -> state = TREC_CONDEMNED;
1321 break;
1322 }
1323
1324 inv_old_trec = inv -> last_execution;
1325 if (inv_old_trec != NO_TREC) {
1326 StgTRecChunk *c = inv_old_trec -> current_chunk;
1327 while (c != END_STM_CHUNK_LIST) {
1328 unsigned int i;
1329 for (i = 0; i < c -> next_entry_idx; i ++) {
1330 TRecEntry *e = &(c -> entries[i]);
1331 TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
1332 merge_read_into (cap, trec, e -> tvar, e -> expected_value);
1333 }
1334 c = c -> prev_chunk;
1335 }
1336 }
1337 q = q -> next_queue_entry;
1338 }
1339 TRACE("%p : finished locking invariants", trec);
1340 }
1341
1342 // Use a read-phase (i.e. don't lock TVars we've read but not updated) if
1343 // (i) the configuration lets us use a read phase, and (ii) we've not
1344 // touched or introduced any invariants.
1345 //
1346 // In principle we could extend the implementation to support a read-phase
1347 // and invariants, but it complicates the logic: the links between
1348 // invariants and TVars are managed by the TVar watch queues which are
1349 // protected by the TVar's locks.
1350
1351 use_read_phase = ((config_use_read_phase) && (!touched_invariants));
1352
1353 result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), TRUE);
1354 if (result) {
1355 // We now know that all the updated locations hold their expected values.
1356 ASSERT (trec -> state == TREC_ACTIVE);
1357
1358 if (use_read_phase) {
1359 StgInt64 max_commits_at_end;
1360 StgInt64 max_concurrent_commits;
1361 TRACE("%p : doing read check", trec);
1362 result = check_read_only(trec);
1363 TRACE("%p : read-check %s", trec, result ? "succeeded" : "failed");
1364
1365 max_commits_at_end = max_commits;
1366 max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
1367 (n_capabilities * TOKEN_BATCH_SIZE));
1368 if (((max_concurrent_commits >> 32) > 0) || shake()) {
1369 result = FALSE;
1370 }
1371 }
1372
1373 if (result) {
1374     // We now know that all of the read-only locations held their expected values
1375 // at the end of the call to validate_and_acquire_ownership. This forms the
1376 // linearization point of the commit.
1377
1378 // 1. If we have touched or introduced any invariants then unhook them
1379 // from the TVars they depended on last time they were executed
1380 // and hook them on the TVars that they now depend on.
1381 if (touched_invariants) {
1382 StgInvariantCheckQueue *q = trec -> invariants_to_check;
1383 while (q != END_INVARIANT_CHECK_QUEUE) {
1384 StgAtomicInvariant *inv = q -> invariant;
1385 if (inv -> last_execution != NO_TREC) {
1386 disconnect_invariant(cap, inv);
1387 }
1388
1389 TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
1390 connect_invariant_to_trec(cap, inv, q -> my_execution);
1391
1392 TRACE("%p : unlocking invariant %p", trec, inv);
1393 unlock_inv(inv);
1394
1395 q = q -> next_queue_entry;
1396 }
1397 }
1398
1399 // 2. Make the updates required by the transaction
1400 FOR_EACH_ENTRY(trec, e, {
1401 StgTVar *s;
1402 s = e -> tvar;
1403 if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
1404 // Either the entry is an update or we're not using a read phase:
1405 // write the value back to the TVar, unlocking it if necessary.
1406
1407 ACQ_ASSERT(tvar_is_locked(s, trec));
1408 TRACE("%p : writing %p to %p, waking waiters", trec, e -> new_value, s);
1409 unpark_waiters_on(cap,s);
1410 IF_STM_FG_LOCKS({
1411 s -> num_updates ++;
1412 });
1413 unlock_tvar(cap, trec, s, e -> new_value, TRUE);
1414 }
1415 ACQ_ASSERT(!tvar_is_locked(s, trec));
1416 });
1417 } else {
1418 revert_ownership(cap, trec, FALSE);
1419 }
1420 }
1421
1422 unlock_stm(trec);
1423
1424 free_stg_trec_header(cap, trec);
1425
1426 TRACE("%p : stmCommitTransaction()=%d", trec, result);
1427
1428 return result;
1429 }
1430
1431 /*......................................................................*/
1432
1433 StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
1434 StgTRecHeader *et;
1435 int result;
1436 ASSERT (trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
1437 TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
1438 ASSERT ((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));
1439
1440 lock_stm(trec);
1441
1442 et = trec -> enclosing_trec;
1443 result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), TRUE);
1444 if (result) {
1445 // We now know that all the updated locations hold their expected values.
1446
1447 if (config_use_read_phase) {
1448 TRACE("%p : doing read check", trec);
1449 result = check_read_only(trec);
1450 }
1451 if (result) {
1452       // We now know that all of the read-only locations held their expected values
1453 // at the end of the call to validate_and_acquire_ownership. This forms the
1454 // linearization point of the commit.
1455
1456 TRACE("%p : read-check succeeded", trec);
1457 FOR_EACH_ENTRY(trec, e, {
1458 // Merge each entry into the enclosing transaction record, release all
1459 // locks.
1460
1461 StgTVar *s;
1462 s = e -> tvar;
1463
1464 // Careful! We might have a read entry here that we don't want
1465 // to spam over the update entry in the enclosing TRec. e.g. in
1466 //
1467 // t <- newTVar 1
1468 // writeTVar t 2
1469 // ((readTVar t >> retry) `orElse` return ()) `orElse` return ()
1470 //
1471 // - the innermost txn first aborts, giving us a read-only entry
1472 // with e->expected_value == e->new_value == 1
1473 // - the inner orElse commits into the outer orElse, which
1474 // lands us here. If we unconditionally did
1475 // merge_update_into(), then we would overwrite the outer
1476 // TRec's update, so we must check whether the entry is an
1477 // update or not, and if not, just do merge_read_into.
1478 //
1479 if (entry_is_update(e)) {
1480 unlock_tvar(cap, trec, s, e -> expected_value, FALSE);
1481 merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
1482 } else {
1483 merge_read_into(cap, et, s, e -> expected_value);
1484 }
1485 ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
1486 });
1487 } else {
1488 revert_ownership(cap, trec, FALSE);
1489 }
1490 }
1491
1492 unlock_stm(trec);
1493
1494 free_stg_trec_header(cap, trec);
1495
1496 TRACE("%p : stmCommitNestedTransaction()=%d", trec, result);
1497
1498 return result;
1499 }
1500
1501 /*......................................................................*/
1502
1503 StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
1504 int result;
1505 TRACE("%p : stmWait(%p)", trec, tso);
1506 ASSERT (trec != NO_TREC);
1507 ASSERT (trec -> enclosing_trec == NO_TREC);
1508 ASSERT ((trec -> state == TREC_ACTIVE) ||
1509 (trec -> state == TREC_CONDEMNED));
1510
1511 lock_stm(trec);
1512 result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
1513 if (result) {
1514 // The transaction is valid so far so we can actually start waiting.
1515 // (Otherwise the transaction was not valid and the thread will have to
1516 // retry it).
1517
1518 // Put ourselves to sleep. We retain locks on all the TVars involved
1519     // until we are sound asleep: (a) on the wait queues, (b) BlockedOnSTM
1520 // in the TSO, (c) TREC_WAITING in the Trec.
1521 build_watch_queue_entries_for_trec(cap, tso, trec);
1522 park_tso(tso);
1523 trec -> state = TREC_WAITING;
1524
1525 // We haven't released ownership of the transaction yet. The TSO
1526 // has been put on the wait queue for the TVars it is waiting for,
1527 // but we haven't yet tidied up the TSO's stack and made it safe
1528 // to wake up the TSO. Therefore, we must wait until the TSO is
1529 // safe to wake up before we release ownership - when all is well,
1530 // the runtime will call stmWaitUnlock() below, with the same
1531 // TRec.
1532
1533 } else {
1534 unlock_stm(trec);
1535 free_stg_trec_header(cap, trec);
1536 }
1537
1538 TRACE("%p : stmWait(%p)=%d", trec, tso, result);
1539 return result;
1540 }
1541
1542
1543 void
1544 stmWaitUnlock(Capability *cap, StgTRecHeader *trec) {
1545 revert_ownership(cap, trec, TRUE);
1546 unlock_stm(trec);
1547 }
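
/* Editor's sketch of the blocking protocol as the scheduler drives it,
 * assuming "cap", "tso" and "trec" are valid:
 *
 *   if (stmWait(cap, tso, trec)) {
 *     // TSO parked and on the watch queues; TVars still locked by trec.
 *     // ... tidy the TSO's stack so it is safe to wake ...
 *     stmWaitUnlock(cap, trec);  // release TVar locks and the STM lock
 *     // On wakeup, stmReWait() revalidates: it either re-parks the TSO
 *     // or removes the watch queue entries and restarts the transaction.
 *   } else {
 *     // Invalid: trec has been freed; restart the transaction.
 *   }
 */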
1548
1549 /*......................................................................*/
1550
1551 StgBool stmReWait(Capability *cap, StgTSO *tso) {
1552 int result;
1553 StgTRecHeader *trec = tso->trec;
1554
1555 TRACE("%p : stmReWait", trec);
1556 ASSERT (trec != NO_TREC);
1557 ASSERT (trec -> enclosing_trec == NO_TREC);
1558 ASSERT ((trec -> state == TREC_WAITING) ||
1559 (trec -> state == TREC_CONDEMNED));
1560
1561 lock_stm(trec);
1562 result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
1563 TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
1564 if (result) {
1565 // The transaction remains valid -- do nothing because it is already on
1566 // the wait queues
1567 ASSERT (trec -> state == TREC_WAITING);
1568 park_tso(tso);
1569 revert_ownership(cap, trec, TRUE);
1570 } else {
1571     // The transaction has become invalid. We can now remove it from the wait
1572 // queues.
1573 if (trec -> state != TREC_CONDEMNED) {
1574 remove_watch_queue_entries_for_trec (cap, trec);
1575 }
1576 free_stg_trec_header(cap, trec);
1577 }
1578 unlock_stm(trec);
1579
1580 TRACE("%p : stmReWait()=%d", trec, result);
1581 return result;
1582 }
1583
1584 /*......................................................................*/
1585
1586 static StgClosure *read_current_value(StgTRecHeader *trec STG_UNUSED, StgTVar *tvar) {
1587 StgClosure *result;
1588 result = tvar -> current_value;
1589
1590 #if defined(STM_FG_LOCKS)
1591 while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info) {
1592 TRACE("%p : read_current_value(%p) saw %p", trec, tvar, result);
1593 result = tvar -> current_value;
1594 }
1595 #endif
1596
1597 TRACE("%p : read_current_value(%p)=%p", trec, tvar, result);
1598 return result;
1599 }
1600
1601 /*......................................................................*/
1602
1603 StgClosure *stmReadTVar(Capability *cap,
1604 StgTRecHeader *trec,
1605 StgTVar *tvar) {
1606 StgTRecHeader *entry_in = NULL;
1607 StgClosure *result = NULL;
1608 TRecEntry *entry = NULL;
1609 TRACE("%p : stmReadTVar(%p)", trec, tvar);
1610 ASSERT (trec != NO_TREC);
1611 ASSERT (trec -> state == TREC_ACTIVE ||
1612 trec -> state == TREC_CONDEMNED);
1613
1614 entry = get_entry_for(trec, tvar, &entry_in);
1615
1616 if (entry != NULL) {
1617 if (entry_in == trec) {
1618 // Entry found in our trec
1619 result = entry -> new_value;
1620 } else {
1621 // Entry found in another trec
1622 TRecEntry *new_entry = get_new_entry(cap, trec);
1623 new_entry -> tvar = tvar;
1624 new_entry -> expected_value = entry -> expected_value;
1625 new_entry -> new_value = entry -> new_value;
1626 result = new_entry -> new_value;
1627 }
1628 } else {
1629 // No entry found
1630 StgClosure *current_value = read_current_value(trec, tvar);
1631 TRecEntry *new_entry = get_new_entry(cap, trec);
1632 new_entry -> tvar = tvar;
1633 new_entry -> expected_value = current_value;
1634 new_entry -> new_value = current_value;
1635 result = current_value;
1636 }
1637
1638 TRACE("%p : stmReadTVar(%p)=%p", trec, tvar, result);
1639 return result;
1640 }
1641
1642 /*......................................................................*/
1643
1644 void stmWriteTVar(Capability *cap,
1645 StgTRecHeader *trec,
1646 StgTVar *tvar,
1647 StgClosure *new_value) {
1648
1649 StgTRecHeader *entry_in = NULL;
1650 TRecEntry *entry = NULL;
1651 TRACE("%p : stmWriteTVar(%p, %p)", trec, tvar, new_value);
1652 ASSERT (trec != NO_TREC);
1653 ASSERT (trec -> state == TREC_ACTIVE ||
1654 trec -> state == TREC_CONDEMNED);
1655
1656 entry = get_entry_for(trec, tvar, &entry_in);
1657
1658 if (entry != NULL) {
1659 if (entry_in == trec) {
1660 // Entry found in our trec
1661 entry -> new_value = new_value;
1662 } else {
1663 // Entry found in another trec
1664 TRecEntry *new_entry = get_new_entry(cap, trec);
1665 new_entry -> tvar = tvar;
1666 new_entry -> expected_value = entry -> expected_value;
1667 new_entry -> new_value = new_value;
1668 }
1669 } else {
1670 // No entry found
1671 StgClosure *current_value = read_current_value(trec, tvar);
1672 TRecEntry *new_entry = get_new_entry(cap, trec);
1673 new_entry -> tvar = tvar;
1674 new_entry -> expected_value = current_value;
1675 new_entry -> new_value = new_value;
1676 }
1677
1678 TRACE("%p : stmWriteTVar done", trec);
1679 }
1680
1681 /*......................................................................*/
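
/* Editor's sketch (illustrative, not RTS code): the pieces above combined
 * into an atomic read-modify-write against this file's interface. A real
 * caller is the machinery behind the atomically# primop; "cap" and "tv" are
 * assumed valid, and "bump" is a hypothetical pure update function.
 *
 *   StgTRecHeader *t = stmStartTransaction(cap, NO_TREC);
 *   StgClosure *v = stmReadTVar(cap, t, tv);
 *   stmWriteTVar(cap, t, tv, bump(v));
 *   if (!stmCommitTransaction(cap, t)) {
 *     // validation failed: a real caller restarts the transaction
 *   }
 */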