A better fix for #7493 (see comment for details)
[ghc.git] / rts / STM.c
1 /* -----------------------------------------------------------------------------
2 * (c) The GHC Team 1998-2005
3 *
4 * STM implementation.
5 *
6 * Overview
7 * --------
8 *
9 * See the PPoPP 2005 paper "Composable memory transactions". In summary,
10 * each transaction has a TRec (transaction record) holding entries for each of the
11 * TVars (transactional variables) that it has accessed. Each entry records
12 * (a) the TVar, (b) the expected value seen in the TVar, (c) the new value that
13 * the transaction wants to write to the TVar, (d) during commit, the identity of
14 * the TRec that wrote the expected value.
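 *
 * As a sketch, the entry used throughout this file carries the fields below
 * (the precise definition lives in the RTS headers; the slot for (d) is
 * realised as the "num_updates" version stamp, present in THREADED_RTS
 * builds and stashed by validate_and_acquire_ownership):
 *
 *   TRecEntry {
 *     StgTVar    *tvar;            // (a) the TVar accessed
 *     StgClosure *expected_value;  // (b) the value we expect to see
 *     StgClosure *new_value;       // (c) the value we want to commit
 *     StgInt      num_updates;     // (d) version seen during commit
 *   }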
15 *
16 * Separate TRecs are used for each level in a nest of transactions. This allows
17 * a nested transaction to be aborted without condemning its enclosing transactions.
18 * This is needed in the implementation of catchRetry. Note that the "expected value"
19 * in a nested transaction's TRec is the value expected to be *held in memory* if
20 * the transaction commits -- not the "new value" stored in one of the enclosing
21 * transactions. This means that validation can be done without searching through
22 * a nest of TRecs.
23 *
24 * Concurrency control
25 * -------------------
26 *
27 * Three different concurrency control schemes can be built according to the settings
28 * in STM.h:
29 *
30 * STM_UNIPROC assumes that the caller serialises invocations on the STM interface.
31 * In the Haskell RTS this means it is suitable only for non-THREADED_RTS builds.
32 *
33 * STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired during
34 * an invocation on the STM interface. Note that this does not mean that
35 * transactions are simply serialized -- the lock is only held *within* the
36 * implementation of stmCommitTransaction, stmWait etc.
37 *
38 * STM_FG_LOCKS uses fine-grained locking -- locking is done on a per-TVar basis
39 * and, when committing a transaction, no locks are acquired for TVars that have
40 * been read but not updated.
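 *
 * (The choice between the three is made by the definitions in STM.h; a
 * THREADED_RTS build is expected to select STM_FG_LOCKS, roughly along
 * these lines:
 *
 *   #if defined(THREADED_RTS)
 *   #define STM_FG_LOCKS
 *   #else
 *   #define STM_UNIPROC
 *   #endif
 * )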
41 *
42 * Concurrency control is implemented in the functions:
43 *
44 * lock_stm
45 * unlock_stm
46 * lock_tvar / cond_lock_tvar
47 * unlock_tvar
48 *
49 * The choice between STM_UNIPROC / STM_CG_LOCK / STM_FG_LOCKS affects the
50 * implementation of these functions.
51 *
52 * lock_stm & unlock_stm are straightforward : in STM_CG_LOCK builds they
53 * acquire a simple spin-lock, and in the other builds they are no-ops.
54 *
55 * lock_tvar / cond_lock_tvar and unlock_tvar are more complex because they
56 * have other effects (present in STM_UNIPROC and STM_CG_LOCK builds) as well
57 * as the actual business of manipulating a lock (present only in STM_FG_LOCKS
58 * builds). This is because locking a TVar is implemented by writing the lock
59 * holder's TRec into the TVar's current_value field:
60 *
61 * lock_tvar - lock a specified TVar (STM_FG_LOCKS only), returning the value
62 * it contained.
63 *
64 * cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
65 * contains a specified value. Return TRUE if this succeeds,
66 * FALSE otherwise.
67 *
68 * unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
69 * storing a specified value in place of the lock entry.
70 *
71 * Using these operations, the typical pattern of a commit/validate/wait operation
72 * is to (a) lock the STM, (b) lock all the TVars being updated, (c) check that
73 * the TVars that were only read from still contain their expected values,
74 * (d) release the locks on the TVars, writing updates to them in the case of a
75 * commit, (e) unlock the STM.
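 *
 * As an illustrative sketch only (stmCommitTransaction below is the real
 * code, and it additionally handles read phases and invariants), that
 * pattern corresponds to:
 *
 *   lock_stm(trec);                                       // (a)
 *   valid = TRUE;
 *   FOR_EACH_ENTRY(trec, e, {                             // (b), (c)
 *     if (entry_is_update(e)) {
 *       if (!cond_lock_tvar(trec, e -> tvar, e -> expected_value))
 *         valid = FALSE;
 *     } else if (e -> tvar -> current_value != e -> expected_value) {
 *       valid = FALSE;
 *     }
 *   });
 *   FOR_EACH_ENTRY(trec, e, {                             // (d); the real
 *     if (entry_is_update(e))                             // code also checks
 *       unlock_tvar(cap, trec, e -> tvar,                 // tvar_is_locked
 *                   valid ? e -> new_value : e -> expected_value, TRUE);
 *   });
 *   unlock_stm(trec);                                     // (e)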
76 *
77 * Queues of waiting threads hang off the first_watch_queue_entry
78 * field of each TVar. This may only be manipulated when holding that
79 * TVar's lock. In particular, when a thread is putting itself to
80 * sleep, it mustn't release the TVar's lock until it has added itself
81 * to the wait queue and marked its TSO as BlockedOnSTM -- this makes
82 * sure that other threads will know to wake it.
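 *
 * A sketch of that ordering, as implemented by stmWait / stmWaitUnlock
 * below:
 *
 *   lock_stm(trec);
 *   validate_and_acquire_ownership(cap, trec, TRUE, TRUE); // lock every TVar
 *   build_watch_queue_entries_for_trec(cap, tso, trec);    // join wait queues
 *   park_tso(tso);                                         // BlockedOnSTM
 *   trec -> state = TREC_WAITING;
 *   // ... only once the TSO's stack is safe to wake does the scheduler
 *   // call stmWaitUnlock(), which does:
 *   revert_ownership(cap, trec, TRUE);                     // release TVar locks
 *   unlock_stm(trec);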
83 *
84 * ---------------------------------------------------------------------------*/
85
86 #include "PosixSource.h"
87 #include "Rts.h"
88
89 #include "RtsUtils.h"
90 #include "Schedule.h"
91 #include "STM.h"
92 #include "Trace.h"
93 #include "Threads.h"
94 #include "sm/Storage.h"
95
96 #include <stdio.h>
97
98 #define TRUE 1
99 #define FALSE 0
100
101 // ACQ_ASSERT is used for assertions which are only required for
102 // THREADED_RTS builds with fine-grained locking.
103
104 #if defined(STM_FG_LOCKS)
105 #define ACQ_ASSERT(_X) ASSERT(_X)
106 #define NACQ_ASSERT(_X) /*Nothing*/
107 #else
108 #define ACQ_ASSERT(_X) /*Nothing*/
109 #define NACQ_ASSERT(_X) ASSERT(_X)
110 #endif
111
112 /*......................................................................*/
113
114 // If SHAKE is defined then validation will sometimes spuriously fail. This helps
115 // test unusual code paths when genuine contention is rare.
116
117 #define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)
118
119 #ifdef SHAKE
120 static const int do_shake = TRUE;
121 #else
122 static const int do_shake = FALSE;
123 #endif
124 static int shake_ctr = 0;
125 static int shake_lim = 1;
126
127 static int shake(void) {
128 if (do_shake) {
129 if (((shake_ctr++) % shake_lim) == 0) {
130 shake_ctr = 1;
131 shake_lim ++;
132 return TRUE;
133 }
134 return FALSE;
135 } else {
136 return FALSE;
137 }
138 }
139
140 /*......................................................................*/
141
142 // Helper macros for iterating over entries within a transaction
143 // record
144
145 #define FOR_EACH_ENTRY(_t,_x,CODE) do { \
146 StgTRecHeader *__t = (_t); \
147 StgTRecChunk *__c = __t -> current_chunk; \
148 StgWord __limit = __c -> next_entry_idx; \
149 TRACE("%p : FOR_EACH_ENTRY, current_chunk=%p limit=%ld", __t, __c, __limit); \
150 while (__c != END_STM_CHUNK_LIST) { \
151 StgWord __i; \
152 for (__i = 0; __i < __limit; __i ++) { \
153 TRecEntry *_x = &(__c -> entries[__i]); \
154 do { CODE } while (0); \
155 } \
156 __c = __c -> prev_chunk; \
157 __limit = TREC_CHUNK_NUM_ENTRIES; \
158 } \
159 exit_for_each: \
160 if (FALSE) goto exit_for_each; \
161 } while (0)
162
163 #define BREAK_FOR_EACH goto exit_for_each
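
// A usage sketch: the macro binds each TRecEntry in turn to the name given
// as its second argument, and BREAK_FOR_EACH exits the whole iteration
// early (compare get_entry_for() later in this file):
//
//   FOR_EACH_ENTRY(trec, e, {
//     if (e -> tvar == tvar) {
//       result = e;
//       BREAK_FOR_EACH;
//     }
//   });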
164
165 /*......................................................................*/
166
167 // if REUSE_MEMORY is defined then attempt to re-use descriptors, log chunks,
168 // and wait queue entries without GC
169
170 #define REUSE_MEMORY
171
172 /*......................................................................*/
173
174 #define IF_STM_UNIPROC(__X) do { } while (0)
175 #define IF_STM_CG_LOCK(__X) do { } while (0)
176 #define IF_STM_FG_LOCKS(__X) do { } while (0)
177
178 #if defined(STM_UNIPROC)
179 #undef IF_STM_UNIPROC
180 #define IF_STM_UNIPROC(__X) do { __X } while (0)
181 static const StgBool config_use_read_phase = FALSE;
182
183 static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
184 TRACE("%p : lock_stm()", trec);
185 }
186
187 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
188 TRACE("%p : unlock_stm()", trec);
189 }
190
191 static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
192 StgTVar *s STG_UNUSED) {
193 StgClosure *result;
194 TRACE("%p : lock_tvar(%p)", trec, s);
195 result = s -> current_value;
196 return result;
197 }
198
199 static void unlock_tvar(Capability *cap,
200 StgTRecHeader *trec STG_UNUSED,
201 StgTVar *s,
202 StgClosure *c,
203 StgBool force_update) {
204 TRACE("%p : unlock_tvar(%p)", trec, s);
205 if (force_update) {
206 s -> current_value = c;
207 dirty_TVAR(cap,s);
208 }
209 }
210
211 static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
212 StgTVar *s STG_UNUSED,
213 StgClosure *expected) {
214 StgClosure *result;
215 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
216 result = s -> current_value;
217 TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
218 return (result == expected);
219 }
220
221 static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
222 // Nothing -- uniproc
223 return TRUE;
224 }
225
226 static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
227 // Nothing -- uniproc
228 }
229 #endif
230
231 #if defined(STM_CG_LOCK) /*........................................*/
232
233 #undef IF_STM_CG_LOCK
234 #define IF_STM_CG_LOCK(__X) do { __X } while (0)
235 static const StgBool config_use_read_phase = FALSE;
236 static volatile StgTRecHeader *smp_locked = NULL;
237
238 static void lock_stm(StgTRecHeader *trec) {
239 while (cas(&smp_locked, NULL, trec) != NULL) { }
240 TRACE("%p : lock_stm()", trec);
241 }
242
243 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
244 TRACE("%p : unlock_stm()", trec);
245 ASSERT (smp_locked == trec);
246 smp_locked = NULL;
247 }
248
249 static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
250 StgTVar *s STG_UNUSED) {
251 StgClosure *result;
252 TRACE("%p : lock_tvar(%p)", trec, s);
253 ASSERT (smp_locked == trec);
254 result = s -> current_value;
255 return result;
256 }
257
258 static void unlock_tvar(Capability *cap,
259 StgTRecHeader *trec STG_UNUSED,
260 StgTVar *s,
261 StgClosure *c,
262 StgBool force_update) {
263 TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
264 ASSERT (smp_locked == trec);
265 if (force_update) {
266 s -> current_value = c;
267 dirty_TVAR(cap,s);
268 }
269 }
270
271 static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
272 StgTVar *s STG_UNUSED,
273 StgClosure *expected) {
274 StgClosure *result;
275 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
276 ASSERT (smp_locked == trec);
277 result = s -> current_value;
278 TRACE("%p : %d", result ? "success" : "failure");
279 return (result == expected);
280 }
281
282 static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
283 // Nothing -- protected by STM lock
284 return TRUE;
285 }
286
287 static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
288 // Nothing -- protected by STM lock
289 }
290 #endif
291
292 #if defined(STM_FG_LOCKS) /*...................................*/
293
294 #undef IF_STM_FG_LOCKS
295 #define IF_STM_FG_LOCKS(__X) do { __X } while (0)
296 static const StgBool config_use_read_phase = TRUE;
297
298 static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
299 TRACE("%p : lock_stm()", trec);
300 }
301
302 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
303 TRACE("%p : unlock_stm()", trec);
304 }
305
306 static StgClosure *lock_tvar(StgTRecHeader *trec,
307 StgTVar *s STG_UNUSED) {
308 StgClosure *result;
309 TRACE("%p : lock_tvar(%p)", trec, s);
310 do {
311 do {
312 result = s -> current_value;
313 } while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info);
314 } while (cas((void *)&(s -> current_value),
315 (StgWord)result, (StgWord)trec) != (StgWord)result);
316 return result;
317 }
318
319 static void unlock_tvar(Capability *cap,
320 StgTRecHeader *trec STG_UNUSED,
321 StgTVar *s,
322 StgClosure *c,
323 StgBool force_update STG_UNUSED) {
324 TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
325 ASSERT(s -> current_value == (StgClosure *)trec);
326 s -> current_value = c;
327 dirty_TVAR(cap,s);
328 }
329
330 static StgBool cond_lock_tvar(StgTRecHeader *trec,
331 StgTVar *s,
332 StgClosure *expected) {
333 StgClosure *result;
334 StgWord w;
335 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
336 w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
337 result = (StgClosure *)w;
338 TRACE("%p : %s", trec, result ? "success" : "failure");
339 return (result == expected);
340 }
341
342 static StgBool lock_inv(StgAtomicInvariant *inv) {
343 return (cas(&(inv -> lock), 0, 1) == 0);
344 }
345
346 static void unlock_inv(StgAtomicInvariant *inv) {
347 ASSERT(inv -> lock == 1);
348 inv -> lock = 0;
349 }
350 #endif
351
352 /*......................................................................*/
353
354 static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
355 StgClosure *c = q -> closure;
356 StgInfoTable *info = get_itbl(c);
357 return (info -> type) == TSO;
358 }
359
360 static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
361 StgClosure *c = q -> closure;
362 return (c->header.info == &stg_ATOMIC_INVARIANT_info);
363 }
364
365 /*......................................................................*/
366
367 // Helper functions for thread blocking and unblocking
368
369 static void park_tso(StgTSO *tso) {
370 ASSERT(tso -> why_blocked == NotBlocked);
371 tso -> why_blocked = BlockedOnSTM;
372 tso -> block_info.closure = (StgClosure *) END_TSO_QUEUE;
373 TRACE("park_tso on tso=%p", tso);
374 }
375
376 static void unpark_tso(Capability *cap, StgTSO *tso) {
377 // We will continue unparking threads while they remain on one of the wait
378 // queues: it's up to the thread itself to remove itself from the wait queues
379 // if it decides to do so when it is scheduled.
380
381 // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
382 // to avoid multiple CPUs unblocking the same TSO, and also to
383 // synchronise with throwTo().
384 lockTSO(tso);
385 if (tso -> why_blocked == BlockedOnSTM) {
386 TRACE("unpark_tso on tso=%p", tso);
387 tryWakeupThread(cap,tso);
388 } else {
389 TRACE("spurious unpark_tso on tso=%p", tso);
390 }
391 unlockTSO(tso);
392 }
393
394 static void unpark_waiters_on(Capability *cap, StgTVar *s) {
395 StgTVarWatchQueue *q;
396 StgTVarWatchQueue *trail;
397 TRACE("unpark_waiters_on tvar=%p", s);
398 // unblock TSOs in reverse order, to be a bit fairer (#2319)
399 for (q = s -> first_watch_queue_entry, trail = q;
400 q != END_STM_WATCH_QUEUE;
401 q = q -> next_queue_entry) {
402 trail = q;
403 }
404 q = trail;
405 for (;
406 q != END_STM_WATCH_QUEUE;
407 q = q -> prev_queue_entry) {
408 if (watcher_is_tso(q)) {
409 unpark_tso(cap, (StgTSO *)(q -> closure));
410 }
411 }
412 }
413
414 /*......................................................................*/
415
416 // Helper functions for allocation and initialization
417
418 static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
419 StgAtomicInvariant *invariant) {
420 StgInvariantCheckQueue *result;
421 result = (StgInvariantCheckQueue *)allocate(cap, sizeofW(StgInvariantCheckQueue));
422 SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
423 result -> invariant = invariant;
424 result -> my_execution = NO_TREC;
425 return result;
426 }
427
428 static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
429 StgClosure *closure) {
430 StgTVarWatchQueue *result;
431 result = (StgTVarWatchQueue *)allocate(cap, sizeofW(StgTVarWatchQueue));
432 SET_HDR (result, &stg_TVAR_WATCH_QUEUE_info, CCS_SYSTEM);
433 result -> closure = closure;
434 return result;
435 }
436
437 static StgTRecChunk *new_stg_trec_chunk(Capability *cap) {
438 StgTRecChunk *result;
439 result = (StgTRecChunk *)allocate(cap, sizeofW(StgTRecChunk));
440 SET_HDR (result, &stg_TREC_CHUNK_info, CCS_SYSTEM);
441 result -> prev_chunk = END_STM_CHUNK_LIST;
442 result -> next_entry_idx = 0;
443 return result;
444 }
445
446 static StgTRecHeader *new_stg_trec_header(Capability *cap,
447 StgTRecHeader *enclosing_trec) {
448 StgTRecHeader *result;
449 result = (StgTRecHeader *) allocate(cap, sizeofW(StgTRecHeader));
450 SET_HDR (result, &stg_TREC_HEADER_info, CCS_SYSTEM);
451
452 result -> enclosing_trec = enclosing_trec;
453 result -> current_chunk = new_stg_trec_chunk(cap);
454 result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
455
456 if (enclosing_trec == NO_TREC) {
457 result -> state = TREC_ACTIVE;
458 } else {
459 ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
460 enclosing_trec -> state == TREC_CONDEMNED);
461 result -> state = enclosing_trec -> state;
462 }
463
464 return result;
465 }
466
467 /*......................................................................*/
468
469 // Allocation / deallocation functions that retain per-capability lists
470 // of closures that can be re-used
471
472 static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
473 StgAtomicInvariant *invariant) {
474 StgInvariantCheckQueue *result = NULL;
475 if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
476 result = new_stg_invariant_check_queue(cap, invariant);
477 } else {
478 result = cap -> free_invariant_check_queues;
479 result -> invariant = invariant;
480 result -> my_execution = NO_TREC;
481 cap -> free_invariant_check_queues = result -> next_queue_entry;
482 }
483 return result;
484 }
485
486 static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
487 StgClosure *closure) {
488 StgTVarWatchQueue *result = NULL;
489 if (cap -> free_tvar_watch_queues == END_STM_WATCH_QUEUE) {
490 result = new_stg_tvar_watch_queue(cap, closure);
491 } else {
492 result = cap -> free_tvar_watch_queues;
493 result -> closure = closure;
494 cap -> free_tvar_watch_queues = result -> next_queue_entry;
495 }
496 return result;
497 }
498
499 static void free_stg_tvar_watch_queue(Capability *cap,
500 StgTVarWatchQueue *wq) {
501 #if defined(REUSE_MEMORY)
502 wq -> next_queue_entry = cap -> free_tvar_watch_queues;
503 cap -> free_tvar_watch_queues = wq;
504 #endif
505 }
506
507 static StgTRecChunk *alloc_stg_trec_chunk(Capability *cap) {
508 StgTRecChunk *result = NULL;
509 if (cap -> free_trec_chunks == END_STM_CHUNK_LIST) {
510 result = new_stg_trec_chunk(cap);
511 } else {
512 result = cap -> free_trec_chunks;
513 cap -> free_trec_chunks = result -> prev_chunk;
514 result -> prev_chunk = END_STM_CHUNK_LIST;
515 result -> next_entry_idx = 0;
516 }
517 return result;
518 }
519
520 static void free_stg_trec_chunk(Capability *cap,
521 StgTRecChunk *c) {
522 #if defined(REUSE_MEMORY)
523 c -> prev_chunk = cap -> free_trec_chunks;
524 cap -> free_trec_chunks = c;
525 #endif
526 }
527
528 static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
529 StgTRecHeader *enclosing_trec) {
530 StgTRecHeader *result = NULL;
531 if (cap -> free_trec_headers == NO_TREC) {
532 result = new_stg_trec_header(cap, enclosing_trec);
533 } else {
534 result = cap -> free_trec_headers;
535 cap -> free_trec_headers = result -> enclosing_trec;
536 result -> enclosing_trec = enclosing_trec;
537 result -> current_chunk -> next_entry_idx = 0;
538 result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
539 if (enclosing_trec == NO_TREC) {
540 result -> state = TREC_ACTIVE;
541 } else {
542 ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
543 enclosing_trec -> state == TREC_CONDEMNED);
544 result -> state = enclosing_trec -> state;
545 }
546 }
547 return result;
548 }
549
550 static void free_stg_trec_header(Capability *cap,
551 StgTRecHeader *trec) {
552 #if defined(REUSE_MEMORY)
553 StgTRecChunk *chunk = trec -> current_chunk -> prev_chunk;
554 while (chunk != END_STM_CHUNK_LIST) {
555 StgTRecChunk *prev_chunk = chunk -> prev_chunk;
556 free_stg_trec_chunk(cap, chunk);
557 chunk = prev_chunk;
558 }
559 trec -> current_chunk -> prev_chunk = END_STM_CHUNK_LIST;
560 trec -> enclosing_trec = cap -> free_trec_headers;
561 cap -> free_trec_headers = trec;
562 #endif
563 }
564
565 /*......................................................................*/
566
567 // Helper functions for managing waiting lists
568
569 static void build_watch_queue_entries_for_trec(Capability *cap,
570 StgTSO *tso,
571 StgTRecHeader *trec) {
572 ASSERT(trec != NO_TREC);
573 ASSERT(trec -> enclosing_trec == NO_TREC);
574 ASSERT(trec -> state == TREC_ACTIVE);
575
576 TRACE("%p : build_watch_queue_entries_for_trec()", trec);
577
578 FOR_EACH_ENTRY(trec, e, {
579 StgTVar *s;
580 StgTVarWatchQueue *q;
581 StgTVarWatchQueue *fq;
582 s = e -> tvar;
583 TRACE("%p : adding tso=%p to watch queue for tvar=%p", trec, tso, s);
584 ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
585 NACQ_ASSERT(s -> current_value == e -> expected_value);
586 fq = s -> first_watch_queue_entry;
587 q = alloc_stg_tvar_watch_queue(cap, (StgClosure*) tso);
588 q -> next_queue_entry = fq;
589 q -> prev_queue_entry = END_STM_WATCH_QUEUE;
590 if (fq != END_STM_WATCH_QUEUE) {
591 fq -> prev_queue_entry = q;
592 }
593 s -> first_watch_queue_entry = q;
594 e -> new_value = (StgClosure *) q;
595 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
596 });
597 }
598
599 static void remove_watch_queue_entries_for_trec(Capability *cap,
600 StgTRecHeader *trec) {
601 ASSERT(trec != NO_TREC);
602 ASSERT(trec -> enclosing_trec == NO_TREC);
603 ASSERT(trec -> state == TREC_WAITING ||
604 trec -> state == TREC_CONDEMNED);
605
606 TRACE("%p : remove_watch_queue_entries_for_trec()", trec);
607
608 FOR_EACH_ENTRY(trec, e, {
609 StgTVar *s;
610 StgTVarWatchQueue *pq;
611 StgTVarWatchQueue *nq;
612 StgTVarWatchQueue *q;
613 StgClosure *saw;
614 s = e -> tvar;
615 saw = lock_tvar(trec, s);
616 q = (StgTVarWatchQueue *) (e -> new_value);
617 TRACE("%p : removing tso=%p from watch queue for tvar=%p",
618 trec,
619 q -> closure,
620 s);
621 ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
622 nq = q -> next_queue_entry;
623 pq = q -> prev_queue_entry;
624 if (nq != END_STM_WATCH_QUEUE) {
625 nq -> prev_queue_entry = pq;
626 }
627 if (pq != END_STM_WATCH_QUEUE) {
628 pq -> next_queue_entry = nq;
629 } else {
630 ASSERT (s -> first_watch_queue_entry == q);
631 s -> first_watch_queue_entry = nq;
632 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
633 }
634 free_stg_tvar_watch_queue(cap, q);
635 unlock_tvar(cap, trec, s, saw, FALSE);
636 });
637 }
638
639 /*......................................................................*/
640
641 static TRecEntry *get_new_entry(Capability *cap,
642 StgTRecHeader *t) {
643 TRecEntry *result;
644 StgTRecChunk *c;
645 int i;
646
647 c = t -> current_chunk;
648 i = c -> next_entry_idx;
649 ASSERT(c != END_STM_CHUNK_LIST);
650
651 if (i < TREC_CHUNK_NUM_ENTRIES) {
652 // Continue to use current chunk
653 result = &(c -> entries[i]);
654 c -> next_entry_idx ++;
655 } else {
656 // Current chunk is full: allocate a fresh one
657 StgTRecChunk *nc;
658 nc = alloc_stg_trec_chunk(cap);
659 nc -> prev_chunk = c;
660 nc -> next_entry_idx = 1;
661 t -> current_chunk = nc;
662 result = &(nc -> entries[0]);
663 }
664
665 return result;
666 }
667
668 /*......................................................................*/
669
670 static void merge_update_into(Capability *cap,
671 StgTRecHeader *t,
672 StgTVar *tvar,
673 StgClosure *expected_value,
674 StgClosure *new_value) {
675 int found;
676
677 // Look for an entry in this trec
678 found = FALSE;
679 FOR_EACH_ENTRY(t, e, {
680 StgTVar *s;
681 s = e -> tvar;
682 if (s == tvar) {
683 found = TRUE;
684 if (e -> expected_value != expected_value) {
685 // Must abort if the two entries start from different values
686 TRACE("%p : update entries inconsistent at %p (%p vs %p)",
687 t, tvar, e -> expected_value, expected_value);
688 t -> state = TREC_CONDEMNED;
689 }
690 e -> new_value = new_value;
691 BREAK_FOR_EACH;
692 }
693 });
694
695 if (!found) {
696 // No entry so far in this trec
697 TRecEntry *ne;
698 ne = get_new_entry(cap, t);
699 ne -> tvar = tvar;
700 ne -> expected_value = expected_value;
701 ne -> new_value = new_value;
702 }
703 }
704
705 /*......................................................................*/
706
707 static void merge_read_into(Capability *cap,
708 StgTRecHeader *trec,
709 StgTVar *tvar,
710 StgClosure *expected_value)
711 {
712 int found;
713 StgTRecHeader *t;
714
715 found = FALSE;
716
717 //
718 // See #7493
719 //
720 // We need to look for an existing entry *anywhere* in the stack of
721 // nested transactions. Otherwise, in stmCommitNestedTransaction()
722 // we can't tell the difference between
723 //
724 // (1) a read-only entry
725 // (2) an entry that writes back the original value
726 //
727 // In both cases e->new_value == e->expected_value; but in (1)
728 // we want to do nothing, whereas in (2) we want to update e->new_value
729 // in the outer transaction.
730 //
731 // Here we deal with the first possibility: we never create a
732 // read-only entry in an inner transaction if there is an existing
733 // outer entry; so we never have an inner read and an outer update.
734 // So then in stmCommitNestedTransaction() we know we can always
735 // write e->new_value over the outer entry, because the inner entry
736 // is the most up to date.
737 //
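// As a concrete (hypothetical) illustration: an entry whose fields are
// expected_value == v and new_value == v could mean either (1) "this
// transaction only read the TVar and saw v" or (2) "this transaction
// wrote v back over v"; nothing in the entry itself distinguishes the
// two, which is why the loop below searches every enclosing trec before
// ever creating such an entry.
//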
738 for (t = trec; !found && t != NO_TREC; t = t -> enclosing_trec)
739 {
740 FOR_EACH_ENTRY(t, e, {
741 if (e -> tvar == tvar) {
742 found = TRUE;
743 if (e -> expected_value != expected_value) {
744 // Must abort if the two entries start from different values
745 TRACE("%p : read entries inconsistent at %p (%p vs %p)",
746 t, tvar, e -> expected_value, expected_value);
747 t -> state = TREC_CONDEMNED;
748 }
749 BREAK_FOR_EACH;
750 }
751 });
752 }
753
754 if (!found) {
755 // No entry found
756 TRecEntry *ne;
757 ne = get_new_entry(cap, trec);
758 ne -> tvar = tvar;
759 ne -> expected_value = expected_value;
760 ne -> new_value = expected_value;
761 }
762 }
763
764 /*......................................................................*/
765
766 static StgBool entry_is_update(TRecEntry *e) {
767 StgBool result;
768 result = (e -> expected_value != e -> new_value);
769 return result;
770 }
771
772 #if defined(STM_FG_LOCKS)
773 static StgBool entry_is_read_only(TRecEntry *e) {
774 StgBool result;
775 result = (e -> expected_value == e -> new_value);
776 return result;
777 }
778
779 static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
780 StgClosure *c;
781 StgBool result;
782 c = s -> current_value;
783 result = (c == (StgClosure *) h);
784 return result;
785 }
786 #endif
787
788 // revert_ownership : release a lock on a TVar, storing back
789 // the value that it held when the lock was acquired. "revert_all"
790 // is set in stmWait and stmReWait when we acquired locks on all of
791 // the TVars involved. "revert_all" is not set in commit operations
792 // where we don't lock TVars that have been read from but not updated.
793
794 static void revert_ownership(Capability *cap STG_UNUSED,
795 StgTRecHeader *trec STG_UNUSED,
796 StgBool revert_all STG_UNUSED) {
797 #if defined(STM_FG_LOCKS)
798 FOR_EACH_ENTRY(trec, e, {
799 if (revert_all || entry_is_update(e)) {
800 StgTVar *s;
801 s = e -> tvar;
802 if (tvar_is_locked(s, trec)) {
803 unlock_tvar(cap, trec, s, e -> expected_value, TRUE);
804 }
805 }
806 });
807 #endif
808 }
809
810 /*......................................................................*/
811
812 // validate_and_acquire_ownership : this performs the twin functions
813 // of checking that the TVars referred to by entries in trec hold the
814 // expected values and:
815 //
816 // - locking the TVar (on updated TVars during commit, or all TVars
817 // during wait)
818 //
819 // - recording the identity of the TRec that wrote the value seen in the
820 // TVar (on non-updated TVars during commit). These values are
821 // stashed in the TRec entries and are then checked in check_read_only
822 // to ensure that an atomic snapshot of all of these locations has been
823 // seen.
824
825 static StgBool validate_and_acquire_ownership (Capability *cap,
826 StgTRecHeader *trec,
827 int acquire_all,
828 int retain_ownership) {
829 StgBool result;
830
831 if (shake()) {
832 TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
833 return FALSE;
834 }
835
836 ASSERT ((trec -> state == TREC_ACTIVE) ||
837 (trec -> state == TREC_WAITING) ||
838 (trec -> state == TREC_CONDEMNED));
839 result = !((trec -> state) == TREC_CONDEMNED);
840 if (result) {
841 FOR_EACH_ENTRY(trec, e, {
842 StgTVar *s;
843 s = e -> tvar;
844 if (acquire_all || entry_is_update(e)) {
845 TRACE("%p : trying to acquire %p", trec, s);
846 if (!cond_lock_tvar(trec, s, e -> expected_value)) {
847 TRACE("%p : failed to acquire %p", trec, s);
848 result = FALSE;
849 BREAK_FOR_EACH;
850 }
851 } else {
852 ASSERT(config_use_read_phase);
853 IF_STM_FG_LOCKS({
854 TRACE("%p : will need to check %p", trec, s);
855 if (s -> current_value != e -> expected_value) {
856 TRACE("%p : doesn't match", trec);
857 result = FALSE;
858 BREAK_FOR_EACH;
859 }
860 e -> num_updates = s -> num_updates;
861 if (s -> current_value != e -> expected_value) {
862 TRACE("%p : doesn't match (race)", trec);
863 result = FALSE;
864 BREAK_FOR_EACH;
865 } else {
866 TRACE("%p : need to check version %ld", trec, e -> num_updates);
867 }
868 });
869 }
870 });
871 }
872
873 if ((!result) || (!retain_ownership)) {
874 revert_ownership(cap, trec, acquire_all);
875 }
876
877 return result;
878 }
879
880 // check_read_only : check that we've seen an atomic snapshot of the
881 // non-updated TVars accessed by a trec. This checks that the last TRec to
882 // commit an update to the TVar is unchanged since the value was stashed in
883 // validate_and_acquire_ownership. If no update is seen to any TVar then
884 // all of them contained their expected values at the start of the call to
885 // check_read_only.
886 //
887 // The paper "Concurrent programming without locks" (under submission) and
888 // Keir Fraser's PhD dissertation "Practical lock-free programming" discuss
889 // this kind of algorithm.
890
891 static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
892 StgBool result = TRUE;
893
894 ASSERT (config_use_read_phase);
895 IF_STM_FG_LOCKS({
896 FOR_EACH_ENTRY(trec, e, {
897 StgTVar *s;
898 s = e -> tvar;
899 if (entry_is_read_only(e)) {
900 TRACE("%p : check_read_only for TVar %p, saw %ld", trec, s, e -> num_updates);
901 if (s -> num_updates != e -> num_updates) {
902 // ||s -> current_value != e -> expected_value) {
903 TRACE("%p : mismatch", trec);
904 result = FALSE;
905 BREAK_FOR_EACH;
906 }
907 }
908 });
909 });
910
911 return result;
912 }
913
914
915 /************************************************************************/
916
917 void stmPreGCHook (Capability *cap) {
918 lock_stm(NO_TREC);
919 TRACE("stmPreGCHook");
920 cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
921 cap->free_trec_chunks = END_STM_CHUNK_LIST;
922 cap->free_trec_headers = NO_TREC;
923 unlock_stm(NO_TREC);
924 }
925
926 /************************************************************************/
927
928 // check_read_only relies on version numbers held in TVars' "num_updates"
929 // fields not wrapping around while a transaction is being committed. The version
930 // number is incremented each time an update is committed to the TVar.
931 // This is unlikely to wrap around when 32-bit integers are used for the counts,
932 // but to ensure correctness we maintain a shared count of the maximum
933 // number of commit operations that may occur and check that this has
934 // not increased by more than 2^32 during a commit.
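//
// For example (illustrative arithmetic): capabilities take commit tokens in
// batches, each batch adding TOKEN_BATCH_SIZE to max_commits. In
// stmCommitTransaction below the bound is computed as
//
//   max_concurrent_commits = (max_commits_at_end - max_commits_at_start)
//                          + (n_capabilities * TOKEN_BATCH_SIZE);
//
// and the commit is failed (and re-run) if that bound reaches 2^32, i.e.
// whenever a num_updates counter could conceivably have wrapped.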
935
936 #define TOKEN_BATCH_SIZE 1024
937
938 static volatile StgInt64 max_commits = 0;
939
940 #if defined(THREADED_RTS)
941 static volatile StgBool token_locked = FALSE;
942
943 static void getTokenBatch(Capability *cap) {
944 while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
945 max_commits += TOKEN_BATCH_SIZE;
946 TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
947 cap -> transaction_tokens = TOKEN_BATCH_SIZE;
948 token_locked = FALSE;
949 }
950
951 static void getToken(Capability *cap) {
952 if (cap -> transaction_tokens == 0) {
953 getTokenBatch(cap);
954 }
955 cap -> transaction_tokens --;
956 }
957 #else
958 static void getToken(Capability *cap STG_UNUSED) {
959 // Nothing
960 }
961 #endif
962
963 /*......................................................................*/
964
965 StgTRecHeader *stmStartTransaction(Capability *cap,
966 StgTRecHeader *outer) {
967 StgTRecHeader *t;
968 TRACE("%p : stmStartTransaction with %d tokens",
969 outer,
970 cap -> transaction_tokens);
971
972 getToken(cap);
973
974 t = alloc_stg_trec_header(cap, outer);
975 TRACE("%p : stmStartTransaction()=%p", outer, t);
976 return t;
977 }
978
979 /*......................................................................*/
980
981 void stmAbortTransaction(Capability *cap,
982 StgTRecHeader *trec) {
983 StgTRecHeader *et;
984 TRACE("%p : stmAbortTransaction", trec);
985 ASSERT (trec != NO_TREC);
986 ASSERT ((trec -> state == TREC_ACTIVE) ||
987 (trec -> state == TREC_WAITING) ||
988 (trec -> state == TREC_CONDEMNED));
989
990 lock_stm(trec);
991
992 et = trec -> enclosing_trec;
993 if (et == NO_TREC) {
994 // We're a top-level transaction: remove any watch queue entries that
995 // we may have.
996 TRACE("%p : aborting top-level transaction", trec);
997
998 if (trec -> state == TREC_WAITING) {
999 ASSERT (trec -> enclosing_trec == NO_TREC);
1000 TRACE("%p : stmAbortTransaction aborting waiting transaction", trec);
1001 remove_watch_queue_entries_for_trec(cap, trec);
1002 }
1003
1004 } else {
1005 // We're a nested transaction: merge our read set into our parent's
1006 TRACE("%p : retaining read-set into parent %p", trec, et);
1007
1008 FOR_EACH_ENTRY(trec, e, {
1009 StgTVar *s = e -> tvar;
1010 merge_read_into(cap, et, s, e -> expected_value);
1011 });
1012 }
1013
1014 trec -> state = TREC_ABORTED;
1015 unlock_stm(trec);
1016
1017 TRACE("%p : stmAbortTransaction done", trec);
1018 }
1019
1020 /*......................................................................*/
1021
1022 void stmFreeAbortedTRec(Capability *cap,
1023 StgTRecHeader *trec) {
1024 TRACE("%p : stmFreeAbortedTRec", trec);
1025 ASSERT (trec != NO_TREC);
1026 ASSERT ((trec -> state == TREC_CONDEMNED) ||
1027 (trec -> state == TREC_ABORTED));
1028
1029 free_stg_trec_header(cap, trec);
1030
1031 TRACE("%p : stmFreeAbortedTRec done", trec);
1032 }
1033
1034 /*......................................................................*/
1035
1036 void stmCondemnTransaction(Capability *cap,
1037 StgTRecHeader *trec) {
1038 TRACE("%p : stmCondemnTransaction", trec);
1039 ASSERT (trec != NO_TREC);
1040 ASSERT ((trec -> state == TREC_ACTIVE) ||
1041 (trec -> state == TREC_WAITING) ||
1042 (trec -> state == TREC_CONDEMNED));
1043
1044 lock_stm(trec);
1045 if (trec -> state == TREC_WAITING) {
1046 ASSERT (trec -> enclosing_trec == NO_TREC);
1047 TRACE("%p : stmCondemnTransaction condemning waiting transaction", trec);
1048 remove_watch_queue_entries_for_trec(cap, trec);
1049 }
1050 trec -> state = TREC_CONDEMNED;
1051 unlock_stm(trec);
1052
1053 TRACE("%p : stmCondemnTransaction done", trec);
1054 }
1055
1056 /*......................................................................*/
1057
1058 StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
1059 StgTRecHeader *t;
1060 StgBool result;
1061
1062 TRACE("%p : stmValidateNestOfTransactions", trec);
1063 ASSERT(trec != NO_TREC);
1064 ASSERT((trec -> state == TREC_ACTIVE) ||
1065 (trec -> state == TREC_WAITING) ||
1066 (trec -> state == TREC_CONDEMNED));
1067
1068 lock_stm(trec);
1069
1070 t = trec;
1071 result = TRUE;
1072 while (t != NO_TREC) {
1073 result &= validate_and_acquire_ownership(cap, t, TRUE, FALSE);
1074 t = t -> enclosing_trec;
1075 }
1076
1077 if (!result && trec -> state != TREC_WAITING) {
1078 trec -> state = TREC_CONDEMNED;
1079 }
1080
1081 unlock_stm(trec);
1082
1083 TRACE("%p : stmValidateNestOfTransactions()=%d", trec, result);
1084 return result;
1085 }
1086
1087 /*......................................................................*/
1088
1089 static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeader **in) {
1090 TRecEntry *result = NULL;
1091
1092 TRACE("%p : get_entry_for TVar %p", trec, tvar);
1093 ASSERT(trec != NO_TREC);
1094
1095 do {
1096 FOR_EACH_ENTRY(trec, e, {
1097 if (e -> tvar == tvar) {
1098 result = e;
1099 if (in != NULL) {
1100 *in = trec;
1101 }
1102 BREAK_FOR_EACH;
1103 }
1104 });
1105 trec = trec -> enclosing_trec;
1106 } while (result == NULL && trec != NO_TREC);
1107
1108 return result;
1109 }
1110
1111 /*......................................................................*/
1112
1113 /*
1114 * Add/remove links between an invariant and its TVars. The caller must have
1115 * locked the TVars involved and the invariant.
1116 */
1117
1118 static void disconnect_invariant(Capability *cap,
1119 StgAtomicInvariant *inv) {
1120 StgTRecHeader *last_execution = inv -> last_execution;
1121
1122 TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);
1123
1124 FOR_EACH_ENTRY(last_execution, e, {
1125 StgTVar *s = e -> tvar;
1126 StgTVarWatchQueue *q = s -> first_watch_queue_entry;
1127 DEBUG_ONLY( StgBool found = FALSE );
1128 TRACE(" looking for trec on tvar=%p", s);
1129 for (q = s -> first_watch_queue_entry;
1130 q != END_STM_WATCH_QUEUE;
1131 q = q -> next_queue_entry) {
1132 if (q -> closure == (StgClosure*)inv) {
1133 StgTVarWatchQueue *pq;
1134 StgTVarWatchQueue *nq;
1135 nq = q -> next_queue_entry;
1136 pq = q -> prev_queue_entry;
1137 if (nq != END_STM_WATCH_QUEUE) {
1138 nq -> prev_queue_entry = pq;
1139 }
1140 if (pq != END_STM_WATCH_QUEUE) {
1141 pq -> next_queue_entry = nq;
1142 } else {
1143 ASSERT (s -> first_watch_queue_entry == q);
1144 s -> first_watch_queue_entry = nq;
1145 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
1146 }
1147 TRACE(" found it in watch queue entry %p", q);
1148 free_stg_tvar_watch_queue(cap, q);
1149 DEBUG_ONLY( found = TRUE );
1150 break;
1151 }
1152 }
1153 ASSERT(found);
1154 });
1155 inv -> last_execution = NO_TREC;
1156 }
1157
1158 static void connect_invariant_to_trec(Capability *cap,
1159 StgAtomicInvariant *inv,
1160 StgTRecHeader *my_execution) {
1161 TRACE("connecting execution inv=%p trec=%p", inv, my_execution);
1162
1163 ASSERT(inv -> last_execution == NO_TREC);
1164
1165 FOR_EACH_ENTRY(my_execution, e, {
1166 StgTVar *s = e -> tvar;
1167 StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
1168 StgTVarWatchQueue *fq = s -> first_watch_queue_entry;
1169
1170 // We leave "last_execution" holding the values that will be
1171 // in the heap after the transaction we're in the process
1172 // of committing has finished.
1173 TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
1174 if (entry != NULL) {
1175 e -> expected_value = entry -> new_value;
1176 e -> new_value = entry -> new_value;
1177 }
1178
1179 TRACE(" linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
1180 q -> next_queue_entry = fq;
1181 q -> prev_queue_entry = END_STM_WATCH_QUEUE;
1182 if (fq != END_STM_WATCH_QUEUE) {
1183 fq -> prev_queue_entry = q;
1184 }
1185 s -> first_watch_queue_entry = q;
1186 dirty_TVAR(cap,s); // we modified first_watch_queue_entry
1187 });
1188
1189 inv -> last_execution = my_execution;
1190 }
1191
1192 /*
1193 * Add a new invariant to the trec's list of invariants to check on commit
1194 */
1195 void stmAddInvariantToCheck(Capability *cap,
1196 StgTRecHeader *trec,
1197 StgClosure *code) {
1198 StgAtomicInvariant *invariant;
1199 StgInvariantCheckQueue *q;
1200 TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
1201 ASSERT(trec != NO_TREC);
1202 ASSERT(trec -> state == TREC_ACTIVE ||
1203 trec -> state == TREC_CONDEMNED);
1204
1205
1206 // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
1207 // to signal that this is a new invariant in the current atomic block
1208
1209 invariant = (StgAtomicInvariant *) allocate(cap, sizeofW(StgAtomicInvariant));
1210 TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
1211 SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
1212 invariant -> code = code;
1213 invariant -> last_execution = NO_TREC;
1214 invariant -> lock = 0;
1215
1216 // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec
1217
1218 q = alloc_stg_invariant_check_queue(cap, invariant);
1219 TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
1220 q -> invariant = invariant;
1221 q -> my_execution = NO_TREC;
1222 q -> next_queue_entry = trec -> invariants_to_check;
1223 trec -> invariants_to_check = q;
1224
1225 TRACE("%p : stmAddInvariantToCheck done", trec);
1226 }
1227
1228 /*
1229 * Fill in the trec's list of invariants that might be violated by the
1230 * current transaction.
1231 */
1232
1233 StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
1234 StgTRecChunk *c;
1235 TRACE("%p : stmGetInvariantsToCheck, head was %p",
1236 trec,
1237 trec -> invariants_to_check);
1238
1239 ASSERT(trec != NO_TREC);
1240 ASSERT ((trec -> state == TREC_ACTIVE) ||
1241 (trec -> state == TREC_WAITING) ||
1242 (trec -> state == TREC_CONDEMNED));
1243 ASSERT(trec -> enclosing_trec == NO_TREC);
1244
1245 lock_stm(trec);
1246 c = trec -> current_chunk;
1247 while (c != END_STM_CHUNK_LIST) {
1248 unsigned int i;
1249 for (i = 0; i < c -> next_entry_idx; i ++) {
1250 TRecEntry *e = &(c -> entries[i]);
1251 if (entry_is_update(e)) {
1252 StgTVar *s = e -> tvar;
1253 StgClosure *old = lock_tvar(trec, s);
1254
1255 // Pick up any invariants on the TVar being updated
1256 // by entry "e"
1257
1258 StgTVarWatchQueue *q;
1259 TRACE("%p : checking for invariants on %p", trec, s);
1260 for (q = s -> first_watch_queue_entry;
1261 q != END_STM_WATCH_QUEUE;
1262 q = q -> next_queue_entry) {
1263 if (watcher_is_invariant(q)) {
1264 StgBool found = FALSE;
1265 StgInvariantCheckQueue *q2;
1266 TRACE("%p : Touching invariant %p", trec, q -> closure);
1267 for (q2 = trec -> invariants_to_check;
1268 q2 != END_INVARIANT_CHECK_QUEUE;
1269 q2 = q2 -> next_queue_entry) {
1270 if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
1271 TRACE("%p : Already found %p", trec, q -> closure);
1272 found = TRUE;
1273 break;
1274 }
1275 }
1276
1277 if (!found) {
1278 StgInvariantCheckQueue *q3;
1279 TRACE("%p : Not already found %p", trec, q -> closure);
1280 q3 = alloc_stg_invariant_check_queue(cap,
1281 (StgAtomicInvariant*) q -> closure);
1282 q3 -> next_queue_entry = trec -> invariants_to_check;
1283 trec -> invariants_to_check = q3;
1284 }
1285 }
1286 }
1287
1288 unlock_tvar(cap, trec, s, old, FALSE);
1289 }
1290 }
1291 c = c -> prev_chunk;
1292 }
1293
1294 unlock_stm(trec);
1295
1296 TRACE("%p : stmGetInvariantsToCheck, head now %p",
1297 trec,
1298 trec -> invariants_to_check);
1299
1300 return (trec -> invariants_to_check);
1301 }
1302
1303 /*......................................................................*/
1304
1305 StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
1306 int result;
1307 StgInt64 max_commits_at_start = max_commits;
1308 StgBool touched_invariants;
1309 StgBool use_read_phase;
1310
1311 TRACE("%p : stmCommitTransaction()", trec);
1312 ASSERT (trec != NO_TREC);
1313
1314 lock_stm(trec);
1315
1316 ASSERT (trec -> enclosing_trec == NO_TREC);
1317 ASSERT ((trec -> state == TREC_ACTIVE) ||
1318 (trec -> state == TREC_CONDEMNED));
1319
1320 // touched_invariants is true if we've written to a TVar with invariants
1321 // attached to it, or if we're trying to add a new invariant to the system.
1322
1323 touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);
1324
1325 // If we have touched invariants then (i) lock the invariant, and (ii) add
1326 // the invariant's read set to our own. Step (i) is needed to serialize
1327 // concurrent transactions that attempt to make conflicting updates
1328 // to the invariant's trec (suppose it read from t1 and t2, and that one
1329 // concurrent transaction writes only to t1, and a second writes only to
1330 // t2). Step (ii) is needed so that both transactions will lock t1 and t2
1331 // to gain access to their wait lists (and hence be able to unhook the
1332 // invariant from both tvars).
1333
1334 if (touched_invariants) {
1335 StgInvariantCheckQueue *q = trec -> invariants_to_check;
1336 TRACE("%p : locking invariants", trec);
1337 while (q != END_INVARIANT_CHECK_QUEUE) {
1338 StgTRecHeader *inv_old_trec;
1339 StgAtomicInvariant *inv;
1340 TRACE("%p : locking invariant %p", trec, q -> invariant);
1341 inv = q -> invariant;
1342 if (!lock_inv(inv)) {
1343 TRACE("%p : failed to lock %p", trec, inv);
1344 trec -> state = TREC_CONDEMNED;
1345 break;
1346 }
1347
1348 inv_old_trec = inv -> last_execution;
1349 if (inv_old_trec != NO_TREC) {
1350 StgTRecChunk *c = inv_old_trec -> current_chunk;
1351 while (c != END_STM_CHUNK_LIST) {
1352 unsigned int i;
1353 for (i = 0; i < c -> next_entry_idx; i ++) {
1354 TRecEntry *e = &(c -> entries[i]);
1355 TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
1356 merge_read_into (cap, trec, e -> tvar, e -> expected_value);
1357 }
1358 c = c -> prev_chunk;
1359 }
1360 }
1361 q = q -> next_queue_entry;
1362 }
1363 TRACE("%p : finished locking invariants", trec);
1364 }
1365
1366 // Use a read-phase (i.e. don't lock TVars we've read but not updated) if
1367 // (i) the configuration lets us use a read phase, and (ii) we've not
1368 // touched or introduced any invariants.
1369 //
1370 // In principle we could extend the implementation to support a read-phase
1371 // and invariants, but it complicates the logic: the links between
1372 // invariants and TVars are managed by the TVar watch queues which are
1373 // protected by the TVar's locks.
1374
1375 use_read_phase = ((config_use_read_phase) && (!touched_invariants));
1376
1377 result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), TRUE);
1378 if (result) {
1379 // We now know that all the updated locations hold their expected values.
1380 ASSERT (trec -> state == TREC_ACTIVE);
1381
1382 if (use_read_phase) {
1383 StgInt64 max_commits_at_end;
1384 StgInt64 max_concurrent_commits;
1385 TRACE("%p : doing read check", trec);
1386 result = check_read_only(trec);
1387 TRACE("%p : read-check %s", trec, result ? "succeeded" : "failed");
1388
1389 max_commits_at_end = max_commits;
1390 max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
1391 (n_capabilities * TOKEN_BATCH_SIZE));
1392 if (((max_concurrent_commits >> 32) > 0) || shake()) {
1393 result = FALSE;
1394 }
1395 }
1396
1397 if (result) {
1398 // We now know that all of the read-only locations held their expected values
1399 // at the end of the call to validate_and_acquire_ownership. This forms the
1400 // linearization point of the commit.
1401
1402 // 1. If we have touched or introduced any invariants then unhook them
1403 // from the TVars they depended on last time they were executed
1404 // and hook them on the TVars that they now depend on.
1405 if (touched_invariants) {
1406 StgInvariantCheckQueue *q = trec -> invariants_to_check;
1407 while (q != END_INVARIANT_CHECK_QUEUE) {
1408 StgAtomicInvariant *inv = q -> invariant;
1409 if (inv -> last_execution != NO_TREC) {
1410 disconnect_invariant(cap, inv);
1411 }
1412
1413 TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
1414 connect_invariant_to_trec(cap, inv, q -> my_execution);
1415
1416 TRACE("%p : unlocking invariant %p", trec, inv);
1417 unlock_inv(inv);
1418
1419 q = q -> next_queue_entry;
1420 }
1421 }
1422
1423 // 2. Make the updates required by the transaction
1424 FOR_EACH_ENTRY(trec, e, {
1425 StgTVar *s;
1426 s = e -> tvar;
1427 if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
1428 // Either the entry is an update or we're not using a read phase:
1429 // write the value back to the TVar, unlocking it if necessary.
1430
1431 ACQ_ASSERT(tvar_is_locked(s, trec));
1432 TRACE("%p : writing %p to %p, waking waiters", trec, e -> new_value, s);
1433 unpark_waiters_on(cap,s);
1434 IF_STM_FG_LOCKS({
1435 s -> num_updates ++;
1436 });
1437 unlock_tvar(cap, trec, s, e -> new_value, TRUE);
1438 }
1439 ACQ_ASSERT(!tvar_is_locked(s, trec));
1440 });
1441 } else {
1442 revert_ownership(cap, trec, FALSE);
1443 }
1444 }
1445
1446 unlock_stm(trec);
1447
1448 free_stg_trec_header(cap, trec);
1449
1450 TRACE("%p : stmCommitTransaction()=%d", trec, result);
1451
1452 return result;
1453 }
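
// A minimal sketch of how a caller is assumed to drive this interface (in
// GHC the real callers are the atomically#/retry# primops; "cap", "tvar"
// and "v" below are illustrative):
//
//   StgTRecHeader *t = stmStartTransaction(cap, NO_TREC);
//   StgClosure *old = stmReadTVar(cap, t, tvar);
//   stmWriteTVar(cap, t, tvar, v);
//   if (!stmCommitTransaction(cap, t)) {
//     // validation failed: the caller re-executes the transaction
//   }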
1454
1455 /*......................................................................*/
1456
1457 StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
1458 StgTRecHeader *et;
1459 int result;
1460 ASSERT (trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
1461 TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
1462 ASSERT ((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));
1463
1464 lock_stm(trec);
1465
1466 et = trec -> enclosing_trec;
1467 result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), TRUE);
1468 if (result) {
1469 // We now know that all the updated locations hold their expected values.
1470
1471 if (config_use_read_phase) {
1472 TRACE("%p : doing read check", trec);
1473 result = check_read_only(trec);
1474 }
1475 if (result) {
1476 // We now know that all of the read-only locations held their expected values
1477 // at the end of the call to validate_and_acquire_ownership. This forms the
1478 // linearization point of the commit.
1479
1480 TRACE("%p : read-check succeeded", trec);
1481 FOR_EACH_ENTRY(trec, e, {
1482 // Merge each entry into the enclosing transaction record, release all
1483 // locks.
1484
1485 StgTVar *s;
1486 s = e -> tvar;
1487 if (entry_is_update(e)) {
1488 unlock_tvar(cap, trec, s, e -> expected_value, FALSE);
1489 }
1490 merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
1491 ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
1492 });
1493 } else {
1494 revert_ownership(cap, trec, FALSE);
1495 }
1496 }
1497
1498 unlock_stm(trec);
1499
1500 free_stg_trec_header(cap, trec);
1501
1502 TRACE("%p : stmCommitNestedTransaction()=%d", trec, result);
1503
1504 return result;
1505 }
1506
1507 /*......................................................................*/
1508
1509 StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
1510 int result;
1511 TRACE("%p : stmWait(%p)", trec, tso);
1512 ASSERT (trec != NO_TREC);
1513 ASSERT (trec -> enclosing_trec == NO_TREC);
1514 ASSERT ((trec -> state == TREC_ACTIVE) ||
1515 (trec -> state == TREC_CONDEMNED));
1516
1517 lock_stm(trec);
1518 result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
1519 if (result) {
1520 // The transaction is valid so far so we can actually start waiting.
1521 // (Otherwise the transaction was not valid and the thread will have to
1522 // retry it).
1523
1524 // Put ourselves to sleep. We retain locks on all the TVars involved
1525 // until we are sound asleep : (a) on the wait queues, (b) BlockedOnSTM
1526 // in the TSO, (c) TREC_WAITING in the Trec.
1527 build_watch_queue_entries_for_trec(cap, tso, trec);
1528 park_tso(tso);
1529 trec -> state = TREC_WAITING;
1530
1531 // We haven't released ownership of the transaction yet. The TSO
1532 // has been put on the wait queue for the TVars it is waiting for,
1533 // but we haven't yet tidied up the TSO's stack and made it safe
1534 // to wake up the TSO. Therefore, we must wait until the TSO is
1535 // safe to wake up before we release ownership - when all is well,
1536 // the runtime will call stmWaitUnlock() below, with the same
1537 // TRec.
1538
1539 } else {
1540 unlock_stm(trec);
1541 free_stg_trec_header(cap, trec);
1542 }
1543
1544 TRACE("%p : stmWait(%p)=%d", trec, tso, result);
1545 return result;
1546 }
1547
1548
1549 void
1550 stmWaitUnlock(Capability *cap, StgTRecHeader *trec) {
1551 revert_ownership(cap, trec, TRUE);
1552 unlock_stm(trec);
1553 }
1554
1555 /*......................................................................*/
1556
1557 StgBool stmReWait(Capability *cap, StgTSO *tso) {
1558 int result;
1559 StgTRecHeader *trec = tso->trec;
1560
1561 TRACE("%p : stmReWait", trec);
1562 ASSERT (trec != NO_TREC);
1563 ASSERT (trec -> enclosing_trec == NO_TREC);
1564 ASSERT ((trec -> state == TREC_WAITING) ||
1565 (trec -> state == TREC_CONDEMNED));
1566
1567 lock_stm(trec);
1568 result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
1569 TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
1570 if (result) {
1571 // The transaction remains valid -- do nothing because it is already on
1572 // the wait queues
1573 ASSERT (trec -> state == TREC_WAITING);
1574 park_tso(tso);
1575 revert_ownership(cap, trec, TRUE);
1576 } else {
1577 // The transaction has become invalid. We can now remove it from the wait
1578 // queues.
1579 if (trec -> state != TREC_CONDEMNED) {
1580 remove_watch_queue_entries_for_trec (cap, trec);
1581 }
1582 free_stg_trec_header(cap, trec);
1583 }
1584 unlock_stm(trec);
1585
1586 TRACE("%p : stmReWait()=%d", trec, result);
1587 return result;
1588 }
1589
1590 /*......................................................................*/
1591
1592 static StgClosure *read_current_value(StgTRecHeader *trec STG_UNUSED, StgTVar *tvar) {
1593 StgClosure *result;
1594 result = tvar -> current_value;
1595
1596 #if defined(STM_FG_LOCKS)
1597 while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info) {
1598 TRACE("%p : read_current_value(%p) saw %p", trec, tvar, result);
1599 result = tvar -> current_value;
1600 }
1601 #endif
1602
1603 TRACE("%p : read_current_value(%p)=%p", trec, tvar, result);
1604 return result;
1605 }
1606
1607 /*......................................................................*/
1608
1609 StgClosure *stmReadTVar(Capability *cap,
1610 StgTRecHeader *trec,
1611 StgTVar *tvar) {
1612 StgTRecHeader *entry_in = NULL;
1613 StgClosure *result = NULL;
1614 TRecEntry *entry = NULL;
1615 TRACE("%p : stmReadTVar(%p)", trec, tvar);
1616 ASSERT (trec != NO_TREC);
1617 ASSERT (trec -> state == TREC_ACTIVE ||
1618 trec -> state == TREC_CONDEMNED);
1619
1620 entry = get_entry_for(trec, tvar, &entry_in);
1621
1622 if (entry != NULL) {
1623 if (entry_in == trec) {
1624 // Entry found in our trec
1625 result = entry -> new_value;
1626 } else {
1627 // Entry found in another trec
1628 TRecEntry *new_entry = get_new_entry(cap, trec);
1629 new_entry -> tvar = tvar;
1630 new_entry -> expected_value = entry -> expected_value;
1631 new_entry -> new_value = entry -> new_value;
1632 result = new_entry -> new_value;
1633 }
1634 } else {
1635 // No entry found
1636 StgClosure *current_value = read_current_value(trec, tvar);
1637 TRecEntry *new_entry = get_new_entry(cap, trec);
1638 new_entry -> tvar = tvar;
1639 new_entry -> expected_value = current_value;
1640 new_entry -> new_value = current_value;
1641 result = current_value;
1642 }
1643
1644 TRACE("%p : stmReadTVar(%p)=%p", trec, tvar, result);
1645 return result;
1646 }
1647
1648 /*......................................................................*/
1649
1650 void stmWriteTVar(Capability *cap,
1651 StgTRecHeader *trec,
1652 StgTVar *tvar,
1653 StgClosure *new_value) {
1654
1655 StgTRecHeader *entry_in = NULL;
1656 TRecEntry *entry = NULL;
1657 TRACE("%p : stmWriteTVar(%p, %p)", trec, tvar, new_value);
1658 ASSERT (trec != NO_TREC);
1659 ASSERT (trec -> state == TREC_ACTIVE ||
1660 trec -> state == TREC_CONDEMNED);
1661
1662 entry = get_entry_for(trec, tvar, &entry_in);
1663
1664 if (entry != NULL) {
1665 if (entry_in == trec) {
1666 // Entry found in our trec
1667 entry -> new_value = new_value;
1668 } else {
1669 // Entry found in another trec
1670 TRecEntry *new_entry = get_new_entry(cap, trec);
1671 new_entry -> tvar = tvar;
1672 new_entry -> expected_value = entry -> expected_value;
1673 new_entry -> new_value = new_value;
1674 }
1675 } else {
1676 // No entry found
1677 StgClosure *current_value = read_current_value(trec, tvar);
1678 TRecEntry *new_entry = get_new_entry(cap, trec);
1679 new_entry -> tvar = tvar;
1680 new_entry -> expected_value = current_value;
1681 new_entry -> new_value = new_value;
1682 }
1683
1684 TRACE("%p : stmWriteTVar done", trec);
1685 }
1686
1687 /*......................................................................*/