1 /* -----------------------------------------------------------------------------
2 * (c) The GHC Team 1998-2005
3 *
4 * STM implementation.
5 *
6 * Overview
7 * --------
8 *
9 * See the PPoPP 2005 paper "Composable memory transactions". In summary,
10 * each transaction has a TRec (transaction record) holding entries for each of the
11 * TVars (transactional variables) that it has accessed. Each entry records
12 * (a) the TVar, (b) the expected value seen in the TVar, (c) the new value that
13 * the transaction wants to write to the TVar, (d) during commit, the identity of
14 * the TRec that wrote the expected value.
15 *
16 * Separate TRecs are used for each level in a nest of transactions. This allows
17 * a nested transaction to be aborted without condemning its enclosing transactions.
18 * This is needed in the implementation of catchRetry. Note that the "expected value"
19 * in a nested transaction's TRec is the value expected to be *held in memory* if
20 * the transaction commits -- not the "new value" stored in one of the enclosing
21 * transactions. This means that validation can be done without searching through
22 * a nest of TRecs.
23 *
24 * Concurrency control
25 * -------------------
26 *
27 * Three different concurrency control schemes can be built according to the settings
28 * in STM.h:
29 *
30 * STM_UNIPROC assumes that the caller serialises invocations on the STM interface.
31 * In the Haskell RTS this means it is suitable only for non-THREADED_RTS builds.
32 *
33 * STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired during
34 * an invocation on the STM interface. Note that this does not mean that
35 * transactions are simply serialized -- the lock is only held *within* the
36 * implementation of stmCommitTransaction, stmWait etc.
37 *
38 * STM_FG_LOCKS uses fine-grained locking -- locking is done on a per-TVar basis
39 * and, when committing a transaction, no locks are acquired for TVars that have
40 * been read but not updated.
41 *
42 * Concurrency control is implemented in the functions:
43 *
44 * lock_stm
45 * unlock_stm
46 * lock_tvar / cond_lock_tvar
47 * unlock_tvar
48 *
49 * The choice between STM_UNIPROC / STM_CG_LOCK / STM_FG_LOCKS affects the
50 * implementation of these functions.
51 *
52 * lock_stm & unlock_stm are straightforward: they acquire a simple spin-lock
53 * in STM_CG_LOCK builds, and otherwise they are no-ops.
54 *
55 * lock_tvar / cond_lock_tvar and unlock_tvar are more complex because they
56 * have other effects (present in STM_UNIPROC and STM_CG_LOCK builds) as well
57 * as the actual business of manipulating a lock (present only in STM_FG_LOCKS
58 * builds). This is because locking a TVar is implemented by writing the lock
59 * holder's TRec into the TVar's current_value field:
60 *
61 * lock_tvar - lock a specified TVar (STM_FG_LOCKS only), returning the value
62 * it contained.
63 *
64 * cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
65 * contains a specified value. Return TRUE if this succeeds,
66 * FALSE otherwise.
67 *
68 * unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
69 * storing a specified value in place of the lock entry.
70 *
71 * Using these operations, the typical pattern of a commit/validate/wait operation
72 * is to (a) lock the STM, (b) lock all the TVars being updated, (c) check that
73 * the TVars that were only read from still contain their expected values,
74 * (d) release the locks on the TVars, writing updates to them in the case of a
75 * commit, (e) unlock the STM.
76 *
77 * Queues of waiting threads hang off the first_watch_queue_entry
78 * field of each TVar. This may only be manipulated when holding that
79 * TVar's lock. In particular, when a thread is putting itself to
80 * sleep, it mustn't release the TVar's lock until it has added itself
81 * to the wait queue and marked its TSO as BlockedOnSTM -- this makes
82 * sure that other threads will know to wake it.
83 *
84 * ---------------------------------------------------------------------------*/
85
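/* Illustrative sketch (not part of the build): a top-level commit of TRec
 * "t" follows the (a)-(e) pattern above, roughly as below.  This is a
 * simplified view; see stmCommitTransaction for the real control flow,
 * including invariants and the version wrap-around guard.
 *
 *   lock_stm(t);                                                   // (a)
 *   ok = validate_and_acquire_ownership(t, !use_read_phase, TRUE); // (b),(c)
 *   if (ok && use_read_phase) ok = check_read_only(t);             // (c)
 *   if (ok) {
 *     FOR_EACH_ENTRY(t, e, {                                       // (d)
 *       unlock_tvar(t, e -> tvar, e -> new_value, TRUE);
 *     });
 *   } else {
 *     revert_ownership(t, FALSE);
 *   }
 *   unlock_stm(t);                                                 // (e)
 */
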
86 #include "PosixSource.h"
87 #include "Rts.h"
88
89 #include "RtsUtils.h"
90 #include "Schedule.h"
91 #include "STM.h"
92 #include "Trace.h"
93 #include "Threads.h"
94
95 #include <stdio.h>
96
97 #define TRUE 1
98 #define FALSE 0
99
100 // ACQ_ASSERT is used for assertions which are only required for
101 // THREADED_RTS builds with fine-grained locking.
102
103 #if defined(STM_FG_LOCKS)
104 #define ACQ_ASSERT(_X) ASSERT(_X)
105 #define NACQ_ASSERT(_X) /*Nothing*/
106 #else
107 #define ACQ_ASSERT(_X) /*Nothing*/
108 #define NACQ_ASSERT(_X) ASSERT(_X)
109 #endif
110
111 /*......................................................................*/
112
113 // If SHAKE is defined then validation will sometimes spuriously fail.  This
114 // helps to exercise unusual code paths when genuine contention is rare.
115
116 #define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)
117
118 #ifdef SHAKE
119 static const int do_shake = TRUE;
120 #else
121 static const int do_shake = FALSE;
122 #endif
123 static int shake_ctr = 0;
124 static int shake_lim = 1;
125
126 static int shake(void) {
127 if (do_shake) {
128 if (((shake_ctr++) % shake_lim) == 0) {
129 shake_ctr = 1;
130 shake_lim ++;
131 return TRUE;
132 }
133 return FALSE;
134 } else {
135 return FALSE;
136 }
137 }
138
139 /*......................................................................*/
140
141 // Helper macros for iterating over entries within a transaction
142 // record
143
144 #define FOR_EACH_ENTRY(_t,_x,CODE) do { \
145 StgTRecHeader *__t = (_t); \
146 StgTRecChunk *__c = __t -> current_chunk; \
147 StgWord __limit = __c -> next_entry_idx; \
148 TRACE("%p : FOR_EACH_ENTRY, current_chunk=%p limit=%ld", __t, __c, __limit); \
149 while (__c != END_STM_CHUNK_LIST) { \
150 StgWord __i; \
151 for (__i = 0; __i < __limit; __i ++) { \
152 TRecEntry *_x = &(__c -> entries[__i]); \
153 do { CODE } while (0); \
154 } \
155 __c = __c -> prev_chunk; \
156 __limit = TREC_CHUNK_NUM_ENTRIES; \
157 } \
158 exit_for_each: \
159 if (FALSE) goto exit_for_each; \
160 } while (0)
161
162 #define BREAK_FOR_EACH goto exit_for_each
163
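// Usage sketch (illustrative only; count_updates is a hypothetical helper,
// not part of this file).  CODE runs once per entry, and BREAK_FOR_EACH may
// be used inside it to leave the loop early:
//
//   static nat count_updates(StgTRecHeader *t) {
//     nat n = 0;
//     FOR_EACH_ENTRY(t, e, {
//       if (e -> expected_value != e -> new_value) n ++;
//     });
//     return n;
//   }
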
164 /*......................................................................*/
165
166 // if REUSE_MEMORY is defined then attempt to re-use descriptors, log chunks,
167 // and wait queue entries without GC
168
169 #define REUSE_MEMORY
170
171 /*......................................................................*/
172
173 #define IF_STM_UNIPROC(__X) do { } while (0)
174 #define IF_STM_CG_LOCK(__X) do { } while (0)
175 #define IF_STM_FG_LOCKS(__X) do { } while (0)
176
177 #if defined(STM_UNIPROC)
178 #undef IF_STM_UNIPROC
179 #define IF_STM_UNIPROC(__X) do { __X } while (0)
180 static const StgBool config_use_read_phase = FALSE;
181
182 static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
183 TRACE("%p : lock_stm()", trec);
184 }
185
186 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
187 TRACE("%p : unlock_stm()", trec);
188 }
189
190 static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
191 StgTVar *s STG_UNUSED) {
192 StgClosure *result;
193 TRACE("%p : lock_tvar(%p)", trec, s);
194 result = s -> current_value;
195 return result;
196 }
197
198 static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
199 StgTVar *s STG_UNUSED,
200 StgClosure *c,
201 StgBool force_update) {
202 TRACE("%p : unlock_tvar(%p)", trec, s);
203 if (force_update) {
204 s -> current_value = c;
205 }
206 }
207
208 static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
209 StgTVar *s STG_UNUSED,
210 StgClosure *expected) {
211 StgClosure *result;
212 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
213 result = s -> current_value;
214 TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
215 return (result == expected);
216 }
217
218 static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
219 // Nothing -- uniproc
220 return TRUE;
221 }
222
223 static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
224 // Nothing -- uniproc
225 }
226 #endif
227
228 #if defined(STM_CG_LOCK) /*........................................*/
229
230 #undef IF_STM_CG_LOCK
231 #define IF_STM_CG_LOCK(__X) do { __X } while (0)
232 static const StgBool config_use_read_phase = FALSE;
233 static volatile StgTRecHeader *smp_locked = NULL;
234
235 static void lock_stm(StgTRecHeader *trec) {
236 while (cas(&smp_locked, NULL, trec) != NULL) { }
237 TRACE("%p : lock_stm()", trec);
238 }
239
240 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
241 TRACE("%p : unlock_stm()", trec);
242 ASSERT (smp_locked == trec);
243 smp_locked = 0;
244 }
245
246 static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
247 StgTVar *s STG_UNUSED) {
248 StgClosure *result;
249 TRACE("%p : lock_tvar(%p)", trec, s);
250 ASSERT (smp_locked == trec);
251 result = s -> current_value;
252 return result;
253 }
254
255 static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
256 StgTVar *s STG_UNUSED,
257 StgClosure *c,
258 StgBool force_update) {
259 TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
260 ASSERT (smp_locked == trec);
261 if (force_update) {
262 s -> current_value = c;
263 }
264 }
265
266 static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
267 StgTVar *s STG_UNUSED,
268 StgClosure *expected) {
269 StgClosure *result;
270 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
271 ASSERT (smp_locked == trec);
272 result = s -> current_value;
273 TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
274 return (result == expected);
275 }
276
277 static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
278 // Nothing -- protected by STM lock
279 return TRUE;
280 }
281
282 static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
283 // Nothing -- protected by STM lock
284 }
285 #endif
286
287 #if defined(STM_FG_LOCKS) /*...................................*/
288
289 #undef IF_STM_FG_LOCKS
290 #define IF_STM_FG_LOCKS(__X) do { __X } while (0)
291 static const StgBool config_use_read_phase = TRUE;
292
293 static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
294 TRACE("%p : lock_stm()", trec);
295 }
296
297 static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
298 TRACE("%p : unlock_stm()", trec);
299 }
300
301 static StgClosure *lock_tvar(StgTRecHeader *trec,
302 StgTVar *s STG_UNUSED) {
303 StgClosure *result;
304 TRACE("%p : lock_tvar(%p)", trec, s);
305 do {
306 do {
307 result = s -> current_value;
308 } while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info);
309 } while (cas((void *)&(s -> current_value),
310 (StgWord)result, (StgWord)trec) != (StgWord)result);
311 return result;
312 }
313
314 static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
315 StgTVar *s,
316 StgClosure *c,
317 StgBool force_update STG_UNUSED) {
318 TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
319 ASSERT(s -> current_value == (StgClosure *)trec);
320 s -> current_value = c;
321 }
322
323 static StgBool cond_lock_tvar(StgTRecHeader *trec,
324 StgTVar *s,
325 StgClosure *expected) {
326 StgClosure *result;
327 StgWord w;
328 TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
329 w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
330 result = (StgClosure *)w;
331 TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
332 return (result == expected);
333 }
334
335 static StgBool lock_inv(StgAtomicInvariant *inv) {
336 return (cas(&(inv -> lock), 0, 1) == 0);
337 }
338
339 static void unlock_inv(StgAtomicInvariant *inv) {
340 ASSERT(inv -> lock == 1);
341 inv -> lock = 0;
342 }
343 #endif
344
345 /*......................................................................*/
346
347 static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
348 StgClosure *c = q -> closure;
349 StgInfoTable *info = get_itbl(c);
350 return (info -> type) == TSO;
351 }
352
353 static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
354 StgClosure *c = q -> closure;
355 StgInfoTable *info = get_itbl(c);
356 return (info -> type) == ATOMIC_INVARIANT;
357 }
358
359 /*......................................................................*/
360
361 // Helper functions for thread blocking and unblocking
362
363 static void park_tso(StgTSO *tso) {
364 ASSERT(tso -> why_blocked == NotBlocked);
365 tso -> why_blocked = BlockedOnSTM;
366 tso -> block_info.closure = (StgClosure *) END_TSO_QUEUE;
367 TRACE("park_tso on tso=%p", tso);
368 }
369
370 static void unpark_tso(Capability *cap, StgTSO *tso) {
371 // We will continue unparking threads while they remain on one of the wait
372 // queues: it's up to the thread itself to remove itself from the wait queues
373 // if it decides to do so when it is scheduled.
374
375 // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
376 // to avoid multiple CPUs unblocking the same TSO, and also to
377 // synchronise with throwTo().
378 lockTSO(tso);
379 if (tso -> why_blocked == BlockedOnSTM) {
380 TRACE("unpark_tso on tso=%p", tso);
381 unblockOne(cap,tso);
382 } else {
383 TRACE("spurious unpark_tso on tso=%p", tso);
384 }
385 unlockTSO(tso);
386 }
387
388 static void unpark_waiters_on(Capability *cap, StgTVar *s) {
389 StgTVarWatchQueue *q;
390 StgTVarWatchQueue *trail;
391 TRACE("unpark_waiters_on tvar=%p", s);
392 // unblock TSOs in reverse order, to be a bit fairer (#2319)
393 for (q = s -> first_watch_queue_entry, trail = q;
394 q != END_STM_WATCH_QUEUE;
395 q = q -> next_queue_entry) {
396 trail = q;
397 }
398 q = trail;
399 for (;
400 q != END_STM_WATCH_QUEUE;
401 q = q -> prev_queue_entry) {
402 if (watcher_is_tso(q)) {
403 unpark_tso(cap, (StgTSO *)(q -> closure));
404 }
405 }
406 }
407
408 /*......................................................................*/
409
410 // Helper functions for allocation and initialization
411
412 static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
413 StgAtomicInvariant *invariant) {
414 StgInvariantCheckQueue *result;
415 result = (StgInvariantCheckQueue *)allocateLocal(cap, sizeofW(StgInvariantCheckQueue));
416 SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
417 result -> invariant = invariant;
418 result -> my_execution = NO_TREC;
419 return result;
420 }
421
422 static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
423 StgClosure *closure) {
424 StgTVarWatchQueue *result;
425 result = (StgTVarWatchQueue *)allocateLocal(cap, sizeofW(StgTVarWatchQueue));
426 SET_HDR (result, &stg_TVAR_WATCH_QUEUE_info, CCS_SYSTEM);
427 result -> closure = closure;
428 return result;
429 }
430
431 static StgTRecChunk *new_stg_trec_chunk(Capability *cap) {
432 StgTRecChunk *result;
433 result = (StgTRecChunk *)allocateLocal(cap, sizeofW(StgTRecChunk));
434 SET_HDR (result, &stg_TREC_CHUNK_info, CCS_SYSTEM);
435 result -> prev_chunk = END_STM_CHUNK_LIST;
436 result -> next_entry_idx = 0;
437 return result;
438 }
439
440 static StgTRecHeader *new_stg_trec_header(Capability *cap,
441 StgTRecHeader *enclosing_trec) {
442 StgTRecHeader *result;
443 result = (StgTRecHeader *) allocateLocal(cap, sizeofW(StgTRecHeader));
444 SET_HDR (result, &stg_TREC_HEADER_info, CCS_SYSTEM);
445
446 result -> enclosing_trec = enclosing_trec;
447 result -> current_chunk = new_stg_trec_chunk(cap);
448 result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
449
450 if (enclosing_trec == NO_TREC) {
451 result -> state = TREC_ACTIVE;
452 } else {
453 ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
454 enclosing_trec -> state == TREC_CONDEMNED);
455 result -> state = enclosing_trec -> state;
456 }
457
458 return result;
459 }
460
461 /*......................................................................*/
462
463 // Allocation / deallocation functions that retain per-capability lists
464 // of closures that can be re-used
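//
// On a free list, a spare link field of the closure itself chains the list:
// TRec headers are chained through their enclosing_trec fields, chunks
// through their prev_chunk fields, and watch queue entries through their
// next_queue_entry fields (see the free_stg_* functions below).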
465
466 static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
467 StgAtomicInvariant *invariant) {
468 StgInvariantCheckQueue *result = NULL;
469 if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
470 result = new_stg_invariant_check_queue(cap, invariant);
471 } else {
472 result = cap -> free_invariant_check_queues;
473 result -> invariant = invariant;
474 result -> my_execution = NO_TREC;
475 cap -> free_invariant_check_queues = result -> next_queue_entry;
476 }
477 return result;
478 }
479
480 static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
481 StgClosure *closure) {
482 StgTVarWatchQueue *result = NULL;
483 if (cap -> free_tvar_watch_queues == END_STM_WATCH_QUEUE) {
484 result = new_stg_tvar_watch_queue(cap, closure);
485 } else {
486 result = cap -> free_tvar_watch_queues;
487 result -> closure = closure;
488 cap -> free_tvar_watch_queues = result -> next_queue_entry;
489 }
490 return result;
491 }
492
493 static void free_stg_tvar_watch_queue(Capability *cap,
494 StgTVarWatchQueue *wq) {
495 #if defined(REUSE_MEMORY)
496 wq -> next_queue_entry = cap -> free_tvar_watch_queues;
497 cap -> free_tvar_watch_queues = wq;
498 #endif
499 }
500
501 static StgTRecChunk *alloc_stg_trec_chunk(Capability *cap) {
502 StgTRecChunk *result = NULL;
503 if (cap -> free_trec_chunks == END_STM_CHUNK_LIST) {
504 result = new_stg_trec_chunk(cap);
505 } else {
506 result = cap -> free_trec_chunks;
507 cap -> free_trec_chunks = result -> prev_chunk;
508 result -> prev_chunk = END_STM_CHUNK_LIST;
509 result -> next_entry_idx = 0;
510 }
511 return result;
512 }
513
514 static void free_stg_trec_chunk(Capability *cap,
515 StgTRecChunk *c) {
516 #if defined(REUSE_MEMORY)
517 c -> prev_chunk = cap -> free_trec_chunks;
518 cap -> free_trec_chunks = c;
519 #endif
520 }
521
522 static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
523 StgTRecHeader *enclosing_trec) {
524 StgTRecHeader *result = NULL;
525 if (cap -> free_trec_headers == NO_TREC) {
526 result = new_stg_trec_header(cap, enclosing_trec);
527 } else {
528 result = cap -> free_trec_headers;
529 cap -> free_trec_headers = result -> enclosing_trec;
530 result -> enclosing_trec = enclosing_trec;
531 result -> current_chunk -> next_entry_idx = 0;
532 result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
533 if (enclosing_trec == NO_TREC) {
534 result -> state = TREC_ACTIVE;
535 } else {
536 ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
537 enclosing_trec -> state == TREC_CONDEMNED);
538 result -> state = enclosing_trec -> state;
539 }
540 }
541 return result;
542 }
543
544 static void free_stg_trec_header(Capability *cap,
545 StgTRecHeader *trec) {
546 #if defined(REUSE_MEMORY)
547 StgTRecChunk *chunk = trec -> current_chunk -> prev_chunk;
548 while (chunk != END_STM_CHUNK_LIST) {
549 StgTRecChunk *prev_chunk = chunk -> prev_chunk;
550 free_stg_trec_chunk(cap, chunk);
551 chunk = prev_chunk;
552 }
553 trec -> current_chunk -> prev_chunk = END_STM_CHUNK_LIST;
554 trec -> enclosing_trec = cap -> free_trec_headers;
555 cap -> free_trec_headers = trec;
556 #endif
557 }
558
559 /*......................................................................*/
560
561 // Helper functions for managing waiting lists
562
563 static void build_watch_queue_entries_for_trec(Capability *cap,
564 StgTSO *tso,
565 StgTRecHeader *trec) {
566 ASSERT(trec != NO_TREC);
567 ASSERT(trec -> enclosing_trec == NO_TREC);
568 ASSERT(trec -> state == TREC_ACTIVE);
569
570 TRACE("%p : build_watch_queue_entries_for_trec()", trec);
571
572 FOR_EACH_ENTRY(trec, e, {
573 StgTVar *s;
574 StgTVarWatchQueue *q;
575 StgTVarWatchQueue *fq;
576 s = e -> tvar;
577 TRACE("%p : adding tso=%p to watch queue for tvar=%p", trec, tso, s);
578 ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
579 NACQ_ASSERT(s -> current_value == e -> expected_value);
580 fq = s -> first_watch_queue_entry;
581 q = alloc_stg_tvar_watch_queue(cap, (StgClosure*) tso);
582 q -> next_queue_entry = fq;
583 q -> prev_queue_entry = END_STM_WATCH_QUEUE;
584 if (fq != END_STM_WATCH_QUEUE) {
585 fq -> prev_queue_entry = q;
586 }
587 s -> first_watch_queue_entry = q;
588 e -> new_value = (StgClosure *) q;
589 });
590 }
591
592 static void remove_watch_queue_entries_for_trec(Capability *cap,
593 StgTRecHeader *trec) {
594 ASSERT(trec != NO_TREC);
595 ASSERT(trec -> enclosing_trec == NO_TREC);
596 ASSERT(trec -> state == TREC_WAITING ||
597 trec -> state == TREC_CONDEMNED);
598
599 TRACE("%p : remove_watch_queue_entries_for_trec()", trec);
600
601 FOR_EACH_ENTRY(trec, e, {
602 StgTVar *s;
603 StgTVarWatchQueue *pq;
604 StgTVarWatchQueue *nq;
605 StgTVarWatchQueue *q;
606 StgClosure *saw;
607 s = e -> tvar;
608 saw = lock_tvar(trec, s);
609 q = (StgTVarWatchQueue *) (e -> new_value);
610 TRACE("%p : removing tso=%p from watch queue for tvar=%p",
611 trec,
612 q -> closure,
613 s);
614 ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
615 nq = q -> next_queue_entry;
616 pq = q -> prev_queue_entry;
617 if (nq != END_STM_WATCH_QUEUE) {
618 nq -> prev_queue_entry = pq;
619 }
620 if (pq != END_STM_WATCH_QUEUE) {
621 pq -> next_queue_entry = nq;
622 } else {
623 ASSERT (s -> first_watch_queue_entry == q);
624 s -> first_watch_queue_entry = nq;
625 }
626 free_stg_tvar_watch_queue(cap, q);
627 unlock_tvar(trec, s, saw, FALSE);
628 });
629 }
630
631 /*......................................................................*/
632
633 static TRecEntry *get_new_entry(Capability *cap,
634 StgTRecHeader *t) {
635 TRecEntry *result;
636 StgTRecChunk *c;
637 int i;
638
639 c = t -> current_chunk;
640 i = c -> next_entry_idx;
641 ASSERT(c != END_STM_CHUNK_LIST);
642
643 if (i < TREC_CHUNK_NUM_ENTRIES) {
644 // Continue to use current chunk
645 result = &(c -> entries[i]);
646 c -> next_entry_idx ++;
647 } else {
648 // Current chunk is full: allocate a fresh one
649 StgTRecChunk *nc;
650 nc = alloc_stg_trec_chunk(cap);
651 nc -> prev_chunk = c;
652 nc -> next_entry_idx = 1;
653 t -> current_chunk = nc;
654 result = &(nc -> entries[0]);
655 }
656
657 return result;
658 }
659
660 /*......................................................................*/
661
662 static void merge_update_into(Capability *cap,
663 StgTRecHeader *t,
664 StgTVar *tvar,
665 StgClosure *expected_value,
666 StgClosure *new_value) {
667 int found;
668
669 // Look for an entry in this trec
670 found = FALSE;
671 FOR_EACH_ENTRY(t, e, {
672 StgTVar *s;
673 s = e -> tvar;
674 if (s == tvar) {
675 found = TRUE;
676 if (e -> expected_value != expected_value) {
677 // Must abort if the two entries start from different values
678 TRACE("%p : update entries inconsistent at %p (%p vs %p)",
679 t, tvar, e -> expected_value, expected_value);
680 t -> state = TREC_CONDEMNED;
681 }
682 e -> new_value = new_value;
683 BREAK_FOR_EACH;
684 }
685 });
686
687 if (!found) {
688 // No entry so far in this trec
689 TRecEntry *ne;
690 ne = get_new_entry(cap, t);
691 ne -> tvar = tvar;
692 ne -> expected_value = expected_value;
693 ne -> new_value = new_value;
694 }
695 }
696
697 /*......................................................................*/
698
699 static void merge_read_into(Capability *cap,
700 StgTRecHeader *t,
701 StgTVar *tvar,
702 StgClosure *expected_value) {
703 int found;
704
705 // Look for an entry in this trec
706 found = FALSE;
707 FOR_EACH_ENTRY(t, e, {
708 StgTVar *s;
709 s = e -> tvar;
710 if (s == tvar) {
711 found = TRUE;
712 if (e -> expected_value != expected_value) {
713 // Must abort if the two entries start from different values
714 TRACE("%p : read entries inconsistent at %p (%p vs %p)",
715 t, tvar, e -> expected_value, expected_value);
716 t -> state = TREC_CONDEMNED;
717 }
718 BREAK_FOR_EACH;
719 }
720 });
721
722 if (!found) {
723 // No entry so far in this trec
724 TRecEntry *ne;
725 ne = get_new_entry(cap, t);
726 ne -> tvar = tvar;
727 ne -> expected_value = expected_value;
728 ne -> new_value = expected_value;
729 }
730 }
731
732 /*......................................................................*/
733
734 static StgBool entry_is_update(TRecEntry *e) {
735 StgBool result;
736 result = (e -> expected_value != e -> new_value);
737 return result;
738 }
739
740 #if defined(STM_FG_LOCKS)
741 static StgBool entry_is_read_only(TRecEntry *e) {
742 StgBool result;
743 result = (e -> expected_value == e -> new_value);
744 return result;
745 }
746
747 static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
748 StgClosure *c;
749 StgBool result;
750 c = s -> current_value;
751 result = (c == (StgClosure *) h);
752 return result;
753 }
754 #endif
755
756 // revert_ownership : release a lock on a TVar, storing back
757 // the value that it held when the lock was acquired. "revert_all"
758 // is set in stmWait and stmReWait when we acquired locks on all of
759 // the TVars involved. "revert_all" is not set in commit operations
760 // where we don't lock TVars that have been read from but not updated.
761
762 static void revert_ownership(StgTRecHeader *trec STG_UNUSED,
763 StgBool revert_all STG_UNUSED) {
764 #if defined(STM_FG_LOCKS)
765 FOR_EACH_ENTRY(trec, e, {
766 if (revert_all || entry_is_update(e)) {
767 StgTVar *s;
768 s = e -> tvar;
769 if (tvar_is_locked(s, trec)) {
770 unlock_tvar(trec, s, e -> expected_value, TRUE);
771 }
772 }
773 });
774 #endif
775 }
776
777 /*......................................................................*/
778
779 // validate_and_acquire_ownership : this performs the twin functions
780 // of checking that the TVars referred to by entries in trec hold the
781 // expected values and:
782 //
783 // - locking the TVar (on updated TVars during commit, or all TVars
784 // during wait)
785 //
786 // - recording the identity of the TRec that wrote the value seen in the
787 // TVar (on non-updated TVars during commit). These values are
788 // stashed in the TRec entries and are then checked in check_read_only
789 // to ensure that an atomic snapshot of all of these locations has been
790 // seen.
791
792 static StgBool validate_and_acquire_ownership (StgTRecHeader *trec,
793 int acquire_all,
794 int retain_ownership) {
795 StgBool result;
796
797 if (shake()) {
798 TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
799 return FALSE;
800 }
801
802 ASSERT ((trec -> state == TREC_ACTIVE) ||
803 (trec -> state == TREC_WAITING) ||
804 (trec -> state == TREC_CONDEMNED));
805 result = !((trec -> state) == TREC_CONDEMNED);
806 if (result) {
807 FOR_EACH_ENTRY(trec, e, {
808 StgTVar *s;
809 s = e -> tvar;
810 if (acquire_all || entry_is_update(e)) {
811 TRACE("%p : trying to acquire %p", trec, s);
812 if (!cond_lock_tvar(trec, s, e -> expected_value)) {
813 TRACE("%p : failed to acquire %p", trec, s);
814 result = FALSE;
815 BREAK_FOR_EACH;
816 }
817 } else {
818 ASSERT(config_use_read_phase);
819 IF_STM_FG_LOCKS({
820 TRACE("%p : will need to check %p", trec, s);
821 if (s -> current_value != e -> expected_value) {
822 TRACE("%p : doesn't match", trec);
823 result = FALSE;
824 BREAK_FOR_EACH;
825 }
826 e -> num_updates = s -> num_updates;
827 if (s -> current_value != e -> expected_value) {
828 TRACE("%p : doesn't match (race)", trec);
829 result = FALSE;
830 BREAK_FOR_EACH;
831 } else {
832 TRACE("%p : need to check version %ld", trec, e -> num_updates);
833 }
834 });
835 }
836 });
837 }
838
839 if ((!result) || (!retain_ownership)) {
840 revert_ownership(trec, acquire_all);
841 }
842
843 return result;
844 }
845
846 // check_read_only : check that we've seen an atomic snapshot of the
847 // non-updated TVars accessed by a trec. This checks that the last TRec to
848 // commit an update to the TVar is unchanged since the value was stashed in
849 // validate_and_acquire_ownership.  If no update is seen to any TVar then
850 // all of them contained their expected values at the start of the call to
851 // check_read_only.
852 //
853 // The paper "Concurrent programming without locks" (under submission), or
854 // Keir Fraser's PhD dissertation "Practical lock-freedom" discusses
855 // this kind of algorithm.
856
857 static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
858 StgBool result = TRUE;
859
860 ASSERT (config_use_read_phase);
861 IF_STM_FG_LOCKS({
862 FOR_EACH_ENTRY(trec, e, {
863 StgTVar *s;
864 s = e -> tvar;
865 if (entry_is_read_only(e)) {
866 TRACE("%p : check_read_only for TVar %p, saw %ld", trec, s, e -> num_updates);
867 if (s -> num_updates != e -> num_updates) {
868 // ||s -> current_value != e -> expected_value) {
869 TRACE("%p : mismatch", trec);
870 result = FALSE;
871 BREAK_FOR_EACH;
872 }
873 }
874 });
875 });
876
877 return result;
878 }
879
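// Worked example (illustrative): suppose a trec read TVar t without
// updating it.  validate_and_acquire_ownership checked t's value and
// stashed t -> num_updates (say 7) in the entry.  If another transaction
// commits an update to t before check_read_only runs, t -> num_updates
// becomes 8 and the check above fails, so the commit is abandoned and the
// transaction re-executes.  If it is still 7 then, modulo the 2^32
// wrap-around guard below, no update intervened and the read-only
// locations formed an atomic snapshot.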
880
881 /************************************************************************/
882
883 void stmPreGCHook() {
884 nat i;
885
886 lock_stm(NO_TREC);
887 TRACE("stmPreGCHook");
888 for (i = 0; i < n_capabilities; i ++) {
889 Capability *cap = &capabilities[i];
890 cap -> free_tvar_watch_queues = END_STM_WATCH_QUEUE;
891 cap -> free_trec_chunks = END_STM_CHUNK_LIST;
892 cap -> free_trec_headers = NO_TREC;
893 }
894 unlock_stm(NO_TREC);
895 }
896
897 /************************************************************************/
898
899 // check_read_only relies on version numbers held in TVars' "num_updates"
900 // fields not wrapping around while a transaction is being committed.  The
901 // version number is incremented each time an update is committed to the TVar.
902 // This is unlikely to wrap around when 32-bit integers are used for the counts,
903 // but to ensure correctness we maintain a shared upper bound on the
904 // number of commit operations that may have occurred, and check that fewer
905 // than 2^32 commits can have overlapped any given commit.
906
907 #define TOKEN_BATCH_SIZE 1024
908
909 static volatile StgInt64 max_commits = 0;
910
911 #if defined(THREADED_RTS)
912 static volatile StgBool token_locked = FALSE;
913
914 static void getTokenBatch(Capability *cap) {
915 while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
916 max_commits += TOKEN_BATCH_SIZE;
917 TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
918 cap -> transaction_tokens = TOKEN_BATCH_SIZE;
919 token_locked = FALSE;
920 }
921
922 static void getToken(Capability *cap) {
923 if (cap -> transaction_tokens == 0) {
924 getTokenBatch(cap);
925 }
926 cap -> transaction_tokens --;
927 }
928 #else
929 static void getToken(Capability *cap STG_UNUSED) {
930 // Nothing
931 }
932 #endif
933
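// Worked example (illustrative numbers): with TOKEN_BATCH_SIZE = 1024 and,
// say, n_capabilities = 8, the capabilities hold at most 8 * 1024 unspent
// tokens between them.  A commit that saw max_commits grow by "delta"
// therefore overlapped at most delta + 8192 other commits, and
// stmCommitTransaction forces a re-run only if that bound reaches 2^32 --
// see the max_concurrent_commits check there.
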
934 /*......................................................................*/
935
936 StgTRecHeader *stmStartTransaction(Capability *cap,
937 StgTRecHeader *outer) {
938 StgTRecHeader *t;
939 TRACE("%p : stmStartTransaction with %d tokens",
940 outer,
941 cap -> transaction_tokens);
942
943 getToken(cap);
944
945 t = alloc_stg_trec_header(cap, outer);
946 TRACE("%p : stmStartTransaction()=%p", outer, t);
947 return t;
948 }
949
950 /*......................................................................*/
951
952 void stmAbortTransaction(Capability *cap,
953 StgTRecHeader *trec) {
954 StgTRecHeader *et;
955 TRACE("%p : stmAbortTransaction", trec);
956 ASSERT (trec != NO_TREC);
957 ASSERT ((trec -> state == TREC_ACTIVE) ||
958 (trec -> state == TREC_WAITING) ||
959 (trec -> state == TREC_CONDEMNED));
960
961 lock_stm(trec);
962
963 et = trec -> enclosing_trec;
964 if (et == NO_TREC) {
965 // We're a top-level transaction: remove any watch queue entries that
966 // we may have.
967 TRACE("%p : aborting top-level transaction", trec);
968
969 if (trec -> state == TREC_WAITING) {
970 ASSERT (trec -> enclosing_trec == NO_TREC);
971 TRACE("%p : stmAbortTransaction aborting waiting transaction", trec);
972 remove_watch_queue_entries_for_trec(cap, trec);
973 }
974
975 } else {
976 // We're a nested transaction: merge our read set into our parent's
977 TRACE("%p : retaining read-set into parent %p", trec, et);
978
979 FOR_EACH_ENTRY(trec, e, {
980 StgTVar *s = e -> tvar;
981 merge_read_into(cap, et, s, e -> expected_value);
982 });
983 }
984
985 trec -> state = TREC_ABORTED;
986 unlock_stm(trec);
987
988 TRACE("%p : stmAbortTransaction done", trec);
989 }
990
991 /*......................................................................*/
992
993 void stmFreeAbortedTRec(Capability *cap,
994 StgTRecHeader *trec) {
995 TRACE("%p : stmFreeAbortedTRec", trec);
996 ASSERT (trec != NO_TREC);
997 ASSERT ((trec -> state == TREC_CONDEMNED) ||
998 (trec -> state == TREC_ABORTED));
999
1000 free_stg_trec_header(cap, trec);
1001
1002 TRACE("%p : stmFreeAbortedTRec done", trec);
1003 }
1004
1005 /*......................................................................*/
1006
1007 void stmCondemnTransaction(Capability *cap,
1008 StgTRecHeader *trec) {
1009 TRACE("%p : stmCondemnTransaction", trec);
1010 ASSERT (trec != NO_TREC);
1011 ASSERT ((trec -> state == TREC_ACTIVE) ||
1012 (trec -> state == TREC_WAITING) ||
1013 (trec -> state == TREC_CONDEMNED));
1014
1015 lock_stm(trec);
1016 if (trec -> state == TREC_WAITING) {
1017 ASSERT (trec -> enclosing_trec == NO_TREC);
1018 TRACE("%p : stmCondemnTransaction condemning waiting transaction", trec);
1019 remove_watch_queue_entries_for_trec(cap, trec);
1020 }
1021 trec -> state = TREC_CONDEMNED;
1022 unlock_stm(trec);
1023
1024 TRACE("%p : stmCondemnTransaction done", trec);
1025 }
1026
1027 /*......................................................................*/
1028
1029 StgBool stmValidateNestOfTransactions(StgTRecHeader *trec) {
1030 StgTRecHeader *t;
1031 StgBool result;
1032
1033 TRACE("%p : stmValidateNestOfTransactions", trec);
1034 ASSERT(trec != NO_TREC);
1035 ASSERT((trec -> state == TREC_ACTIVE) ||
1036 (trec -> state == TREC_WAITING) ||
1037 (trec -> state == TREC_CONDEMNED));
1038
1039 lock_stm(trec);
1040
1041 t = trec;
1042 result = TRUE;
1043 while (t != NO_TREC) {
1044 result &= validate_and_acquire_ownership(t, TRUE, FALSE);
1045 t = t -> enclosing_trec;
1046 }
1047
1048 if (!result && trec -> state != TREC_WAITING) {
1049 trec -> state = TREC_CONDEMNED;
1050 }
1051
1052 unlock_stm(trec);
1053
1054 TRACE("%p : stmValidateNestOfTransactions()=%d", trec, result);
1055 return result;
1056 }
1057
1058 /*......................................................................*/
1059
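// get_entry_for : search a TRec and then its enclosing TRecs, innermost
// first, for an entry referring to "tvar".  Returns the entry, or NULL if
// there is none; if "in" is non-NULL then *in is set to the TRec in which
// the entry was found.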
1060 static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeader **in) {
1061 TRecEntry *result = NULL;
1062
1063 TRACE("%p : get_entry_for TVar %p", trec, tvar);
1064 ASSERT(trec != NO_TREC);
1065
1066 do {
1067 FOR_EACH_ENTRY(trec, e, {
1068 if (e -> tvar == tvar) {
1069 result = e;
1070 if (in != NULL) {
1071 *in = trec;
1072 }
1073 BREAK_FOR_EACH;
1074 }
1075 });
1076 trec = trec -> enclosing_trec;
1077 } while (result == NULL && trec != NO_TREC);
1078
1079 return result;
1080 }
1081
1082 /*......................................................................*/
1083
1084 /*
1085 * Add/remove links between an invariant and its TVars.  The caller must have
1086 * locked the TVars involved and the invariant.
1087 */
1088
1089 static void disconnect_invariant(Capability *cap,
1090 StgAtomicInvariant *inv) {
1091 StgTRecHeader *last_execution = inv -> last_execution;
1092
1093 TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);
1094
1095 FOR_EACH_ENTRY(last_execution, e, {
1096 StgTVar *s = e -> tvar;
1097 StgTVarWatchQueue *q = s -> first_watch_queue_entry;
1098 StgBool found = FALSE;
1099 TRACE(" looking for trec on tvar=%p", s);
1100 for (q = s -> first_watch_queue_entry;
1101 q != END_STM_WATCH_QUEUE;
1102 q = q -> next_queue_entry) {
1103 if (q -> closure == (StgClosure*)inv) {
1104 StgTVarWatchQueue *pq;
1105 StgTVarWatchQueue *nq;
1106 nq = q -> next_queue_entry;
1107 pq = q -> prev_queue_entry;
1108 if (nq != END_STM_WATCH_QUEUE) {
1109 nq -> prev_queue_entry = pq;
1110 }
1111 if (pq != END_STM_WATCH_QUEUE) {
1112 pq -> next_queue_entry = nq;
1113 } else {
1114 ASSERT (s -> first_watch_queue_entry == q);
1115 s -> first_watch_queue_entry = nq;
1116 }
1117 TRACE(" found it in watch queue entry %p", q);
1118 free_stg_tvar_watch_queue(cap, q);
1119 found = TRUE;
1120 break;
1121 }
1122 }
1123 ASSERT(found);
1124 });
1125 inv -> last_execution = NO_TREC;
1126 }
1127
1128 static void connect_invariant_to_trec(Capability *cap,
1129 StgAtomicInvariant *inv,
1130 StgTRecHeader *my_execution) {
1131 TRACE("connecting execution inv=%p trec=%p", inv, my_execution);
1132
1133 ASSERT(inv -> last_execution == NO_TREC);
1134
1135 FOR_EACH_ENTRY(my_execution, e, {
1136 StgTVar *s = e -> tvar;
1137 StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
1138 StgTVarWatchQueue *fq = s -> first_watch_queue_entry;
1139
1140 // We leave "last_execution" holding the values that will be
1141 // in the heap after the transaction we're in the process
1142 // of committing has finished.
1143 TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
1144 if (entry != NULL) {
1145 e -> expected_value = entry -> new_value;
1146 e -> new_value = entry -> new_value;
1147 }
1148
1149 TRACE(" linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
1150 q -> next_queue_entry = fq;
1151 q -> prev_queue_entry = END_STM_WATCH_QUEUE;
1152 if (fq != END_STM_WATCH_QUEUE) {
1153 fq -> prev_queue_entry = q;
1154 }
1155 s -> first_watch_queue_entry = q;
1156 });
1157
1158 inv -> last_execution = my_execution;
1159 }
1160
1161 /*
1162 * Add a new invariant to the trec's list of invariants to check on commit
1163 */
1164 void stmAddInvariantToCheck(Capability *cap,
1165 StgTRecHeader *trec,
1166 StgClosure *code) {
1167 StgAtomicInvariant *invariant;
1168 StgInvariantCheckQueue *q;
1169 TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
1170 ASSERT(trec != NO_TREC);
1171 ASSERT(trec -> state == TREC_ACTIVE ||
1172 trec -> state == TREC_CONDEMNED);
1173
1174
1175 // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
1176 // to signal that this is a new invariant in the current atomic block
1177
1178 invariant = (StgAtomicInvariant *) allocateLocal(cap, sizeofW(StgAtomicInvariant));
1179 TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
1180 SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
1181 invariant -> code = code;
1182 invariant -> last_execution = NO_TREC;
1183
1184 // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec
1185
1186 q = alloc_stg_invariant_check_queue(cap, invariant);
1187 TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
1188 q -> invariant = invariant;
1189 q -> my_execution = NO_TREC;
1190 q -> next_queue_entry = trec -> invariants_to_check;
1191 trec -> invariants_to_check = q;
1192
1193 TRACE("%p : stmAddInvariantToCheck done", trec);
1194 }
1195
1196 /*
1197 * Fill in the trec's list of invariants that might be violated by the
1198 * current transaction.
1199 */
1200
1201 StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
1202 StgTRecChunk *c;
1203 TRACE("%p : stmGetInvariantsToCheck, head was %p",
1204 trec,
1205 trec -> invariants_to_check);
1206
1207 ASSERT(trec != NO_TREC);
1208 ASSERT ((trec -> state == TREC_ACTIVE) ||
1209 (trec -> state == TREC_WAITING) ||
1210 (trec -> state == TREC_CONDEMNED));
1211 ASSERT(trec -> enclosing_trec == NO_TREC);
1212
1213 lock_stm(trec);
1214 c = trec -> current_chunk;
1215 while (c != END_STM_CHUNK_LIST) {
1216 unsigned int i;
1217 for (i = 0; i < c -> next_entry_idx; i ++) {
1218 TRecEntry *e = &(c -> entries[i]);
1219 if (entry_is_update(e)) {
1220 StgTVar *s = e -> tvar;
1221 StgClosure *old = lock_tvar(trec, s);
1222
1223 // Pick up any invariants on the TVar being updated
1224 // by entry "e"
1225
1226 StgTVarWatchQueue *q;
1227 TRACE("%p : checking for invariants on %p", trec, s);
1228 for (q = s -> first_watch_queue_entry;
1229 q != END_STM_WATCH_QUEUE;
1230 q = q -> next_queue_entry) {
1231 if (watcher_is_invariant(q)) {
1232 StgBool found = FALSE;
1233 StgInvariantCheckQueue *q2;
1234 TRACE("%p : Touching invariant %p", trec, q -> closure);
1235 for (q2 = trec -> invariants_to_check;
1236 q2 != END_INVARIANT_CHECK_QUEUE;
1237 q2 = q2 -> next_queue_entry) {
1238 if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
1239 TRACE("%p : Already found %p", trec, q -> closure);
1240 found = TRUE;
1241 break;
1242 }
1243 }
1244
1245 if (!found) {
1246 StgInvariantCheckQueue *q3;
1247 TRACE("%p : Not already found %p", trec, q -> closure);
1248 q3 = alloc_stg_invariant_check_queue(cap,
1249 (StgAtomicInvariant*) q -> closure);
1250 q3 -> next_queue_entry = trec -> invariants_to_check;
1251 trec -> invariants_to_check = q3;
1252 }
1253 }
1254 }
1255
1256 unlock_tvar(trec, s, old, FALSE);
1257 }
1258 }
1259 c = c -> prev_chunk;
1260 }
1261
1262 unlock_stm(trec);
1263
1264 TRACE("%p : stmGetInvariantsToCheck, head now %p",
1265 trec,
1266 trec -> invariants_to_check);
1267
1268 return (trec -> invariants_to_check);
1269 }
1270
1271 /*......................................................................*/
1272
1273 StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
1274 int result;
1275 StgInt64 max_commits_at_start = max_commits;
1276 StgBool touched_invariants;
1277 StgBool use_read_phase;
1278
1279 TRACE("%p : stmCommitTransaction()", trec);
1280 ASSERT (trec != NO_TREC);
1281
1282 lock_stm(trec);
1283
1284 ASSERT (trec -> enclosing_trec == NO_TREC);
1285 ASSERT ((trec -> state == TREC_ACTIVE) ||
1286 (trec -> state == TREC_CONDEMNED));
1287
1288 // touched_invariants is true if we've written to a TVar with invariants
1289 // attached to it, or if we're trying to add a new invariant to the system.
1290
1291 touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);
1292
1293 // If we have touched invariants then (i) lock the invariant, and (ii) add
1294 // the invariant's read set to our own. Step (i) is needed to serialize
1295 // concurrent transactions that attempt to make conflicting updates
1296 // to the invariant's trec (suppose it read from t1 and t2, and that one
1297 // concurrent transaction writes only to t1, and a second writes only to
1298 // t2). Step (ii) is needed so that both transactions will lock t1 and t2
1299 // to gain access to their wait lists (and hence be able to unhook the
1300 // invariant from both tvars).
1301
1302 if (touched_invariants) {
1303 StgInvariantCheckQueue *q = trec -> invariants_to_check;
1304 TRACE("%p : locking invariants", trec);
1305 while (q != END_INVARIANT_CHECK_QUEUE) {
1306 StgTRecHeader *inv_old_trec;
1307 StgAtomicInvariant *inv;
1308 TRACE("%p : locking invariant %p", trec, q -> invariant);
1309 inv = q -> invariant;
1310 if (!lock_inv(inv)) {
1311 TRACE("%p : failed to lock %p", trec, inv);
1312 trec -> state = TREC_CONDEMNED;
1313 break;
1314 }
1315
1316 inv_old_trec = inv -> last_execution;
1317 if (inv_old_trec != NO_TREC) {
1318 StgTRecChunk *c = inv_old_trec -> current_chunk;
1319 while (c != END_STM_CHUNK_LIST) {
1320 unsigned int i;
1321 for (i = 0; i < c -> next_entry_idx; i ++) {
1322 TRecEntry *e = &(c -> entries[i]);
1323 TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
1324 merge_read_into (cap, trec, e -> tvar, e -> expected_value);
1325 }
1326 c = c -> prev_chunk;
1327 }
1328 }
1329 q = q -> next_queue_entry;
1330 }
1331 TRACE("%p : finished locking invariants", trec);
1332 }
1333
1334 // Use a read-phase (i.e. don't lock TVars we've read but not updated) if
1335 // (i) the configuration lets us use a read phase, and (ii) we've not
1336 // touched or introduced any invariants.
1337 //
1338 // In principle we could extend the implementation to support a read-phase
1339 // and invariants, but it complicates the logic: the links between
1340 // invariants and TVars are managed by the TVar watch queues which are
1341 // protected by the TVar's locks.
1342
1343 use_read_phase = ((config_use_read_phase) && (!touched_invariants));
1344
1345 result = validate_and_acquire_ownership(trec, (!use_read_phase), TRUE);
1346 if (result) {
1347 // We now know that all the updated locations hold their expected values.
1348 ASSERT (trec -> state == TREC_ACTIVE);
1349
1350 if (use_read_phase) {
1351 StgInt64 max_commits_at_end;
1352 StgInt64 max_concurrent_commits;
1353 TRACE("%p : doing read check", trec);
1354 result = check_read_only(trec);
1355 TRACE("%p : read-check %s", trec, result ? "succeeded" : "failed");
1356
1357 max_commits_at_end = max_commits;
1358 max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
1359 (n_capabilities * TOKEN_BATCH_SIZE));
1360 if (((max_concurrent_commits >> 32) > 0) || shake()) {
1361 result = FALSE;
1362 }
1363 }
1364
1365 if (result) {
1366 // We now know that all of the read-only locations held their expected values
1367 // at the end of the call to validate_and_acquire_ownership. This forms the
1368 // linearization point of the commit.
1369
1370 // 1. If we have touched or introduced any invariants then unhook them
1371 // from the TVars they depended on last time they were executed
1372 // and hook them on the TVars that they now depend on.
1373 if (touched_invariants) {
1374 StgInvariantCheckQueue *q = trec -> invariants_to_check;
1375 while (q != END_INVARIANT_CHECK_QUEUE) {
1376 StgAtomicInvariant *inv = q -> invariant;
1377 if (inv -> last_execution != NO_TREC) {
1378 disconnect_invariant(cap, inv);
1379 }
1380
1381 TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
1382 connect_invariant_to_trec(cap, inv, q -> my_execution);
1383
1384 TRACE("%p : unlocking invariant %p", trec, inv);
1385 unlock_inv(inv);
1386
1387 q = q -> next_queue_entry;
1388 }
1389 }
1390
1391 // 2. Make the updates required by the transaction
1392 FOR_EACH_ENTRY(trec, e, {
1393 StgTVar *s;
1394 s = e -> tvar;
1395 if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
1396 // Either the entry is an update or we're not using a read phase:
1397 // write the value back to the TVar, unlocking it if necessary.
1398
1399 ACQ_ASSERT(tvar_is_locked(s, trec));
1400 TRACE("%p : writing %p to %p, waking waiters", trec, e -> new_value, s);
1401 unpark_waiters_on(cap,s);
1402 IF_STM_FG_LOCKS({
1403 s -> num_updates ++;
1404 });
1405 unlock_tvar(trec, s, e -> new_value, TRUE);
1406 }
1407 ACQ_ASSERT(!tvar_is_locked(s, trec));
1408 });
1409 } else {
1410 revert_ownership(trec, FALSE);
1411 }
1412 }
1413
1414 unlock_stm(trec);
1415
1416 free_stg_trec_header(cap, trec);
1417
1418 TRACE("%p : stmCommitTransaction()=%d", trec, result);
1419
1420 return result;
1421 }
1422
1423 /*......................................................................*/
1424
1425 StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
1426 StgTRecHeader *et;
1427 int result;
1428 ASSERT (trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
1429 TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
1430 ASSERT ((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));
1431
1432 lock_stm(trec);
1433
1434 et = trec -> enclosing_trec;
1435 result = validate_and_acquire_ownership(trec, (!config_use_read_phase), TRUE);
1436 if (result) {
1437 // We now know that all the updated locations hold their expected values.
1438
1439 if (config_use_read_phase) {
1440 TRACE("%p : doing read check", trec);
1441 result = check_read_only(trec);
1442 }
1443 if (result) {
1444 // We now know that all of the read-only locations held their expected values
1445 // at the end of the call to validate_and_acquire_ownership. This forms the
1446 // linearization point of the commit.
1447
1448 TRACE("%p : read-check succeeded", trec);
1449 FOR_EACH_ENTRY(trec, e, {
1450 // Merge each entry into the enclosing transaction record, release all
1451 // locks.
1452
1453 StgTVar *s;
1454 s = e -> tvar;
1455 if (entry_is_update(e)) {
1456 unlock_tvar(trec, s, e -> expected_value, FALSE);
1457 }
1458 merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
1459 ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
1460 });
1461 } else {
1462 revert_ownership(trec, FALSE);
1463 }
1464 }
1465
1466 unlock_stm(trec);
1467
1468 free_stg_trec_header(cap, trec);
1469
1470 TRACE("%p : stmCommitNestedTransaction()=%d", trec, result);
1471
1472 return result;
1473 }
1474
1475 /*......................................................................*/
1476
1477 StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
1478 int result;
1479 TRACE("%p : stmWait(%p)", trec, tso);
1480 ASSERT (trec != NO_TREC);
1481 ASSERT (trec -> enclosing_trec == NO_TREC);
1482 ASSERT ((trec -> state == TREC_ACTIVE) ||
1483 (trec -> state == TREC_CONDEMNED));
1484
1485 lock_stm(trec);
1486 result = validate_and_acquire_ownership(trec, TRUE, TRUE);
1487 if (result) {
1488 // The transaction is valid so far so we can actually start waiting.
1489 // (Otherwise the transaction was not valid and the thread will have to
1490 // retry it).
1491
1492 // Put ourselves to sleep. We retain locks on all the TVars involved
1493 // until we are sound asleep : (a) on the wait queues, (b) BlockedOnSTM
1494 // in the TSO, (c) TREC_WAITING in the Trec.
1495 build_watch_queue_entries_for_trec(cap, tso, trec);
1496 park_tso(tso);
1497 trec -> state = TREC_WAITING;
1498
1499 // We haven't released ownership of the transaction yet. The TSO
1500 // has been put on the wait queue for the TVars it is waiting for,
1501 // but we haven't yet tidied up the TSO's stack and made it safe
1502 // to wake up the TSO. Therefore, we must wait until the TSO is
1503 // safe to wake up before we release ownership - when all is well,
1504 // the runtime will call stmWaitUnlock() below, with the same
1505 // TRec.
1506
1507 } else {
1508 unlock_stm(trec);
1509 free_stg_trec_header(cap, trec);
1510 }
1511
1512 TRACE("%p : stmWait(%p)=%d", trec, tso, result);
1513 return result;
1514 }
1515
1516
1517 void
1518 stmWaitUnlock(Capability *cap STG_UNUSED, StgTRecHeader *trec) {
1519 revert_ownership(trec, TRUE);
1520 unlock_stm(trec);
1521 }
1522
1523 /*......................................................................*/
1524
1525 StgBool stmReWait(Capability *cap, StgTSO *tso) {
1526 int result;
1527 StgTRecHeader *trec = tso->trec;
1528
1529 TRACE("%p : stmReWait", trec);
1530 ASSERT (trec != NO_TREC);
1531 ASSERT (trec -> enclosing_trec == NO_TREC);
1532 ASSERT ((trec -> state == TREC_WAITING) ||
1533 (trec -> state == TREC_CONDEMNED));
1534
1535 lock_stm(trec);
1536 result = validate_and_acquire_ownership(trec, TRUE, TRUE);
1537 TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
1538 if (result) {
1539 // The transaction remains valid -- do nothing because it is already on
1540 // the wait queues
1541 ASSERT (trec -> state == TREC_WAITING);
1542 park_tso(tso);
1543 revert_ownership(trec, TRUE);
1544 } else {
1545 // The transaction has become invalid. We can now remove it from the wait
1546 // queues.
1547 if (trec -> state != TREC_CONDEMNED) {
1548 remove_watch_queue_entries_for_trec (cap, trec);
1549 }
1550 free_stg_trec_header(cap, trec);
1551 }
1552 unlock_stm(trec);
1553
1554 TRACE("%p : stmReWait()=%d", trec, result);
1555 return result;
1556 }
1557
1558 /*......................................................................*/
1559
1560 static StgClosure *read_current_value(StgTRecHeader *trec STG_UNUSED, StgTVar *tvar) {
1561 StgClosure *result;
1562 result = tvar -> current_value;
1563
1564 #if defined(STM_FG_LOCKS)
1565 while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info) {
1566 TRACE("%p : read_current_value(%p) saw %p", trec, tvar, result);
1567 result = tvar -> current_value;
1568 }
1569 #endif
1570
1571 TRACE("%p : read_current_value(%p)=%p", trec, tvar, result);
1572 return result;
1573 }
1574
1575 /*......................................................................*/
1576
1577 StgClosure *stmReadTVar(Capability *cap,
1578 StgTRecHeader *trec,
1579 StgTVar *tvar) {
1580 StgTRecHeader *entry_in = NULL;
1581 StgClosure *result = NULL;
1582 TRecEntry *entry = NULL;
1583 TRACE("%p : stmReadTVar(%p)", trec, tvar);
1584 ASSERT (trec != NO_TREC);
1585 ASSERT (trec -> state == TREC_ACTIVE ||
1586 trec -> state == TREC_CONDEMNED);
1587
1588 entry = get_entry_for(trec, tvar, &entry_in);
1589
1590 if (entry != NULL) {
1591 if (entry_in == trec) {
1592 // Entry found in our trec
1593 result = entry -> new_value;
1594 } else {
1595 // Entry found in another trec
1596 TRecEntry *new_entry = get_new_entry(cap, trec);
1597 new_entry -> tvar = tvar;
1598 new_entry -> expected_value = entry -> expected_value;
1599 new_entry -> new_value = entry -> new_value;
1600 result = new_entry -> new_value;
1601 }
1602 } else {
1603 // No entry found
1604 StgClosure *current_value = read_current_value(trec, tvar);
1605 TRecEntry *new_entry = get_new_entry(cap, trec);
1606 new_entry -> tvar = tvar;
1607 new_entry -> expected_value = current_value;
1608 new_entry -> new_value = current_value;
1609 result = current_value;
1610 }
1611
1612 TRACE("%p : stmReadTVar(%p)=%p", trec, tvar, result);
1613 return result;
1614 }
1615
1616 /*......................................................................*/
1617
1618 void stmWriteTVar(Capability *cap,
1619 StgTRecHeader *trec,
1620 StgTVar *tvar,
1621 StgClosure *new_value) {
1622
1623 StgTRecHeader *entry_in = NULL;
1624 TRecEntry *entry = NULL;
1625 TRACE("%p : stmWriteTVar(%p, %p)", trec, tvar, new_value);
1626 ASSERT (trec != NO_TREC);
1627 ASSERT (trec -> state == TREC_ACTIVE ||
1628 trec -> state == TREC_CONDEMNED);
1629
1630 entry = get_entry_for(trec, tvar, &entry_in);
1631
1632 if (entry != NULL) {
1633 if (entry_in == trec) {
1634 // Entry found in our trec
1635 entry -> new_value = new_value;
1636 } else {
1637 // Entry found in another trec
1638 TRecEntry *new_entry = get_new_entry(cap, trec);
1639 new_entry -> tvar = tvar;
1640 new_entry -> expected_value = entry -> expected_value;
1641 new_entry -> new_value = new_value;
1642 }
1643 } else {
1644 // No entry found
1645 StgClosure *current_value = read_current_value(trec, tvar);
1646 TRecEntry *new_entry = get_new_entry(cap, trec);
1647 new_entry -> tvar = tvar;
1648 new_entry -> expected_value = current_value;
1649 new_entry -> new_value = new_value;
1650 }
1651
1652 TRACE("%p : stmWriteTVar done", trec);
1653 }
1654
1655 /*......................................................................*/
1656
1657 StgTVar *stmNewTVar(Capability *cap,
1658 StgClosure *new_value) {
1659 StgTVar *result;
1660 result = (StgTVar *)allocateLocal(cap, sizeofW(StgTVar));
1661 SET_HDR (result, &stg_TVAR_info, CCS_SYSTEM);
1662 result -> current_value = new_value;
1663 result -> first_watch_queue_entry = END_STM_WATCH_QUEUE;
1664 #if defined(THREADED_RTS)
1665 result -> num_updates = 0;
1666 #endif
1667 return result;
1668 }
1669
1670 /*......................................................................*/
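
/* Illustrative sketch of how this interface fits together (a hypothetical
 * driver, not part of the RTS -- the real callers are the STM primops,
 * which also deal with condemned transactions, retry and orElse):
 *
 *   StgTRecHeader *t = stmStartTransaction(cap, NO_TREC);
 *   StgClosure *old = stmReadTVar(cap, t, tvar);
 *   stmWriteTVar(cap, t, tvar, new_value);
 *   if (!stmCommitTransaction(cap, t)) {
 *     // validation failed: re-run the transaction from the start
 *   }
 */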