/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2005
 *
 * STM implementation.
 *
 * Overview
 * --------
 *
 * See the PPoPP 2005 paper "Composable memory transactions". In summary,
 * each transaction has a TRec (transaction record) holding entries for each of the
 * TVars (transactional variables) that it has accessed. Each entry records
 * (a) the TVar, (b) the expected value seen in the TVar, (c) the new value that
 * the transaction wants to write to the TVar, (d) during commit, the identity of
 * the TRec that wrote the expected value.
 *
 * Separate TRecs are used for each level in a nest of transactions. This allows
 * a nested transaction to be aborted without condemning its enclosing transactions.
 * This is needed in the implementation of catchRetry. Note that the "expected value"
 * in a nested transaction's TRec is the value expected to be *held in memory* if
 * the transaction commits -- not the "new value" stored in one of the enclosing
 * transactions. This means that validation can be done without searching through
 * a nest of TRecs.
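 *
 * For example (an illustrative sketch, not taken from the paper): suppose
 * TVar t holds 0 in memory, an outer transaction writes 1 to t, and a
 * nested transaction then reads t. The nested TRec's entry for t records
 * expected value 0 (the value that will be in memory if the whole nest
 * commits from this state) and new value 1, so the nested transaction sees
 * the enclosing write while the nested TRec can still be validated against
 * memory directly.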
 *
 * Concurrency control
 * -------------------
 *
 * Three different concurrency control schemes can be built according to the settings
 * in STM.h:
 *
 * STM_UNIPROC assumes that the caller serialises invocations on the STM interface.
 * In the Haskell RTS this means it is suitable only for non-THREADED_RTS builds.
 *
 * STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired during
 * an invocation on the STM interface. Note that this does not mean that
 * transactions are simply serialized -- the lock is only held *within* the
 * implementation of stmCommitTransaction, stmWait etc.
 *
 * STM_FG_LOCKS uses fine-grained locking -- locking is done on a per-TVar basis
 * and, when committing a transaction, no locks are acquired for TVars that have
 * been read but not updated.
 *
 * Concurrency control is implemented in the functions:
 *
 *    lock_stm
 *    unlock_stm
 *    lock_tvar / cond_lock_tvar
 *    unlock_tvar
 *
 * The choice between STM_UNIPROC / STM_CG_LOCK / STM_FG_LOCKS affects the
 * implementation of these functions.
 *
 * lock_stm & unlock_stm are straightforward: they acquire a simple spin-lock
 * when using STM_CG_LOCK, and otherwise they are no-ops.
 *
 * lock_tvar / cond_lock_tvar and unlock_tvar are more complex because they
 * have other effects (present in STM_UNIPROC and STM_CG_LOCK builds) as well
 * as the actual business of manipulating a lock (present only in STM_FG_LOCKS
 * builds). This is because locking a TVar is implemented by writing the lock
 * holder's TRec into the TVar's current_value field:
 *
 *   lock_tvar - lock a specified TVar (STM_FG_LOCKS only), returning the value
 *               it contained.
 *
 *   cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
 *               contains a specified value. Return TRUE if this succeeds,
 *               FALSE otherwise.
 *
 *   unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
 *               storing a specified value in place of the lock entry.
 *
 * Using these operations, the typical pattern of a commit/validate/wait operation
 * is to (a) lock the STM, (b) lock all the TVars being updated, (c) check that
 * the TVars that were only read from still contain their expected values,
 * (d) release the locks on the TVars, writing updates to them in the case of a
 * commit, (e) unlock the STM. A sketch of this pattern appears just below.
 *
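 * An illustrative sketch of that pattern for a fine-grained-locking commit
 * (simplified from stmCommitTransaction below; failure paths omitted):
 *
 *    lock_stm(trec);
 *    FOR_EACH_ENTRY(trec, e, {                  // (b) lock the updated TVars
 *      if (entry_is_update(e)) cond_lock_tvar(trec, e->tvar, e->expected_value);
 *    });
 *    check_read_only(trec);                     // (c) validate read-only entries
 *    FOR_EACH_ENTRY(trec, e, {                  // (d) publish updates, unlocking
 *      if (entry_is_update(e)) unlock_tvar(trec, e->tvar, e->new_value, TRUE);
 *    });
 *    unlock_stm(trec);                          // (e)
 *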
 * Queues of waiting threads hang off the first_watch_queue_entry field of each
 * TVar. This may only be manipulated when holding that TVar's lock. In
 * particular, when a thread is putting itself to sleep, it mustn't release
 * the TVar's lock until it has added itself to the wait queue and marked its
 * TSO as BlockedOnSTM -- this makes sure that other threads will know to wake it.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "Schedule.h"
#include "SMP.h"
#include "STM.h"
#include "Trace.h"

#include <stdlib.h>
#include <stdio.h>

#define TRUE 1
#define FALSE 0

// ACQ_ASSERT is used for assertions which are only required for
// THREADED_RTS builds with fine-grained locking.

#if defined(STM_FG_LOCKS)
#define ACQ_ASSERT(_X) ASSERT(_X)
#define NACQ_ASSERT(_X) /*Nothing*/
#else
#define ACQ_ASSERT(_X) /*Nothing*/
#define NACQ_ASSERT(_X) ASSERT(_X)
#endif

/*......................................................................*/

// If SHAKE is defined then validation will sometimes spuriously fail. This
// helps test unusual code paths when genuine contention is rare.

#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)

#ifdef SHAKE
static const int do_shake = TRUE;
#else
static const int do_shake = FALSE;
#endif
static int shake_ctr = 0;
static int shake_lim = 1;

static int shake(void) {
  if (do_shake) {
    if (((shake_ctr++) % shake_lim) == 0) {
      shake_ctr = 1;
      shake_lim ++;
      return TRUE;
    }
    return FALSE;
  } else {
    return FALSE;
  }
}

/*......................................................................*/

// Helper macros for iterating over entries within a transaction
// record

#define FOR_EACH_ENTRY(_t,_x,CODE) do {                                        \
  StgTRecHeader *__t = (_t);                                                   \
  StgTRecChunk *__c = __t -> current_chunk;                                    \
  StgWord __limit = __c -> next_entry_idx;                                     \
  TRACE("%p : FOR_EACH_ENTRY, current_chunk=%p limit=%ld", __t, __c, __limit); \
  while (__c != END_STM_CHUNK_LIST) {                                          \
    StgWord __i;                                                               \
    for (__i = 0; __i < __limit; __i ++) {                                     \
      TRecEntry *_x = &(__c -> entries[__i]);                                  \
      do { CODE } while (0);                                                   \
    }                                                                          \
    __c = __c -> prev_chunk;                                                   \
    __limit = TREC_CHUNK_NUM_ENTRIES;                                          \
  }                                                                            \
 exit_for_each:                                                                \
  if (FALSE) goto exit_for_each;                                               \
} while (0)

#define BREAK_FOR_EACH goto exit_for_each
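
// An illustrative use of these macros (a sketch in the style of
// get_entry_for below): scan a TRec for an entry on a particular TVar,
// stopping early once it has been found.
//
//   TRecEntry *found = NULL;
//   FOR_EACH_ENTRY(trec, e, {
//     if (e -> tvar == tvar) {
//       found = e;
//       BREAK_FOR_EACH;
//     }
//   });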

/*......................................................................*/

// if REUSE_MEMORY is defined then attempt to re-use descriptors, log chunks,
// and wait queue entries without GC

#define REUSE_MEMORY

/*......................................................................*/

#define IF_STM_UNIPROC(__X)  do { } while (0)
#define IF_STM_CG_LOCK(__X)  do { } while (0)
#define IF_STM_FG_LOCKS(__X) do { } while (0)

#if defined(STM_UNIPROC)
#undef IF_STM_UNIPROC
#define IF_STM_UNIPROC(__X) do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s STG_UNUSED,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p)", trec, s);
  if (force_update) {
    s -> current_value = c;
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
}
#endif

#if defined(STM_CG_LOCK) /*........................................*/

#undef IF_STM_CG_LOCK
#define IF_STM_CG_LOCK(__X) do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;
static volatile StgTRecHeader *smp_locked = NULL;

static void lock_stm(StgTRecHeader *trec) {
  while (cas(&smp_locked, NULL, trec) != NULL) { }
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
  ASSERT (smp_locked == trec);
  smp_locked = 0;
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  ASSERT (smp_locked == trec);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s STG_UNUSED,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT (smp_locked == trec);
  if (force_update) {
    s -> current_value = c;
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  ASSERT (smp_locked == trec);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
}
#endif

#if defined(STM_FG_LOCKS) /*...................................*/

#undef IF_STM_FG_LOCKS
#define IF_STM_FG_LOCKS(__X) do { __X } while (0)
static const StgBool config_use_read_phase = TRUE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
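  // Spin until the TVar is not locked by another TRec (i.e. current_value
  // no longer points at a TRec header), then claim it by CASing our own
  // TRec in; retry if the TVar was locked or updated in the meantime.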
  do {
    do {
      result = s -> current_value;
    } while (GET_INFO(result) == &stg_TREC_HEADER_info);
  } while (cas((void *)&(s -> current_value),
               (StgWord)result, (StgWord)trec) != (StgWord)result);
  return result;
}

static void unlock_tvar(StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update STG_UNUSED) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT(s -> current_value == (StgClosure *)trec);
  s -> current_value = c;
}

static StgBool cond_lock_tvar(StgTRecHeader *trec,
                              StgTVar *s,
                              StgClosure *expected) {
  StgClosure *result;
  StgWord w;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
  result = (StgClosure *)w;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv) {
  return (cas(&(inv -> lock), 0, 1) == 0);
}

static void unlock_inv(StgAtomicInvariant *inv) {
  ASSERT(inv -> lock == 1);
  inv -> lock = 0;
}
#endif

/*......................................................................*/

static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  StgInfoTable *info = get_itbl(c);
  return (info -> type) == TSO;
}

static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  StgInfoTable *info = get_itbl(c);
  return (info -> type) == ATOMIC_INVARIANT;
}

/*......................................................................*/

// Helper functions for thread blocking and unblocking

static void park_tso(StgTSO *tso) {
  ASSERT(tso -> why_blocked == NotBlocked);
  tso -> why_blocked = BlockedOnSTM;
  tso -> block_info.closure = (StgClosure *) END_TSO_QUEUE;
  TRACE("park_tso on tso=%p", tso);
}

static void unpark_tso(Capability *cap, StgTSO *tso) {
  // We will continue unparking threads while they remain on one of the wait
  // queues: it's up to the thread itself to remove it from the wait queues
  // if it decides to do so when it is scheduled.

  // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
  // to avoid multiple CPUs unblocking the same TSO, and also to
  // synchronise with throwTo().
  lockTSO(tso);
  if (tso -> why_blocked == BlockedOnSTM) {
    TRACE("unpark_tso on tso=%p", tso);
    unblockOne(cap,tso);
  } else {
    TRACE("spurious unpark_tso on tso=%p", tso);
  }
  unlockTSO(tso);
}

static void unpark_waiters_on(Capability *cap, StgTVar *s) {
  StgTVarWatchQueue *q;
  TRACE("unpark_waiters_on tvar=%p", s);
  for (q = s -> first_watch_queue_entry;
       q != END_STM_WATCH_QUEUE;
       q = q -> next_queue_entry) {
    if (watcher_is_tso(q)) {
      unpark_tso(cap, (StgTSO *)(q -> closure));
    }
  }
}

/*......................................................................*/

// Helper functions for allocation and initialization

static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
                                                             StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result;
  result = (StgInvariantCheckQueue *)allocateLocal(cap, sizeofW(StgInvariantCheckQueue));
  SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
  result -> invariant = invariant;
  result -> my_execution = NO_TREC;
  return result;
}

static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
                                                   StgClosure *closure) {
  StgTVarWatchQueue *result;
  result = (StgTVarWatchQueue *)allocateLocal(cap, sizeofW(StgTVarWatchQueue));
  SET_HDR (result, &stg_TVAR_WATCH_QUEUE_info, CCS_SYSTEM);
  result -> closure = closure;
  return result;
}

static StgTRecChunk *new_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result;
  result = (StgTRecChunk *)allocateLocal(cap, sizeofW(StgTRecChunk));
  SET_HDR (result, &stg_TREC_CHUNK_info, CCS_SYSTEM);
  result -> prev_chunk = END_STM_CHUNK_LIST;
  result -> next_entry_idx = 0;
  return result;
}

static StgTRecHeader *new_stg_trec_header(Capability *cap,
                                          StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result;
  result = (StgTRecHeader *) allocateLocal(cap, sizeofW(StgTRecHeader));
  SET_HDR (result, &stg_TREC_HEADER_info, CCS_SYSTEM);

  result -> enclosing_trec = enclosing_trec;
  result -> current_chunk = new_stg_trec_chunk(cap);
  result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;

  if (enclosing_trec == NO_TREC) {
    result -> state = TREC_ACTIVE;
  } else {
    ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
           enclosing_trec -> state == TREC_CONDEMNED);
    result -> state = enclosing_trec -> state;
  }

  return result;
}

/*......................................................................*/

// Allocation / deallocation functions that retain per-capability lists
// of closures that can be re-used

static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
                                                               StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result = NULL;
  if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
    result = new_stg_invariant_check_queue(cap, invariant);
  } else {
    result = cap -> free_invariant_check_queues;
    result -> invariant = invariant;
    result -> my_execution = NO_TREC;
    cap -> free_invariant_check_queues = result -> next_queue_entry;
  }
  return result;
}

static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
                                                     StgClosure *closure) {
  StgTVarWatchQueue *result = NULL;
  if (cap -> free_tvar_watch_queues == END_STM_WATCH_QUEUE) {
    result = new_stg_tvar_watch_queue(cap, closure);
  } else {
    result = cap -> free_tvar_watch_queues;
    result -> closure = closure;
    cap -> free_tvar_watch_queues = result -> next_queue_entry;
  }
  return result;
}

static void free_stg_tvar_watch_queue(Capability *cap,
                                      StgTVarWatchQueue *wq) {
#if defined(REUSE_MEMORY)
  wq -> next_queue_entry = cap -> free_tvar_watch_queues;
  cap -> free_tvar_watch_queues = wq;
#endif
}

static StgTRecChunk *alloc_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result = NULL;
  if (cap -> free_trec_chunks == END_STM_CHUNK_LIST) {
    result = new_stg_trec_chunk(cap);
  } else {
    result = cap -> free_trec_chunks;
    cap -> free_trec_chunks = result -> prev_chunk;
    result -> prev_chunk = END_STM_CHUNK_LIST;
    result -> next_entry_idx = 0;
  }
  return result;
}

static void free_stg_trec_chunk(Capability *cap,
                                StgTRecChunk *c) {
#if defined(REUSE_MEMORY)
  c -> prev_chunk = cap -> free_trec_chunks;
  cap -> free_trec_chunks = c;
#endif
}

static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
                                            StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result = NULL;
  if (cap -> free_trec_headers == NO_TREC) {
    result = new_stg_trec_header(cap, enclosing_trec);
  } else {
    result = cap -> free_trec_headers;
    cap -> free_trec_headers = result -> enclosing_trec;
    result -> enclosing_trec = enclosing_trec;
    result -> current_chunk -> next_entry_idx = 0;
    result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
    if (enclosing_trec == NO_TREC) {
      result -> state = TREC_ACTIVE;
    } else {
      ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
             enclosing_trec -> state == TREC_CONDEMNED);
      result -> state = enclosing_trec -> state;
    }
  }
  return result;
}

static void free_stg_trec_header(Capability *cap,
                                 StgTRecHeader *trec) {
#if defined(REUSE_MEMORY)
  StgTRecChunk *chunk = trec -> current_chunk -> prev_chunk;
  while (chunk != END_STM_CHUNK_LIST) {
    StgTRecChunk *prev_chunk = chunk -> prev_chunk;
    free_stg_trec_chunk(cap, chunk);
    chunk = prev_chunk;
  }
  trec -> current_chunk -> prev_chunk = END_STM_CHUNK_LIST;
  trec -> enclosing_trec = cap -> free_trec_headers;
  cap -> free_trec_headers = trec;
#endif
}

/*......................................................................*/

// Helper functions for managing waiting lists

static void build_watch_queue_entries_for_trec(Capability *cap,
                                               StgTSO *tso,
                                               StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE);

  TRACE("%p : build_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *q;
    StgTVarWatchQueue *fq;
    s = e -> tvar;
    TRACE("%p : adding tso=%p to watch queue for tvar=%p", trec, tso, s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    NACQ_ASSERT(s -> current_value == e -> expected_value);
    fq = s -> first_watch_queue_entry;
    q = alloc_stg_tvar_watch_queue(cap, (StgClosure*) tso);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
    e -> new_value = (StgClosure *) q;
  });
}

static void remove_watch_queue_entries_for_trec(Capability *cap,
                                                StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_WAITING ||
         trec -> state == TREC_CONDEMNED);

  TRACE("%p : remove_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *pq;
    StgTVarWatchQueue *nq;
    StgTVarWatchQueue *q;
    s = e -> tvar;
    StgClosure *saw = lock_tvar(trec, s);
    q = (StgTVarWatchQueue *) (e -> new_value);
    TRACE("%p : removing tso=%p from watch queue for tvar=%p",
          trec,
          q -> closure,
          s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    nq = q -> next_queue_entry;
    pq = q -> prev_queue_entry;
    if (nq != END_STM_WATCH_QUEUE) {
      nq -> prev_queue_entry = pq;
    }
    if (pq != END_STM_WATCH_QUEUE) {
      pq -> next_queue_entry = nq;
    } else {
      ASSERT (s -> first_watch_queue_entry == q);
      s -> first_watch_queue_entry = nq;
    }
    free_stg_tvar_watch_queue(cap, q);
    unlock_tvar(trec, s, saw, FALSE);
  });
}

/*......................................................................*/

static TRecEntry *get_new_entry(Capability *cap,
                                StgTRecHeader *t) {
  TRecEntry *result;
  StgTRecChunk *c;
  int i;

  c = t -> current_chunk;
  i = c -> next_entry_idx;
  ASSERT(c != END_STM_CHUNK_LIST);

  if (i < TREC_CHUNK_NUM_ENTRIES) {
    // Continue to use current chunk
    result = &(c -> entries[i]);
    c -> next_entry_idx ++;
  } else {
    // Current chunk is full: allocate a fresh one
    StgTRecChunk *nc;
    nc = alloc_stg_trec_chunk(cap);
    nc -> prev_chunk = c;
    nc -> next_entry_idx = 1;
    t -> current_chunk = nc;
    result = &(nc -> entries[0]);
  }

  return result;
}

/*......................................................................*/

static void merge_update_into(Capability *cap,
                              StgTRecHeader *t,
                              StgTVar *tvar,
                              StgClosure *expected_value,
                              StgClosure *new_value) {
  int found;

  // Look for an entry in this trec
  found = FALSE;
  FOR_EACH_ENTRY(t, e, {
    StgTVar *s;
    s = e -> tvar;
    if (s == tvar) {
      found = TRUE;
      if (e -> expected_value != expected_value) {
        // Must abort if the two entries start from different values
        TRACE("%p : update entries inconsistent at %p (%p vs %p)",
              t, tvar, e -> expected_value, expected_value);
        t -> state = TREC_CONDEMNED;
      }
      e -> new_value = new_value;
      BREAK_FOR_EACH;
    }
  });

  if (!found) {
    // No entry so far in this trec
    TRecEntry *ne;
    ne = get_new_entry(cap, t);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = new_value;
  }
}

/*......................................................................*/

static void merge_read_into(Capability *cap,
                            StgTRecHeader *t,
                            StgTVar *tvar,
                            StgClosure *expected_value) {
  int found;

  // Look for an entry in this trec
  found = FALSE;
  FOR_EACH_ENTRY(t, e, {
    StgTVar *s;
    s = e -> tvar;
    if (s == tvar) {
      found = TRUE;
      if (e -> expected_value != expected_value) {
        // Must abort if the two entries start from different values
        TRACE("%p : read entries inconsistent at %p (%p vs %p)",
              t, tvar, e -> expected_value, expected_value);
        t -> state = TREC_CONDEMNED;
      }
      BREAK_FOR_EACH;
    }
  });

  if (!found) {
    // No entry so far in this trec
    TRecEntry *ne;
    ne = get_new_entry(cap, t);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = expected_value;
  }
}

/*......................................................................*/

static StgBool entry_is_update(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value != e -> new_value);
  return result;
}

#if defined(STM_FG_LOCKS)
static StgBool entry_is_read_only(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value == e -> new_value);
  return result;
}

static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
  StgClosure *c;
  StgBool result;
  c = s -> current_value;
  result = (c == (StgClosure *) h);
  return result;
}
#endif

// revert_ownership : release a lock on a TVar, storing back
// the value that it held when the lock was acquired.  "revert_all"
// is set in stmWait and stmReWait when we acquired locks on all of
// the TVars involved.  "revert_all" is not set in commit operations
// where we don't lock TVars that have been read from but not updated.

static void revert_ownership(StgTRecHeader *trec STG_UNUSED,
                             StgBool revert_all STG_UNUSED) {
#if defined(STM_FG_LOCKS)
  FOR_EACH_ENTRY(trec, e, {
    if (revert_all || entry_is_update(e)) {
      StgTVar *s;
      s = e -> tvar;
      if (tvar_is_locked(s, trec)) {
        unlock_tvar(trec, s, e -> expected_value, TRUE);
      }
    }
  });
#endif
}

/*......................................................................*/

// validate_and_acquire_ownership : this performs the twin functions
// of checking that the TVars referred to by entries in trec hold the
// expected values and:
//
//   - locking the TVar (on updated TVars during commit, or all TVars
//     during wait)
//
//   - recording the identity of the TRec that wrote the value seen in the
//     TVar (on non-updated TVars during commit).  These values are
//     stashed in the TRec entries and are then checked in check_read_only
//     to ensure that an atomic snapshot of all of these locations has been
//     seen.

static StgBool validate_and_acquire_ownership (StgTRecHeader *trec,
                                               int acquire_all,
                                               int retain_ownership) {
  StgBool result;

  if (shake()) {
    TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
    return FALSE;
  }

  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));
  result = !((trec -> state) == TREC_CONDEMNED);
  if (result) {
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (acquire_all || entry_is_update(e)) {
        TRACE("%p : trying to acquire %p", trec, s);
        if (!cond_lock_tvar(trec, s, e -> expected_value)) {
          TRACE("%p : failed to acquire %p", trec, s);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      } else {
        ASSERT(config_use_read_phase);
        IF_STM_FG_LOCKS({
          TRACE("%p : will need to check %p", trec, s);
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          }
          e -> num_updates = s -> num_updates;
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match (race)", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          } else {
            TRACE("%p : need to check version %ld", trec, e -> num_updates);
          }
        });
      }
    });
  }

  if ((!result) || (!retain_ownership)) {
    revert_ownership(trec, acquire_all);
  }

  return result;
}

// check_read_only : check that we've seen an atomic snapshot of the
// non-updated TVars accessed by a trec.  This checks that the last TRec to
// commit an update to the TVar is unchanged since the value was stashed in
// validate_and_acquire_ownership.  If no update is seen to any TVar then
// all of them contained their expected values at the start of the call to
// check_read_only.
//
// The paper "Concurrent programming without locks" (under submission), or
// Keir Fraser's PhD dissertation "Practical lock-free programming" discusses
// this kind of algorithm.

static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
  StgBool result = TRUE;

  ASSERT (config_use_read_phase);
  IF_STM_FG_LOCKS({
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (entry_is_read_only(e)) {
        TRACE("%p : check_read_only for TVar %p, saw %ld", trec, s, e -> num_updates);
        if (s -> num_updates != e -> num_updates) {
          // || s -> current_value != e -> expected_value) {
          TRACE("%p : mismatch", trec);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      }
    });
  });

  return result;
}


/************************************************************************/

void stmPreGCHook() {
  nat i;

  lock_stm(NO_TREC);
  TRACE("stmPreGCHook");
  for (i = 0; i < n_capabilities; i ++) {
    Capability *cap = &capabilities[i];
    cap -> free_tvar_watch_queues = END_STM_WATCH_QUEUE;
    cap -> free_trec_chunks = END_STM_CHUNK_LIST;
    cap -> free_trec_headers = NO_TREC;
  }
  unlock_stm(NO_TREC);
}

/************************************************************************/

// check_read_only relies on version numbers held in TVars' "num_updates"
// fields not wrapping around while a transaction is committed.  The version
// number is incremented each time an update is committed to the TVar.
// This is unlikely to wrap around when 32-bit integers are used for the counts,
// but to ensure correctness we maintain a shared count on the maximum
// number of commit operations that may occur and check that this has
// not increased by more than 2^32 during a commit.

#define TOKEN_BATCH_SIZE 1024
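
// For example (an informal sketch of the argument): a commit that reads
// max_commits = C when it starts can assume that at most
// (C' - C) + n_capabilities * TOKEN_BATCH_SIZE commits run concurrently
// with it, where C' is max_commits when its read phase ends -- each
// capability may still hold up to TOKEN_BATCH_SIZE unused tokens.
// stmCommitTransaction below conservatively fails the commit if this bound
// reaches 2^32, the point at which a num_updates field could have wrapped.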

static volatile StgInt64 max_commits = 0;

#if defined(THREADED_RTS)
static volatile StgBool token_locked = FALSE;

static void getTokenBatch(Capability *cap) {
  while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
  max_commits += TOKEN_BATCH_SIZE;
  TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
  cap -> transaction_tokens = TOKEN_BATCH_SIZE;
  token_locked = FALSE;
}

static void getToken(Capability *cap) {
  if (cap -> transaction_tokens == 0) {
    getTokenBatch(cap);
  }
  cap -> transaction_tokens --;
}
#else
static void getToken(Capability *cap STG_UNUSED) {
  // Nothing
}
#endif

/*......................................................................*/

StgTRecHeader *stmStartTransaction(Capability *cap,
                                   StgTRecHeader *outer) {
  StgTRecHeader *t;
  TRACE("%p : stmStartTransaction with %d tokens",
        outer,
        cap -> transaction_tokens);

  getToken(cap);

  t = alloc_stg_trec_header(cap, outer);
  TRACE("%p : stmStartTransaction()=%p", outer, t);
  return t;
}

/*......................................................................*/

void stmAbortTransaction(Capability *cap,
                         StgTRecHeader *trec) {
  TRACE("%p : stmAbortTransaction", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  StgTRecHeader *et = trec -> enclosing_trec;
  if (et == NO_TREC) {
    // We're a top-level transaction: remove any watch queue entries that
    // we may have.
    TRACE("%p : aborting top-level transaction", trec);

    if (trec -> state == TREC_WAITING) {
      ASSERT (trec -> enclosing_trec == NO_TREC);
      TRACE("%p : stmAbortTransaction aborting waiting transaction", trec);
      remove_watch_queue_entries_for_trec(cap, trec);
    }

  } else {
    // We're a nested transaction: merge our read set into our parent's
    TRACE("%p : retaining read-set into parent %p", trec, et);

    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s = e -> tvar;
      merge_read_into(cap, et, s, e -> expected_value);
    });
  }

  trec -> state = TREC_ABORTED;
  unlock_stm(trec);

  TRACE("%p : stmAbortTransaction done", trec);
}

/*......................................................................*/

void stmFreeAbortedTRec(Capability *cap,
                        StgTRecHeader *trec) {
  TRACE("%p : stmFreeAbortedTRec", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_CONDEMNED) ||
          (trec -> state == TREC_ABORTED));

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmFreeAbortedTRec done", trec);
}

/*......................................................................*/

void stmCondemnTransaction(Capability *cap,
                           StgTRecHeader *trec) {
  TRACE("%p : stmCondemnTransaction", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  if (trec -> state == TREC_WAITING) {
    ASSERT (trec -> enclosing_trec == NO_TREC);
    TRACE("%p : stmCondemnTransaction condemning waiting transaction", trec);
    remove_watch_queue_entries_for_trec(cap, trec);
  }
  trec -> state = TREC_CONDEMNED;
  unlock_stm(trec);

  TRACE("%p : stmCondemnTransaction done", trec);
}

/*......................................................................*/

StgTRecHeader *stmGetEnclosingTRec(StgTRecHeader *trec) {
  StgTRecHeader *outer;
  TRACE("%p : stmGetEnclosingTRec", trec);
  outer = trec -> enclosing_trec;
  TRACE("%p : stmGetEnclosingTRec()=%p", trec, outer);
  return outer;
}

/*......................................................................*/

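// stmValidateNestOfTransactions : validate every level in a nest of
// transactions, from "trec" outwards, acquiring and then reverting
// ownership at each level.  If validation fails then the transaction is
// condemned, unless it is in the TREC_WAITING state (a waiting TRec must
// stay on its watch queues until they are removed elsewhere).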
StgBool stmValidateNestOfTransactions(StgTRecHeader *trec) {
  StgTRecHeader *t;
  StgBool result;

  TRACE("%p : stmValidateNestOfTransactions", trec);
  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  t = trec;
  result = TRUE;
  while (t != NO_TREC) {
    result &= validate_and_acquire_ownership(t, TRUE, FALSE);
    t = t -> enclosing_trec;
  }

  if (!result && trec -> state != TREC_WAITING) {
    trec -> state = TREC_CONDEMNED;
  }

  unlock_stm(trec);

  TRACE("%p : stmValidateNestOfTransactions()=%d", trec, result);
  return result;
}

/*......................................................................*/

static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeader **in) {
  TRecEntry *result = NULL;

  TRACE("%p : get_entry_for TVar %p", trec, tvar);
  ASSERT(trec != NO_TREC);

  do {
    FOR_EACH_ENTRY(trec, e, {
      if (e -> tvar == tvar) {
        result = e;
        if (in != NULL) {
          *in = trec;
        }
        BREAK_FOR_EACH;
      }
    });
    trec = trec -> enclosing_trec;
  } while (result == NULL && trec != NO_TREC);

  return result;
}

/*......................................................................*/

/*
 * Add/remove links between an invariant and the TVars it reads.  The caller
 * must have locked the TVars involved and the invariant.
 */

static void disconnect_invariant(Capability *cap,
                                 StgAtomicInvariant *inv) {
  StgTRecHeader *last_execution = inv -> last_execution;

  TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);

  FOR_EACH_ENTRY(last_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = s -> first_watch_queue_entry;
    StgBool found = FALSE;
    TRACE("  looking for trec on tvar=%p", s);
    for (q = s -> first_watch_queue_entry;
         q != END_STM_WATCH_QUEUE;
         q = q -> next_queue_entry) {
      if (q -> closure == (StgClosure*)inv) {
        StgTVarWatchQueue *pq;
        StgTVarWatchQueue *nq;
        nq = q -> next_queue_entry;
        pq = q -> prev_queue_entry;
        if (nq != END_STM_WATCH_QUEUE) {
          nq -> prev_queue_entry = pq;
        }
        if (pq != END_STM_WATCH_QUEUE) {
          pq -> next_queue_entry = nq;
        } else {
          ASSERT (s -> first_watch_queue_entry == q);
          s -> first_watch_queue_entry = nq;
        }
        TRACE("  found it in watch queue entry %p", q);
        free_stg_tvar_watch_queue(cap, q);
        found = TRUE;
        break;
      }
    }
    ASSERT(found);
  });
  inv -> last_execution = NO_TREC;
}

static void connect_invariant_to_trec(Capability *cap,
                                      StgAtomicInvariant *inv,
                                      StgTRecHeader *my_execution) {
  TRACE("connecting execution inv=%p trec=%p", inv, my_execution);

  ASSERT(inv -> last_execution == NO_TREC);

  FOR_EACH_ENTRY(my_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
    StgTVarWatchQueue *fq = s -> first_watch_queue_entry;

    // We leave "last_execution" holding the values that will be
    // in the heap after the transaction we're in the process
    // of committing has finished.
    TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
    if (entry != NULL) {
      e -> expected_value = entry -> new_value;
      e -> new_value = entry -> new_value;
    }

    TRACE("  linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
  });

  inv -> last_execution = my_execution;
}

/*
 * Add a new invariant to the trec's list of invariants to check on commit
 */
void stmAddInvariantToCheck(Capability *cap,
                            StgTRecHeader *trec,
                            StgClosure *code) {
  TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE ||
         trec -> state == TREC_CONDEMNED);

  StgAtomicInvariant *invariant;
  StgInvariantCheckQueue *q;

  // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
  //    to signal that this is a new invariant in the current atomic block

  invariant = (StgAtomicInvariant *) allocateLocal(cap, sizeofW(StgAtomicInvariant));
  TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
  SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
  invariant -> code = code;
  invariant -> last_execution = NO_TREC;

  // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec

  q = alloc_stg_invariant_check_queue(cap, invariant);
  TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
  q -> invariant = invariant;
  q -> my_execution = NO_TREC;
  q -> next_queue_entry = trec -> invariants_to_check;
  trec -> invariants_to_check = q;

  TRACE("%p : stmAddInvariantToCheck done", trec);
}

/*
 * Fill in the trec's list of invariants that might be violated by the
 * current transaction.
 */

StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
  TRACE("%p : stmGetInvariantsToCheck, head was %p",
        trec,
        trec -> invariants_to_check);

  ASSERT(trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));
  ASSERT(trec -> enclosing_trec == NO_TREC);

  lock_stm(trec);
  StgTRecChunk *c = trec -> current_chunk;
  while (c != END_STM_CHUNK_LIST) {
    unsigned int i;
    for (i = 0; i < c -> next_entry_idx; i ++) {
      TRecEntry *e = &(c -> entries[i]);
      if (entry_is_update(e)) {
        StgTVar *s = e -> tvar;
        StgClosure *old = lock_tvar(trec, s);

        // Pick up any invariants on the TVar being updated
        // by entry "e"

        TRACE("%p : checking for invariants on %p", trec, s);
        StgTVarWatchQueue *q;
        for (q = s -> first_watch_queue_entry;
             q != END_STM_WATCH_QUEUE;
             q = q -> next_queue_entry) {
          if (watcher_is_invariant(q)) {
            TRACE("%p : Touching invariant %p", trec, q -> closure);
            StgBool found = FALSE;
            StgInvariantCheckQueue *q2;
            for (q2 = trec -> invariants_to_check;
                 q2 != END_INVARIANT_CHECK_QUEUE;
                 q2 = q2 -> next_queue_entry) {
              if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
                TRACE("%p : Already found %p", trec, q -> closure);
                found = TRUE;
                break;
              }
            }

            if (!found) {
              TRACE("%p : Not already found %p", trec, q -> closure);
              StgInvariantCheckQueue *q3;
              q3 = alloc_stg_invariant_check_queue(cap,
                                                   (StgAtomicInvariant*) q -> closure);
              q3 -> next_queue_entry = trec -> invariants_to_check;
              trec -> invariants_to_check = q3;
            }
          }
        }

        unlock_tvar(trec, s, old, FALSE);
      }
    }
    c = c -> prev_chunk;
  }

  unlock_stm(trec);

  TRACE("%p : stmGetInvariantsToCheck, head now %p",
        trec,
        trec -> invariants_to_check);

  return (trec -> invariants_to_check);
}

/*......................................................................*/

StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
  int result;
  StgInt64 max_commits_at_start = max_commits;

  TRACE("%p : stmCommitTransaction()", trec);
  ASSERT (trec != NO_TREC);

  lock_stm(trec);

  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_CONDEMNED));

  // touched_invariants is true if we've written to a TVar with invariants
  // attached to it, or if we're trying to add a new invariant to the system.

  StgBool touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);

  // If we have touched invariants then (i) lock the invariant, and (ii) add
  // the invariant's read set to our own.  Step (i) is needed to serialize
  // concurrent transactions that attempt to make conflicting updates
  // to the invariant's trec (suppose it read from t1 and t2, and that one
  // concurrent transaction writes only to t1, and a second writes only to
  // t2).  Step (ii) is needed so that both transactions will lock t1 and t2
  // to gain access to their wait lists (and hence be able to unhook the
  // invariant from both tvars).

  if (touched_invariants) {
    TRACE("%p : locking invariants", trec);
    StgInvariantCheckQueue *q = trec -> invariants_to_check;
    while (q != END_INVARIANT_CHECK_QUEUE) {
      TRACE("%p : locking invariant %p", trec, q -> invariant);
      StgAtomicInvariant *inv = q -> invariant;
      if (!lock_inv(inv)) {
        TRACE("%p : failed to lock %p", trec, inv);
        trec -> state = TREC_CONDEMNED;
        break;
      }

      StgTRecHeader *inv_old_trec = inv -> last_execution;
      if (inv_old_trec != NO_TREC) {
        StgTRecChunk *c = inv_old_trec -> current_chunk;
        while (c != END_STM_CHUNK_LIST) {
          unsigned int i;
          for (i = 0; i < c -> next_entry_idx; i ++) {
            TRecEntry *e = &(c -> entries[i]);
            TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
            merge_read_into (cap, trec, e -> tvar, e -> expected_value);
          }
          c = c -> prev_chunk;
        }
      }
      q = q -> next_queue_entry;
    }
    TRACE("%p : finished locking invariants", trec);
  }

  // Use a read-phase (i.e. don't lock TVars we've read but not updated) if
  // (i) the configuration lets us use a read phase, and (ii) we've not
  // touched or introduced any invariants.
  //
  // In principle we could extend the implementation to support a read-phase
  // and invariants, but it complicates the logic: the links between
  // invariants and TVars are managed by the TVar watch queues which are
  // protected by the TVar's locks.

  StgBool use_read_phase = ((config_use_read_phase) && (!touched_invariants));

  result = validate_and_acquire_ownership(trec, (!use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.
    ASSERT (trec -> state == TREC_ACTIVE);

    if (use_read_phase) {
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
      TRACE("%p : read-check %s", trec, result ? "succeeded" : "failed");

      StgInt64 max_commits_at_end = max_commits;
      StgInt64 max_concurrent_commits;
      max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
                                (n_capabilities * TOKEN_BATCH_SIZE));
      if (((max_concurrent_commits >> 32) > 0) || shake()) {
        result = FALSE;
      }
    }

    if (result) {
      // We now know that all of the read-only locations held their expected values
      // at the end of the call to validate_and_acquire_ownership.  This forms the
      // linearization point of the commit.

      // 1. If we have touched or introduced any invariants then unhook them
      //    from the TVars they depended on last time they were executed
      //    and hook them on the TVars that they now depend on.
      if (touched_invariants) {
        StgInvariantCheckQueue *q = trec -> invariants_to_check;
        while (q != END_INVARIANT_CHECK_QUEUE) {
          StgAtomicInvariant *inv = q -> invariant;
          if (inv -> last_execution != NO_TREC) {
            disconnect_invariant(cap, inv);
          }

          TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
          connect_invariant_to_trec(cap, inv, q -> my_execution);

          TRACE("%p : unlocking invariant %p", trec, inv);
          unlock_inv(inv);

          q = q -> next_queue_entry;
        }
      }

      // 2. Make the updates required by the transaction
      FOR_EACH_ENTRY(trec, e, {
        StgTVar *s;
        s = e -> tvar;
        if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
          // Either the entry is an update or we're not using a read phase:
          // write the value back to the TVar, unlocking it if necessary.

          ACQ_ASSERT(tvar_is_locked(s, trec));
          TRACE("%p : writing %p to %p, waking waiters", trec, e -> new_value, s);
          unpark_waiters_on(cap,s);
          IF_STM_FG_LOCKS({
            s -> num_updates ++;
          });
          unlock_tvar(trec, s, e -> new_value, TRUE);
        }
        ACQ_ASSERT(!tvar_is_locked(s, trec));
      });
    } else {
      revert_ownership(trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
  StgTRecHeader *et;
  int result;
  ASSERT (trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
  TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
  ASSERT ((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  et = trec -> enclosing_trec;
  result = validate_and_acquire_ownership(trec, (!config_use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.

    if (config_use_read_phase) {
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
    }
    if (result) {
      // We now know that all of the read-only locations held their expected values
      // at the end of the call to validate_and_acquire_ownership.  This forms the
      // linearization point of the commit.

      TRACE("%p : read-check succeeded", trec);
      FOR_EACH_ENTRY(trec, e, {
        // Merge each entry into the enclosing transaction record, release all
        // locks.

        StgTVar *s;
        s = e -> tvar;
        if (entry_is_update(e)) {
          unlock_tvar(trec, s, e -> expected_value, FALSE);
        }
        merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
        ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
      });
    } else {
      revert_ownership(trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitNestedTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
  int result;
  TRACE("%p : stmWait(%p)", trec, tso);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(trec, TRUE, TRUE);
  if (result) {
    // The transaction is valid so far so we can actually start waiting.
    // (Otherwise the transaction was not valid and the thread will have to
    // retry it).

    // Put ourselves to sleep.  We retain locks on all the TVars involved
    // until we are sound asleep : (a) on the wait queues, (b) BlockedOnSTM
    // in the TSO, (c) TREC_WAITING in the Trec.
    build_watch_queue_entries_for_trec(cap, tso, trec);
    park_tso(tso);
    trec -> state = TREC_WAITING;

    // We haven't released ownership of the transaction yet.  The TSO
    // has been put on the wait queue for the TVars it is waiting for,
    // but we haven't yet tidied up the TSO's stack and made it safe
    // to wake up the TSO.  Therefore, we must wait until the TSO is
    // safe to wake up before we release ownership - when all is well,
    // the runtime will call stmWaitUnlock() below, with the same
    // TRec.

  } else {
    unlock_stm(trec);
    free_stg_trec_header(cap, trec);
  }

  TRACE("%p : stmWait(%p)=%d", trec, tso, result);
  return result;
}


void
stmWaitUnlock(Capability *cap STG_UNUSED, StgTRecHeader *trec) {
  revert_ownership(trec, TRUE);
  unlock_stm(trec);
}

/*......................................................................*/

StgBool stmReWait(Capability *cap, StgTSO *tso) {
  int result;
  StgTRecHeader *trec = tso->trec;

  TRACE("%p : stmReWait", trec);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(trec, TRUE, TRUE);
  TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
  if (result) {
    // The transaction remains valid -- do nothing because it is already on
    // the wait queues
    ASSERT (trec -> state == TREC_WAITING);
    park_tso(tso);
    revert_ownership(trec, TRUE);
  } else {
    // The transaction has become invalid.  We can now remove it from the wait
    // queues.
    if (trec -> state != TREC_CONDEMNED) {
      remove_watch_queue_entries_for_trec (cap, trec);
    }
    free_stg_trec_header(cap, trec);
  }
  unlock_stm(trec);

  TRACE("%p : stmReWait()=%d", trec, result);
  return result;
}

/*......................................................................*/

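// read_current_value : read a TVar's current_value field.  Under
// STM_FG_LOCKS this spins while the TVar is locked, i.e. while the field
// holds a TRec header rather than the TVar's actual value.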
static StgClosure *read_current_value(StgTRecHeader *trec STG_UNUSED, StgTVar *tvar) {
  StgClosure *result;
  result = tvar -> current_value;

#if defined(STM_FG_LOCKS)
  while (GET_INFO(result) == &stg_TREC_HEADER_info) {
    TRACE("%p : read_current_value(%p) saw %p", trec, tvar, result);
    result = tvar -> current_value;
  }
#endif

  TRACE("%p : read_current_value(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

StgClosure *stmReadTVar(Capability *cap,
                        StgTRecHeader *trec,
                        StgTVar *tvar) {
  StgTRecHeader *entry_in;
  StgClosure *result = NULL;
  TRecEntry *entry = NULL;
  TRACE("%p : stmReadTVar(%p)", trec, tvar);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> state == TREC_ACTIVE ||
          trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      result = entry -> new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = entry -> new_value;
      result = new_entry -> new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = current_value;
    result = current_value;
  }

  TRACE("%p : stmReadTVar(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

void stmWriteTVar(Capability *cap,
                  StgTRecHeader *trec,
                  StgTVar *tvar,
                  StgClosure *new_value) {

  StgTRecHeader *entry_in;
  TRecEntry *entry = NULL;
  TRACE("%p : stmWriteTVar(%p, %p)", trec, tvar, new_value);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> state == TREC_ACTIVE ||
          trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      entry -> new_value = new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = new_value;
  }

  TRACE("%p : stmWriteTVar done", trec);
}

/*......................................................................*/

StgTVar *stmNewTVar(Capability *cap,
                    StgClosure *new_value) {
  StgTVar *result;
  result = (StgTVar *)allocateLocal(cap, sizeofW(StgTVar));
  SET_HDR (result, &stg_TVAR_info, CCS_SYSTEM);
  result -> current_value = new_value;
  result -> first_watch_queue_entry = END_STM_WATCH_QUEUE;
#if defined(THREADED_RTS)
  result -> num_updates = 0;
#endif
  return result;
}

/*......................................................................*/