/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2005
 *
 * STM implementation.
 *
 * Overview
 * --------
 *
 * See the PPoPP 2005 paper "Composable memory transactions". In summary, each
 * transaction has a TRec (transaction record) holding entries for each of the
 * TVars (transactional variables) that it has accessed. Each entry records (a)
 * the TVar, (b) the expected value seen in the TVar, (c) the new value that the
 * transaction wants to write to the TVar, (d) during commit, the identity of
 * the TRec that wrote the expected value.
 *
 * Separate TRecs are used for each level in a nest of transactions. This
 * allows a nested transaction to be aborted without condemning its enclosing
 * transactions. This is needed in the implementation of catchRetry. Note that
 * the "expected value" in a nested transaction's TRec is the value expected to
 * be *held in memory* if the transaction commits -- not the "new value" stored
 * in one of the enclosing transactions. This means that validation can be done
 * without searching through a nest of TRecs.
 *
 * Concurrency control
 * -------------------
 *
 * Three different concurrency control schemes can be built according to the
 * settings in STM.h:
 *
 * STM_UNIPROC assumes that the caller serialises invocations on the STM
 * interface. In the Haskell RTS this means it is suitable only for
 * non-THREADED_RTS builds.
 *
 * STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired
 * during an invocation on the STM interface. Note that this does not mean that
 * transactions are simply serialized -- the lock is only held *within* the
 * implementation of stmCommitTransaction, stmWait etc.
 *
 * STM_FG_LOCKS uses fine-grained locking -- locking is done on a per-TVar basis
 * and, when committing a transaction, no locks are acquired for TVars that have
 * been read but not updated.
 *
 * Concurrency control is implemented in the functions:
 *
 *    lock_stm
 *    unlock_stm
 *    lock_tvar / cond_lock_tvar
 *    unlock_tvar
 *
 * The choice between STM_UNIPROC / STM_CG_LOCK / STM_FG_LOCKS affects the
 * implementation of these functions.
 *
 * lock_stm & unlock_stm are straightforward: they acquire a simple spin-lock
 * in STM_CG_LOCK builds, and otherwise they are no-ops.
 *
 * lock_tvar / cond_lock_tvar and unlock_tvar are more complex because they have
 * other effects (present in STM_UNIPROC and STM_CG_LOCK builds) as well as the
 * actual business of manipulating a lock (present only in STM_FG_LOCKS builds).
 * This is because locking a TVar is implemented by writing the lock holder's
 * TRec into the TVar's current_value field:
 *
 *   lock_tvar - lock a specified TVar (STM_FG_LOCKS only), returning the value
 *               it contained.
 *
 *   cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
 *               contains a specified value. Return TRUE if this succeeds,
 *               FALSE otherwise.
 *
 *   unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
 *               storing a specified value in place of the lock entry.
 *
 * Using these operations, the typical pattern of a commit/validate/wait
 * operation is to (a) lock the STM, (b) lock all the TVars being updated, (c)
 * check that the TVars that were only read from still contain their expected
 * values, (d) release the locks on the TVars, writing updates to them in the
 * case of a commit, (e) unlock the STM.
 *
 * Queues of waiting threads hang off the first_watch_queue_entry field of each
 * TVar. This may only be manipulated when holding that TVar's lock. In
 * particular, when a thread is putting itself to sleep, it mustn't release the
 * TVar's lock until it has added itself to the wait queue and marked its TSO as
 * BlockedOnSTM -- this makes sure that other threads will know to wake it.
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Schedule.h"
#include "STM.h"
#include "Trace.h"
#include "Threads.h"
#include "sm/Storage.h"

#include <stdio.h>

#define TRUE 1
#define FALSE 0

// ACQ_ASSERT is used for assertions which are only required for
// THREADED_RTS builds with fine-grained locking.

#if defined(STM_FG_LOCKS)
#define ACQ_ASSERT(_X) ASSERT(_X)
#define NACQ_ASSERT(_X) /*Nothing*/
#else
#define ACQ_ASSERT(_X) /*Nothing*/
#define NACQ_ASSERT(_X) ASSERT(_X)
#endif

/*......................................................................*/

// If SHAKE is defined then validation will sometimes spuriously fail. This
// helps test unusual code paths if genuine contention is rare.

#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)

#ifdef SHAKE
static const int do_shake = TRUE;
#else
static const int do_shake = FALSE;
#endif
static int shake_ctr = 0;
static int shake_lim = 1;

static int shake(void) {
  if (do_shake) {
    if (((shake_ctr++) % shake_lim) == 0) {
      shake_ctr = 1;
      shake_lim ++;
      return TRUE;
    }
    return FALSE;
  } else {
    return FALSE;
  }
}
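
// shake() is consulted from validate_and_acquire_ownership() and
// stmCommitTransaction() below; when it returns TRUE a validation that
// might have succeeded is reported as a failure, exercising the retry
// paths even when genuine contention is rare.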

/*......................................................................*/

// Helper macros for iterating over entries within a transaction
// record

#define FOR_EACH_ENTRY(_t,_x,CODE) do {                                        \
  StgTRecHeader *__t = (_t);                                                   \
  StgTRecChunk *__c = __t -> current_chunk;                                    \
  StgWord __limit = __c -> next_entry_idx;                                     \
  TRACE("%p : FOR_EACH_ENTRY, current_chunk=%p limit=%ld", __t, __c, __limit); \
  while (__c != END_STM_CHUNK_LIST) {                                          \
    StgWord __i;                                                               \
    for (__i = 0; __i < __limit; __i ++) {                                     \
      TRecEntry *_x = &(__c -> entries[__i]);                                  \
      do { CODE } while (0);                                                   \
    }                                                                          \
    __c = __c -> prev_chunk;                                                   \
    __limit = TREC_CHUNK_NUM_ENTRIES;                                          \
  }                                                                            \
 exit_for_each:                                                                \
  if (FALSE) goto exit_for_each;                                               \
} while (0)

#define BREAK_FOR_EACH goto exit_for_each

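// An illustrative use of these macros (not part of the RTS proper): count
// the entries in a trec that would write a new value on commit. The helper
// name count_updates is hypothetical.
//
//   static int count_updates(StgTRecHeader *t) {
//     int n = 0;
//     FOR_EACH_ENTRY(t, e, {
//       if (e -> expected_value != e -> new_value) n ++;
//     });
//     return n;
//   }
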
/*......................................................................*/

// if REUSE_MEMORY is defined then attempt to re-use descriptors, log chunks,
// and wait queue entries without GC

#define REUSE_MEMORY

/*......................................................................*/

#define IF_STM_UNIPROC(__X)  do { } while (0)
#define IF_STM_CG_LOCK(__X)  do { } while (0)
#define IF_STM_FG_LOCKS(__X) do { } while (0)

#if defined(STM_UNIPROC)
#undef IF_STM_UNIPROC
#define IF_STM_UNIPROC(__X) do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p)", trec, s);
  if (force_update) {
    s -> current_value = c;
    dirty_TVAR(cap,s);
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
}
#endif

#if defined(STM_CG_LOCK) /*........................................*/

#undef IF_STM_CG_LOCK
#define IF_STM_CG_LOCK(__X) do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;
static volatile StgTRecHeader *smp_locked = NULL;

static void lock_stm(StgTRecHeader *trec) {
  while (cas(&smp_locked, NULL, trec) != NULL) { }
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
  ASSERT (smp_locked == trec);
  smp_locked = 0;
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  ASSERT (smp_locked == trec);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT (smp_locked == trec);
  if (force_update) {
    s -> current_value = c;
    dirty_TVAR(cap,s);
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  ASSERT (smp_locked == trec);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
}
#endif

#if defined(STM_FG_LOCKS) /*...................................*/

#undef IF_STM_FG_LOCKS
#define IF_STM_FG_LOCKS(__X) do { __X } while (0)
static const StgBool config_use_read_phase = TRUE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  do {
    do {
      result = s -> current_value;
    } while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info);
  } while (cas((void *)&(s -> current_value),
               (StgWord)result, (StgWord)trec) != (StgWord)result);
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update STG_UNUSED) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT(s -> current_value == (StgClosure *)trec);
  s -> current_value = c;
  dirty_TVAR(cap,s);
}

static StgBool cond_lock_tvar(StgTRecHeader *trec,
                              StgTVar *s,
                              StgClosure *expected) {
  StgClosure *result;
  StgWord w;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
  result = (StgClosure *)w;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv) {
  return (cas(&(inv -> lock), 0, 1) == 0);
}

static void unlock_inv(StgAtomicInvariant *inv) {
  ASSERT(inv -> lock == 1);
  inv -> lock = 0;
}
#endif
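
// Illustrative sketch (not part of the build): under STM_FG_LOCKS a TVar
// is locked precisely when its current_value points at the owning TRec
// header, so code that merely wants to read a TVar spins until it sees a
// proper value, as read_current_value() does later in this file:
//
//   StgClosure *v;
//   do {
//     v = s -> current_value;
//   } while (GET_INFO(UNTAG_CLOSURE(v)) == &stg_TREC_HEADER_info);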

/*......................................................................*/

static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  StgInfoTable *info = get_itbl(c);
  return (info -> type) == TSO;
}

static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  return (c->header.info == &stg_ATOMIC_INVARIANT_info);
}

/*......................................................................*/

// Helper functions for thread blocking and unblocking

static void park_tso(StgTSO *tso) {
  ASSERT(tso -> why_blocked == NotBlocked);
  tso -> why_blocked = BlockedOnSTM;
  tso -> block_info.closure = (StgClosure *) END_TSO_QUEUE;
  TRACE("park_tso on tso=%p", tso);
}

static void unpark_tso(Capability *cap, StgTSO *tso) {
  // We will continue unparking threads while they remain on one of the wait
  // queues: it's up to the thread itself to remove itself from the wait
  // queues if it decides to do so when it is scheduled.

  // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
  // to avoid multiple CPUs unblocking the same TSO, and also to
  // synchronise with throwTo(). The first time the TSO is unblocked
  // we mark this fact by setting block_info.closure == STM_AWOKEN.
  // This way we can avoid sending further wakeup messages in the
  // future.
  lockTSO(tso);
  if (tso->why_blocked == BlockedOnSTM &&
      tso->block_info.closure == &stg_STM_AWOKEN_closure) {
    TRACE("unpark_tso already woken up tso=%p", tso);
  } else if (tso -> why_blocked == BlockedOnSTM) {
    TRACE("unpark_tso on tso=%p", tso);
    tso->block_info.closure = &stg_STM_AWOKEN_closure;
    tryWakeupThread(cap,tso);
  } else {
    TRACE("spurious unpark_tso on tso=%p", tso);
  }
  unlockTSO(tso);
}

static void unpark_waiters_on(Capability *cap, StgTVar *s) {
  StgTVarWatchQueue *q;
  StgTVarWatchQueue *trail;
  TRACE("unpark_waiters_on tvar=%p", s);
  // unblock TSOs in reverse order, to be a bit fairer (#2319)
  for (q = s -> first_watch_queue_entry, trail = q;
       q != END_STM_WATCH_QUEUE;
       q = q -> next_queue_entry) {
    trail = q;
  }
  q = trail;
  for (;
       q != END_STM_WATCH_QUEUE;
       q = q -> prev_queue_entry) {
    if (watcher_is_tso(q)) {
      unpark_tso(cap, (StgTSO *)(q -> closure));
    }
  }
}

/*......................................................................*/

// Helper functions for downstream allocation and initialization

static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
                                                             StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result;
  result = (StgInvariantCheckQueue *)allocate(cap, sizeofW(StgInvariantCheckQueue));
  SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
  result -> invariant = invariant;
  result -> my_execution = NO_TREC;
  return result;
}

static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
                                                   StgClosure *closure) {
  StgTVarWatchQueue *result;
  result = (StgTVarWatchQueue *)allocate(cap, sizeofW(StgTVarWatchQueue));
  SET_HDR (result, &stg_TVAR_WATCH_QUEUE_info, CCS_SYSTEM);
  result -> closure = closure;
  return result;
}

static StgTRecChunk *new_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result;
  result = (StgTRecChunk *)allocate(cap, sizeofW(StgTRecChunk));
  SET_HDR (result, &stg_TREC_CHUNK_info, CCS_SYSTEM);
  result -> prev_chunk = END_STM_CHUNK_LIST;
  result -> next_entry_idx = 0;
  return result;
}

static StgTRecHeader *new_stg_trec_header(Capability *cap,
                                          StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result;
  result = (StgTRecHeader *) allocate(cap, sizeofW(StgTRecHeader));
  SET_HDR (result, &stg_TREC_HEADER_info, CCS_SYSTEM);

  result -> enclosing_trec = enclosing_trec;
  result -> current_chunk = new_stg_trec_chunk(cap);
  result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;

  if (enclosing_trec == NO_TREC) {
    result -> state = TREC_ACTIVE;
  } else {
    ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
           enclosing_trec -> state == TREC_CONDEMNED);
    result -> state = enclosing_trec -> state;
  }

  return result;
}

/*......................................................................*/

// Allocation / deallocation functions that retain per-capability lists
// of closures that can be re-used

static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
                                                               StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result = NULL;
  if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
    result = new_stg_invariant_check_queue(cap, invariant);
  } else {
    result = cap -> free_invariant_check_queues;
    result -> invariant = invariant;
    result -> my_execution = NO_TREC;
    cap -> free_invariant_check_queues = result -> next_queue_entry;
  }
  return result;
}

static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
                                                     StgClosure *closure) {
  StgTVarWatchQueue *result = NULL;
  if (cap -> free_tvar_watch_queues == END_STM_WATCH_QUEUE) {
    result = new_stg_tvar_watch_queue(cap, closure);
  } else {
    result = cap -> free_tvar_watch_queues;
    result -> closure = closure;
    cap -> free_tvar_watch_queues = result -> next_queue_entry;
  }
  return result;
}

static void free_stg_tvar_watch_queue(Capability *cap,
                                      StgTVarWatchQueue *wq) {
#if defined(REUSE_MEMORY)
  wq -> next_queue_entry = cap -> free_tvar_watch_queues;
  cap -> free_tvar_watch_queues = wq;
#endif
}

static StgTRecChunk *alloc_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result = NULL;
  if (cap -> free_trec_chunks == END_STM_CHUNK_LIST) {
    result = new_stg_trec_chunk(cap);
  } else {
    result = cap -> free_trec_chunks;
    cap -> free_trec_chunks = result -> prev_chunk;
    result -> prev_chunk = END_STM_CHUNK_LIST;
    result -> next_entry_idx = 0;
  }
  return result;
}

static void free_stg_trec_chunk(Capability *cap,
                                StgTRecChunk *c) {
#if defined(REUSE_MEMORY)
  c -> prev_chunk = cap -> free_trec_chunks;
  cap -> free_trec_chunks = c;
#endif
}

static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
                                            StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result = NULL;
  if (cap -> free_trec_headers == NO_TREC) {
    result = new_stg_trec_header(cap, enclosing_trec);
  } else {
    result = cap -> free_trec_headers;
    cap -> free_trec_headers = result -> enclosing_trec;
    result -> enclosing_trec = enclosing_trec;
    result -> current_chunk -> next_entry_idx = 0;
    result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
    if (enclosing_trec == NO_TREC) {
      result -> state = TREC_ACTIVE;
    } else {
      ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
             enclosing_trec -> state == TREC_CONDEMNED);
      result -> state = enclosing_trec -> state;
    }
  }
  return result;
}

static void free_stg_trec_header(Capability *cap,
                                 StgTRecHeader *trec) {
#if defined(REUSE_MEMORY)
  StgTRecChunk *chunk = trec -> current_chunk -> prev_chunk;
  while (chunk != END_STM_CHUNK_LIST) {
    StgTRecChunk *prev_chunk = chunk -> prev_chunk;
    free_stg_trec_chunk(cap, chunk);
    chunk = prev_chunk;
  }
  trec -> current_chunk -> prev_chunk = END_STM_CHUNK_LIST;
  trec -> enclosing_trec = cap -> free_trec_headers;
  cap -> free_trec_headers = trec;
#endif
}

/*......................................................................*/

// Helper functions for managing waiting lists

static void build_watch_queue_entries_for_trec(Capability *cap,
                                               StgTSO *tso,
                                               StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE);

  TRACE("%p : build_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *q;
    StgTVarWatchQueue *fq;
    s = e -> tvar;
    TRACE("%p : adding tso=%p to watch queue for tvar=%p", trec, tso, s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    NACQ_ASSERT(s -> current_value == e -> expected_value);
    fq = s -> first_watch_queue_entry;
    q = alloc_stg_tvar_watch_queue(cap, (StgClosure*) tso);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
    e -> new_value = (StgClosure *) q;
    dirty_TVAR(cap,s); // we modified first_watch_queue_entry
  });
}

static void remove_watch_queue_entries_for_trec(Capability *cap,
                                                StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_WAITING ||
         trec -> state == TREC_CONDEMNED);

  TRACE("%p : remove_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *pq;
    StgTVarWatchQueue *nq;
    StgTVarWatchQueue *q;
    StgClosure *saw;
    s = e -> tvar;
    saw = lock_tvar(trec, s);
    q = (StgTVarWatchQueue *) (e -> new_value);
    TRACE("%p : removing tso=%p from watch queue for tvar=%p",
          trec,
          q -> closure,
          s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    nq = q -> next_queue_entry;
    pq = q -> prev_queue_entry;
    if (nq != END_STM_WATCH_QUEUE) {
      nq -> prev_queue_entry = pq;
    }
    if (pq != END_STM_WATCH_QUEUE) {
      pq -> next_queue_entry = nq;
    } else {
      ASSERT (s -> first_watch_queue_entry == q);
      s -> first_watch_queue_entry = nq;
      dirty_TVAR(cap,s); // we modified first_watch_queue_entry
    }
    free_stg_tvar_watch_queue(cap, q);
    unlock_tvar(cap, trec, s, saw, FALSE);
  });
}

/*......................................................................*/

static TRecEntry *get_new_entry(Capability *cap,
                                StgTRecHeader *t) {
  TRecEntry *result;
  StgTRecChunk *c;
  int i;

  c = t -> current_chunk;
  i = c -> next_entry_idx;
  ASSERT(c != END_STM_CHUNK_LIST);

  if (i < TREC_CHUNK_NUM_ENTRIES) {
    // Continue to use current chunk
    result = &(c -> entries[i]);
    c -> next_entry_idx ++;
  } else {
    // Current chunk is full: allocate a fresh one
    StgTRecChunk *nc;
    nc = alloc_stg_trec_chunk(cap);
    nc -> prev_chunk = c;
    nc -> next_entry_idx = 1;
    t -> current_chunk = nc;
    result = &(nc -> entries[0]);
  }

  return result;
}

/*......................................................................*/

static void merge_update_into(Capability *cap,
                              StgTRecHeader *t,
                              StgTVar *tvar,
                              StgClosure *expected_value,
                              StgClosure *new_value) {
  int found;

  // Look for an entry in this trec
  found = FALSE;
  FOR_EACH_ENTRY(t, e, {
    StgTVar *s;
    s = e -> tvar;
    if (s == tvar) {
      found = TRUE;
      if (e -> expected_value != expected_value) {
        // Must abort if the two entries start from different values
        TRACE("%p : update entries inconsistent at %p (%p vs %p)",
              t, tvar, e -> expected_value, expected_value);
        t -> state = TREC_CONDEMNED;
      }
      e -> new_value = new_value;
      BREAK_FOR_EACH;
    }
  });

  if (!found) {
    // No entry so far in this trec
    TRecEntry *ne;
    ne = get_new_entry(cap, t);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = new_value;
  }
}

/*......................................................................*/

static void merge_read_into(Capability *cap,
                            StgTRecHeader *trec,
                            StgTVar *tvar,
                            StgClosure *expected_value)
{
  int found;
  StgTRecHeader *t;

  found = FALSE;

  //
  // See #7493
  //
  // We need to look for an existing entry *anywhere* in the stack of
  // nested transactions. Otherwise, in stmCommitNestedTransaction()
  // we can't tell the difference between
  //
  //   (1) a read-only entry
  //   (2) an entry that writes back the original value
  //
  // since in both cases e->new_value == e->expected_value. In (1)
  // we want to do nothing, and in (2) we want to update e->new_value
  // in the outer transaction.
  //
  // Here we deal with the first possibility: we never create a
  // read-only entry in an inner transaction if there is an existing
  // outer entry; so we never have an inner read and an outer update.
  // So then in stmCommitNestedTransaction() we know we can always
  // write e->new_value over the outer entry, because the inner entry
  // is the most up to date.
  //
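  // For example (an illustrative scenario, not code from this file): if an
  // outer trec already holds an entry (tvar t, expected 0, new 1) and a
  // nested transaction then reads t, the loop below finds the outer entry
  // and adds nothing to the inner trec. Any inner entry that does end up
  // with new_value == expected_value is therefore a genuine read-only
  // entry, which is exactly what stmCommitNestedTransaction() relies on.
  //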
  for (t = trec; !found && t != NO_TREC; t = t -> enclosing_trec)
  {
    FOR_EACH_ENTRY(t, e, {
      if (e -> tvar == tvar) {
        found = TRUE;
        if (e -> expected_value != expected_value) {
          // Must abort if the two entries start from different values
          TRACE("%p : read entries inconsistent at %p (%p vs %p)",
                t, tvar, e -> expected_value, expected_value);
          t -> state = TREC_CONDEMNED;
        }
        BREAK_FOR_EACH;
      }
    });
  }

  if (!found) {
    // No entry found
    TRecEntry *ne;
    ne = get_new_entry(cap, trec);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = expected_value;
  }
}

/*......................................................................*/

static StgBool entry_is_update(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value != e -> new_value);
  return result;
}

#if defined(STM_FG_LOCKS)
static StgBool entry_is_read_only(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value == e -> new_value);
  return result;
}

static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
  StgClosure *c;
  StgBool result;
  c = s -> current_value;
  result = (c == (StgClosure *) h);
  return result;
}
#endif

// revert_ownership : release a lock on a TVar, storing back
// the value that it held when the lock was acquired. "revert_all"
// is set in stmWait and stmReWait when we acquired locks on all of
// the TVars involved. "revert_all" is not set in commit operations
// where we don't lock TVars that have been read from but not updated.

static void revert_ownership(Capability *cap STG_UNUSED,
                             StgTRecHeader *trec STG_UNUSED,
                             StgBool revert_all STG_UNUSED) {
#if defined(STM_FG_LOCKS)
  FOR_EACH_ENTRY(trec, e, {
    if (revert_all || entry_is_update(e)) {
      StgTVar *s;
      s = e -> tvar;
      if (tvar_is_locked(s, trec)) {
        unlock_tvar(cap, trec, s, e -> expected_value, TRUE);
      }
    }
  });
#endif
}

/*......................................................................*/

// validate_and_acquire_ownership : this performs the twin functions
// of checking that the TVars referred to by entries in trec hold the
// expected values and:
//
//   - locking the TVar (on updated TVars during commit, or all TVars
//     during wait)
//
//   - recording the TVar's current version number, i.e. its num_updates
//     field (on non-updated TVars during commit). These values are
//     stashed in the TRec entries and are then checked in check_read_only
//     to ensure that an atomic snapshot of all of these locations has been
//     seen.

static StgBool validate_and_acquire_ownership (Capability *cap,
                                               StgTRecHeader *trec,
                                               int acquire_all,
                                               int retain_ownership) {
  StgBool result;

  if (shake()) {
    TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
    return FALSE;
  }

  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));
  result = !((trec -> state) == TREC_CONDEMNED);
  if (result) {
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (acquire_all || entry_is_update(e)) {
        TRACE("%p : trying to acquire %p", trec, s);
        if (!cond_lock_tvar(trec, s, e -> expected_value)) {
          TRACE("%p : failed to acquire %p", trec, s);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      } else {
        ASSERT(config_use_read_phase);
        IF_STM_FG_LOCKS({
          TRACE("%p : will need to check %p", trec, s);
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          }
          e -> num_updates = s -> num_updates;
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match (race)", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          } else {
            TRACE("%p : need to check version %ld", trec, e -> num_updates);
          }
        });
      }
    });
  }

  if ((!result) || (!retain_ownership)) {
    revert_ownership(cap, trec, acquire_all);
  }

  return result;
}

// check_read_only : check that we've seen an atomic snapshot of the
// non-updated TVars accessed by a trec. This checks that each TVar's
// num_updates field is unchanged since the value was stashed in
// validate_and_acquire_ownership. If no update is seen to any TVar then
// all of them contained their expected values at the start of the call to
// check_read_only.
//
// The paper "Concurrent programming without locks" (under submission), or
// Keir Fraser's PhD dissertation "Practical lock-free programming" discuss
// this kind of algorithm.

static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
  StgBool result = TRUE;

  ASSERT (config_use_read_phase);
  IF_STM_FG_LOCKS({
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (entry_is_read_only(e)) {
        TRACE("%p : check_read_only for TVar %p, saw %ld", trec, s, e -> num_updates);

        // Note we need both checks and in this order as the TVar could be
        // locked by another transaction that is committing but has not yet
        // incremented `num_updates` (See #7815).
        if (s -> current_value != e -> expected_value ||
            s -> num_updates != e -> num_updates) {
          TRACE("%p : mismatch", trec);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      }
    });
  });

  return result;
}

/************************************************************************/

void stmPreGCHook (Capability *cap) {
  lock_stm(NO_TREC);
  TRACE("stmPreGCHook");
  cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
  cap->free_trec_chunks = END_STM_CHUNK_LIST;
  cap->free_trec_headers = NO_TREC;
  unlock_stm(NO_TREC);
}

/************************************************************************/

// check_read_only relies on version numbers held in TVars' "num_updates"
// fields not wrapping around while a transaction is in progress. The
// version number is incremented each time an update is committed to the
// TVar. This is unlikely to wrap around when 32-bit integers are used for
// the counts, but to ensure correctness we maintain a shared count of the
// maximum number of commit operations that may occur and check that this
// has not increased by more than 2^32 during a commit.
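//
// For example (illustrative arithmetic only): a committing transaction
// records max_commits at its start, and after its read phase
// stmCommitTransaction() below computes
//
//   (max_commits_at_end - max_commits_at_start)
//       + n_capabilities * TOKEN_BATCH_SIZE
//
// as an upper bound on the commits that may have run concurrently; if
// that bound reaches 2^32 the commit is failed, since some TVar's 32-bit
// num_updates counter could have wrapped all the way around to its old
// value.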

#define TOKEN_BATCH_SIZE 1024

static volatile StgInt64 max_commits = 0;

#if defined(THREADED_RTS)
static volatile StgWord token_locked = FALSE;

static void getTokenBatch(Capability *cap) {
  while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
  max_commits += TOKEN_BATCH_SIZE;
  TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
  cap -> transaction_tokens = TOKEN_BATCH_SIZE;
  token_locked = FALSE;
}

static void getToken(Capability *cap) {
  if (cap -> transaction_tokens == 0) {
    getTokenBatch(cap);
  }
  cap -> transaction_tokens --;
}
#else
static void getToken(Capability *cap STG_UNUSED) {
  // Nothing
}
#endif

/*......................................................................*/

StgTRecHeader *stmStartTransaction(Capability *cap,
                                   StgTRecHeader *outer) {
  StgTRecHeader *t;
  TRACE("%p : stmStartTransaction with %d tokens",
        outer,
        cap -> transaction_tokens);

  getToken(cap);

  t = alloc_stg_trec_header(cap, outer);
  TRACE("%p : stmStartTransaction()=%p", outer, t);
  return t;
}

/*......................................................................*/

void stmAbortTransaction(Capability *cap,
                         StgTRecHeader *trec) {
  StgTRecHeader *et;
  TRACE("%p : stmAbortTransaction", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  et = trec -> enclosing_trec;
  if (et == NO_TREC) {
    // We're a top-level transaction: remove any watch queue entries that
    // we may have.
    TRACE("%p : aborting top-level transaction", trec);

    if (trec -> state == TREC_WAITING) {
      ASSERT (trec -> enclosing_trec == NO_TREC);
      TRACE("%p : stmAbortTransaction aborting waiting transaction", trec);
      remove_watch_queue_entries_for_trec(cap, trec);
    }

  } else {
    // We're a nested transaction: merge our read set into our parent's
    TRACE("%p : retaining read-set into parent %p", trec, et);

    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s = e -> tvar;
      merge_read_into(cap, et, s, e -> expected_value);
    });
  }

  trec -> state = TREC_ABORTED;
  unlock_stm(trec);

  TRACE("%p : stmAbortTransaction done", trec);
}

/*......................................................................*/

void stmFreeAbortedTRec(Capability *cap,
                        StgTRecHeader *trec) {
  TRACE("%p : stmFreeAbortedTRec", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_CONDEMNED) ||
          (trec -> state == TREC_ABORTED));

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmFreeAbortedTRec done", trec);
}

/*......................................................................*/

void stmCondemnTransaction(Capability *cap,
                           StgTRecHeader *trec) {
  TRACE("%p : stmCondemnTransaction", trec);
  ASSERT (trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  if (trec -> state == TREC_WAITING) {
    ASSERT (trec -> enclosing_trec == NO_TREC);
    TRACE("%p : stmCondemnTransaction condemning waiting transaction", trec);
    remove_watch_queue_entries_for_trec(cap, trec);
  }
  trec -> state = TREC_CONDEMNED;
  unlock_stm(trec);

  TRACE("%p : stmCondemnTransaction done", trec);
}

/*......................................................................*/

StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
  StgTRecHeader *t;
  StgBool result;

  TRACE("%p : stmValidateNestOfTransactions", trec);
  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  t = trec;
  result = TRUE;
  while (t != NO_TREC) {
    result &= validate_and_acquire_ownership(cap, t, TRUE, FALSE);
    t = t -> enclosing_trec;
  }

  if (!result && trec -> state != TREC_WAITING) {
    trec -> state = TREC_CONDEMNED;
  }

  unlock_stm(trec);

  TRACE("%p : stmValidateNestOfTransactions()=%d", trec, result);
  return result;
}

/*......................................................................*/

static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeader **in) {
  TRecEntry *result = NULL;

  TRACE("%p : get_entry_for TVar %p", trec, tvar);
  ASSERT(trec != NO_TREC);

  do {
    FOR_EACH_ENTRY(trec, e, {
      if (e -> tvar == tvar) {
        result = e;
        if (in != NULL) {
          *in = trec;
        }
        BREAK_FOR_EACH;
      }
    });
    trec = trec -> enclosing_trec;
  } while (result == NULL && trec != NO_TREC);

  return result;
}

/*......................................................................*/

/*
 * Add/remove links between an invariant and the TVars that it reads.
 * The caller must have locked the TVars involved and the invariant.
 */

static void disconnect_invariant(Capability *cap,
                                 StgAtomicInvariant *inv) {
  StgTRecHeader *last_execution = inv -> last_execution;

  TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);

  FOR_EACH_ENTRY(last_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = s -> first_watch_queue_entry;
    DEBUG_ONLY( StgBool found = FALSE );
    TRACE(" looking for trec on tvar=%p", s);
    for (q = s -> first_watch_queue_entry;
         q != END_STM_WATCH_QUEUE;
         q = q -> next_queue_entry) {
      if (q -> closure == (StgClosure*)inv) {
        StgTVarWatchQueue *pq;
        StgTVarWatchQueue *nq;
        nq = q -> next_queue_entry;
        pq = q -> prev_queue_entry;
        if (nq != END_STM_WATCH_QUEUE) {
          nq -> prev_queue_entry = pq;
        }
        if (pq != END_STM_WATCH_QUEUE) {
          pq -> next_queue_entry = nq;
        } else {
          ASSERT (s -> first_watch_queue_entry == q);
          s -> first_watch_queue_entry = nq;
          dirty_TVAR(cap,s); // we modified first_watch_queue_entry
        }
        TRACE(" found it in watch queue entry %p", q);
        free_stg_tvar_watch_queue(cap, q);
        DEBUG_ONLY( found = TRUE );
        break;
      }
    }
    ASSERT(found);
  });
  inv -> last_execution = NO_TREC;
}

static void connect_invariant_to_trec(Capability *cap,
                                      StgAtomicInvariant *inv,
                                      StgTRecHeader *my_execution) {
  TRACE("connecting execution inv=%p trec=%p", inv, my_execution);

  ASSERT(inv -> last_execution == NO_TREC);

  FOR_EACH_ENTRY(my_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
    StgTVarWatchQueue *fq = s -> first_watch_queue_entry;

    // We leave "last_execution" holding the values that will be
    // in the heap after the transaction we're in the process
    // of committing has finished.
    TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
    if (entry != NULL) {
      e -> expected_value = entry -> new_value;
      e -> new_value = entry -> new_value;
    }

    TRACE(" linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
    dirty_TVAR(cap,s); // we modified first_watch_queue_entry
  });

  inv -> last_execution = my_execution;
}

/*
 * Add a new invariant to the trec's list of invariants to check on commit
 */
void stmAddInvariantToCheck(Capability *cap,
                            StgTRecHeader *trec,
                            StgClosure *code) {
  StgAtomicInvariant *invariant;
  StgInvariantCheckQueue *q;
  TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE ||
         trec -> state == TREC_CONDEMNED);

  // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
  //    to signal that this is a new invariant in the current atomic block

  invariant = (StgAtomicInvariant *) allocate(cap, sizeofW(StgAtomicInvariant));
  TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
  SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
  invariant -> code = code;
  invariant -> last_execution = NO_TREC;
  invariant -> lock = 0;

  // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec

  q = alloc_stg_invariant_check_queue(cap, invariant);
  TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
  q -> invariant = invariant;
  q -> my_execution = NO_TREC;
  q -> next_queue_entry = trec -> invariants_to_check;
  trec -> invariants_to_check = q;

  TRACE("%p : stmAddInvariantToCheck done", trec);
}

/*
 * Fill in the trec's list of invariants that might be violated by the
 * current transaction.
 */

StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
  StgTRecChunk *c;
  TRACE("%p : stmGetInvariantsToCheck, head was %p",
        trec,
        trec -> invariants_to_check);

  ASSERT(trec != NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));
  ASSERT(trec -> enclosing_trec == NO_TREC);

  lock_stm(trec);
  c = trec -> current_chunk;
  while (c != END_STM_CHUNK_LIST) {
    unsigned int i;
    for (i = 0; i < c -> next_entry_idx; i ++) {
      TRecEntry *e = &(c -> entries[i]);
      if (entry_is_update(e)) {
        StgTVar *s = e -> tvar;
        StgClosure *old = lock_tvar(trec, s);

        // Pick up any invariants on the TVar being updated
        // by entry "e"

        StgTVarWatchQueue *q;
        TRACE("%p : checking for invariants on %p", trec, s);
        for (q = s -> first_watch_queue_entry;
             q != END_STM_WATCH_QUEUE;
             q = q -> next_queue_entry) {
          if (watcher_is_invariant(q)) {
            StgBool found = FALSE;
            StgInvariantCheckQueue *q2;
            TRACE("%p : Touching invariant %p", trec, q -> closure);
            for (q2 = trec -> invariants_to_check;
                 q2 != END_INVARIANT_CHECK_QUEUE;
                 q2 = q2 -> next_queue_entry) {
              if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
                TRACE("%p : Already found %p", trec, q -> closure);
                found = TRUE;
                break;
              }
            }

            if (!found) {
              StgInvariantCheckQueue *q3;
              TRACE("%p : Not already found %p", trec, q -> closure);
              q3 = alloc_stg_invariant_check_queue(cap,
                                                   (StgAtomicInvariant*) q -> closure);
              q3 -> next_queue_entry = trec -> invariants_to_check;
              trec -> invariants_to_check = q3;
            }
          }
        }

        unlock_tvar(cap, trec, s, old, FALSE);
      }
    }
    c = c -> prev_chunk;
  }

  unlock_stm(trec);

  TRACE("%p : stmGetInvariantsToCheck, head now %p",
        trec,
        trec -> invariants_to_check);

  return (trec -> invariants_to_check);
}

/*......................................................................*/

StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
  int result;
  StgInt64 max_commits_at_start = max_commits;
  StgBool touched_invariants;
  StgBool use_read_phase;

  TRACE("%p : stmCommitTransaction()", trec);
  ASSERT (trec != NO_TREC);

  lock_stm(trec);

  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_CONDEMNED));

  // touched_invariants is true if we've written to a TVar with invariants
  // attached to it, or if we're trying to add a new invariant to the system.

  touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);

  // If we have touched invariants then (i) lock the invariant, and (ii) add
  // the invariant's read set to our own. Step (i) is needed to serialize
  // concurrent transactions that attempt to make conflicting updates
  // to the invariant's trec (suppose it read from t1 and t2, and that one
  // concurrent transaction writes only to t1, and a second writes only to
  // t2). Step (ii) is needed so that both transactions will lock t1 and t2
  // to gain access to their wait lists (and hence be able to unhook the
  // invariant from both tvars).

  if (touched_invariants) {
    StgInvariantCheckQueue *q = trec -> invariants_to_check;
    TRACE("%p : locking invariants", trec);
    while (q != END_INVARIANT_CHECK_QUEUE) {
      StgTRecHeader *inv_old_trec;
      StgAtomicInvariant *inv;
      TRACE("%p : locking invariant %p", trec, q -> invariant);
      inv = q -> invariant;
      if (!lock_inv(inv)) {
        TRACE("%p : failed to lock %p", trec, inv);
        trec -> state = TREC_CONDEMNED;
        break;
      }

      inv_old_trec = inv -> last_execution;
      if (inv_old_trec != NO_TREC) {
        StgTRecChunk *c = inv_old_trec -> current_chunk;
        while (c != END_STM_CHUNK_LIST) {
          unsigned int i;
          for (i = 0; i < c -> next_entry_idx; i ++) {
            TRecEntry *e = &(c -> entries[i]);
            TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
            merge_read_into (cap, trec, e -> tvar, e -> expected_value);
          }
          c = c -> prev_chunk;
        }
      }
      q = q -> next_queue_entry;
    }
    TRACE("%p : finished locking invariants", trec);
  }

  // Use a read-phase (i.e. don't lock TVars we've read but not updated) if
  // (i) the configuration lets us use a read phase, and (ii) we've not
  // touched or introduced any invariants.
  //
  // In principle we could extend the implementation to support a read-phase
  // and invariants, but it complicates the logic: the links between
  // invariants and TVars are managed by the TVar watch queues which are
  // protected by the TVar's locks.

  use_read_phase = ((config_use_read_phase) && (!touched_invariants));

  result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.
    ASSERT (trec -> state == TREC_ACTIVE);

    if (use_read_phase) {
      StgInt64 max_commits_at_end;
      StgInt64 max_concurrent_commits;
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
      TRACE("%p : read-check %s", trec, result ? "succeeded" : "failed");

      max_commits_at_end = max_commits;
      max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
                                (n_capabilities * TOKEN_BATCH_SIZE));
      if (((max_concurrent_commits >> 32) > 0) || shake()) {
        result = FALSE;
      }
    }

    if (result) {
      // We now know that all of the read-only locations held their expected
      // values at the end of the call to validate_and_acquire_ownership.
      // This forms the linearization point of the commit.

      // 1. If we have touched or introduced any invariants then unhook them
      //    from the TVars they depended on last time they were executed
      //    and hook them on the TVars that they now depend on.
      if (touched_invariants) {
        StgInvariantCheckQueue *q = trec -> invariants_to_check;
        while (q != END_INVARIANT_CHECK_QUEUE) {
          StgAtomicInvariant *inv = q -> invariant;
          if (inv -> last_execution != NO_TREC) {
            disconnect_invariant(cap, inv);
          }

          TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
          connect_invariant_to_trec(cap, inv, q -> my_execution);

          TRACE("%p : unlocking invariant %p", trec, inv);
          unlock_inv(inv);

          q = q -> next_queue_entry;
        }
      }

      // 2. Make the updates required by the transaction
      FOR_EACH_ENTRY(trec, e, {
        StgTVar *s;
        s = e -> tvar;
        if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
          // Either the entry is an update or we're not using a read phase:
          // write the value back to the TVar, unlocking it if necessary.

          ACQ_ASSERT(tvar_is_locked(s, trec));
          TRACE("%p : writing %p to %p, waking waiters", trec, e -> new_value, s);
          unpark_waiters_on(cap,s);
          IF_STM_FG_LOCKS({
            s -> num_updates ++;
          });
          unlock_tvar(cap, trec, s, e -> new_value, TRUE);
        }
        ACQ_ASSERT(!tvar_is_locked(s, trec));
      });
    } else {
      revert_ownership(cap, trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
  StgTRecHeader *et;
  int result;
  ASSERT (trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
  TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
  ASSERT ((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  et = trec -> enclosing_trec;
  result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.

    if (config_use_read_phase) {
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
    }
    if (result) {
      // We now know that all of the read-only locations held their expected
      // values at the end of the call to validate_and_acquire_ownership.
      // This forms the linearization point of the commit.

      TRACE("%p : read-check succeeded", trec);
      FOR_EACH_ENTRY(trec, e, {
        // Merge each entry into the enclosing transaction record, release all
        // locks.

        StgTVar *s;
        s = e -> tvar;
        if (entry_is_update(e)) {
          unlock_tvar(cap, trec, s, e -> expected_value, FALSE);
        }
        merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
        ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
      });
    } else {
      revert_ownership(cap, trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitNestedTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
  int result;
  TRACE("%p : stmWait(%p)", trec, tso);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_ACTIVE) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
  if (result) {
    // The transaction is valid so far so we can actually start waiting.
    // (Otherwise the transaction was not valid and the thread will have to
    // retry it).

    // Put ourselves to sleep. We retain locks on all the TVars involved
    // until we are sound asleep : (a) on the wait queues, (b) BlockedOnSTM
    // in the TSO, (c) TREC_WAITING in the TRec.
    build_watch_queue_entries_for_trec(cap, tso, trec);
    park_tso(tso);
    trec -> state = TREC_WAITING;

    // We haven't released ownership of the transaction yet. The TSO
    // has been put on the wait queue for the TVars it is waiting for,
    // but we haven't yet tidied up the TSO's stack and made it safe
    // to wake up the TSO. Therefore, we must wait until the TSO is
    // safe to wake up before we release ownership - when all is well,
    // the runtime will call stmWaitUnlock() below, with the same
    // TRec.

  } else {
    unlock_stm(trec);
    free_stg_trec_header(cap, trec);
  }

  TRACE("%p : stmWait(%p)=%d", trec, tso, result);
  return result;
}


void
stmWaitUnlock(Capability *cap, StgTRecHeader *trec) {
  revert_ownership(cap, trec, TRUE);
  unlock_stm(trec);
}

/*......................................................................*/

StgBool stmReWait(Capability *cap, StgTSO *tso) {
  int result;
  StgTRecHeader *trec = tso->trec;

  TRACE("%p : stmReWait", trec);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> enclosing_trec == NO_TREC);
  ASSERT ((trec -> state == TREC_WAITING) ||
          (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
  TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
  if (result) {
    // The transaction remains valid -- do nothing because it is already on
    // the wait queues
    ASSERT (trec -> state == TREC_WAITING);
    park_tso(tso);
    revert_ownership(cap, trec, TRUE);
  } else {
    // The transaction has become invalid. We can now remove it from the wait
    // queues.
    if (trec -> state != TREC_CONDEMNED) {
      remove_watch_queue_entries_for_trec (cap, trec);
    }
    free_stg_trec_header(cap, trec);
  }
  unlock_stm(trec);

  TRACE("%p : stmReWait()=%d", trec, result);
  return result;
}

/*......................................................................*/

static StgClosure *read_current_value(StgTRecHeader *trec STG_UNUSED, StgTVar *tvar) {
  StgClosure *result;
  result = tvar -> current_value;

#if defined(STM_FG_LOCKS)
  while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info) {
    TRACE("%p : read_current_value(%p) saw %p", trec, tvar, result);
    result = tvar -> current_value;
  }
#endif

  TRACE("%p : read_current_value(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

StgClosure *stmReadTVar(Capability *cap,
                        StgTRecHeader *trec,
                        StgTVar *tvar) {
  StgTRecHeader *entry_in = NULL;
  StgClosure *result = NULL;
  TRecEntry *entry = NULL;
  TRACE("%p : stmReadTVar(%p)", trec, tvar);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> state == TREC_ACTIVE ||
          trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      result = entry -> new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = entry -> new_value;
      result = new_entry -> new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = current_value;
    result = current_value;
  }

  TRACE("%p : stmReadTVar(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

void stmWriteTVar(Capability *cap,
                  StgTRecHeader *trec,
                  StgTVar *tvar,
                  StgClosure *new_value) {

  StgTRecHeader *entry_in = NULL;
  TRecEntry *entry = NULL;
  TRACE("%p : stmWriteTVar(%p, %p)", trec, tvar, new_value);
  ASSERT (trec != NO_TREC);
  ASSERT (trec -> state == TREC_ACTIVE ||
          trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      entry -> new_value = new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = new_value;
  }

  TRACE("%p : stmWriteTVar done", trec);
}

/*......................................................................*/