@@ -229,6 +229,9 @@ static void _gil_initialize(struct _gil_runtime_state *gil)
 
 static int gil_created(struct _gil_runtime_state *gil)
 {
+    if (gil == NULL) {
+        return 0;
+    }
     return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
 }
 
@@ -273,10 +276,9 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 #endif
 
 static void
-drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
-         PyThreadState *tstate)
+drop_gil(struct _ceval_state *ceval, PyThreadState *tstate)
 {
-    struct _gil_runtime_state *gil = &ceval->gil;
+    struct _gil_runtime_state *gil = ceval->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
         Py_FatalError("drop_gil: GIL is not locked");
     }
@@ -296,7 +298,7 @@ drop_gil(struct _ceval_runtime_state *ceval, struct _ceval_state *ceval2,
     MUTEX_UNLOCK(gil->mutex);
 
 #ifdef FORCE_SWITCHING
-    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request) && tstate != NULL) {
+    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
         if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
@@ -358,9 +360,8 @@ take_gil(PyThreadState *tstate)
 
     assert(is_tstate_valid(tstate));
     PyInterpreterState *interp = tstate->interp;
-    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &interp->ceval;
-    struct _gil_runtime_state *gil = &ceval->gil;
+    struct _ceval_state *ceval = &interp->ceval;
+    struct _gil_runtime_state *gil = ceval->gil;
 
     /* Check that _PyEval_InitThreads() was called to create the lock */
     assert(gil_created(gil));
@@ -434,12 +435,12 @@ take_gil(PyThreadState *tstate)
            in take_gil() while the main thread called
            wait_for_thread_shutdown() from Py_Finalize(). */
         MUTEX_UNLOCK(gil->mutex);
-        drop_gil(ceval, ceval2, tstate);
+        drop_gil(ceval, tstate);
         PyThread_exit_thread();
     }
     assert(is_tstate_valid(tstate));
 
-    if (_Py_atomic_load_relaxed(&ceval2->gil_drop_request)) {
+    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
         RESET_GIL_DROP_REQUEST(interp);
     }
     else {
@@ -448,7 +449,7 @@ take_gil(PyThreadState *tstate)
            handle signals.
 
            Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
-        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
+        COMPUTE_EVAL_BREAKER(interp, &_PyRuntime.ceval, ceval);
     }
 
     /* Don't access tstate if the thread must exit */
@@ -463,33 +464,47 @@ take_gil(PyThreadState *tstate)
 
 void _PyEval_SetSwitchInterval(unsigned long microseconds)
 {
-    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
+    /* XXX per-interpreter GIL */
+    PyInterpreterState *interp = _PyInterpreterState_Main();
+    struct _gil_runtime_state *gil = interp->ceval.gil;
+    assert(gil != NULL);
     gil->interval = microseconds;
 }
 
 unsigned long _PyEval_GetSwitchInterval(void)
 {
-    struct _gil_runtime_state *gil = &_PyRuntime.ceval.gil;
+    /* XXX per-interpreter GIL */
+    PyInterpreterState *interp = _PyInterpreterState_Main();
+    struct _gil_runtime_state *gil = interp->ceval.gil;
+    assert(gil != NULL);
     return gil->interval;
 }
 
 
 int
-_PyEval_ThreadsInitialized(_PyRuntimeState *runtime)
+_PyEval_ThreadsInitialized(void)
 {
-    return gil_created(&runtime->ceval.gil);
+    /* XXX per-interpreter GIL */
+    PyInterpreterState *interp = _PyInterpreterState_Main();
+    if (interp == NULL) {
+        return 0;
+    }
+    struct _gil_runtime_state *gil = interp->ceval.gil;
+    return gil_created(gil);
 }
 
 int
 PyEval_ThreadsInitialized(void)
 {
-    _PyRuntimeState *runtime = &_PyRuntime;
-    return _PyEval_ThreadsInitialized(runtime);
+    return _PyEval_ThreadsInitialized();
 }
 
 PyStatus
 _PyEval_InitGIL(PyThreadState *tstate)
 {
+    assert(tstate->interp->ceval.gil == NULL);
+
+    /* XXX per-interpreter GIL */
     struct _gil_runtime_state *gil = &tstate->interp->runtime->ceval.gil;
     if (!_Py_IsMainInterpreter(tstate->interp)) {
         /* Currently, the GIL is shared by all interpreters,
@@ -504,16 +519,21 @@ _PyEval_InitGIL(PyThreadState *tstate)
 
     PyThread_init_thread();
     create_gil(gil);
-    take_gil(tstate);
     assert(gil_created(gil));
-
     tstate->interp->ceval.gil = gil;
+    take_gil(tstate);
     return _PyStatus_OK();
 }
 
 void
 _PyEval_FiniGIL(PyInterpreterState *interp)
 {
+    if (interp->ceval.gil == NULL) {
+        /* It was already finalized (or hasn't been initialized yet). */
+        return;
+    }
+
+    /* XXX per-interpreter GIL */
     struct _gil_runtime_state *gil = &interp->runtime->ceval.gil;
     if (!_Py_IsMainInterpreter(interp)) {
         /* Currently, the GIL is shared by all interpreters,
@@ -560,22 +580,19 @@ PyEval_AcquireLock(void)
 void
 PyEval_ReleaseLock(void)
 {
-    _PyRuntimeState *runtime = &_PyRuntime;
     PyThreadState *tstate = _PyThreadState_GET();
     /* This function must succeed when the current thread state is NULL.
        We therefore avoid PyThreadState_Get() which dumps a fatal error
        in debug mode. */
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
-    struct _ceval_state *ceval2 = &tstate->interp->ceval;
-    drop_gil(ceval, ceval2, tstate);
+    struct _ceval_state *ceval = &tstate->interp->ceval;
+    drop_gil(ceval, tstate);
 }
 
 void
 _PyEval_ReleaseLock(PyThreadState *tstate)
 {
-    struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
-    struct _ceval_state *ceval2 = &tstate->interp->ceval;
-    drop_gil(ceval, ceval2, tstate);
+    struct _ceval_state *ceval = &tstate->interp->ceval;
+    drop_gil(ceval, tstate);
 }
 
 void
@@ -600,9 +617,8 @@ PyEval_ReleaseThread(PyThreadState *tstate)
     if (new_tstate != tstate) {
         Py_FatalError("wrong thread state");
     }
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
-    struct _ceval_state *ceval2 = &tstate->interp->ceval;
-    drop_gil(ceval, ceval2, tstate);
+    struct _ceval_state *ceval = &tstate->interp->ceval;
+    drop_gil(ceval, tstate);
 }
 
 #ifdef HAVE_FORK
@@ -612,9 +628,9 @@ PyEval_ReleaseThread(PyThreadState *tstate)
 PyStatus
 _PyEval_ReInitThreads(PyThreadState *tstate)
 {
-    _PyRuntimeState *runtime = tstate->interp->runtime;
+    assert(tstate->interp == _PyInterpreterState_Main());
 
-    struct _gil_runtime_state *gil = &runtime->ceval.gil;
+    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
     if (!gil_created(gil)) {
         return _PyStatus_OK();
     }
@@ -649,10 +665,9 @@ PyEval_SaveThread(void)
     PyThreadState *tstate = _PyThreadState_Swap(runtime, NULL);
     _Py_EnsureTstateNotNULL(tstate);
 
-    struct _ceval_runtime_state *ceval = &runtime->ceval;
-    struct _ceval_state *ceval2 = &tstate->interp->ceval;
-    assert(gil_created(&ceval->gil));
-    drop_gil(ceval, ceval2, tstate);
+    struct _ceval_state *ceval = &tstate->interp->ceval;
+    assert(gil_created(ceval->gil));
+    drop_gil(ceval, tstate);
     return tstate;
 }
 
@@ -911,6 +926,7 @@ Py_MakePendingCalls(void)
 void
 _PyEval_InitRuntimeState(struct _ceval_runtime_state *ceval)
 {
+    /* XXX per-interpreter GIL */
     _gil_initialize(&ceval->gil);
 }
 
@@ -969,7 +985,7 @@ _Py_HandlePending(PyThreadState *tstate)
         if (_PyThreadState_Swap(runtime, NULL) != tstate) {
             Py_FatalError("tstate mix-up");
         }
-        drop_gil(ceval, interp_ceval_state, tstate);
+        drop_gil(interp_ceval_state, tstate);
 
         /* Other threads may run now */
 
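
The overall shape of the change is that each interpreter's ceval state now carries a pointer to the (still runtime-owned) GIL state, and callers such as drop_gil() and take_gil() dereference that pointer instead of reaching through the runtime. Below is a minimal toy sketch of that indirection pattern; the struct and function names are invented for illustration and are not CPython's.

#include <assert.h>
#include <stdio.h>

struct toy_gil_state {
    int locked;                 /* stand-in for the real GIL bookkeeping */
    unsigned long interval;
};

struct toy_runtime_state {
    struct toy_gil_state gil;   /* single, runtime-owned GIL (for now) */
};

struct toy_interp_state {
    struct toy_gil_state *gil;  /* per-interpreter pointer, set at init */
};

/* Analogous to the reordered _PyEval_InitGIL(): set up the runtime GIL,
 * point the interpreter at it, and only then let anyone take the lock. */
static void toy_init_gil(struct toy_runtime_state *runtime,
                         struct toy_interp_state *interp)
{
    runtime->gil.locked = 0;
    runtime->gil.interval = 5000;
    interp->gil = &runtime->gil;
}

/* Analogous to the new drop_gil() convention: callers pass only the
 * interpreter-level state and go through its gil pointer. */
static void toy_drop_gil(struct toy_interp_state *interp)
{
    assert(interp->gil != NULL);
    interp->gil->locked = 0;
}

int main(void)
{
    struct toy_runtime_state runtime;
    struct toy_interp_state interp;

    toy_init_gil(&runtime, &interp);
    interp.gil->locked = 1;      /* "take" the lock */
    toy_drop_gil(&interp);       /* "drop" it via the per-interpreter pointer */
    printf("locked=%d interval=%lu\n", interp.gil->locked, interp.gil->interval);
    return 0;
}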