@@ -140,7 +140,7 @@ _PyOptimizer_Optimize(
140140 _PyThreadStateImpl * _tstate = (_PyThreadStateImpl * )tstate ;
141141 int chain_depth = _tstate -> jit_tracer_state .initial_state .chain_depth ;
142142 if (!FT_ATOMIC_LOAD_CHAR_RELAXED (_tstate -> jit_executor_state .jit )) {
143- // gh-140936: It is possible that interp-> jit will become false during
143+ // gh-140936: It is possible that jit_executor_state.jit will become false during
144144 // interpreter finalization. However, the specialized JUMP_BACKWARD_JIT
145145 // instruction may still be present. In this case, we should
146146 // return immediately without optimization.
@@ -1546,7 +1546,7 @@ unlink_executor(_PyExecutorObject *executor)
15461546 return ;
15471547 }
15481548 _PyExecutorLinkListNode * links = & executor -> vm_data .links ;
1549- assert (executor -> vm_data .valid );
1549+ assert (FT_ATOMIC_LOAD_UINT8_RELAXED ( executor -> vm_data .valid ) == 0 );
15501550 _PyExecutorObject * next = links -> next ;
15511551 _PyExecutorObject * prev = links -> previous ;
15521552 if (next != NULL ) {
@@ -1657,9 +1657,7 @@ _Py_ExecutorDetach(_PyExecutorObject *executor)
16571657 if (code == NULL ) {
16581658 return ;
16591659 }
1660- #ifdef Py_GIL_DISABLED
1661- assert (_PyInterpreterState_GET ()-> stoptheworld .world_stopped || PyMutex_IsLocked (& ((PyObject * )code )-> ob_mutex ));
1662- #endif
1660+ Py_BEGIN_CRITICAL_SECTION (code );
16631661 _Py_CODEUNIT * instruction = & _PyCode_CODE (code )[executor -> vm_data .index ];
16641662 assert (instruction -> op .code == ENTER_EXECUTOR );
16651663 int index = instruction -> op .arg ;
@@ -1668,33 +1666,27 @@ _Py_ExecutorDetach(_PyExecutorObject *executor)
16681666 instruction -> op .arg = executor -> vm_data .oparg ;
16691667 executor -> vm_data .code = NULL ;
16701668 code -> co_executors -> executors [index ] = NULL ;
1669+ Py_END_CRITICAL_SECTION ();
16711670}
16721671
16731672static int
16741673executor_clear (PyObject * op )
16751674{
16761675 _PyExecutorObject * executor = _PyExecutorObject_CAST (op );
1677- if (!executor -> vm_data .valid ) {
1678- return 0 ;
1679- }
1680- assert (executor -> vm_data .valid == 1 );
1676+ assert (FT_ATOMIC_LOAD_UINT8_RELAXED (executor -> vm_data .valid ) == 0 );
16811677 unlink_executor (executor );
1682- executor -> vm_data .valid = 0 ;
16831678
16841679 /* It is possible for an executor to form a reference
16851680 * cycle with itself, so decref'ing a side exit could
16861681 * free the executor unless we hold a strong reference to it
16871682 */
16881683 _PyExecutorObject * cold = _PyExecutor_GetColdExecutor ();
1689- Py_INCREF (executor );
16901684 for (uint32_t i = 0 ; i < executor -> exit_count ; i ++ ) {
16911685 executor -> exits [i ].temperature = initial_unreachable_backoff_counter ();
16921686 _PyExecutorObject * e = executor -> exits [i ].executor ;
16931687 executor -> exits [i ].executor = cold ;
1694- Py_DECREF (e );
16951688 }
16961689 _Py_ExecutorDetach (executor );
1697- Py_DECREF (executor );
16981690 return 0 ;
16991691}
17001692
@@ -1705,8 +1697,8 @@ _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj)
17051697 _Py_BloomFilter_Add (& executor -> vm_data .bloom , obj );
17061698}
17071699
1708- void
1709- _PyJit_Tracer_InvalidateDependency (PyThreadState * tstate , void * obj )
1700+ static void
1701+ jit_tracer_invalidate_dependency (PyThreadState * tstate , void * obj )
17101702{
17111703 _PyBloomFilter obj_filter ;
17121704 _Py_BloomFilter_Init (& obj_filter );
@@ -1726,7 +1718,7 @@ invalidate_sub_executors(_PyThreadStateImpl *tstate, _PyExecutorObject *executor
17261718 if (!executor -> vm_data .valid ) {
17271719 return ;
17281720 }
1729- executor -> vm_data .valid = 0 ;
1721+ FT_ATOMIC_STORE_UINT8 ( executor -> vm_data .valid , 0 ) ;
17301722 for (uint32_t i = 0 ; i < executor -> exit_count ; i ++ ) {
17311723 _PyExecutorObject * next = executor -> exits [i ].executor ;
17321724 if (next != tstate -> jit_executor_state .cold_dynamic_executor && next != tstate -> jit_executor_state .cold_executor ) {
@@ -1751,10 +1743,13 @@ _Py_Executors_InvalidateDependencyLockHeld(PyInterpreterState *interp, void *obj
17511743 /* Clearing an executor can deallocate others, so we need to make a list of
17521744 * executors to invalidate first */
17531745 _Py_FOR_EACH_TSTATE_UNLOCKED (interp , p ) {
1754- _PyJit_Tracer_InvalidateDependency (p , obj );
1746+ jit_tracer_invalidate_dependency (p , obj );
17551747 for (_PyExecutorObject * exec = ((_PyThreadStateImpl * )p )-> jit_executor_state .executor_list_head ; exec != NULL ;) {
17561748 if (bloom_filter_may_contain (& exec -> vm_data .bloom , & obj_filter )) {
17571749 invalidate_sub_executors ((_PyThreadStateImpl * )p , exec );
1750+ if (is_invalidation ) {
1751+ OPT_STAT_INC (executors_invalidated );
1752+ }
17581753 }
17591754 _PyExecutorObject * next = exec -> vm_data .links .next ;
17601755 exec = next ;
@@ -1781,7 +1776,10 @@ _Py_Executors_InvalidateAllLockHeld(PyInterpreterState *interp, int is_invalidat
17811776 FT_ATOMIC_STORE_UINT8 (((_PyThreadStateImpl * )p )-> jit_tracer_state .prev_state .dependencies_still_valid , 0 );
17821777 for (_PyExecutorObject * exec = ((_PyThreadStateImpl * )p )-> jit_executor_state .executor_list_head ; exec != NULL ;) {
17831778 assert (exec -> tstate == p );
1784- exec -> vm_data .valid = 0 ;
1779+ FT_ATOMIC_STORE_UINT8 (exec -> vm_data .valid , 0 );
1780+ if (is_invalidation ) {
1781+ OPT_STAT_INC (executors_invalidated );
1782+ }
17851783 _PyExecutorObject * next = exec -> vm_data .links .next ;
17861784 exec = next ;
17871785 }
@@ -1816,6 +1814,7 @@ _Py_Executors_InvalidateCold(PyThreadState *tstate)
18161814
18171815 if (!exec -> vm_data .warm || !exec -> vm_data .valid ) {
18181816 if (PyList_Append (invalidate , (PyObject * )exec ) < 0 ) {
1817+ FT_ATOMIC_STORE_UINT8 (exec -> vm_data .valid , 0 );
18191818 goto error ;
18201819 }
18211820 }
0 commit comments