Skip to content

Commit cb87676

Browse files
address review, fix bug
1 parent 7240b15 commit cb87676

File tree

4 files changed

+20
-22
lines changed

4 files changed

+20
-22
lines changed

Include/internal/pycore_optimizer.h

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -378,8 +378,6 @@ _PyJit_TryInitializeTracing(PyThreadState *tstate, _PyInterpreterFrame *frame,
378378

379379
void _PyJit_FinalizeTracing(PyThreadState *tstate);
380380

381-
void _PyJit_Tracer_InvalidateDependency(PyThreadState *old_tstate, void *obj);
382-
383381
#ifdef __cplusplus
384382
}
385383
#endif

Objects/funcobject.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -11,7 +11,7 @@
1111
#include "pycore_setobject.h" // _PySet_NextEntry()
1212
#include "pycore_stats.h"
1313
#include "pycore_weakref.h" // FT_CLEAR_WEAKREFS()
14-
#include "pycore_optimizer.h" // _PyJit_Tracer_InvalidateDependency
14+
#include "pycore_optimizer.h" // _Py_Executors_InvalidateDependency
1515

1616
static const char *
1717
func_event_name(PyFunction_WatchEvent event) {

Objects/listobject.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -79,7 +79,8 @@ ensure_shared_on_resize(PyListObject *self)
7979
// We can't use _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED here because
8080
// the `CALL_LIST_APPEND` bytecode handler may lock the list without
8181
// a critical section.
82-
assert((_Py_IsOwnedByCurrentThread((PyObject *)self) && !_PyObject_GC_IS_SHARED(self)) ||
82+
assert(Py_REFCNT(self) == 1 ||
83+
(_Py_IsOwnedByCurrentThread((PyObject *)self) && !_PyObject_GC_IS_SHARED(self)) ||
8384
PyMutex_IsLocked(&_PyObject_CAST(self)->ob_mutex));
8485

8586
// Ensure that the list array is freed using QSBR if we are not the

Python/optimizer.c

Lines changed: 17 additions & 18 deletions
Original file line number | Diff line number | Diff line change
@@ -140,7 +140,7 @@ _PyOptimizer_Optimize(
140140
_PyThreadStateImpl *_tstate = (_PyThreadStateImpl *)tstate;
141141
int chain_depth = _tstate->jit_tracer_state.initial_state.chain_depth;
142142
if (!FT_ATOMIC_LOAD_CHAR_RELAXED(_tstate->jit_executor_state.jit)) {
143-
// gh-140936: It is possible that interp->jit will become false during
143+
// gh-140936: It is possible that jit_executor_state.jit will become false during
144144
// interpreter finalization. However, the specialized JUMP_BACKWARD_JIT
145145
// instruction may still be present. In this case, we should
146146
// return immediately without optimization.
@@ -1546,7 +1546,7 @@ unlink_executor(_PyExecutorObject *executor)
15461546
return;
15471547
}
15481548
_PyExecutorLinkListNode *links = &executor->vm_data.links;
1549-
assert(executor->vm_data.valid);
1549+
assert(FT_ATOMIC_LOAD_UINT8_RELAXED(executor->vm_data.valid) == 0);
15501550
_PyExecutorObject *next = links->next;
15511551
_PyExecutorObject *prev = links->previous;
15521552
if (next != NULL) {
@@ -1657,9 +1657,7 @@ _Py_ExecutorDetach(_PyExecutorObject *executor)
16571657
if (code == NULL) {
16581658
return;
16591659
}
1660-
#ifdef Py_GIL_DISABLED
1661-
assert(_PyInterpreterState_GET()->stoptheworld.world_stopped || PyMutex_IsLocked(&((PyObject *)code)->ob_mutex));
1662-
#endif
1660+
Py_BEGIN_CRITICAL_SECTION(code);
16631661
_Py_CODEUNIT *instruction = &_PyCode_CODE(code)[executor->vm_data.index];
16641662
assert(instruction->op.code == ENTER_EXECUTOR);
16651663
int index = instruction->op.arg;
@@ -1668,33 +1666,27 @@ _Py_ExecutorDetach(_PyExecutorObject *executor)
16681666
instruction->op.arg = executor->vm_data.oparg;
16691667
executor->vm_data.code = NULL;
16701668
code->co_executors->executors[index] = NULL;
1669+
Py_END_CRITICAL_SECTION();
16711670
}
16721671

16731672
static int
16741673
executor_clear(PyObject *op)
16751674
{
16761675
_PyExecutorObject *executor = _PyExecutorObject_CAST(op);
1677-
if (!executor->vm_data.valid) {
1678-
return 0;
1679-
}
1680-
assert(executor->vm_data.valid == 1);
1676+
assert(FT_ATOMIC_LOAD_UINT8_RELAXED(executor->vm_data.valid) == 0);
16811677
unlink_executor(executor);
1682-
executor->vm_data.valid = 0;
16831678

16841679
/* It is possible for an executor to form a reference
16851680
* cycle with itself, so decref'ing a side exit could
16861681
* free the executor unless we hold a strong reference to it
16871682
*/
16881683
_PyExecutorObject *cold = _PyExecutor_GetColdExecutor();
1689-
Py_INCREF(executor);
16901684
for (uint32_t i = 0; i < executor->exit_count; i++) {
16911685
executor->exits[i].temperature = initial_unreachable_backoff_counter();
16921686
_PyExecutorObject *e = executor->exits[i].executor;
16931687
executor->exits[i].executor = cold;
1694-
Py_DECREF(e);
16951688
}
16961689
_Py_ExecutorDetach(executor);
1697-
Py_DECREF(executor);
16981690
return 0;
16991691
}
17001692

@@ -1705,8 +1697,8 @@ _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj)
17051697
_Py_BloomFilter_Add(&executor->vm_data.bloom, obj);
17061698
}
17071699

1708-
void
1709-
_PyJit_Tracer_InvalidateDependency(PyThreadState *tstate, void *obj)
1700+
static void
1701+
jit_tracer_invalidate_dependency(PyThreadState *tstate, void *obj)
17101702
{
17111703
_PyBloomFilter obj_filter;
17121704
_Py_BloomFilter_Init(&obj_filter);
@@ -1726,7 +1718,7 @@ invalidate_sub_executors(_PyThreadStateImpl *tstate, _PyExecutorObject *executor
17261718
if (!executor->vm_data.valid) {
17271719
return;
17281720
}
1729-
executor->vm_data.valid = 0;
1721+
FT_ATOMIC_STORE_UINT8(executor->vm_data.valid, 0);
17301722
for (uint32_t i = 0; i < executor->exit_count; i++) {
17311723
_PyExecutorObject *next = executor->exits[i].executor;
17321724
if (next != tstate->jit_executor_state.cold_dynamic_executor && next != tstate->jit_executor_state.cold_executor) {
@@ -1751,10 +1743,13 @@ _Py_Executors_InvalidateDependencyLockHeld(PyInterpreterState *interp, void *obj
17511743
/* Clearing an executor can deallocate others, so we need to make a list of
17521744
* executors to invalidate first */
17531745
_Py_FOR_EACH_TSTATE_UNLOCKED(interp, p) {
1754-
_PyJit_Tracer_InvalidateDependency(p, obj);
1746+
jit_tracer_invalidate_dependency(p, obj);
17551747
for (_PyExecutorObject *exec = ((_PyThreadStateImpl *)p)->jit_executor_state.executor_list_head; exec != NULL;) {
17561748
if (bloom_filter_may_contain(&exec->vm_data.bloom, &obj_filter)) {
17571749
invalidate_sub_executors((_PyThreadStateImpl *)p, exec);
1750+
if (is_invalidation) {
1751+
OPT_STAT_INC(executors_invalidated);
1752+
}
17581753
}
17591754
_PyExecutorObject *next = exec->vm_data.links.next;
17601755
exec = next;
@@ -1781,7 +1776,10 @@ _Py_Executors_InvalidateAllLockHeld(PyInterpreterState *interp, int is_invalidat
17811776
FT_ATOMIC_STORE_UINT8(((_PyThreadStateImpl *)p)->jit_tracer_state.prev_state.dependencies_still_valid, 0);
17821777
for (_PyExecutorObject *exec = ((_PyThreadStateImpl *)p)->jit_executor_state.executor_list_head; exec != NULL;) {
17831778
assert(exec->tstate == p);
1784-
exec->vm_data.valid = 0;
1779+
FT_ATOMIC_STORE_UINT8(exec->vm_data.valid, 0);
1780+
if (is_invalidation) {
1781+
OPT_STAT_INC(executors_invalidated);
1782+
}
17851783
_PyExecutorObject *next = exec->vm_data.links.next;
17861784
exec = next;
17871785
}
@@ -1816,6 +1814,7 @@ _Py_Executors_InvalidateCold(PyThreadState *tstate)
18161814

18171815
if (!exec->vm_data.warm || !exec->vm_data.valid) {
18181816
if (PyList_Append(invalidate, (PyObject *)exec) < 0) {
1817+
FT_ATOMIC_STORE_UINT8(exec->vm_data.valid, 0);
18191818
goto error;
18201819
}
18211820
}

0 commit comments

Comments (0)