Commit a3881cf

Use atomic locks instead of pthread locks to avoid library calls
Signed-off-by: Joseph Schuchart <schuchart@icl.utk.edu>
1 parent ade3b2f commit a3881cf

File tree

1 file changed: +9 -9 lines changed


ompi/mpiext/continue/c/continuation.c

Lines changed: 9 additions & 9 deletions
@@ -551,7 +551,7 @@ ompi_continue_enqueue_runnable(ompi_continuation_t *cont)
             opal_list_remove_item(&cont_req->cont_incomplete_list, &cont->super.super);
             if (using_threads) { opal_atomic_unlock(&cont_req->cont_lock); }
         }
-        if (using_threads) { opal_mutex_lock(&request_cont_lock); }
+        if (using_threads) { opal_mutex_atomic_lock(&request_cont_lock); }
         opal_list_append(&continuation_list, &cont->super.super);
         if (OPAL_UNLIKELY(!progress_callback_registered)) {
             /* TODO: Ideally, we want to ensure that the callback is called *after*
@@ -562,7 +562,7 @@ ompi_continue_enqueue_runnable(ompi_continuation_t *cont)
             opal_progress_register(&ompi_continue_progress_callback);
             progress_callback_registered = true;
         }
-        if (using_threads) { opal_mutex_unlock(&request_cont_lock); }
+        if (using_threads) { opal_mutex_atomic_unlock(&request_cont_lock); }
     }
 }
 
@@ -753,7 +753,7 @@ static int request_completion_cb(ompi_request_t *request)
 /* release all continuations, either by checking the requests for failure or just marking
  * the continuation as failed if the requests are not available */
 int ompi_continue_global_wakeup(int status) {
-    opal_mutex_lock(&cont_req_list_mtx);
+    opal_mutex_atomic_lock(&cont_req_list_mtx);
     opal_list_item_t *item;
     while (NULL != (item = opal_list_remove_first(&cont_req_list))) {
 
@@ -782,7 +782,7 @@ int ompi_continue_global_wakeup(int status) {
         opal_atomic_unlock(&cont_req->cont_lock);
     }
 
-    opal_mutex_unlock(&cont_req_list_mtx);
+    opal_mutex_atomic_unlock(&cont_req_list_mtx);
 }
 
 int ompi_continue_attach(
@@ -938,9 +938,9 @@ int ompi_continue_allocate_request(
     }
     *cont_req_ptr = &cont_req->super;
 
-    opal_mutex_lock(&cont_req_list_mtx);
+    opal_mutex_atomic_lock(&cont_req_list_mtx);
     opal_list_append(&cont_req_list, &cont_req->cont_list_item);
-    opal_mutex_unlock(&cont_req_list_mtx);
+    opal_mutex_atomic_unlock(&cont_req_list_mtx);
 
     return MPI_SUCCESS;
 }
@@ -962,15 +962,15 @@ static int ompi_continue_request_start(size_t count, ompi_request_t** cont_req_p
             }
         }
         if (lock_continuation_list) {
-            opal_mutex_lock(&request_cont_lock);
+            opal_mutex_atomic_lock(&request_cont_lock);
         }
     }
 
     for (size_t i = 0; i < count; ++i) {
         ompi_cont_request_t *cont_req = (ompi_cont_request_t*)cont_req_ptr[i];
         if (cont_req->super.req_state != OMPI_REQUEST_INACTIVE) {
             if (lock_continuation_list) {
-                opal_mutex_unlock(&request_cont_lock);
+                opal_mutex_atomic_unlock(&request_cont_lock);
             }
             return OMPI_ERR_REQUEST;
         }
@@ -992,7 +992,7 @@ static int ompi_continue_request_start(size_t count, ompi_request_t** cont_req_p
     }
 
     if (lock_continuation_list) {
-        opal_mutex_unlock(&request_cont_lock);
+        opal_mutex_atomic_unlock(&request_cont_lock);
     }
     return OMPI_SUCCESS;
 }
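
The diff swaps opal_mutex_lock/opal_mutex_unlock for opal_mutex_atomic_lock/opal_mutex_atomic_unlock around the continuation and continuation-request lists; per the commit message, the intent is to take an inlined atomic lock instead of calling into the pthread library on every acquire and release. As an illustration only (not OPAL's implementation), the sketch below shows a minimal C11 test-and-set spinlock of the kind such an atomic lock can inline at the call site; the names toy_lock_t, toy_lock, and toy_unlock are hypothetical.

```c
/*
 * Illustration only, not the OPAL implementation: a minimal C11 test-and-set
 * spinlock of the kind an "atomic lock" can inline at the call site, in
 * contrast to pthread_mutex_lock(), which is a call into the pthread library.
 * All names here (toy_lock_t, toy_lock, toy_unlock) are hypothetical.
 */
#include <stdatomic.h>

typedef struct {
    atomic_flag held;                 /* clear = unlocked, set = locked */
} toy_lock_t;

#define TOY_LOCK_INIT { ATOMIC_FLAG_INIT }

static inline void toy_lock(toy_lock_t *l)
{
    /* Spin until the flag was previously clear and we set it ourselves. */
    while (atomic_flag_test_and_set_explicit(&l->held, memory_order_acquire)) {
        /* busy-wait; a production lock would back off or yield here */
    }
}

static inline void toy_unlock(toy_lock_t *l)
{
    atomic_flag_clear_explicit(&l->held, memory_order_release);
}
```

Under the same assumption, acquire and release compile down to a few atomic instructions in the caller (the "avoid library calls" in the commit title), at the cost of busy-waiting rather than blocking when the lock is contended.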

0 commit comments
