
Commit 283d843

Tyler Retzlaff authored and tmonjalo committed
lib: use atomic thread fence recommended API
Use rte_atomic_thread_fence() instead of directly using the __atomic_thread_fence() GCC builtin or the __rte_atomic_thread_fence() internal function.

Signed-off-by: Tyler Retzlaff <[email protected]>
Acked-by: Morten Brørup <[email protected]>
Acked-by: Chengwen Feng <[email protected]>
Acked-by: Thomas Monjalon <[email protected]>
1 parent 93998f3 commit 283d843
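The substitution is mechanical: every hunk below keeps its rte_memory_order_* token and only renames the fence call. As background, here is a minimal self-contained C11 analogue of the release/acquire fence pairing these call sites rely on; atomic_thread_fence() models the ordering semantics the rte_atomic_thread_fence() wrapper exposes, and payload/ready are hypothetical names, not DPDK symbols.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;      /* plain data, ordered only by the fences     */
    static atomic_int ready; /* relaxed flag; the fences do the heavy work */

    static void *producer(void *arg)
    {
        payload = 42;                              /* write the data       */
        atomic_thread_fence(memory_order_release); /* keep the write above */
        atomic_store_explicit(&ready, 1, memory_order_relaxed);
        return arg;
    }

    static void *consumer(void *arg)
    {
        while (atomic_load_explicit(&ready, memory_order_relaxed) == 0)
            ;                                      /* spin on the flag     */
        atomic_thread_fence(memory_order_acquire); /* pairs with release   */
        printf("payload = %d\n", payload);         /* guaranteed to see 42 */
        return arg;
    }

    int main(void)
    {
        pthread_t p, c;
        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
    }

Build with cc -pthread. The point of the commit is that lib/ code should reach this machinery through the public wrapper rather than through a toolchain-specific builtin or the internal __rte_ helper.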

7 files changed (+14 additions, -14 deletions)


lib/distributor/rte_distributor.c

Lines changed: 1 addition & 1 deletion
@@ -187,7 +187,7 @@ rte_distributor_return_pkt(struct rte_distributor *d,
     }

     /* Sync with distributor to acquire retptrs */
-    __atomic_thread_fence(rte_memory_order_acquire);
+    rte_atomic_thread_fence(rte_memory_order_acquire);
     for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
         /* Switch off the return bit first */
         buf->retptr64[i] = 0;
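Why a standalone fence rather than acquire ordering on each access: one fence upgrades the last relaxed load of the handshake flags and then covers the whole retptr64 burst, so the barrier cost is paid once instead of on every poll. A portable C11 sketch of that shape, with hypothetical handshake and BUSY names:

    #include <stdatomic.h>

    #define BUSY 1u
    static _Atomic unsigned int handshake;

    /* Spin with cheap relaxed loads; upgrade the final one with a single
     * acquire fence. Ordering is equivalent to spinning on an acquire
     * load, but only one barrier is issued.
     */
    static void wait_not_busy(void)
    {
        while (atomic_load_explicit(&handshake, memory_order_relaxed) & BUSY)
            ;
        atomic_thread_fence(memory_order_acquire);
    }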

lib/eal/common/eal_common_trace.c

Lines changed: 1 addition & 1 deletion
@@ -526,7 +526,7 @@ __rte_trace_point_register(rte_trace_point_t *handle, const char *name,

     /* Add the trace point at tail */
     STAILQ_INSERT_TAIL(&tp_list, tp, next);
-    __atomic_thread_fence(rte_memory_order_release);
+    rte_atomic_thread_fence(rte_memory_order_release);

     /* All Good !!! */
     return 0;
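The release fence here is the standard publication idiom: stores that initialise the trace point must not sink below the operation that makes it reachable to readers. A generic C11 sketch of the rule, using placeholder node/head names rather than the trace API:

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { int value; };
    static struct node *_Atomic head;

    /* Writer: initialise the node, fence, then publish the pointer. */
    static void publish(struct node *n)
    {
        n->value = 1;
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&head, n, memory_order_relaxed);
    }

    /* Reader: load the pointer, fence, then dereference. */
    static int observe(void)
    {
        struct node *p = atomic_load_explicit(&head, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);
        return (p != NULL) ? p->value : -1; /* sees the initialised field */
    }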

lib/eal/include/rte_mcslock.h

Lines changed: 2 additions & 2 deletions
@@ -83,7 +83,7 @@ rte_mcslock_lock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me)
      * store to prev->next. Otherwise it will cause a deadlock. Need a
      * store-load barrier.
      */
-    __rte_atomic_thread_fence(rte_memory_order_acq_rel);
+    rte_atomic_thread_fence(rte_memory_order_acq_rel);
     /* If the lock has already been acquired, it first atomically
      * places the node at the end of the queue and then proceeds
      * to spin on me->locked until the previous lock holder resets
@@ -117,7 +117,7 @@ rte_mcslock_unlock(RTE_ATOMIC(rte_mcslock_t *) *msl, RTE_ATOMIC(rte_mcslock_t *)
      * while-loop first. This has the potential to cause a
      * deadlock. Need a load barrier.
      */
-    __rte_atomic_thread_fence(rte_memory_order_acquire);
+    rte_atomic_thread_fence(rte_memory_order_acquire);
     /* More nodes added to the queue by other CPUs.
      * Wait until the next pointer is set.
      */
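The first hunk is the interesting one: the code comment asks for a store-load barrier, which no C11 fence weaker than seq_cst guarantees in the abstract machine, so the acq_rel spelling presumably leans on the stronger barrier the supported targets actually emit. The portable textbook form of store-load ordering is Dekker-style with a seq_cst fence (the flag array is hypothetical):

    #include <stdatomic.h>

    static _Atomic int flag[2];

    /* Dekker-style entry: store your own flag, fence, read the peer's.
     * Only a seq_cst fence guarantees the two threads cannot both
     * read 0 after both stores.
     */
    static int try_enter(int me)
    {
        atomic_store_explicit(&flag[me], 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        return atomic_load_explicit(&flag[1 - me], memory_order_relaxed) == 0;
    }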

lib/hash/rte_cuckoo_hash.c

Lines changed: 5 additions & 5 deletions
@@ -878,7 +878,7 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
             /* The store to sig_current should not
              * move above the store to tbl_chng_cnt.
              */
-            __atomic_thread_fence(rte_memory_order_release);
+            rte_atomic_thread_fence(rte_memory_order_release);
         }

         /* Need to swap current/alt sig to allow later
@@ -910,7 +910,7 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
             /* The store to sig_current should not
              * move above the store to tbl_chng_cnt.
              */
-            __atomic_thread_fence(rte_memory_order_release);
+            rte_atomic_thread_fence(rte_memory_order_release);
         }

         curr_bkt->sig_current[curr_slot] = sig;
@@ -1403,7 +1403,7 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
         /* The loads of sig_current in search_one_bucket
          * should not move below the load from tbl_chng_cnt.
          */
-        __atomic_thread_fence(rte_memory_order_acquire);
+        rte_atomic_thread_fence(rte_memory_order_acquire);
         /* Re-read the table change counter to check if the
          * table has changed during search. If yes, re-do
          * the search.
@@ -1632,7 +1632,7 @@ __rte_hash_compact_ll(const struct rte_hash *h,
             /* The store to sig_current should
              * not move above the store to tbl_chng_cnt.
              */
-            __atomic_thread_fence(rte_memory_order_release);
+            rte_atomic_thread_fence(rte_memory_order_release);
         }
         last_bkt->sig_current[i] = NULL_SIGNATURE;
         rte_atomic_store_explicit(&last_bkt->key_idx[i],
@@ -2223,7 +2223,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
         /* The loads of sig_current in compare_signatures
          * should not move below the load from tbl_chng_cnt.
          */
-        __atomic_thread_fence(rte_memory_order_acquire);
+        rte_atomic_thread_fence(rte_memory_order_acquire);
         /* Re-read the table change counter to check if the
          * table has changed during search. If yes, re-do
          * the search.
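All five hunks are two halves of one counter-validation scheme: writers bump tbl_chng_cnt and fence before moving a signature, so the counter change is visible no later than the move; lock-free readers load signatures, fence, then re-read the counter and retry the lookup if it changed. A compressed C11 sketch of that shape, with placeholder chng_cnt/data/slot names:

    #include <stdatomic.h>

    static _Atomic unsigned int chng_cnt;
    static int data[8];

    /* Writer: the counter store must stay above the entry mutation. */
    static void move_entry(unsigned int slot, int new_val)
    {
        unsigned int c = atomic_load_explicit(&chng_cnt, memory_order_relaxed);

        atomic_store_explicit(&chng_cnt, c + 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
        data[slot] = new_val; /* racy by design; readers revalidate */
    }

    /* Reader: the entry load must stay above the counter re-read. */
    static int read_entry(unsigned int slot)
    {
        unsigned int before, after;
        int v;

        do {
            before = atomic_load_explicit(&chng_cnt, memory_order_acquire);
            v = data[slot];
            atomic_thread_fence(memory_order_acquire);
            after = atomic_load_explicit(&chng_cnt, memory_order_acquire);
        } while (before != after);
        return v;
    }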

lib/lpm/rte_lpm.c

Lines changed: 2 additions & 2 deletions
@@ -1116,7 +1116,7 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
          * Prevent the free of the tbl8 group from hoisting.
          */
         i_lpm->lpm.tbl24[tbl24_index].valid = 0;
-        __atomic_thread_fence(__ATOMIC_RELEASE);
+        rte_atomic_thread_fence(rte_memory_order_release);
         status = tbl8_free(i_lpm, tbl8_group_start);
     } else if (tbl8_recycle_index > -1) {
         /* Update tbl24 entry. */
@@ -1132,7 +1132,7 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
          */
         __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
                 __ATOMIC_RELAXED);
-        __atomic_thread_fence(__ATOMIC_RELEASE);
+        rte_atomic_thread_fence(rte_memory_order_release);
         status = tbl8_free(i_lpm, tbl8_group_start);
     }
 #undef group_idx
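Note that these two hunks also convert the ordering token, since the old calls used the raw __ATOMIC_RELEASE constant. The fence itself separates logical removal from physical recycling: the tbl24 entry is invalidated first, and the release fence keeps stores made while freeing or reusing the tbl8 group from hoisting above that invalidation, per the comment in the first hunk. Reduced to placeholder C11 (entry/pool_put/group are illustrative):

    #include <stdatomic.h>

    struct route { _Atomic unsigned char valid; };

    /* Invalidate the entry, fence, then recycle its backing group. */
    static void retire(struct route *entry, void *group,
                       void (*pool_put)(void *))
    {
        atomic_store_explicit(&entry->valid, 0, memory_order_relaxed);
        atomic_thread_fence(memory_order_release); /* recycle must not hoist */
        pool_put(group); /* the group's memory may now be rewritten */
    }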

lib/ring/rte_ring_c11_pvt.h

Lines changed: 2 additions & 2 deletions
@@ -68,7 +68,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
         n = max;

         /* Ensure the head is read before tail */
-        __atomic_thread_fence(rte_memory_order_acquire);
+        rte_atomic_thread_fence(rte_memory_order_acquire);

         /* load-acquire synchronize with store-release of ht->tail
          * in update_tail.
@@ -145,7 +145,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
         n = max;

         /* Ensure the head is read before tail */
-        __atomic_thread_fence(rte_memory_order_acquire);
+        rte_atomic_thread_fence(rte_memory_order_acquire);

         /* this load-acquire synchronize with store-release of ht->tail
          * in update_tail.
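Both hunks enforce a load-load order: the local head must be observed before the opposing tail, so the entry count is never computed from a stale tail paired with a newer head. In placeholder C11 terms (my_head/their_tail/capacity are illustrative, not the rte_ring fields):

    #include <stdatomic.h>

    static _Atomic unsigned int my_head, their_tail;

    /* Read head first, fence, then tail. */
    static unsigned int free_entries(unsigned int capacity)
    {
        unsigned int head, tail;

        head = atomic_load_explicit(&my_head, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire); /* head read stays above */
        tail = atomic_load_explicit(&their_tail, memory_order_acquire);
        return capacity + tail - head;
    }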

lib/stack/rte_stack_lf_c11.h

Lines changed: 1 addition & 1 deletion
@@ -110,7 +110,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
      * elements are properly ordered with respect to the head
      * pointer read.
      */
-    __atomic_thread_fence(rte_memory_order_acquire);
+    rte_atomic_thread_fence(rte_memory_order_acquire);

     rte_prefetch0(old_head.top);

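As in the other lock-free pop paths, the acquire fence orders the earlier head-pointer read before any access to the element it leads to, including the prefetch. Placeholder C11 shape of such a pop (elem/list_head are illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    struct elem { struct elem *next; };
    static struct elem *_Atomic list_head;

    /* Read the head, fence, then dereference: element fields are only
     * touched after the pointer that leads to them has been read.
     */
    static struct elem *peek_next(void)
    {
        struct elem *old = atomic_load_explicit(&list_head,
                                                memory_order_relaxed);

        atomic_thread_fence(memory_order_acquire);
        return (old != NULL) ? old->next : NULL;
    }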