@@ -22,7 +22,8 @@
 typedef enum ucp_mt_type {
     UCP_MT_TYPE_NONE = 0,
     UCP_MT_TYPE_SPINLOCK,
-    UCP_MT_TYPE_MUTEX
+    UCP_MT_TYPE_MUTEX,
+    UCP_MT_TYPE_WORKER_ASYNC
 } ucp_mt_type_t;
 
 
@@ -36,6 +37,13 @@ typedef struct ucp_mt_lock {
            at one time. Spinlock is the default option. */
         ucs_recursive_spinlock_t mt_spinlock;
         pthread_mutex_t          mt_mutex;
+        /* Lock for MULTI_THREAD_WORKER case, when mt-single context is used by
+         * a single mt-shared worker. In this case the worker progress flow is
+         * already protected by worker mutex, and we don't need to lock inside
+         * that flow. This is to protect certain API calls that can be triggered
+         * from the user thread without holding a worker mutex.
+         * Essentially this mutex is a pointer to a worker mutex */
+        ucs_async_context_t      *mt_worker_async;
     } lock;
 } ucp_mt_lock_t;
 
@@ -58,21 +66,44 @@ typedef struct ucp_mt_lock {
             pthread_mutex_destroy(&((_lock_ptr)->lock.mt_mutex)); \
         } \
     } while (0)
-#define UCP_THREAD_CS_ENTER(_lock_ptr) \
+
+static UCS_F_ALWAYS_INLINE void ucp_mt_lock_lock(ucp_mt_lock_t *lock)
+{
+    if (lock->mt_type == UCP_MT_TYPE_SPINLOCK) {
+        ucs_recursive_spin_lock(&lock->lock.mt_spinlock);
+    } else if (lock->mt_type == UCP_MT_TYPE_MUTEX) {
+        pthread_mutex_lock(&lock->lock.mt_mutex);
+    }
+}
+
+static UCS_F_ALWAYS_INLINE void ucp_mt_lock_unlock(ucp_mt_lock_t *lock)
+{
+    if (lock->mt_type == UCP_MT_TYPE_SPINLOCK) {
+        ucs_recursive_spin_unlock(&lock->lock.mt_spinlock);
+    } else if (lock->mt_type == UCP_MT_TYPE_MUTEX) {
+        pthread_mutex_unlock(&lock->lock.mt_mutex);
+    }
+}
+
+#define UCP_THREAD_CS_ENTER(_lock_ptr) ucp_mt_lock_lock(_lock_ptr)
+#define UCP_THREAD_CS_EXIT(_lock_ptr)  ucp_mt_lock_unlock(_lock_ptr)
+
+#define UCP_THREAD_CS_ASYNC_ENTER(_lock_ptr) \
     do { \
-        if ((_lock_ptr)->mt_type == UCP_MT_TYPE_SPINLOCK) { \
-            ucs_recursive_spin_lock(&((_lock_ptr)->lock.mt_spinlock)); \
-        } else if ((_lock_ptr)->mt_type == UCP_MT_TYPE_MUTEX) { \
-            pthread_mutex_lock(&((_lock_ptr)->lock.mt_mutex)); \
+        if ((_lock_ptr)->mt_type == UCP_MT_TYPE_WORKER_ASYNC) { \
+            UCS_ASYNC_BLOCK((_lock_ptr)->lock.mt_worker_async); \
+        } else { \
+            ucp_mt_lock_lock(_lock_ptr); \
         } \
-    } while (0)
-#define UCP_THREAD_CS_EXIT(_lock_ptr) \
+    } while(0)
+
+#define UCP_THREAD_CS_ASYNC_EXIT(_lock_ptr) \
     do { \
-        if ((_lock_ptr)->mt_type == UCP_MT_TYPE_SPINLOCK) { \
-            ucs_recursive_spin_unlock(&((_lock_ptr)->lock.mt_spinlock)); \
-        } else if ((_lock_ptr)->mt_type == UCP_MT_TYPE_MUTEX) { \
-            pthread_mutex_unlock(&((_lock_ptr)->lock.mt_mutex)); \
+        if ((_lock_ptr)->mt_type == UCP_MT_TYPE_WORKER_ASYNC) { \
+            UCS_ASYNC_UNBLOCK((_lock_ptr)->lock.mt_worker_async); \
+        } else { \
+            ucp_mt_lock_unlock(_lock_ptr); \
         } \
-    } while (0)
+    } while(0)
 
 #endif
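
For context, a minimal sketch (not part of this diff) of how the new UCP_MT_TYPE_WORKER_ASYNC mode would be wired up: the lock's mt_worker_async field points at the owning worker's ucs_async_context_t, so UCP_THREAD_CS_ASYNC_ENTER/EXIT reduce to UCS_ASYNC_BLOCK/UNBLOCK on that worker's async lock, while the plain UCP_THREAD_CS_ENTER/EXIT become no-ops for this lock type. The helper name below is hypothetical and only illustrates the intended setup.

/* Hypothetical setup sketch (not from this change): select WORKER_ASYNC
 * mode and point the lock at a worker's async context. With this setup,
 * UCP_THREAD_CS_ASYNC_ENTER/EXIT expand to UCS_ASYNC_BLOCK/UNBLOCK on the
 * worker's async lock, while UCP_THREAD_CS_ENTER/EXIT do nothing, since
 * ucp_mt_lock_lock()/unlock() only handle SPINLOCK and MUTEX. */
static void ucp_mt_lock_setup_worker_async(ucp_mt_lock_t *lock,
                                           ucs_async_context_t *worker_async)
{
    lock->mt_type              = UCP_MT_TYPE_WORKER_ASYNC;
    lock->lock.mt_worker_async = worker_async;
}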