threads_win: fix build error with VS2010 x86
InterlockedAnd64 and InterlockedAdd64 are not available on VS2010 x86.
We have already implemented replacements for other functions, such as
InterlockedOr64. Apply the same approach to fix the errors.
A CRYPTO_RWLOCK rw_lock is added to rcu_lock_st.

Replace InterlockedOr64 and InterlockedOr with CRYPTO_atomic_load and
CRYPTO_atomic_load_int, following the existing design pattern, and add
CRYPTO_atomic_add64 and CRYPTO_atomic_and as replacements for
InterlockedAdd64 and InterlockedAnd64.

libcrypto.lib(libcrypto-lib-threads_win.obj) : error LNK2019: unresolved external symbol _InterlockedAdd64 referenced in function _get_hold_current_qp
libcrypto.lib(libcrypto-lib-threads_win.obj) : error LNK2019: unresolved external symbol _InterlockedOr referenced in function _get_hold_current_qp
libcrypto.lib(libcrypto-lib-threads_win.obj) : error LNK2019: unresolved external symbol _InterlockedAnd64 referenced in function _update_qp
libcrypto.lib(libcrypto-lib-threads_win.obj) : error LNK2019: unresolved external symbol _InterlockedOr64 referenced in function _ossl_synchronize_rcu
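
For reference, the pre-existing fallback pattern this commit extends is
CRYPTO_atomic_or in crypto/threads_win.c. A sketch of its shape, reconstructed
from the new functions in this diff (details may differ): when
NO_INTERLOCKEDOR64 is defined, a CRYPTO_RWLOCK guards a plain read-modify-write;
otherwise the intrinsic is used directly.

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    /* no 64-bit interlocked intrinsics: serialize through the rwlock */
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* lock is unused here; the intrinsic is atomic on its own */
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op);
    return 1;
#endif
}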

Signed-off-by: Georgi Valkov <[email protected]>
httpstorm committed May 3, 2024
1 parent 439fddb commit 8a2db04
Showing 2 changed files with 57 additions and 8 deletions.
61 changes: 53 additions & 8 deletions crypto/threads_win.c
@@ -114,6 +114,7 @@ struct rcu_lock_st {
     CRYPTO_CONDVAR *alloc_signal;
     CRYPTO_MUTEX *prior_lock;
     CRYPTO_CONDVAR *prior_signal;
+    CRYPTO_RWLOCK *rw_lock;
 };
 
 static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
@@ -143,6 +144,7 @@ CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
         return NULL;
 
     new->ctx = ctx;
+    new->rw_lock = CRYPTO_THREAD_lock_new();
     new->write_lock = ossl_crypto_mutex_new();
     new->alloc_signal = ossl_crypto_condvar_new();
     new->prior_signal = ossl_crypto_condvar_new();
@@ -170,6 +172,7 @@ CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
 
 void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
 {
+    CRYPTO_THREAD_lock_free(lock->rw_lock);
     OPENSSL_free(lock->qp_group);
     ossl_crypto_condvar_free(&lock->alloc_signal);
     ossl_crypto_condvar_free(&lock->prior_signal);
@@ -182,14 +185,17 @@ void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
 static INLINE_VS struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
 {
     uint32_t qp_idx;
+    uint32_t tmp;
+    uint64_t tmp64;
 
     /* get the current qp index */
     for (;;) {
-        qp_idx = InterlockedOr(&lock->reader_idx, 0);
-        InterlockedAdd64(&lock->qp_group[qp_idx].users, VAL_READER);
-        if (qp_idx == InterlockedOr(&lock->reader_idx, 0))
+        CRYPTO_atomic_load_int(&lock->reader_idx, (int *)&qp_idx, lock->rw_lock);
+        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, VAL_READER, &tmp64, lock->rw_lock);
+        CRYPTO_atomic_load_int(&lock->reader_idx, (int *)&tmp, lock->rw_lock);
+        if (qp_idx == tmp)
             break;
-        InterlockedAdd64(&lock->qp_group[qp_idx].users, -VAL_READER);
+        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, -VAL_READER, &tmp64, lock->rw_lock);
     }
 
     return &lock->qp_group[qp_idx];
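
The loop above is an acquire/validate/retry pattern; paraphrased for clarity
(not new code, names as in the diff):

/*
 * 1. load reader_idx into qp_idx
 * 2. optimistically register as a reader on that qp: users += VAL_READER
 * 3. reload reader_idx into tmp; if a writer swapped the active qp in
 *    between, qp_idx != tmp, so deregister (users -= VAL_READER) and retry
 * 4. otherwise the reader is safely counted on the still-current qp
 */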
@@ -264,7 +270,7 @@ void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
         if (data->thread_qps[i].lock == lock) {
             data->thread_qps[i].depth--;
             if (data->thread_qps[i].depth == 0) {
-                ret = InterlockedAdd64(&data->thread_qps[i].qp->users, -VAL_READER);
+                CRYPTO_atomic_add64(&data->thread_qps[i].qp->users, -VAL_READER, (uint64_t *)&ret, lock->rw_lock);
                 OPENSSL_assert(ret >= 0);
                 data->thread_qps[i].qp = NULL;
                 data->thread_qps[i].lock = NULL;
@@ -279,6 +285,7 @@ static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
     uint64_t new_id;
     uint32_t current_idx;
     uint32_t tmp;
+    uint64_t tmp64;
 
     ossl_crypto_mutex_lock(lock->alloc_lock);
     /*
@@ -302,8 +309,8 @@ static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
     lock->id_ctr++;
 
     new_id = VAL_ID(new_id);
-    InterlockedAnd64(&lock->qp_group[current_idx].users, ID_MASK);
-    InterlockedAdd64(&lock->qp_group[current_idx].users, new_id);
+    CRYPTO_atomic_and(&lock->qp_group[current_idx].users, ID_MASK, &tmp64, lock->rw_lock);
+    CRYPTO_atomic_add64(&lock->qp_group[current_idx].users, new_id, &tmp64, lock->rw_lock);
 
     /* update the reader index to be the prior qp */
     tmp = lock->current_alloc_idx;
@@ -338,7 +345,7 @@ void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
 
     /* wait for the reader count to reach zero */
     do {
-        count = InterlockedOr64(&qp->users, 0);
+        CRYPTO_atomic_load(&qp->users, &count, lock->rw_lock);
     } while (READER_COUNT(count) != 0);
 
     /* retire in order */
@@ -569,6 +576,44 @@ int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
     return 1;
 }
 
+int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
+                        CRYPTO_RWLOCK *lock)
+{
+#if (defined(NO_INTERLOCKEDOR64))
+    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
+        return 0;
+    *val += op;
+    *ret = *val;
+
+    if (!CRYPTO_THREAD_unlock(lock))
+        return 0;
+
+    return 1;
+#else
+    *ret = (uint64_t)InterlockedAdd64((LONG64 volatile *)val, (LONG64)op);
+    return 1;
+#endif
+}
+
+int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
+                      CRYPTO_RWLOCK *lock)
+{
+#if (defined(NO_INTERLOCKEDOR64))
+    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
+        return 0;
+    *val &= op;
+    *ret = *val;
+
+    if (!CRYPTO_THREAD_unlock(lock))
+        return 0;
+
+    return 1;
+#else
+    *ret = (uint64_t)InterlockedAnd64((LONG64 volatile *)val, (LONG64)op);
+    return 1;
+#endif
+}
+
 int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock)
 {
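For illustration, a hypothetical caller of the two new helpers (the function
name and values below are invented, not part of this commit). The lock
argument is only actually taken on targets where NO_INTERLOCKEDOR64 is defined:

#include <stdint.h>
#include <openssl/crypto.h>

/* Returns 1 on success: users ends up as (0 + 3) & ~1 == 2. */
static int demo_atomics(void)
{
    uint64_t users = 0, ret = 0;
    CRYPTO_RWLOCK *lock = CRYPTO_THREAD_lock_new();
    int ok = 0;

    if (lock == NULL)
        return 0;
    if (CRYPTO_atomic_add64(&users, 3, &ret, lock)                  /* users += 3 */
            && CRYPTO_atomic_and(&users, ~(uint64_t)1, &ret, lock)) /* users &= ~1 */
        ok = (ret == 2);
    CRYPTO_THREAD_lock_free(lock);
    return ok;
}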
4 changes: 4 additions & 0 deletions include/openssl/crypto.h.in
@@ -86,6 +86,10 @@ int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock);
 void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock);
 
 int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock);
+int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
+                        CRYPTO_RWLOCK *lock);
+int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
+                      CRYPTO_RWLOCK *lock);
 int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock);
 int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock);
