/*
 * Wait for all schedulers to start
 */
static inline void
_lthread_schedulers_sync_start(void)
{
	rte_atomic16_inc(&active_schedulers);

	/* wait for lthread schedulers
	 * Note we use sched_yield() rather than pthread_yield() to allow
	 * for the possibility of a pthread wrapper on lthread_yield(),
	 * something that is not possible unless the scheduler is running.
	 */
	while (rte_atomic16_read(&active_schedulers) <
	       rte_atomic16_read(&num_schedulers))
		sched_yield();
}
/**
 * Shutdown all schedulers
 */
void lthread_scheduler_shutdown_all(void)
{
	uint64_t i;

	/*
	 * Give time for all schedulers to have started
	 * Note we use sched_yield() rather than pthread_yield() to allow
	 * for the possibility of a pthread wrapper on lthread_yield(),
	 * something that is not possible unless the scheduler is running.
	 */
	while (rte_atomic16_read(&active_schedulers) <
	       rte_atomic16_read(&num_schedulers))
		sched_yield();

	for (i = 0; i < LTHREAD_MAX_LCORES; i++) {
		if (schedcore[i] != NULL)
			schedcore[i]->run_flag = 0;
	}
}
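/*
 * A minimal shutdown sketch, not part of the library itself. It assumes
 * the schedulers were launched on worker lcores with
 * rte_eal_remote_launch() (see the startup sketch further below):
 * lthread_scheduler_shutdown_all() clears run_flag on every scheduler,
 * which is what allows each scheduler's run loop to terminate, and the
 * launched lcores can then be joined with the standard EAL call.
 */
static void
stop_all_schedulers(void)
{
	lthread_scheduler_shutdown_all();

	/* wait for every worker lcore's scheduler loop to return */
	rte_eal_mp_wait_lcore();
}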
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct qat_qp *qp =
		(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp == NULL) {
		PMD_DRV_LOG(DEBUG, "qp already freed");
		return 0;
	}

	/* Don't free memory if there are still responses to be processed */
	if (rte_atomic16_read(&(qp->inflights16)) == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
	rte_free(qp);
	dev->data->queue_pairs[queue_pair_id] = NULL;
	return 0;
}
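/*
 * A usage sketch, not part of the PMD: qat_crypto_sym_qp_release()
 * returns -EAGAIN while responses are still inflight, so a caller may
 * poll until the queue pair drains. qp_release_with_retry(), the retry
 * cap, and the 1 ms delay are illustrative assumptions.
 */
static int
qp_release_with_retry(struct rte_cryptodev *dev, uint16_t qp_id)
{
	int retries = 100;	/* arbitrary bound, an assumption */
	int ret;

	while ((ret = qat_crypto_sym_qp_release(dev, qp_id)) == -EAGAIN &&
			--retries > 0)
		rte_delay_ms(1);	/* let inflight responses complete */

	return ret;
}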
/*
 * Return the number of schedulers active
 */
int lthread_active_schedulers(void)
{
	return (int)rte_atomic16_read(&active_schedulers);
}
/*
 * Set the number of schedulers in the system
 */
int lthread_num_schedulers_set(int num)
{
	rte_atomic16_set(&num_schedulers, num);
	return (int)rte_atomic16_read(&num_schedulers);
}
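/*
 * A startup sketch under stated assumptions: sched_main() and
 * start_all_schedulers() are hypothetical, and the body of the
 * per-core scheduler loop is elided. Every participating lcore,
 * including the main one, runs one scheduler, so the count passed to
 * lthread_num_schedulers_set() is rte_lcore_count().
 */
static int
sched_main(__rte_unused void *arg)
{
	/* rendezvous: blocks until active_schedulers reaches num_schedulers */
	_lthread_schedulers_sync_start();

	/* ... run this core's scheduler loop until its run_flag is cleared ... */
	return 0;
}

static void
start_all_schedulers(void)
{
	unsigned int lcore_id;

	/* one scheduler per lcore, main lcore included */
	lthread_num_schedulers_set((int)rte_lcore_count());

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(sched_main, NULL, lcore_id);

	sched_main(NULL);
}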