Esempio n. 1
0
/* Initialize the retune2 packet handler: empty both retune queues and,
 * when running on real hardware, attach the RX/TX time tamer interrupt
 * service routines. In PC simulation builds no ISRs are registered. */
void pkt_retune2_init()
{
    reset_queue(&rx_queue);
    reset_queue(&tx_queue);

#ifndef BLADERF_NIOS_PC_SIMULATION
    /* Attach the RX time tamer ISR */
    alt_ic_isr_register(RX_TAMER_IRQ_INTERRUPT_CONTROLLER_ID, RX_TAMER_IRQ,
                        retune_rx, NULL, NULL);

    /* Attach the TX time tamer ISR */
    alt_ic_isr_register(TX_TAMER_IRQ_INTERRUPT_CONTROLLER_ID, TX_TAMER_IRQ,
                        retune_tx, NULL, NULL);
#endif
}
Esempio n. 2
0
/*
 * Allocate and initialize the queue.
 */
/*
 * Allocate and initialize a queue sized for the given grid.
 *
 * Allocates the queue control structure plus one zero-initialized
 * element slot per grid cell (grid->size entries), then resets the
 * queue to its empty state.
 *
 * grid - grid whose size determines the element array length
 *
 * Returns the newly allocated queue; the caller owns it and is
 * responsible for freeing both queue->e and the queue itself.
 * Aborts (via assert) if either allocation fails.
 */
static struct queue_t *
alloc_queue(const struct grid_t *grid)
{
    struct queue_t *queue;

    queue = calloc(1, sizeof *queue);
    /* Fix: the original dereferenced `queue` (to assign queue->e) before
     * any NULL check — a NULL dereference on allocation failure. */
    assert(queue);

    queue->e = calloc(grid->size, sizeof *queue->e);
    assert(queue->e);

    reset_queue(queue);
    return queue;
}
Esempio n. 3
0
/*
 * Initialize a job queue: record its configuration, reset its indices,
 * allocate the job buffer, and set up the condition variable.
 *
 * q                    - queue to initialize (must be non-NULL)
 * max_size             - capacity of the queue buffer, in jobs
 * repopulate_queue     - callback used to refill an empty queue; returns
 *                        non-zero if it added jobs
 * repopulate_queue_par - opaque argument forwarded to the callback
 *
 * Aborts (via assert) if the buffer allocation fails.
 */
void init_queue (job_queue_t *q, unsigned long max_size, int (*repopulate_queue)(void*), void *repopulate_queue_par) {
	q->max_size = max_size;
	q->status = QUEUE_OK;
	q->repopulate_queue = repopulate_queue;
	q->repopulate_queue_par = repopulate_queue_par;
	reset_queue(q);

	/* Idiomatic C: no cast on malloc's return, and `sizeof *ptr` so the
	 * allocation size tracks the pointed-to type automatically. */
	q->buffer = malloc(sizeof *q->buffer * q->max_size);
	LOG("Trying to allocate %lu bytes for the queue (max_size = %lu)\n", sizeof(job_queue_node_t) * q->max_size, q->max_size);
	assert(q->buffer != NULL);
	COND_VAR_INIT(q->cond_var);
}
	/* Destructor: tear down the dispatcher under the instance mutex —
	 * drop queued messages, detach the read fd from the Android looper
	 * (if one was attached) and release our looper reference, then close
	 * both ends of the pipe. Holding m_mutex serializes this teardown
	 * against any concurrent dispatch/post. */
	message_dispatcher::~message_dispatcher()
	{
		scoped_lock lock(m_mutex);
		reset_queue(); /* discard any messages still pending */
		if (m_looper) {
			/* Stop the looper from polling our pipe before releasing it */
			ALooper_removeFd(m_looper, m_readfd);
			ALooper_release(m_looper);
		}
		close(m_writefd);
		close(m_readfd);
		LOGV("%s:%d> message_dispatcher(%p) is destroyed\n", __FILE__, __LINE__, this);
	}
Esempio n. 5
0
/*
 * Pop one job from the queue into *j.
 *
 * Blocks while the queue is empty, cooperating with other consumers:
 * exactly one thread takes on refilling the queue (QUEUE_OK -> QUEUE_WAIT)
 * via the repopulate callback, while the rest wait on the condition
 * variable. Returns 1 with *j filled on success, or 0 if the queue is
 * empty and closed (no more jobs will ever arrive).
 */
int get_job (job_queue_t *q, job_t *j) {
	int index;
	
#ifdef NO_CACHE_COHERENCE
	__k1_rmb();	/* read barrier so the unlocked fast-path check below sees fresh values */
#endif

	/* Lock-free fast path: empty AND closed can never become non-empty again */
	if(q->begin == q->end && q->status == QUEUE_CLOSED)
		return 0;
	
	COND_VAR_MUTEX_LOCK(q->cond_var);
	while (q->begin == q->end) {
		switch (q->status) {
			case QUEUE_CLOSED:
				/* Empty and closed: nothing left to consume */
				COND_VAR_MUTEX_UNLOCK(q->cond_var);
				return 0;
			case QUEUE_WAIT:
				/* Another thread is refilling; sleep until it finishes */
				#ifdef NO_CACHE_COHERENCE
				waiting_threads++; //see close_queue()
				COND_VAR_WAIT(q->cond_var);
				waiting_threads--;
				#else
				COND_VAR_WAIT(q->cond_var);
				#endif
				break;
			case QUEUE_OK:
				/* This thread claims the refill duty; QUEUE_WAIT makes
				 * the other consumers block in the case above. */
				q->status = QUEUE_WAIT;
				reset_queue(q);
				/* Drop the lock while running the (potentially slow)
				 * repopulate callback so waiters can park on the condvar. */
				COND_VAR_MUTEX_UNLOCK(q->cond_var);
				int jobs_added = q->repopulate_queue(q->repopulate_queue_par);
				COND_VAR_MUTEX_LOCK(q->cond_var);
				if (jobs_added)
					q->status = QUEUE_OK;
				else
					close_queue(q);	/* no more jobs: wake everyone so they exit */
		}
	}

	/* Claim a slot index under the lock... */
	index = q->begin++;
	COND_VAR_MUTEX_UNLOCK(q->cond_var);
	/* ...then copy the job out after unlocking; the claimed slot is not
	 * reused until a refill, so this read is presumed safe — NOTE(review):
	 * confirm reset_queue cannot run concurrently with this copy. */
	memcpy(j, &q->buffer[index].tsp_job, sizeof(job_t));
	return 1;
} 
Esempio n. 6
0
/*
 * Handle a RETUNE2 request packet.
 *
 * Unpacks the request from b->req, updates the selected module's fastlock
 * profile data, then either performs the retune immediately, clears the
 * module's retune queue, or schedules the retune for a future timestamp.
 * Packs a response (operation duration + success/validity flags) into
 * b->resp.
 */
void pkt_retune2(struct pkt_buf *b)
{
    int status = -1;
    bladerf_module module;
    uint8_t flags;
    uint64_t timestamp;     /* when to retune: NOW, CLEAR_QUEUE, or a time */
    uint64_t start_time;
    uint64_t end_time;
    uint64_t duration = 0;  /* how long this handler took, per the time tamer */
    uint16_t nios_profile;  /* index into the fastlocks_{rx,tx} tables */
    uint8_t rffe_profile;   /* RFFE-side fast lock profile number */
    uint8_t port;
    uint8_t spdt;
    fastlock_profile *profile;

    /* Start optimistic; the flag is cleared below if anything fails */
    flags = NIOS_PKT_RETUNE2_RESP_FLAG_SUCCESS;

    nios_pkt_retune2_unpack(b->req, &module, &timestamp,
                            &nios_profile, &rffe_profile, &port, &spdt);

    /* Select the per-module fastlock profile entry to update */
    switch (module) {
        case BLADERF_MODULE_RX:
            profile = &fastlocks_rx[nios_profile];
            break;
        case BLADERF_MODULE_TX:
            profile = &fastlocks_tx[nios_profile];
            break;
        default:
            profile = NULL;
    }

    if (profile == NULL) {
        /* Unknown module; later switches also hit their default arms */
        INCREMENT_ERROR_COUNT();
        status = -1;
    } else {
        /* Update the fastlock profile data */
        profile->profile_num = rffe_profile;
        profile->port = port;
        profile->spdt = spdt;
    }

    /* Timestamp the start of the operation to report its duration */
    start_time = time_tamer_read(module);

    if (timestamp == NIOS_PKT_RETUNE2_NOW) {
        /* Fire off this retune operation now */
        switch (module) {
            case BLADERF_MODULE_RX:
            case BLADERF_MODULE_TX:

                /* Load the profile data into RFFE memory */
                profile_load(module, profile);

                /* Activate the fast lock profile for this retune */
                profile_activate(module, profile);

                flags |= NIOS_PKT_RETUNE2_RESP_FLAG_TSVTUNE_VALID;

                status = 0;
                break;

            default:
                INCREMENT_ERROR_COUNT();
                status = -1;
        }

    } else if (timestamp == NIOS_PKT_RETUNE2_CLEAR_QUEUE) {
        /* Discard any retunes still pending for this module */
        switch (module) {
            case BLADERF_MODULE_RX:
                reset_queue(&rx_queue);
                status = 0;
                break;

            case BLADERF_MODULE_TX:
                reset_queue(&tx_queue);
                status = 0;
                break;

            default:
                INCREMENT_ERROR_COUNT();
                status = -1;
        }
    } else {
        /* A future timestamp: enqueue the retune for the scheduler/ISR */
        uint8_t queue_size;

        switch (module) {
            case BLADERF_MODULE_RX:
                queue_size = enqueue_retune(&rx_queue, profile, timestamp);
                profile_load_scheduled(&rx_queue, module);
                break;

            case BLADERF_MODULE_TX:
                queue_size = enqueue_retune(&tx_queue, profile, timestamp);
                profile_load_scheduled(&tx_queue, module);
                break;

            default:
                INCREMENT_ERROR_COUNT();
                /* QUEUE_FULL doubles as the enqueue-failed sentinel */
                queue_size = QUEUE_FULL;

        }

        if (queue_size == QUEUE_FULL) {
            status = -1;
        } else {
            status = 0;
        }
    }

    end_time = time_tamer_read(module);
    duration = end_time - start_time;

    if (status != 0) {
        /* Any failure above clears the success flag in the response */
        flags &= ~(NIOS_PKT_RETUNE2_RESP_FLAG_SUCCESS);
    }

    nios_pkt_retune2_resp_pack(b->resp, duration, flags);
}