Example #1
static int __clhepfl_mutex_lock(clhepfl_mutex_t *impl, clhepfl_context_t *me) {
    clhepfl_node_t *p = me->current;
    p->spin           = LOCKED;

    MEMORY_BARRIER();
    // Enqueue this thread's node at the tail of the queue
    clhepfl_node_t *pred = xchg_64((void *)&impl->tail, (void *)p);
    if (pred == NULL) {
        /* Uncontended: record our node as the head so that unlock
         * (which releases impl->head) operates on it. */
        impl->head = p;
        return 0;
    }

    // A predecessor exists: spin on its node until it releases the lock
    PREFETCHW(pred);
    while (pred->spin == LOCKED) {
        CPU_PAUSE();
        pause_rep(REP_VAL);
        PREFETCHW(pred);
    }

    impl->head = p;
    COMPILER_BARRIER();
    // Reuse the predecessor's node as our context for the next acquisition
    me->current = pred;

    return 0;
}
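
COMPILER_BARRIER, MEMORY_BARRIER, CPU_PAUSE, PREFETCHW, and xchg_64 are macros/helpers from the surrounding lock library. A plausible set of definitions, assuming GCC on x86-64 (not the library's actual headers):

/* Plausible definitions of the primitives used above; an assumption,
 * NOT taken from the original library's headers. GCC on x86-64 assumed. */
#define COMPILER_BARRIER() __asm__ __volatile__("" ::: "memory")
#define MEMORY_BARRIER()   __sync_synchronize()   /* full hardware fence */
#define CPU_PAUSE()        __asm__ __volatile__("pause")
#define PREFETCHW(x)       __builtin_prefetch((x), 1)

/* Atomically swap the pointer at *ptr with x, returning the old value. */
static inline void *xchg_64(void *ptr, void *x)
{
    return __atomic_exchange_n((void **)ptr, x, __ATOMIC_SEQ_CST);
}
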
Example #2
/** Return the state of a tracker channel.
 *
 * \note This function performs an acquire operation, meaning that it ensures
 * the returned state was read before any subsequent memory accesses.
 *
 * \param tracker_channel   Tracker channel to use.
 *
 * \return state of the tracker channel.
 */
static state_t tracker_channel_state_get(const tracker_channel_t *
                                         tracker_channel)
{
  state_t state = tracker_channel->state;
  COMPILER_BARRIER(); /* Prevent compiler reordering */
  return state;
}
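
The acquire semantics documented in the comment can be expressed directly with C11 atomics. A sketch with stand-in types (the original state field is not declared _Atomic):

#include <stdatomic.h>

/* Sketch with stand-in types: the same acquire read using C11 atomics.
 * The _Atomic state field here is a hypothetical stand-in; the original
 * struct does not declare it that way. */
typedef int state_t;
typedef struct { _Atomic state_t state; } tracker_channel_c11_t;

static state_t state_get_acquire(const tracker_channel_c11_t *tc)
{
  return atomic_load_explicit(&tc->state, memory_order_acquire);
}
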
Example #3
/** Write to a NAP track channel's INIT register.
 * Sets the (deprecated) PRN field, the initial carrier phase, and the
 * initial code phase of a NAP track channel. The tracking channel will
 * start correlating with these parameters at the falling edge of the next
 * NAP internal timing strobe. Also writes the C/A code to the track
 * channel's code RAM.
 *
 * \note The track channel's UPDATE register, which sets the carrier and
 *       code phase rates, must also be written before the internal timing
 *       strobe goes low.
 *
 * \param channel          NAP track channel whose INIT register to write.
 * \param sid              Signal whose C/A code to track.
 * \param ref_timing_count Timing count at which code_phase was measured.
 * \param carrier_freq     Initial carrier frequency (Doppler), in Hz.
 * \param code_phase       Initial code phase, in chips.
 */
void nap_track_init(u8 channel, gnss_signal_t sid, u32 ref_timing_count,
                    float carrier_freq, float code_phase)
{
  struct nap_ch_state *s = &nap_ch_state[channel];
  memset(s, 0, sizeof(*s));

  u32 track_count = nap_timing_count() + 20000;
  float cp = propagate_code_phase(code_phase, carrier_freq,
                                  track_count - ref_timing_count);

  /* Contrive for the timing strobe to occur at or close to a
   * PRN edge (code phase = 0) */
  track_count += (NAP_FRONTEND_SAMPLE_RATE_Hz / GPS_CA_CHIPPING_RATE) * (1023.0-cp) *
                 (1.0 + carrier_freq / GPS_L1_HZ);

  nap_track_code_wr_blocking(channel, sid);
  nap_track_init_wr_blocking(channel, 0, 0, 0);

  double cp_rate = (1.0 + carrier_freq / GPS_L1_HZ) * GPS_CA_CHIPPING_RATE;
  nap_track_update(channel, carrier_freq, cp_rate, 0, 0);

  /* Schedule the timing strobe for start_sample_count. */
  track_count -= NAP_FRONTEND_SAMPLE_RATE_Hz / (2 * GPS_CA_CHIPPING_RATE);
  
  s->count_snapshot = track_count;
  s->carrier_phase = -s->carr_pinc;
  s->carr_pinc_prev = s->carr_pinc;
  s->code_pinc_prev = s->code_pinc;

  COMPILER_BARRIER();

  nap_timing_strobe(track_count);
  nap_timing_strobe_wait(100);
}
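
The strobe-alignment arithmetic pushes the timing strobe out to the next C/A code rollover (1023 chips), scaled by the Doppler correction factor. A worked example with assumed constants (16.368 MHz front-end sample rate is an assumption; the real values come from the project's headers):

#include <stdio.h>

int main(void)
{
  double sample_rate  = 16.368e6;  /* assumed NAP_FRONTEND_SAMPLE_RATE_Hz */
  double chip_rate    = 1.023e6;   /* GPS C/A chipping rate               */
  double cp           = 1000.0;    /* propagated code phase, chips        */
  double carrier_freq = 0.0;       /* Doppler, Hz; zero for simplicity    */

  /* Samples per chip (16) times the chips remaining until the next code
   * rollover (23), scaled by the Doppler correction factor (1 here). */
  double delay = (sample_rate / chip_rate) * (1023.0 - cp)
                 * (1.0 + carrier_freq / 1.57542e9);
  printf("push the strobe out by %.0f samples\n", delay); /* 368 */
  return 0;
}
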
Example #4
/** Return the unsigned difference between update_count and *val for a
 * tracker channel.
 *
 * \note This function allows some margin to avoid glitches in case values
 * are not read atomically from the tracking channel data.
 *
 * \param tracker_channel   Tracker channel to use.
 * \param val               Pointer to the value to be subtracted
 *                          from update_count.
 *
 * \return The unsigned difference between update_count and *val.
 */
static update_count_t update_count_diff(const tracker_channel_t *
                                        tracker_channel,
                                        const update_count_t *val)
{
  const tracker_common_data_t *common_data = &tracker_channel->common_data;
  update_count_t result = (update_count_t)(common_data->update_count - *val);
  COMPILER_BARRIER(); /* Prevent compiler reordering */
  /* Allow some margin in case values were not read atomically.
   * Treat a difference of [-10000, 0) as zero. */
  if (result > (update_count_t)(UINT32_MAX - 10000))
    return 0;
  else
    return result;
}
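
The margin check relies on unsigned wrap-around. A minimal worked example of the same arithmetic, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t update_count_t;

/* If *val was sampled a moment "ahead" of update_count because the two
 * fields were not read atomically, the raw unsigned difference wraps to a
 * value near UINT32_MAX, which the margin check maps back to zero. */
int main(void)
{
  update_count_t update_count = 5;   /* read first                   */
  update_count_t val = 8;            /* read later, already advanced */
  update_count_t result = (update_count_t)(update_count - val);
  printf("raw diff = %u\n", result);               /* 4294967293     */
  if (result > (update_count_t)(UINT32_MAX - 10000))
    result = 0;                                    /* treated as zero */
  printf("reported diff = %u\n", result);          /* 0              */
  return 0;
}
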
Example #5
/** Begin an AXI DMA transfer.
 *
 * \param ddp           Pointer to the axi_dma_dir_driver_t object.
 * \param data          Data to be transferred.
 * \param data_length   Length of the data to be transferred.
 * \param callback      Callback to be executed on completion.
 *
 * \note The callback will be executed from interrupt context.
 */
static void axi_dma_dir_transfer_begin(axi_dma_dir_driver_t *ddp,
                                       const uint8_t *data,
                                       uint32_t data_length,
                                       axi_dma_callback_t callback)
{
  axi_dma_dir_t *axi_dma_dir = (axi_dma_dir_t *)ddp->axi_dma_dir;
  osalDbgAssert(axi_dma_dir != 0, "DMA dir not present");

  ddp->callback = callback;

  axi_dma_dir->ADDR_LSB = (uint32_t)data;
  COMPILER_BARRIER(); /* Make sure LENGTH field is written last */
  axi_dma_dir->LENGTH = data_length;
}
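
The same "program everything, then write the register that starts the operation" shape applies to any memory-mapped device. A minimal sketch with a hypothetical register block (not the real AXI DMA layout):

#include <stdint.h>

#define COMPILER_BARRIER() __asm__ __volatile__("" ::: "memory")

/* Hypothetical device: writing GO latches ADDR and starts the operation. */
typedef struct {
  volatile uint32_t ADDR;
  volatile uint32_t GO;
} fake_dev_regs_t;

static void fake_dev_start(fake_dev_regs_t *dev, uint32_t addr)
{
  dev->ADDR = addr;
  /* The two volatile stores cannot be compiler-reordered with each other
   * anyway; the barrier additionally orders any non-volatile setup writes
   * (e.g. the driver's bookkeeping) before the GO store. */
  COMPILER_BARRIER();
  dev->GO = 1;
}
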
Example #6
/** Update the state of a tracker channel and its associated tracker instance.
 *
 * \note This function performs a release operation, meaning that it ensures
 * all prior memory accesses have completed before updating state information.
 *
 * \param tracker_channel   Tracker channel to use.
 * \param event             Event to process.
 */
static void event(tracker_channel_t *tracker_channel, event_t event)
{
  switch (event) {
  case EVENT_ENABLE: {
    assert(tracker_channel->state == STATE_DISABLED);
    assert(tracker_channel->tracker->active == false);
    tracker_channel->tracker->active = true;
    /* Sequence point for enable is setting channel state = STATE_ENABLED */
    COMPILER_BARRIER(); /* Prevent compiler reordering */
    tracker_channel->state = STATE_ENABLED;
  }
  break;

  case EVENT_DISABLE_REQUEST: {
    assert(tracker_channel->state == STATE_ENABLED);
    tracker_channel->state = STATE_DISABLE_REQUESTED;
  }
  break;

  case EVENT_DISABLE: {
    assert(tracker_channel->state == STATE_DISABLE_REQUESTED);
    tracker_channel->state = STATE_DISABLE_WAIT;
  }
  break;

  case EVENT_DISABLE_WAIT_COMPLETE: {
    assert(tracker_channel->state == STATE_DISABLE_WAIT);
    assert(tracker_channel->tracker->active == true);
    /* Sequence point for disable is setting channel state = STATE_DISABLED
     * and/or tracker active = false (order of these two is irrelevant here) */
    COMPILER_BARRIER(); /* Prevent compiler reordering */
    tracker_channel->tracker->active = false;
    tracker_channel->state = STATE_DISABLED;
  }
  break;
  }
}
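
A rendering of the enable sequence with C11 atomics, using stand-in types (a sketch; the original fields are not declared _Atomic):

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in types for illustration only. The release store on state
 * publishes the earlier write to active to any thread that acquire-loads
 * state and observes the enabled value. */
typedef struct { _Atomic bool active; } tracker_c11_t;
typedef struct { tracker_c11_t *tracker; _Atomic int state; } channel_c11_t;
enum { C11_STATE_DISABLED, C11_STATE_ENABLED };

static void channel_enable(channel_c11_t *ch)
{
  atomic_store_explicit(&ch->tracker->active, true, memory_order_relaxed);
  atomic_store_explicit(&ch->state, C11_STATE_ENABLED, memory_order_release);
}
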
Example #7
static void usart_support_init_rx(struct usart_support_s *sd)
{
  struct usart_rx_dma_state *s = &sd->rx;
  s->rd = s->rd_wraps = s->wr_wraps = 0;
  COMPILER_BARRIER();

  /* Setup RX DMA */
  dmaStreamSetMode(sd->rx.dma, sd->dmamode | STM32_DMA_CR_DIR_P2M |
                               STM32_DMA_CR_MINC | STM32_DMA_CR_TCIE |
                               STM32_DMA_CR_CIRC);
  dmaStreamSetTransactionSize(sd->rx.dma, USART_RX_BUFFER_LEN);
  dmaStreamSetPeripheral(sd->rx.dma, &sd->usart->DR);
  dmaStreamSetMemory0(sd->rx.dma, sd->rx.buff);
  chBSemObjectInit(&sd->rx.ready, TRUE);
  dmaStreamEnable(sd->rx.dma);

}
Example #8
int nvmed_queue_complete(NVMED_QUEUE* nvmed_queue) {
	NVMED* nvmed;
	NVMED_IOD* iod;
	volatile struct nvme_completion *cqe;
	u16 head, phase;
	int num_proc = 0;
	nvmed = nvmed_queue->nvmed;
	
	pthread_spin_lock(&nvmed_queue->cq_lock);
	head = nvmed_queue->cq_head;
	phase = nvmed_queue->cq_phase;
	for(;;) {
		cqe = (volatile struct nvme_completion *)&nvmed_queue->cqes[head];
		/* An entry is new only if its phase bit matches the locally
		 * tracked phase, which flips each time head wraps. */
		if((cqe->status & 1) != phase)
			break;

		if(++head == nvmed->dev_info->q_depth) {
			head = 0;
			phase = !phase;
		}
		
		iod = nvmed_queue->iod_arr + cqe->command_id;
		nvmed_complete_iod(iod);
		num_proc++;
		if(head == 0 || num_proc == COMPLETE_QUEUE_MAX_PROC) break;
	}
	if(head == nvmed_queue->cq_head && phase == nvmed_queue->cq_phase) {
		pthread_spin_unlock(&nvmed_queue->cq_lock);
		return num_proc;
	}

	COMPILER_BARRIER();
	*(volatile u32 *)nvmed_queue->cq_db = head;
	nvmed_queue->cq_head = head;
	nvmed_queue->cq_phase = phase;
	pthread_spin_unlock(&nvmed_queue->cq_lock);

	return num_proc;
}
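
The (cqe->status & 1) test is the NVMe phase-tag check. Isolated into a hypothetical helper for clarity:

#include <stdint.h>

/* Hypothetical helper: a CQ entry is new when the phase bit in its status
 * word matches the phase the host currently expects. The expected phase
 * flips each time head wraps, so entries left over from the previous lap
 * (which carry the old phase bit) are never consumed twice. */
static inline int cqe_is_new(uint16_t status, uint16_t expected_phase)
{
	return (status & 1) == expected_phase;
}
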
Example #9
/*
 * I/O Completion of specific I/O
 * target_id : submission id
 */
void nvmed_io_polling(NVMED_HANDLE* nvmed_handle, u16 target_id) {
	NVMED* nvmed;
	NVMED_QUEUE* nvmed_queue;
	NVMED_IOD* iod;
	volatile struct nvme_completion *cqe;
	u16 head, phase;
	nvmed_queue = HtoQ(nvmed_handle);
	nvmed = HtoD(nvmed_handle);

	pthread_spin_lock(&nvmed_queue->cq_lock);
	while(1) {
		head = nvmed_queue->cq_head;
		phase = nvmed_queue->cq_phase;
		iod = nvmed_queue->iod_arr + target_id;
		if(iod->status == IO_COMPLETE) {
			break;
		}
		cqe = (volatile struct nvme_completion *)&nvmed_queue->cqes[head];
		/* Busy-wait until the device posts the next completion entry. */
		while((cqe->status & 1) != phase)
			;

		if(++head == nvmed->dev_info->q_depth) {
			head = 0;
			phase = !phase;
		}

		iod = nvmed_queue->iod_arr + cqe->command_id;
		nvmed_complete_iod(iod);

		COMPILER_BARRIER();
		*(volatile u32 *)nvmed_queue->cq_db = head;
		nvmed_queue->cq_head = head;
		nvmed_queue->cq_phase = phase;
	}
	pthread_spin_unlock(&nvmed_queue->cq_lock);
}
Example #10
/*
 * Send I/O to submission queue and ring SQ Doorbell
 */
ssize_t nvmed_io(NVMED_HANDLE* nvmed_handle, u8 opcode, 
		u64 prp1, u64 prp2, void* prp2_addr, NVMED_CACHE *__cache, 
		unsigned long start_lba, unsigned int len, int flags, NVMED_AIO_CTX* context) {
	NVMED_QUEUE* nvmed_queue;
	NVMED* nvmed;
	struct nvme_command *cmnd;
	NVMED_IOD* iod;
	u16	target_id;
	NVMED_CACHE *cache = NULL;
	int i, num_cache;

	nvmed_queue = HtoQ(nvmed_handle);
	nvmed = HtoD(nvmed_handle);

	pthread_spin_lock(&nvmed_queue->sq_lock);

	while(1) {
		target_id = nvmed_queue->iod_pos++;
		iod = nvmed_queue->iod_arr + target_id;
		if(nvmed_queue->iod_pos == nvmed->dev_info->q_depth)
			nvmed_queue->iod_pos = 0;
		if(iod->status != IO_INIT)
			break;
	}

	iod->sq_id = nvmed_queue->sq_tail;
	iod->prp_addr = prp2_addr;
	iod->prp_pa = prp2;
	iod->status = IO_INIT;
	iod->num_cache = 0;
	iod->cache = NULL;
	iod->nvmed_handle = nvmed_handle;
	iod->context = context;
	if(iod->context!=NULL) {
		iod->context->num_init_io++;
		iod->context->status = AIO_PROCESS;
	}

	if(__cache != NULL) {
		num_cache = len / PAGE_SIZE;
		cache = __cache;
		iod->cache = calloc(num_cache, sizeof(NVMED_CACHE *));
		for(i=0; i<num_cache; i++) {
			iod->cache[i] = cache;
			cache = cache->cache_list.tqe_next;
		}
		iod->num_cache = num_cache;
	}

	cmnd = &nvmed_queue->sq_cmds[nvmed_queue->sq_tail];
	memset(cmnd, 0, sizeof(*cmnd));

	// Remap start_lba from partition-relative to device-absolute
	start_lba += nvmed->dev_info->start_sect;

	switch(opcode) {
		case nvme_cmd_flush:
			cmnd->rw.opcode = nvme_cmd_flush;
			cmnd->rw.command_id = target_id;
			cmnd->rw.nsid = nvmed->dev_info->ns_id;
			
			break;

		case nvme_cmd_write:
		case nvme_cmd_read:
			cmnd->rw.opcode = opcode;
			cmnd->rw.command_id = target_id;
			cmnd->rw.nsid = nvmed->dev_info->ns_id;
			cmnd->rw.prp1 = prp1;
			cmnd->rw.prp2 = prp2;
			cmnd->rw.slba = start_lba >> nvmed->dev_info->lba_shift;
			cmnd->rw.length = (len >> nvmed->dev_info->lba_shift) - 1;
			cmnd->rw.control = 0;
			cmnd->rw.dsmgmt = 0;
			
			break;
		
		case nvme_cmd_dsm:
			cmnd->dsm.opcode = nvme_cmd_dsm;
			cmnd->dsm.command_id = target_id;
			cmnd->dsm.nsid = nvmed->dev_info->ns_id;
			cmnd->dsm.prp1 = prp1;
			cmnd->dsm.prp2 = 0;
			cmnd->dsm.nr = 0;
			cmnd->dsm.attributes = NVME_DSMGMT_AD;
			
			break;
	}

	if(++nvmed_queue->sq_tail == nvmed->dev_info->q_depth) 
		nvmed_queue->sq_tail = 0;

	COMPILER_BARRIER();
	*(volatile u32 *)nvmed_queue->sq_db = nvmed_queue->sq_tail;

	pthread_spin_unlock(&nvmed_queue->sq_lock);
	
	/* If Sync I/O => Polling */
	if(__FLAG_ISSET(flags, HANDLE_SYNC_IO)) {
		nvmed_io_polling(nvmed_handle, target_id);
	}

	return len;
}
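
The submission side follows the same publish-then-doorbell discipline as the completion side. A generic sketch with a hypothetical helper (stand-in parameters, not the NVMeDirect API):

#include <stdint.h>
#include <string.h>

#define COMPILER_BARRIER() __asm__ __volatile__("" ::: "memory")

/* Hypothetical helper showing the pattern in isolation: publish the command
 * payload into its SQ slot, fence the compiler so the payload stores cannot
 * sink below the doorbell store, then ring the doorbell with the new tail. */
static void sq_submit(volatile uint32_t *sq_db, void *slot,
                      const void *cmd, size_t cmd_len, uint16_t new_tail)
{
	memcpy(slot, cmd, cmd_len);   /* command payload                */
	COMPILER_BARRIER();           /* payload before doorbell        */
	*sq_db = new_tail;            /* device may fetch the slot now  */
}
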
Example #11
/** Return the state of a tracker instance.
 *
 * \note This function performs an acquire operation, meaning that it ensures
 * the returned state was read before any subsequent memory accesses.
 *
 * \param tracker   Tracker to use.
 *
 * \return true if the tracker is active, false if inactive.
 */
static bool tracker_active(const tracker_t *tracker)
{
  bool active = tracker->active;
  COMPILER_BARRIER(); /* Prevent compiler reordering */
  return active;
}
Example #12
static void __clhepfl_mutex_unlock(clhepfl_mutex_t *impl,
                                   clhepfl_context_t *me) {
    COMPILER_BARRIER();
    impl->head->spin = UNLOCKED;
}
Example #13
void __ticketepfl_mutex_unlock(ticketepfl_mutex_t *impl) {
    PREFETCHW(&impl->u.u);
    COMPILER_BARRIER();
    impl->u.s.grant++;
}
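
For context, the matching acquire side of a grant/request ticket lock might look like the following sketch; the layout and the request field are assumptions inferred from impl->u.u and impl->u.s.grant above, and CPU_PAUSE assumes x86:

#include <stdint.h>

#define CPU_PAUSE() __asm__ __volatile__("pause")

/* Stand-in for the lock's layout; only u.s.grant appears in the original,
 * so u.s.request is an assumption. */
typedef struct {
    union {
        struct { volatile uint16_t grant; volatile uint16_t request; } s;
        volatile uint32_t u;
    } u;
} ticket_lock_sketch_t;

/* Hypothetical acquire side: atomically take a ticket, then spin until
 * grant catches up to it. */
static void ticket_lock_sketch(ticket_lock_sketch_t *impl) {
    uint16_t my_ticket =
        __atomic_fetch_add(&impl->u.s.request, 1, __ATOMIC_ACQUIRE);
    while (impl->u.s.grant != my_ticket)
        CPU_PAUSE();
}
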
Example #14
void __ttasepfl_mutex_unlock(ttasepfl_mutex_t *impl) {
    COMPILER_BARRIER();
    impl->spin_lock = UNLOCKED;
}
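
The matching acquire side of a test-and-test-and-set lock, as a self-contained sketch (not taken from the original source; CPU_PAUSE assumes x86):

#define CPU_PAUSE() __asm__ __volatile__("pause")

enum { UNLOCKED = 0, LOCKED = 1 };
typedef struct { volatile int spin_lock; } ttas_lock_sketch_t;

/* Hypothetical acquire side: spin with plain loads until the lock looks
 * free ("test"), then attempt the atomic swap ("test-and-set"); retry if
 * another thread won the race. */
static void ttas_lock_sketch(ttas_lock_sketch_t *impl) {
    for (;;) {
        while (impl->spin_lock == LOCKED)   /* read-only, cache-local spin */
            CPU_PAUSE();
        if (__atomic_exchange_n(&impl->spin_lock, LOCKED,
                                __ATOMIC_ACQUIRE) == UNLOCKED)
            return;                         /* swap observed UNLOCKED */
    }
}
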