Example #1
void bbData(uint8_t d)
{
	memory_barrier();
	gpio_set(PIN_DC);
	gpio_clear(PIN_CE0);
	uint8_t bit = 0;
	// fast bit-bang data write on MOSI
	for(bit = 0x80; bit; bit >>= 1) 
	{
		if(d & bit) 
		{
			//digitalWrite(_mosi, HIGH); 
			// *mosiport |=  mosipinmask;
			gpio_set(PIN_MOSI);
		} 
		else 
		{
			//digitalWrite(_mosi, LOW); 
			// *mosiport &= ~mosipinmask;
			gpio_clear(PIN_MOSI);
		}
		//digitalWrite(_sclk, HIGH);
		// *clkport |=  clkpinmask;
		gpio_set(PIN_SCLK);
		//digitalWrite(_sclk, LOW);
		//*clkport &= ~clkpinmask;
		gpio_clear(PIN_SCLK);
	}
	gpio_set(PIN_CE0);
	memory_barrier();

}
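None of these listings define memory_barrier() itself. A sketch of what it typically expands to, offered as an assumption since each project quoted here ships its own definition:

/* Illustrative only: each project defines its own memory_barrier().
 * ARMv6 (e.g. the BCM2835 behind the Raspberry Pi GPIO code above) spells
 * the data memory barrier as a CP15 write; elsewhere GCC's builtin full
 * barrier is a reasonable stand-in. */
static inline void memory_barrier(void)
{
#if defined(__ARM_ARCH_6__)
	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" : : "r"(0) : "memory");
#else
	__sync_synchronize();	/* emits the target's fencing instruction */
#endif
}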
Example #2
    void enqueue(T const & t)
    {
        node * n = alloc_node(t);

        for (;;)
        {
            atomic_node_ptr tail (tail_);
            memory_barrier();
            atomic_node_ptr next (tail->next);
            memory_barrier();

            if (likely(tail == tail_))
            {
                if (next.get_ptr() == 0)
                {
                    if ( tail->next.CAS(next, n) )
                    {
                        tail_.CAS(tail, n);
                        return;
                    }
                }
                else
                    tail_.CAS(tail, next.get_ptr());
            }
        }
    }
Example #3
/**
 * Unregister MMIO region from a cell.
 * @param cell		Cell the region belongs to.
 * @param start		Region start address as it was passed to
 * 			mmio_region_register().
 *
 * @see mmio_region_register
 */
void mmio_region_unregister(struct cell *cell, unsigned long start)
{
	int index;

	spin_lock(&cell->mmio_region_lock);

	index = find_region(cell, start, 1, NULL, NULL);
	if (index >= 0) {
		/*
		 * Advance the generation to odd value, indicating that
		 * modifications are ongoing. Commit this change via a barrier
		 * so that other CPUs will see it before we start.
		 */
		cell->mmio_generation++;
		memory_barrier();

		for (/* empty */; index < cell->num_mmio_regions; index++)
			copy_region(cell, index + 1, index);

		cell->num_mmio_regions--;

		/*
		 * Ensure all regions and their number are committed before
		 * advancing the generation.
		 */
		memory_barrier();
		cell->mmio_generation++;
	}
	spin_unlock(&cell->mmio_region_lock);
}
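The generation counter above is the write side of a seqlock: it is odd while the list is being modified and even otherwise, and an entry whose size was forced to 0 can never match an address (which is what copy_region() in Example #16 relies on). A hypothetical reader sketch that pairs each barrier and retries on a torn read; the project's real lookup path is more involved:

/* Hypothetical reader: search the region list without taking
 * mmio_region_lock, retrying while the generation is odd (a writer is
 * mid-update) or has changed underneath us. */
static int find_region_lockless(struct cell *cell, unsigned long addr)
{
	unsigned long gen;
	unsigned int i;
	int hit;

	do {
		gen = cell->mmio_generation;
		memory_barrier();	/* load the generation before the list */
		hit = -1;
		for (i = 0; i < cell->num_mmio_regions; i++)
			if (addr >= cell->mmio_locations[i].start &&
			    addr - cell->mmio_locations[i].start <
			    cell->mmio_locations[i].size)
				hit = i;
		memory_barrier();	/* load the list before rechecking */
	} while ((gen & 1) || gen != cell->mmio_generation);

	return hit;
}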
Example #4
void shm_update(struct gps_context_t *context, struct gps_data_t *gpsdata)
/* export an update to all listeners */
{
    if (context->shmexport != NULL)
    {
	static int tick;
	volatile struct shmexport_t *shared = (struct shmexport_t *)context->shmexport;

	++tick;
	/*
	 * Following block of instructions must not be reordered, otherwise
	 * havoc will ensue.
	 *
	 * This is a simple optimistic-concurrency technique.  We write
	 * the second bookend first, then the data, then the first bookend.
	 * Reader copies what it sees in normal order; that way, if we
	 * start to write the segment during the read, the second bookend will
	 * get clobbered first and the data can be detected as bad.
	 */
	shared->bookend2 = tick;
	memory_barrier();
	memcpy((void *)(context->shmexport + offsetof(struct shmexport_t, gpsdata)),
	       (void *)gpsdata,
	       sizeof(struct gps_data_t));
	memory_barrier();
#ifndef USE_QT
	shared->gpsdata.gps_fd = SHM_PSEUDO_FD;
#else
	shared->gpsdata.gps_fd = (void *)(intptr_t)SHM_PSEUDO_FD;
#endif /* USE_QT */
	memory_barrier();
	shared->bookend1 = tick;
    }
}
Example #5
    bool dequeue (T * ret)
    {
        for (;;)
        {
            atomic_node_ptr head (head_);
            memory_barrier();

            atomic_node_ptr tail(tail_);
            node * next = head->next.get_ptr();
            memory_barrier();

            if (likely(head == head_))
            {
                if (head.get_ptr() == tail.get_ptr())
                {
                    if (next == 0)
                        return false;

                    tail_.CAS(tail, next);
                }
                else
                {
                    *ret = next->data;
                    if (head_.CAS(head, next))
                    {
                        dealloc_node(head.get_ptr());

                        return true;
                    }
                }
            }
        }
    }
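Examples #2 and #5 are the Michael-Scott lock-free queue. For comparison, a sketch of the same enqueue written against C11 <stdatomic.h>, where acquire loads stand in for the explicit memory_barrier() calls; this simplified version drops the version tag that atomic_node_ptr carries, so unlike the original it is not ABA-safe once nodes are recycled:

#include <stdatomic.h>
#include <stdlib.h>

struct msq_node {
	int data;
	_Atomic(struct msq_node *) next;
};

struct msq {
	_Atomic(struct msq_node *) head;
	_Atomic(struct msq_node *) tail;
};

static void msq_enqueue(struct msq *q, int v)
{
	struct msq_node *n = malloc(sizeof(*n));

	n->data = v;
	atomic_store_explicit(&n->next, NULL, memory_order_relaxed);

	for (;;) {
		struct msq_node *tail = atomic_load_explicit(&q->tail, memory_order_acquire);
		struct msq_node *next = atomic_load_explicit(&tail->next, memory_order_acquire);

		if (tail != atomic_load_explicit(&q->tail, memory_order_acquire))
			continue;		/* tail moved underneath us: retry */

		if (next == NULL) {
			/* tail really is last: try to link the new node */
			if (atomic_compare_exchange_weak(&tail->next, &next, n)) {
				/* swing the tail; failure means another thread helped */
				atomic_compare_exchange_strong(&q->tail, &tail, n);
				return;
			}
		} else {
			/* tail is lagging: help advance it, then retry */
			atomic_compare_exchange_strong(&q->tail, &tail, next);
		}
	}
}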
Example #6
void Carrier::clear ()
{
    memory_barrier();
    m_support.zero();
    zero_blocks(m_reps, 1 + item_dim());
    m_rep_count = 0;
    m_item_count = 0;
    memory_barrier();
}
Example #7
int gps_shm_read(struct gps_data_t *gpsdata)
/* read an update from the shared-memory segment */
{
    if (gpsdata->privdata == NULL)
	return -1;
    else
    {
	int before, after;
	void *private_save = gpsdata->privdata;
	volatile struct shmexport_t *shared = (struct shmexport_t *)PRIVATE(gpsdata)->shmseg;
	struct gps_data_t noclobber;

	/*
	 * Following block of instructions must not be reordered,
	 * otherwise havoc will ensue.  The memory_barrier() call
	 * should prevent reordering of the data accesses.
	 *
	 * This is a simple optimistic-concurrency technique.  We wrote
	 * the second bookend first, then the data, then the first bookend.
	 * Reader copies what it sees in normal order; that way, if we
	 * start to write the segment during the read, the second bookend will
	 * get clobbered first and the data can be detected as bad.
	 */
	before = shared->bookend1;
	memory_barrier();
	(void)memcpy((void *)&noclobber,
		     (void *)&shared->gpsdata,
		     sizeof(struct gps_data_t));
	memory_barrier();
	after = shared->bookend2;

	if (before != after)
	    return 0;
	else {
	    (void)memcpy((void *)gpsdata,
			 (void *)&noclobber,
			 sizeof(struct gps_data_t));
	    gpsdata->privdata = private_save;
#ifndef USE_QT
	    gpsdata->gps_fd = SHM_PSEUDO_FD;
#else
	    gpsdata->gps_fd = (void *)(intptr_t)SHM_PSEUDO_FD;
#endif /* USE_QT */
	    PRIVATE(gpsdata)->tick = after;
	    if ((gpsdata->set & REPORT_IS)!=0) {
		if (gpsdata->fix.mode >= 2)
		    gpsdata->status = STATUS_FIX;
		else
		    gpsdata->status = STATUS_NO_FIX;
		gpsdata->set = STATUS_SET;
	    }
	    return (int)sizeof(struct gps_data_t);
	}
    }
}
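A return of 0 above signals a torn read, so the natural caller-side pattern is a bounded retry. A hypothetical helper, not part of the gpsd API:

/* Retry gps_shm_read() a few times when the writer keeps clobbering the
 * segment mid-read; returns whatever the last attempt returned. */
int gps_shm_read_retry(struct gps_data_t *gpsdata, int tries)
{
    int status = 0;

    while (tries-- > 0) {
	status = gps_shm_read(gpsdata);
	if (status != 0)
	    break;	/* fresh data, or -1 on a hard error */
    }
    return status;
}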
Example #8
void ntp_write(volatile struct shmTime *shmseg,
	       struct timedelta_t *td, int precision, int leap_notify)
/* put a received fix time into shared memory for NTP */
{
    struct tm tm;

    /*
     * Insist that leap seconds only happen in June and December:
     * GPS emits leap pending for 3 months prior to insertion, while
     * NTP expects leap pending for only 1 month prior to insertion.
     * Per http://bugs.ntp.org/1090
     *
     * ITU-R TF.460-6, Section 2.1, says leap seconds occur primarily
     * in Jun/Dec but may also be in March or September.
     */
    (void)gmtime_r( &(td->real.tv_sec), &tm);
    if ( 5 != tm.tm_mon && 11 != tm.tm_mon ) {
        /* Not June, not December, no way */
        leap_notify = LEAP_NOWARNING;
    }

    /* we use the shmTime mode 1 protocol
     *
     * ntpd does this:
     *
     * reads valid.
     * IFF valid is 1
     *    reads count
     *    reads values
     *    reads count
     *    IFF count unchanged
     *        use values
     *    clear valid
     *
     */

    shmseg->valid = 0;
    shmseg->count++;
    /* We need a memory barrier here to prevent write reordering by
     * the compiler or CPU cache */
    memory_barrier();
    shmseg->clockTimeStampSec = (time_t)td->real.tv_sec;
    shmseg->clockTimeStampUSec = (int)(td->real.tv_nsec/1000);
    shmseg->clockTimeStampNSec = (unsigned)td->real.tv_nsec;
    shmseg->receiveTimeStampSec = (time_t)td->clock.tv_sec;
    shmseg->receiveTimeStampUSec = (int)(td->clock.tv_nsec/1000);
    shmseg->receiveTimeStampNSec = (unsigned)td->clock.tv_nsec;
    shmseg->leap = leap_notify;
    shmseg->precision = precision;
    memory_barrier();
    shmseg->count++;
    shmseg->valid = 1;
}
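The comment above spells out the reader half of the mode 1 protocol; rendered as code it looks roughly like this (a paraphrase of what ntpd does, showing only the seconds field):

static int ntp_read(volatile struct shmTime *shmseg, time_t *clock_sec)
{
    int count;

    if (shmseg->valid != 1)
	return 0;
    count = shmseg->count;
    memory_barrier();		/* read count before the values */
    *clock_sec = shmseg->clockTimeStampSec;
    memory_barrier();		/* read the values before rereading count */
    if (count != shmseg->count)
	return 0;		/* torn read: the writer got in between */
    shmseg->valid = 0;
    return 1;
}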
Example #9
/**
 * Register a MMIO region access handler for a cell.
 * @param cell		Cell that can access the region.
 * @param start		Region start address in cell address space.
 * @param size		Region size.
 * @param handler	Access handler.
 * @param handler_arg	Opaque argument to pass to handler.
 *
 * @see mmio_region_unregister
 */
void mmio_region_register(struct cell *cell, unsigned long start,
			  unsigned long size, mmio_handler handler,
			  void *handler_arg)
{
	unsigned int index, n;

	spin_lock(&cell->mmio_region_lock);

	if (cell->num_mmio_regions >= cell->max_mmio_regions) {
		spin_unlock(&cell->mmio_region_lock);

		printk("WARNING: Overflow during MMIO region registration!\n");
		return;
	}

	for (index = 0; index < cell->num_mmio_regions; index++)
		if (cell->mmio_locations[index].start > start)
			break;

	/*
	 * Set and commit a dummy region at the end of the list so that
	 * we can safely grow it.
	 */
	cell->mmio_locations[cell->num_mmio_regions].start = -1;
	cell->mmio_locations[cell->num_mmio_regions].size = 0;
	memory_barrier();

	/*
	 * Extend region list by one so that we can start moving entries.
	 * Commit this change via a barrier so that the current last element
	 * will remain visible when moving it up.
	 */
	cell->num_mmio_regions++;
	memory_barrier();

	for (n = cell->num_mmio_regions - 1; n > index; n--)
		copy_region(cell, n - 1, n);

	/* Invalidate the new region entry first (see also copy_region()). */
	cell->mmio_locations[index].size = 0;
	memory_barrier();

	cell->mmio_locations[index].start = start;
	cell->mmio_handlers[index].handler = handler;
	cell->mmio_handlers[index].arg = handler_arg;
	/* Ensure all fields are committed before activating the region. */
	memory_barrier();

	cell->mmio_locations[index].size = size;

	spin_unlock(&cell->mmio_region_lock);
}
Example #10
void setupBitBanger()
{
	if (g_HWSPI)
	{
		return;
	}
	memory_barrier();
	int i;
	for (i = 7; i <= 11; i++) gpio_configure(i, Output);
	// GPIO 10 (MOSI), GPIO 11 (SCLK), GPIO 9(MISO), GPIO 8(CE0), GPIO 7(CE1)
	memory_barrier();
	g_HWSPI=0;
}
Example #11
uint32_t UARTD_DisableTxChannels( UartDma *pUartd, UartChannel *pTxCh)
{
	assert(pTxCh);
	
	/* Disable the USART transmitter. */
	UART_SetTransmitterEnabled ( pUartd->pUartHw , DISABLE);
	
	XDMAD_StopTransfer(pUartd->pXdmad, pTxCh->ChNum);
	
	XDMAD_SetCallback(pUartd->pXdmad, pTxCh->ChNum, NULL, NULL);
	
	 /* Free allocated DMA channel for USART TX. */
	if(XDMAD_FreeChannel( pUartd->pXdmad, pTxCh->ChNum) != XDMAD_OK) {
	  return USARTD_ERROR;
	}
		
	if (pTxCh->dmaProgrammingMode == XDMAD_LLI) {
		free(pTxCh->pLLIview);
		pTxCh->pLLIview = NULL;
	}
	
	pTxCh->sempaphore = 1;
	memory_barrier();
	return 0;
}
Example #12
void setupSPI()
{
	memory_barrier(); // Don't know if other peripherals have been accessed
	spi_init(8000000); // init SPI at 32MHz div8
	HW.SPI0.CS.B.LEN = 1; // Set up LoSSI mode
	//HW.SPI0.CS.B.LEN_LONG=1;
	//HW.SPI0.CS.B.CSPOL0=0; // CS is active-low
	//HW.SPI0.CS.B.CSPOL=0; // CS is active-low
	// ILI9341 likes CPHA=1, CPOL=0
	//HW.SPI0.CS.B.CPHA=0;	
	//HW.SPI0.CS.B.CPOL=0;
	//HW.SPI0.CS.B.ADCS=0;
	HW.SPI0.CS.B.CLEAR=0b11;
	memory_barrier();
	g_HWSPI = 1;
}
Example #13
uint8_t readCommand8(uint8_t c)
{
	if (!g_HWSPI)
	{
		return 0;
	}
	writeCommand(0xd9);
	writeData(0x10);
	writeCommand(c);
	memory_barrier();
	HW.SPI0.CS.B.REN=1;		// BCM2835 ref: Set REN to read from tristate
	uint8_t r = spi_read();
	HW.SPI0.CS.B.REN=0;		// Set 0 to be safe
	memory_barrier();
	return r;
}
Example #14
static void cpu_init(struct per_cpu *cpu_data)
{
	int err = -EINVAL;

	printk(" CPU %d... ", cpu_data->cpu_id);

	if (!cpu_id_valid(cpu_data->cpu_id))
		goto failed;

	cpu_data->cell = &root_cell;

	err = arch_cpu_init(cpu_data);
	if (err)
		goto failed;

	printk("OK\n");

	/*
	 * If this CPU is last, make sure everything was committed before we
	 * signal the other CPUs spinning on initialized_cpus that they can
	 * continue.
	 */
	memory_barrier();
	initialized_cpus++;
	return;

failed:
	printk("FAILED\n");
	if (!error)
		error = err;
}
Example #15
void arch_resume_cpu(unsigned int cpu_id)
{
	/* make any state changes visible before releasing the CPU */
	memory_barrier();

	per_cpu(cpu_id)->stop_cpu = false;
}
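A hypothetical sketch of the other side of this handshake, assuming stop_cpu is declared volatile in struct per_cpu and a cpu_relax() like the one in Example #30:

/* Parked CPU: spin on the flag that arch_resume_cpu() clears, then pick up
 * whatever state changes the releasing CPU published before its barrier. */
void arch_cpu_wait_for_resume(struct per_cpu *cpu_data)
{
	while (cpu_data->stop_cpu)
		cpu_relax();

	memory_barrier();
}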
Example #16
static void copy_region(struct cell *cell, unsigned int src, unsigned dst)
{
	/*
	 * Invalidate destination region by shrinking it to size 0. This has to
	 * be made visible to other CPUs via a memory barrier before
	 * manipulating other destination fields.
	 */
	cell->mmio_locations[dst].size = 0;
	memory_barrier();

	cell->mmio_locations[dst].start = cell->mmio_locations[src].start;
	cell->mmio_handlers[dst] = cell->mmio_handlers[src];
	/* Ensure all fields are committed before activating the region. */
	memory_barrier();

	cell->mmio_locations[dst].size = cell->mmio_locations[src].size;
}
Example #17
/**
 * \brief Send an instruction over SPI or QSPI.
 *
 * \param qspi       Pointer to a Qspi instance.
 * \param pFrame     Pointer to the instruction frame to send.
 * \param ReadWrite  Access type: non-zero writes the frame data, zero reads it.
 */
extern void QSPI_SendFrame( Qspi* qspi, qspiFrame *pFrame, AccesType  ReadWrite)
{  
    uint32_t regIFR, regICR, DummyRead;
    uint32_t *pQspiBuffer = (uint32_t *)QSPIMEM_ADDR;

    assert((qspi->QSPI_MR) & QSPI_MR_SMM);

    regIFR = (pFrame->spiMode | QSPI_IFR_INSTEN |
              (pFrame->OptionLen << QSPI_IFR_OPTL_Pos) |
              (pFrame->DummyCycles << QSPI_IFR_NBDUM_Pos) |
              (pFrame->ContinuousRead << 14));
    // Write the instruction to reg
    regICR = ( QSPI_ICR_OPT(pFrame->Option) | QSPI_ICR_INST(pFrame->Instruction));

    if(pFrame->OptionEn)
    {
        regIFR|=QSPI_IFR_OPTEN;
    }

    /* Instruction frame without data: only the instruction (and optionally
       an address) is sent. */
    if(!(pFrame->DataSize))
    {
        if(pFrame->InstAddrFlag)                            // frame carries an address: put it in the IAR register
        {
            qspi->QSPI_IAR = pFrame->InstAddr;
            regIFR |= QSPI_IFR_ADDREN;
        }
        qspi->QSPI_ICR = regICR;                            // update the Instruction Code register
        qspi->QSPI_IFR = regIFR;                            // write the Instruction Frame register
    }
    else  /* Instruction frame with Data and Instruction**/
    {    
        regIFR |= QSPI_IFR_DATAEN;    
        if(ReadWrite)
        {
            regIFR |= QSPI_IFR_TFRTYP_TRSFR_WRITE;      
            qspi->QSPI_ICR = regICR;
            qspi->QSPI_IFR = regIFR ;
            DummyRead =  qspi->QSPI_IFR;                        // to synchronize system bus accesses   
            if(pFrame->InstAddrFlag)
            {
                pQspiBuffer +=  pFrame->InstAddr;
            }
            memcpy(pQspiBuffer  ,pFrame->pData,  pFrame->DataSize); 
        } 
        else
        {      
            qspi->QSPI_ICR = regICR;
            qspi->QSPI_IFR = regIFR ;
            DummyRead =  qspi->QSPI_IFR;                        // to synchronize system bus accesses   
            memcpy(pFrame->pData,  pQspiBuffer,  pFrame->DataSize); 
        }

    }
    memory_barrier();
    qspi->QSPI_CR = QSPI_CR_LASTXFER;                     // End transmission after all data has been sent
    while(!(qspi->QSPI_SR & QSPI_SR_INSTRE));             // poll SR until the instruction has ended
}
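A hypothetical usage sketch for the routine above: an instruction-only frame (no address, no data phase). The field names come from the code; the QSPI instance name and the 0x06 opcode (a common serial-flash Write Enable) are assumptions:

qspiFrame frame = { 0 };
frame.Instruction = 0x06;                       /* assumed opcode: Write Enable */
frame.spiMode = QSPI_IFR_WIDTH_SINGLE_BIT_SPI;  /* single-bit SPI transfer width */
QSPI_SendFrame(QSPI, &frame, 0);                /* ReadWrite is unused when DataSize == 0 */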
Example #18
static inline int _get_height(struct skiplist *sl)
{
    int h;

    h = sl->height;
    memory_barrier();

    return h;
}
Example #19
static inline struct skipnode *_get_next(struct skipnode *x, int height)
{
    struct skipnode *next;

    next = x->next[height];
    memory_barrier();

    return next;
}
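These two accessors put a barrier after each plain load so lock-free readers observe a node's links before anything published later. A hypothetical lookup built on top of them, assuming the skiplist has a sentinel header node and that skipnode carries an int key:

static struct skipnode *skiplist_lookup(struct skiplist *sl, int key)
{
    struct skipnode *x = sl->header;	/* assumed sentinel head node */
    int h = _get_height(sl) - 1;

    /* descend level by level, advancing while the next key is too small */
    for (; h >= 0; h--) {
	struct skipnode *next = _get_next(x, h);

	while (next != NULL && next->key < key) {
	    x = next;
	    next = _get_next(x, h);
	}
    }
    x = _get_next(x, 0);
    return (x != NULL && x->key == key) ? x : NULL;
}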
Example #20
/**
 * Register a MMIO region access handler for a cell.
 * @param cell		Cell that can access the region.
 * @param start		Region start address in cell address space.
 * @param size		Region size.
 * @param handler	Access handler.
 * @param handler_arg	Opaque argument to pass to handler.
 *
 * @see mmio_region_unregister
 */
void mmio_region_register(struct cell *cell, unsigned long start,
			  unsigned long size, mmio_handler handler,
			  void *handler_arg)
{
	unsigned int index, n;

	spin_lock(&cell->mmio_region_lock);

	if (cell->num_mmio_regions >= cell->max_mmio_regions) {
		spin_unlock(&cell->mmio_region_lock);

		printk("WARNING: Overflow during MMIO region registration!\n");
		return;
	}

	for (index = 0; index < cell->num_mmio_regions; index++)
		if (cell->mmio_locations[index].start > start)
			break;

	/*
	 * Advance the generation to odd value, indicating that modifications
	 * are ongoing. Commit this change via a barrier so that other CPUs
	 * will see this before we start changing any field.
	 */
	cell->mmio_generation++;
	memory_barrier();

	for (n = cell->num_mmio_regions; n > index; n--)
		copy_region(cell, n - 1, n);

	cell->mmio_locations[index].start = start;
	cell->mmio_locations[index].size = size;
	cell->mmio_handlers[index].function = handler;
	cell->mmio_handlers[index].arg = handler_arg;

	cell->num_mmio_regions++;

	/* Ensure all fields are committed before advancing the generation. */
	memory_barrier();
	cell->mmio_generation++;

	spin_unlock(&cell->mmio_region_lock);
}
Example #21
/**
  Performs a processor-specific memory barrier operation.

  @param  None

  @return
  None.

*/
BAM_API_NON_PAGED void bam_osal_memorybarrier(void)
{
#if defined(BAM_MBA)
    __asm__ __volatile__( "barrier\n" );
#elif defined(BAM_TZOS)
    memory_barrier();
#else
    DALFW_MemoryBarrier();
#endif
}
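Since this wrapper exists precisely because the right barrier differs per platform, it helps to separate the two strengths a definition can have. General GCC background, not this codebase's macros:

/* Compiler-only barrier: forbids the compiler from reordering memory
 * accesses across it, but emits no instruction, so the CPU still may. */
#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")

/* Full barrier: GCC builtin that also emits the target's fencing
 * instruction (mfence on x86, dmb on ARMv7, ...). */
#define cpu_full_barrier()	__sync_synchronize()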
Example #22
/**
 * \brief Starts a UART DMA receive transfer. This is a non-blocking function;
 * it returns as soon as the transfer has been started.
 *
 * \param pUartd  Pointer to a UartDma instance.
 * \returns 0 if the transfer has been started successfully; otherwise returns
 * UARTD_ERROR_LOCK if the driver is in use, or UARTD_ERROR if the command is
 * not valid.
 */
uint32_t UARTD_RcvData( UartDma *pUartd)
{    

    pUartd->pRxChannel->sempaphore=0;
    memory_barrier();
    /* Start DMA 0(RX) && 1(TX) */
    if (XDMAD_StartTransfer( pUartd->pXdmad, pUartd->pRxChannel->ChNum )) 
        return USARTD_ERROR_LOCK;
    
    return 0;
}
Example #23
/**
 * \brief UART xDMA Tx callback.
 * Invoked when the USART DMA transmission is done.
 * \param channel DMA channel.
 * \param pArg Pointer to callback argument - Pointer to UartDma instance.
 */
static void UARTD_Tx_Cb(uint32_t channel, UartDma* pArg)
{
    UartChannel *pUartdCh = pArg->pTxChannel;
    if (channel != pUartdCh->ChNum)
        return;

    /* Release the DMA channels */
    XDMAD_FreeChannel(pArg->pXdmad, pUartdCh->ChNum);
    pUartdCh->sempaphore = 1;
    memory_barrier();
}
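Examples #11, #22 and #24 clear this same sempaphore flag before starting a transfer, and the callback above sets it on completion; a hypothetical wait helper closes the loop (the field would have to be volatile for the spin to be safe):

static void UARTD_WaitTxDone(UartDma *pUartd)
{
    /* spin until UARTD_Tx_Cb() signals completion; the barrier pairs with
     * the one at the end of the callback above */
    while (!pUartd->pTxChannel->sempaphore)
	;	/* a real driver might sleep or WFI here instead */
    memory_barrier();
}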
Example #24
/**
 * \brief Starts a UART DMA transmit transfer. This is a non-blocking
 * function; it returns as soon as the transfer has been started.
 *
 * \param pUartd  Pointer to a UartDma instance.
 * \returns 0 if the transfer has been started successfully; otherwise returns
 * UARTD_ERROR_LOCK if the driver is in use, or UARTD_ERROR if the command is
 * not valid.
 */
uint32_t UARTD_SendData( UartDma *pUartd)
{
	/* Start DMA 0(RX) && 1(TX) */
	SCB_CleanInvalidateDCache();
	pUartd->pTxChannel->sempaphore=0;
	memory_barrier();
	if (XDMAD_StartTransfer( pUartd->pXdmad, pUartd->pTxChannel->ChNum )) 
		return USARTD_ERROR_LOCK;
	
	return 0;
}
Example #25
/**
 * \brief Starts a USART DMA receive transfer. This is a non-blocking
 * function; it returns as soon as the transfer has been started.
 *
 * \param pUsartd  Pointer to a UsartDma instance.
 * \returns 0 if the transfer has been started successfully; otherwise returns
 * USARTD_ERROR_LOCK if the driver is in use, or USARTD_ERROR if the command
 * is not valid.
 */
uint32_t USARTD_RcvData( UsartDma *pUsartd)
{

    while(!pUsartd->pRxChannel->Done);
    /* Start DMA 0(RX) && 1(TX) */
    if (XDMAD_StartTransfer( pUsartd->pXdmad, pUsartd->pRxChannel->ChNum ))
        return USARTD_ERROR_LOCK;
    pUsartd->pRxChannel->Done=0;
    memory_barrier();
    return 0;
}
Example #26
/**
 * \brief Send an instruction over SPI or QSPI, transferring the data through
 * the QSPI memory space.
 *
 * \param qspi       Pointer to a Qspi instance.
 * \param pFrame     Pointer to the instruction frame to send.
 * \param ReadWrite  Access type: non-zero writes the frame data, zero reads it.
 */
extern void QSPI_SendFrameToMem( Qspi* qspi, qspiFrame *pFrame, AccesType  ReadWrite)
{
    uint32_t regIFR, regICR, DummyRead ;
    uint8_t *pQspiMem = (uint8_t *)QSPIMEM_ADDR;

    assert((qspi->QSPI_MR) & QSPI_MR_SMM);  

    regIFR = (pFrame->spiMode | QSPI_IFR_INSTEN | QSPI_IFR_DATAEN |
              QSPI_IFR_ADDREN | (pFrame->OptionLen << QSPI_IFR_OPTL_Pos) |
              (pFrame->DummyCycles << QSPI_IFR_NBDUM_Pos) |
              (pFrame->ContinuousRead << 14));
    // Write the instruction to reg
    regICR = ( QSPI_ICR_OPT(pFrame->Option) | QSPI_ICR_INST(pFrame->Instruction));
    if(pFrame->OptionEn)
    {
        regIFR|=QSPI_IFR_OPTEN;
    }
    pQspiMem +=  pFrame->InstAddr;
    if(ReadWrite)
    {   
        regIFR |= QSPI_IFR_TFRTYP_TRSFR_WRITE_MEMORY;
        memory_barrier();
        qspi->QSPI_ICR = regICR;
        qspi->QSPI_IFR = regIFR ;
        DummyRead =  qspi->QSPI_IFR;                // to synchronize system bus accesses  

        memcpy(pQspiMem  ,pFrame->pData,  pFrame->DataSize); 

    }
    else
    {
        regIFR |= QSPI_IFR_TFRTYP_TRSFR_READ_MEMORY;
        memory_barrier();
        qspi->QSPI_ICR = regICR;
        qspi->QSPI_IFR = regIFR ;
        DummyRead =  qspi->QSPI_IFR;                                                // to synchronize system bus accesses 
        memcpy(pFrame->pData, pQspiMem , pFrame->DataSize);   //  Read QSPI AHB memory space 

    } 
    memory_barrier();
    qspi->QSPI_CR = QSPI_CR_LASTXFER;             // End transmission after all data has been sent
    while(!(qspi->QSPI_SR & QSPI_SR_INSTRE));     // poll SR until the instruction has ended

}
Example #27
File: main.c Project: gstroe/Arm
/**
* \brief This function updates the Count variable of the ring buffer.
**/
__STATIC_INLINE void _UpdateCount(void)
{
	/* check if there is data in the ring buffer */
	if (pUsartBuffer->pTail != *pUsartBuffer->pHead) {
		if (pUsartBuffer->pTail > *pUsartBuffer->pHead)
			pUsartBuffer->Count = (pUsartBuffer->BuffSize -
				(pUsartBuffer->pTail - *pUsartBuffer->pHead));
		else
			pUsartBuffer->Count = (*pUsartBuffer->pHead - pUsartBuffer->pTail);
	}
	memory_barrier();
	TRACE_DEBUG("COUNT is %d \n\r",pUsartBuffer->Count);
}
Example #28
void ArchBoardSpecific::frameBufferInit()
{
  // frame buffer initialization from http://elinux.org/RPi_Framebuffer#Notes
  for (uint32 i = 0; i < 10 && (fbs.pointer == 0 || fbs.size == 0); ++i)
  {
    fbs.width = 640;
    fbs.height = 480;
    fbs.vwidth = fbs.width;
    fbs.vheight = fbs.height;
    fbs.pitch = 0;
    fbs.depth = 16;
    fbs.xoffset = 0;
    fbs.yoffset = 0;
    fbs.pointer = 0;
    fbs.size = 0;
    uint32* MAIL0_READ = (uint32*)0x9000b880;
    uint32* MAIL0_WRITE = (uint32*)0x9000b8A0;
    uint32* MAIL0_STATUS = (uint32*)0x9000b898;
    memory_barrier();
    while (*MAIL0_STATUS & (1 << 31));
    assert((((uint32)&fbs) & 0xF) == 0);
    *MAIL0_WRITE = VIRTUAL_TO_PHYSICAL_BOOT(((uint32)&fbs) & ~0xF) | (0x1);
    memory_barrier();
    uint32 read = 0;
    while ((read & 0xF) != 1)
    {
      while (*MAIL0_STATUS & (1 << 30));
      read = *MAIL0_READ;
    }
    memory_barrier();
    for (uint32 i = 0; i < 0x10000; ++i);
  }
  assert(fbs.pointer != 0);
  assert(fbs.width == fbs.vwidth);
  assert(fbs.height == fbs.vheight);
  assert(fbs.size == (fbs.width * fbs.height * fbs.depth / 8));
  framebuffer = (fbs.pointer & ~0xC0000000) + 0xC0000000;
}
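The poll/write/poll/read sequence in the middle of this function is the Raspberry Pi mailbox handshake. Factored into helpers it reads roughly like this; a sketch reusing the addresses and status bits above, with the pointers made volatile so the polling loops re-read the register:

static void mailboxWrite(uint32 channel, uint32 data28)
{
  volatile uint32* MAIL0_WRITE = (uint32*)0x9000b8A0;
  volatile uint32* MAIL0_STATUS = (uint32*)0x9000b898;
  memory_barrier();
  while (*MAIL0_STATUS & (1 << 31));  // bit 31: mailbox full
  *MAIL0_WRITE = data28 | channel;    // low 4 bits select the channel
  memory_barrier();
}

static uint32 mailboxRead(uint32 channel)
{
  volatile uint32* MAIL0_READ = (uint32*)0x9000b880;
  volatile uint32* MAIL0_STATUS = (uint32*)0x9000b898;
  uint32 read = 0;
  do
  {
    while (*MAIL0_STATUS & (1 << 30));  // bit 30: mailbox empty
    read = *MAIL0_READ;
  } while ((read & 0xF) != channel);    // skip other channels' messages
  memory_barrier();
  return read & ~0xF;
}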
Example #29
void writeData(uint8_t d)
{
	if (!g_HWSPI)
	{
		bbData(d);
		return;
	}
	memory_barrier();
	gpio_set(PIN_DC);
	gpio_clear(24);
	memory_barrier();
	pack.command = 0x100 | d;	// LoSSI 9-bit Parameter mode
	spi_start(0);				// Start SPI transfer to CS0 destination
	// Bypass spi_write function here
	//while (!HW.SPI0.CS.B.TXD); // ensure no reads
	//HW.SPI0.FIFO = pack.command;
	spi_write(d);
	spi_flush();
	memory_barrier();
	gpio_set(24);
	memory_barrier();
	//printf("Data:%.2X \n",pack.command);
}
Example #30
int entry(unsigned int cpu_id, struct per_cpu *cpu_data)
{
	static volatile bool activate;
	bool master = false;

	cpu_data->cpu_id = cpu_id;

	spin_lock(&init_lock);

	if (master_cpu_id == -1) {
		master = true;
		init_early(cpu_id);
	}

	if (!error)
		cpu_init(cpu_data);

	spin_unlock(&init_lock);

	while (!error && initialized_cpus < hypervisor_header.online_cpus)
		cpu_relax();

	if (!error && master) {
		init_late(cpu_data);
		if (!error) {
			/*
			 * Make sure everything was committed before we signal
			 * the other CPUs that they can continue.
			 */
			memory_barrier();
			activate = true;
		}
	} else {
		while (!error && !activate)
			cpu_relax();
	}

	if (error) {
		if (master)
			arch_shutdown();
		arch_cpu_restore(cpu_data);
		return error;
	}

	if (master)
		printk("Activating hypervisor\n");

	/* point of no return */
	arch_cpu_activate_vmm(cpu_data);
}