/*
** Function: _psp_set_spr
** Writes a value into an arbitrary PowerPC special purpose register (SPR).
** The mtspr instruction encodes the SPR number as an immediate field, so a
** 5-instruction stub is generated at run time in a stack buffer, made
** coherent between the data and instruction caches, and then called.
*/
void _psp_set_spr
   (
      /* [IN] the spr register to modify */
      uint_32 spr_register,

      /* [IN] the new value for the spr register */
      uint_32 spr_value
   )
{ /* Body */
   /* Buffer holding the generated machine code (5 PowerPC opcodes). */
   uint_32 ramcode[5];

   ramcode[0] = 0x7C0004AC;   /* sync  - complete all pending storage accesses */
   /* mtspr SPR,r3 : base opcode 0x7C0003A6 with RS = r3 (the first argument
   ** register, so spr_value arrives there) and the 10-bit SPR number split
   ** per the ISA: low 5 bits into instruction bits at shift 16, high 5 bits
   ** (mask 0x3E0) into bits at shift 11 (i.e. << 6 from their position). */
   ramcode[1] = 0x7C0003A6 | (3 << 21) | ((spr_register & 0x1F) << 16) |
      ((spr_register & 0x3E0) << 6);
   ramcode[2] = 0x7C0004AC;   /* sync  - ensure the mtspr has completed */
   ramcode[3] = 0x4C00012C;   /* isync - discard prefetched context */
   ramcode[4] = 0x4E800020;   /* blr   - return to caller */

   /* Push the generated code out of the data cache and invalidate any stale
   ** copy in the instruction cache.  Both the first and last words are
   ** handled because the 20-byte buffer may straddle two cache lines. */
   _dcache_flush_line(ramcode);
   _icache_invalidate_line(ramcode);
   _dcache_flush_line(&ramcode[4]);
   _icache_invalidate_line(&ramcode[4]);

   /* Call the generated stub with spr_value as its single argument. */
   ((void(_CODE_PTR_)(uint_32)) ramcode)(spr_value);
} /* Endbody */
/*
** Function: _psp_get_spr
** Reads and returns the value of an arbitrary PowerPC special purpose
** register (SPR).  The mfspr instruction encodes the SPR number as an
** immediate field, so a 5-instruction stub is generated at run time in a
** stack buffer, made coherent between the data and instruction caches,
** and then called; the read value is returned in r3.
*/
uint_32 _psp_get_spr
   (
      /* [IN] the spr register to read */
      uint_32 spr_register
   )
{ /* Body */
   /* Buffer holding the generated machine code (5 PowerPC opcodes). */
   uint_32 ramcode[5];

   ramcode[0] = 0x7C0004AC;   /* sync  - complete all pending storage accesses */
   /* mfspr r3,SPR : base opcode 0x7C0002A6 with RT = r3 (the return value
   ** register) and the 10-bit SPR number split per the ISA: low 5 bits into
   ** instruction bits at shift 16, high 5 bits (mask 0x3E0) into bits at
   ** shift 11 (i.e. << 6 from their position). */
   ramcode[1] = 0x7C0002A6 | (3 << 21) | ((spr_register & 0x1F) << 16) |
      ((spr_register & 0x3E0) << 6);
   ramcode[2] = 0x7C0004AC;   /* sync */
   ramcode[3] = 0x4C00012C;   /* isync - discard prefetched context */
   ramcode[4] = 0x4E800020;   /* blr   - return to caller */

   /* Push the generated code out of the data cache and invalidate any stale
   ** copy in the instruction cache.  Both the first and last words are
   ** handled because the 20-byte buffer may straddle two cache lines. */
   _dcache_flush_line(ramcode);
   _icache_invalidate_line(ramcode);
   _dcache_flush_line(&ramcode[4]);
   _icache_invalidate_line(&ramcode[4]);

   /* Call the generated stub and return the SPR value it leaves in r3. */
   return ( ((uint_32(_PTR_)(void)) ramcode)() );
} /* Endbody */
/*
** Function: _dcache_flush
** Writes back (flushes) the entire data cache to memory.  The whole body is
** selected at compile time by CPU family because every PowerPC derivative
** has a different cache architecture and flush mechanism.  No parameters,
** no return value.
*/
void _dcache_flush
   (
      void
   )
{ /* Body */
#if defined(PSP_PPC403) || defined(PSP_PPC401) || defined(PSP_IOP480)
   /* 40x family: walk both cache ways, read each line's tag with dcread and
   ** issue dcbf for every valid line.  CR1[gt] tracks which way is being
   ** processed.  Hardcoded branches are used, insert lines with care. */
   asm(" li r5, 0");                          /* line 0 */
   asm(" mtcrf 0xFF, r5");                    /* Set whole CR to zero */
   asm(" mfcdbcr r3");                        /* CDBCR -> R3 */
   asm(" clrrwi r4, r3, 1");                  /* set Way A */
   asm("_dcache_flush_next_way:");
   asm(" ori r4, r4, 0x10");                  /* set CIS to tag */
   asm(" mtcdbcr r4");                        /* R4 -> CDBCR */
   asm("_dcache_flush_next_line:");
   asm(" dcread r4, 0, r5");                  /* read cache TAG info */
   asm(" andi. r6, r4,0x10");                 /* check the valid bit */
   asm(" beq CR0, _dcache_flush_invalid");    /* skip line if invalid */
   asm(" clrrwi r4, r4, 9");                  /* form EA that is flushed */
   asm(" dcbf r4, r5");                       /* flush line */
   asm("_dcache_flush_invalid:");
   asm(" addi r5, r5, 16");                   /* set r5 for next line */
   asm(" andi. r6, r5, 0xFE00");              /* last flushed ? */
   asm(" beq CR0, _dcache_flush_next_line");  /* next line */
   asm(" bgt CR1, _dcache_flush_end_flush");  /* end ? (second way done) */
   asm(" mcrf CR1, CR0");                     /* set CR1[gt] */
   asm(" li r5, 0");                          /* line 0 */
   asm(" ori r4, r3, 1");                     /* set Way B */
   asm(" b _dcache_flush_next_way");          /* next way */
   asm("_dcache_flush_end_flush:");
   asm(" mtcdbcr r3");                        /* restore CDBCR */
   _PSP_SYNC();
#elif defined(PSP_MPC555) || defined(PSP_MPC566)
   /* Has no cache - nothing to do */
#elif defined(PSP_PPC603) || defined(PSP_MPC8240) || defined(PSP_G2_CORE) || \
      defined(PSP_E300_CORE) || defined(PSP_E500_CORE)
   volatile uint_32 _PTR_ tmp_ptr;
   volatile uint_32       tmp;
   uint_32                i;

   /*
   ** We just stored our link register %LR on the top of the stack.
   ** When debugging with the cache enabled we want to force it to
   ** memory now. Problems were found on the MPC8266 where the LR
   ** value restored was not the value written.
   */
   _PSP_SYNC();
   _PSP_ISYNC();
   asm(" dcbf r0, r1");   /* Flush top of stack + 0x00 */
   asm(" li r0, 32");
   asm(" dcbf r1, r0");   /* Flush top of stack + 0x20 */
   asm(" li r0, 64");
   asm(" dcbf r1, r0");   /* Flush top of stack + 0x40 */
   asm(" li r0, 96");
   asm(" dcbf r1, r0");   /* Flush top of stack + 0x60 */
   _PSP_SYNC();

   /* In order to "Flush" the entire data cache, fake reads
   ** need to be executed for a select number of memory locations,
   ** then those locations need to be "flushed".
   ** There are 128 sets of 4 blocks each.
   ** The sets are addressed by bits A20 to A26.
   */
   tmp_ptr = 0;
   i = PSP_DCACHE_SIZE / PSP_CACHE_LINE_SIZE + 1;
   while (--i) {
      tmp = *tmp_ptr;   /* fake read: pull the line into the cache */
      tmp_ptr = (pointer)((uchar_ptr)tmp_ptr + PSP_CACHE_LINE_SIZE);
   } /* Endwhile */
   tmp_ptr = 0;
   i = PSP_DCACHE_SIZE / PSP_CACHE_LINE_SIZE;
   while (--i) {
      _PSP_FLUSH_LINE((pointer)tmp_ptr);
      tmp_ptr = (pointer)((uchar_ptr)tmp_ptr + PSP_CACHE_LINE_SIZE);
   } /* Endwhile */
   _dcache_flush_line((pointer)tmp_ptr);   /* flush the final line */
#elif defined(PSP_E200_CORE)
   register uint_32       i;
   uint_32                j;
   volatile uint_32 _PTR_ tmp_ptr;

   /* In order to "Flush" the entire data cache, we need to flush
   ** either by the way or by the set (line).
   */
   tmp_ptr = 0;
   asm ("msync");
   _PSP_ISYNC();
   _PSP_SPR_GET(i,E200CORE_L1FINV0);   /* L1FINV0 */
   for (j = 0; j < PSP_DCACHE_NUM_LINES; j++) {
      i = j << 5;        /* Specify the cache set to be selected */
      i |= 0x00000001;   /* Flush and valid without invalidation */
      _PSP_FLUSH_LINE((pointer)tmp_ptr);
      tmp_ptr = (pointer)((uchar_ptr)tmp_ptr + PSP_CACHE_LINE_SIZE);
      _PSP_SPR_SET(E200CORE_L1FINV0,i);
   } /* Endfor */
   _dcache_flush_line((pointer)tmp_ptr);   /* flush the final line */
#elif defined(PSP_PPC740) || defined(PSP_PPC750)
   volatile uint_32 _PTR_ tmp_ptr;
   volatile uint_32       tmp;
   uint_32                i;
   uint_32                l2_cache;
   uint_32                init;

   /* In order to "Flush" the entire data cache, fake reads
   ** need to be executed for a select number of memory locations,
   ** then those locations need to be "flushed".
   ** There are 128 sets of 8 blocks each.
   ** The sets are addressed by bits A20 to A26.
   ** However if the L2 cache is enabled, then it can be of
   ** size 256K, 512K or 1024K (per the L2SIZ checks below).
   */
   tmp_ptr = 0;
   _PSP_SPR_GET(l2_cache, PPC750_L2CR);
   /* Pick a loop count (line count + 1) large enough to cover L1 plus any
   ** enabled L2 cache: 8193 -> 256K, 16385 -> 512K, 32769 -> 1024K,
   ** 1025 -> L1 only (32 bytes per line). */
   if (l2_cache & PPC750_L2_CACHE_L2E) {
      if ((l2_cache & PPC750_L2_CACHE_L2SIZ_MASK) == PPC750_L2_CACHE_L2SIZ_256K) {
         init = 8193;
      } else if ((l2_cache & PPC750_L2_CACHE_L2SIZ_MASK) == PPC750_L2_CACHE_L2SIZ_512K) {
         init = 16385;
      } else {
         init = 32769;
      }/* Endif */
   } else {
      init = 1025;
   }/* Endif */
   i = init;
   while (--i) {
      tmp = *tmp_ptr;   /* fake read: pull the line into the cache */
      tmp_ptr = (pointer)((uchar_ptr)tmp_ptr + 0x20);
   } /* Endwhile */
   tmp_ptr = 0;
   i = init - 1;
   while (--i) {
      _PSP_FLUSH_LINE((pointer)tmp_ptr);
      tmp_ptr = (pointer)((uchar_ptr)tmp_ptr + 0x20);
   } /* Endwhile */
   _dcache_flush_line((pointer)tmp_ptr);   /* flush the final line */
#elif defined(PSP_PPC7400)
   /* Flush L1 and L2 cache */
   _ppc7400_flushL1();
   _ppc7400_flushL2();
#elif defined(PSP_MPC821) || defined(PSP_MPC823) || defined(PSP_MPC850) || \
      defined(PSP_MPC855) || defined(PSP_MPC860) || defined(PSP_MPC866) || \
      defined(PSP_MPC875)
   /* 8xx family: drive the data cache control SPRs directly (SPR 569 is the
   ** cache address register, SPR 568 the command/status register) to flush
   ** all 256 lines of both ways.  Line size is 16 bytes. */
   uint_32 i,way0 = 0x0000, way1 = 0x1000, cmd;
   _PSP_SPR_GET(cmd,568);
   cmd |= 0x0E000000;   /* flush-line command bits */
   for (i = 0; i < 256; i++) {
      _PSP_SPR_SET(569,way0);   /* select line in way 0 */
      _PSP_SPR_SET(568,cmd);    /* issue flush command */
      _PSP_SPR_SET(569,way1);   /* select line in way 1 */
      _PSP_SPR_SET(568,cmd);    /* issue flush command */
      way0 += 16;
      way1 += 16;
   } /* Endfor */
   _PSP_SYNC();
#elif defined(PSP_PPC405)
   register uchar_ptr p;
   uint_32 i;

   // I'm assuming KERNEL_DATA is a large enough block of memory
   // and that it can be aligned to the beginning of a 4 * dcache
   // sized memory block.
   _GET_KERNEL_DATA(p);
   p = (uchar_ptr)(((uint_32)p) & ~((PSP_DCACHE_SIZE*4)-1));
   _int_disable();
   /* Read 4x the cache size to force every dirty line to be evicted or
   ** flushed, then flush/invalidate each line explicitly. */
   for (i = 0; i < PSP_DCACHE_NUM_LINES*4; i++) {
      register uint_32 zero = 0;
      (void) *(volatile _mqx_int _PTR_)p;   // Load from the address
      __dcbf((pointer)p, zero);             // Data cache block flush
      asm { iccci p, zero }                 // Invalidate instruction cache congruence class
      p += PSP_CACHE_LINE_SIZE;
   } /* Endfor */
   _PSP_SYNC();
   _PSP_ISYNC();
   _int_enable();
#elif defined(PSP_PPC440)
   uchar_ptr p;
   _mqx_int i, old_msr = _MFMSR();

   // I'm assuming KERNEL_DATA is a large enough block of memory
   // and that it can be aligned to the beginning of a dcache
   // sized memory block.
   _GET_KERNEL_DATA(p);
   p = (uchar_ptr)(((uint_32)p) & ~(PSP_DCACHE_SIZE-1));

   // Must be atomic - mask external, critical and machine-check interrupts
   _MTMSR(old_msr & ~(PPC440_MSR_EE|PPC440_MSR_CE|PPC440_MSR_ME));

   // Finish all data writes
   _PSP_SYNC();

   // There are 1024 lines in the dcache.
   // Twice as much data must be read in.
   for (i = 0; i < PPC440_DCACHE_NUM_LINES2X; i++) {
      (void) *(volatile _mqx_int _PTR_)p;   // Load from the address
      _DCBF((pointer)p,0);                  // Data cache block flush
      _ICBI((pointer)p,0);                  // Instruction cache block invalidate
      p += PPC440_DCACHE_LINE_SIZE;
   } /* Endfor */
   _PSP_SYNC();
   _MTMSR(old_msr);   /* restore the saved interrupt state */

/* catch ERROR : undefined cpu type " */
#else
#error "CPU NOT DEFINED"
#endif
} /* Endbody */
/*
** Function: _dspi_dma_tx_rx
** Performs a full-duplex DSPI transfer via DMA, splitting it into a
** cache-line-aligned "zero copy" middle section (DMA directly from/to the
** application buffers) and small unaligned head/tail sections staged through
** the driver's intermediate TX_BUF/RX_BUF so that cache maintenance never
** touches bytes outside the transfer.
**
** Returns the number of bytes transferred, or IO_ERROR on a short transfer.
**
** Fix: the head stage previously flushed 'len' bytes of TX_BUF even though
** only 'head_len' bytes were staged there, flushing past the end of the
** intermediate buffer on long transfers; it now flushes 'head_len' bytes,
** consistent with the tail stage.
*/
static _mqx_int _dspi_dma_tx_rx
    (
        /* [IN] Device specific context structure */
        void     *io_info_ptr,

        /* [IN] Data to transmit */
        uint8_t  *txbuf,

        /* [OUT] Received data */
        uint8_t  *rxbuf,

        /* [IN] Length of transfer in bytes */
        uint32_t  len
    )
{
    DSPI_DMA_INFO_STRUCT_PTR dspi_info_ptr = (DSPI_DMA_INFO_STRUCT_PTR)io_info_ptr;

    int      regw;           /* width of a single frame in bytes (1 or 2) */
    uint32_t head_len;       /* leading bytes staged through intermediate buffers */
    uint32_t tail_len;       /* trailing bytes staged through intermediate buffers */
    uint32_t zero_copy_len;  /* aligned middle portion transferred in place */
    _mqx_int result;

    /* Check whether there is at least something to transfer */
    if (0 == len)
    {
        return 0;
    }

    /* Check frame width */
    if (DSPI_CTAR_FMSZ_GET(dspi_info_ptr->DSPI_PTR->CTAR[0]) > 7)
    {
        len = len & (~1UL); /* Round down to whole frames */
        regw = 2;
    }
    else
    {
        regw = 1;
    }

    /* If there is no data to transmit, prepare dummy pattern in proper byte order */
    if (NULL == txbuf)
    {
        if (regw == 1)
        {
            dspi_info_ptr->TX_BUF[0] = dspi_info_ptr->DUMMY_PATTERN & 0xFF;
        }
        else
        {
            dspi_info_ptr->TX_BUF[0] = (dspi_info_ptr->DUMMY_PATTERN>>8) & 0xFF;
            dspi_info_ptr->TX_BUF[1] = dspi_info_ptr->DUMMY_PATTERN & 0xFF;
        }
        _dcache_flush_line(dspi_info_ptr->TX_BUF);
    }

    if (!(len % PSP_CACHE_LINE_SIZE) && !((uint32_t)txbuf % PSP_CACHE_LINE_SIZE) && !((uint32_t)rxbuf % PSP_CACHE_LINE_SIZE))
    {
        /* Everything is perfectly aligned, perform single zero copy operation without any head or tail */
        head_len = 0;
        tail_len = 0;
    }
    else if (len <= 2*PSP_CACHE_LINE_SIZE)
    {
        /* The whole transfer fits into intermediate buffers, perform single transfer (head only) */
        head_len = len;
        tail_len = 0;
    }
    else
    {
        /* Split the transfer into head, zero copy portion and tail */
        uint32_t cache_line_offset;
        uint32_t tx_head_len;
        uint32_t tx_tail_len;
        uint32_t rx_head_len;
        uint32_t rx_tail_len;

        if (NULL != rxbuf)
        {
            cache_line_offset = (uint32_t)rxbuf % PSP_CACHE_LINE_SIZE;
            rx_head_len = cache_line_offset ? PSP_CACHE_LINE_SIZE - cache_line_offset : 0;
            rx_tail_len = (((uint32_t)rxbuf + len) % PSP_CACHE_LINE_SIZE);
        }
        else
        {
            rx_head_len = 0;
            rx_tail_len = 0;
        }

        if (NULL != txbuf)
        {
            cache_line_offset = (uint32_t)txbuf % PSP_CACHE_LINE_SIZE;
            tx_head_len = cache_line_offset ? PSP_CACHE_LINE_SIZE - cache_line_offset : 0;
            tx_tail_len = (((uint32_t)txbuf + len) % PSP_CACHE_LINE_SIZE);
        }
        else
        {
            tx_head_len = 0;
            tx_tail_len = 0;
        }

        /* Head/tail must cover the misalignment of both buffers */
        head_len = (rx_head_len > tx_head_len) ? rx_head_len : tx_head_len;
        tail_len = (rx_tail_len > tx_tail_len) ? rx_tail_len : tx_tail_len;

        if (regw > 1)
        {
            /* Keep head and tail a whole number of 16-bit frames */
            head_len += (head_len & 1);
            tail_len += (tail_len & 1);
        }
    }

    zero_copy_len = len - head_len - tail_len;

    /* Head processed through intermediate buffers */
    if (head_len)
    {
        if (txbuf)
        {
            _mem_copy(txbuf, dspi_info_ptr->TX_BUF, head_len);
            /* Flush only the head_len bytes actually staged in TX_BUF
            ** (was 'len', which overran the intermediate buffer). */
            _dcache_flush_mlines(dspi_info_ptr->TX_BUF, head_len);
            result = _dspi_dma_transfer(dspi_info_ptr, dspi_info_ptr->TX_BUF, dspi_info_ptr->RX_BUF, head_len, regw);
        }
        else
        {
            result = _dspi_dma_transfer(dspi_info_ptr, NULL, dspi_info_ptr->RX_BUF, head_len, regw);
        }
        if (result != head_len)
        {
            return IO_ERROR;
        }
        /*
         * Copy to application buffer intentionally omitted.
         * It is done later after invalidation of zero copy area as it may overlap into it.
         */
    }

    /* Zero copy area */
    if (zero_copy_len)
    {
        uint8_t *txbuf_real;
        uint8_t *rxbuf_real;

        txbuf_real = txbuf ? txbuf + head_len : NULL;
        rxbuf_real = rxbuf ? rxbuf + head_len : NULL;

        if (txbuf_real)
        {
            _dcache_flush_mlines(txbuf_real, zero_copy_len);
        }
        result = _dspi_dma_transfer(dspi_info_ptr, txbuf_real, rxbuf_real, zero_copy_len, regw);
        if (rxbuf_real)
        {
            _dcache_invalidate_mlines(rxbuf_real, zero_copy_len);
        }
        if (result != zero_copy_len)
        {
            return IO_ERROR;
        }
    }

    /* Copy head data into application buffer if desired */
    if (head_len && rxbuf)
    {
        _dcache_invalidate_mlines(dspi_info_ptr->RX_BUF, head_len);
        _mem_copy(dspi_info_ptr->RX_BUF, rxbuf, head_len);
    }

    /* Tail processed through intermediate buffers */
    if (tail_len)
    {
        if (txbuf)
        {
            _mem_copy(txbuf + len - tail_len, dspi_info_ptr->TX_BUF, tail_len);
            _dcache_flush_mlines(dspi_info_ptr->TX_BUF, tail_len);
            result = _dspi_dma_transfer(dspi_info_ptr, dspi_info_ptr->TX_BUF, dspi_info_ptr->RX_BUF, tail_len, regw);
        }
        else
        {
            result = _dspi_dma_transfer(dspi_info_ptr, NULL, dspi_info_ptr->RX_BUF, tail_len, regw);
        }
        if (result != tail_len)
        {
            return IO_ERROR;
        }
        if (rxbuf)
        {
            _dcache_invalidate_mlines(dspi_info_ptr->RX_BUF, tail_len);
            _mem_copy(dspi_info_ptr->RX_BUF, rxbuf + len - tail_len, tail_len);
        }
    }

    return len;
}