void plog_cache_writeback(unsigned long start, long size) { long linesz, linemsk; unsigned long end; #if defined(CONFIG_BMIPS5000) linesz = cpu_scache_line_size(); #else linesz = cpu_dcache_line_size(); #endif /* * Set up the loop counters so the address is cache line aligned (do * we really need to do that?) and the length is a multiple of the * cache line size. */ linemsk = linesz - 1; start &= ~linemsk; size += linesz; end = start + size; while (start < end) { #if defined(CONFIG_BMIPS5000) cache_op(HitWbSc, start); #else cache_op(Hit_Writeback_D, start); #endif start += linesz; } __sync(); }
// Allocate one fixed-size block from this pool.
//
// Fast paths are lock-free: first the per-pool free list is popped; failing
// that, a CAS bumps m_pNextAlloc within the current committed page.  Only
// when the current page is exhausted is m_CommitMutex taken to commit a new
// physical page from the shared arena.  Returns NULL when the arena
// (gm_pPhysicalBlock..gm_pPhysicalLimit) is exhausted.
void *CX360SmallBlockPool::Alloc()
{
	// Fast path #1: reuse a previously freed block.
	void *pResult = m_FreeList.Pop();
	if ( !pResult )
	{
		// Nothing committed yet AND the arena is full: give up early.
		if ( !m_pNextAlloc && gm_pPhysicalBlock >= gm_pPhysicalLimit )
		{
			return NULL;
		}

		int nBlockSize = m_nBlockSize;
		byte *pCurBlockEnd;
		byte *pNextAlloc;
		for (;;)
		{
			// Snapshot the bump-allocator state.  NOTE(review): the order
			// (end first, then next) pairs with the publication order in
			// the commit path below — do not reorder.
			pCurBlockEnd = m_pCurBlockEnd;
			pNextAlloc = m_pNextAlloc;
			if ( pNextAlloc + nBlockSize <= pCurBlockEnd )
			{
				// Fast path #2: CAS-bump m_pNextAlloc; retry on contention.
				if ( m_pNextAlloc.AssignIf( pNextAlloc, pNextAlloc + m_nBlockSize ) )
				{
					pResult = pNextAlloc;
					break;
				}
			}
			else
			{
				// Slow path: current page exhausted — commit a new one
				// under the mutex.
				AUTO_LOCK( m_CommitMutex );
				// Re-check under the lock: another thread may already have
				// committed a fresh page while we waited.
				if ( pCurBlockEnd == m_pCurBlockEnd )
				{
					for (;;)
					{
						if ( gm_pPhysicalBlock >= gm_pPhysicalLimit )
						{
							// Arena exhausted: park the pool in the
							// "empty" state so future calls fail fast.
							m_pCurBlockEnd = m_pNextAlloc = NULL;
							return NULL;
						}
						byte *pPhysicalBlock = gm_pPhysicalBlock;
						// Claim one page from the shared arena with an
						// interlocked CAS; retry if another pool raced us.
						if ( ThreadInterlockedAssignPointerIf( (void **)&gm_pPhysicalBlock, (void *)(pPhysicalBlock + PAGESIZE_X360_SBH), (void *)pPhysicalBlock ) )
						{
							// Record which pool owns this page so Free()
							// can route a raw pointer back here.
							int index = (size_t)((byte *)pPhysicalBlock - gm_pPhysicalBase) / PAGESIZE_X360_SBH;
							gm_AddressToPool[index] = this;
							m_pNextAlloc = pPhysicalBlock;
							m_CommittedSize += PAGESIZE_X360_SBH;
							// Publish m_pNextAlloc/bookkeeping BEFORE
							// m_pCurBlockEnd: lock-free readers test the
							// end pointer last, so it must become visible
							// last.  __sync() is the ordering barrier.
							__sync();
							m_pCurBlockEnd = pPhysicalBlock + PAGESIZE_X360_SBH;
							break;
						}
					}
				}
				// If the re-check failed, fall through and retry the
				// outer loop against the newly committed page.
			}
		}
	}
	return pResult;
}
/*
 * Return the caller's buffer filled with the data just read by the
 * PCU DMA engine.
 *
 * Buffers outside KSEG0 were DMAed into the bounce buffer
 * (pcu_dma_buf) and must be copied out; KSEG0 buffers were filled
 * in place, so only the barrier is needed.
 */
static void* pcu_dma_tasklet_read_get_data(uint32_t virtual_addr_buffer, uint32_t pcu_dma_len)
{
	void *dest = (void *)virtual_addr_buffer;

	/* Make the DMA engine's writes visible before touching the data. */
	__sync();

	if (KSEGX(virtual_addr_buffer) != KSEG0)
		memcpy(dest, pcu_dma_buf, pcu_dma_len);
	/* else: the data already landed directly in the caller's buffer */

	return dest;
}
/*
 * Repair CPU1's boot configuration left behind by the bootloader.
 *
 * The sequence below is order-sensitive: the relocation stub must be
 * fully visible in memory (__sync) before CPU1 is woken via the SW0
 * software interrupt.
 */
static void bcm63xx_fixup_cpu1(void)
{
	/*
	 * The bootloader has set up the CPU1 reset vector at
	 * 0xa000_0200.
	 * This conflicts with the special interrupt vector (IV).
	 * The bootloader has also set up CPU1 to respond to the wrong
	 * IPI interrupt.
	 * Here we will start up CPU1 in the background and ask it to
	 * reconfigure itself then go back to sleep.
	 */
	/* Copy the relocation stub over the bootloader's reset vector. */
	memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
	/* Ensure the stub is in memory before CPU1 executes it. */
	__sync();
	/* Raise software interrupt 0 to wake CPU1 into the stub. */
	set_c0_cause(C_SW0);
	/* Mark CPU1 as already booted so SMP bringup skips the cold start. */
	cpumask_set_cpu(1, &bmips_booted_mask);
}
/******************************************************************************* * Function: edu_tasklet_edu_tasklet * * Parameters: * data: IN: A disguised pointer to an eduTaskletData_t* variable * * Description: * This is the edu_tasklet that is scheduled from the Interrupt Service Routine * *******************************************************************************/ static void edu_tasklet_edu_tasklet(unsigned long data) { int i, thislen; int ecc = 0; int error = 0; int wrStatus = 0; int outp_needBBT = 0; struct brcmnand_chip *this = gEduTaskletData.pMtd->priv; uint32_t* p32 = NULL; if ((eduTaskletData_t*)data != &gEduTaskletData) { printk(KERN_DEBUG"%s: data is not gEduTaskletData!\n", __FUNCTION__); return; } if (gEduTaskletData.cmd == EDU_READ) { if ((gEduTaskletData.winslice <= this->eccsteps) && (gEduTaskletData.dataRead < gEduTaskletData.len)) { //Get the intr_status: gEduTaskletData.intr_status = ISR_getStatus(); ecc = this->process_read_isr_status(this, gEduTaskletData.intr_status); //save value: error = gEduTaskletData.error; //Fetch the data: this->EDU_read_get_data((uint32_t)&gEduTaskletData.pDataBuf[gEduTaskletData.dataRead]); gEduTaskletData.error = this->process_error(gEduTaskletData.pMtd, (uint8_t*)&gEduTaskletData.pDataBuf[gEduTaskletData.dataRead], &gEduTaskletData.pOobBuf[gEduTaskletData.oobRead], ecc); if ((gEduTaskletData.error == 0) && (error != 0)) {//If we found an error before this pass: gEduTaskletData.error = error; } else if (gEduTaskletData.error == -EECCCOR) {//If we found an error during this pass: if (error == -EECCUNCOR) //EECCUNCOR has higher priority than EECCCOR { gEduTaskletData.error = -EECCUNCOR; } } if ((gEduTaskletData.error == -ETIMEDOUT) && (gEduTaskletData.retries > 0)) { gEduTaskletData.retries--; printk(KERN_DEBUG"%s: Doing a read retry (status time out), retries= %d\n", __FUNCTION__, gEduTaskletData.retries); gEduTaskletData.error = 0; } else if (gEduTaskletData.error == -ETIMEDOUT) { printk(KERN_ERR"%s: too 
much retries, give up! \n", __FUNCTION__); //More than "gEduTaskletData.retries" timeout: gEduTaskletData.opComplete = 0; goto disable_and_wake_up; } else //all other cases { if (gEduTaskletData.pOobBuf) { p32 = (uint32_t*) &gEduTaskletData.pOobBuf[gEduTaskletData.oobRead]; __sync(); //PLATFORM_IOFLUSH_WAR(); for (i = 0; i < 4; i++) { p32[i] = be32_to_cpu (this->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_0 + i*4)); } #if CONFIG_MTD_BRCMNAND_VERSION > CONFIG_MTD_BRCMNAND_VERS_3_3 if(this->eccOobSize == NAND_CONTROLLER_27B_OOB_BOUNDARY) { for (i = 0;i < 4; i++) { p32[i+4] = be32_to_cpu (this->ctrl_read(BCHP_NAND_SPARE_AREA_READ_OFS_10 + (i*4))); } } #endif /* printk("%s: offset=%s, oobRead= %d, oob=", __FUNCTION__, __ll_sprintf(brcmNandMsg,gEduTaskletData.offset, this->xor_invert_val), gEduTaskletData.oobRead); print_oobbuf(&gEduTaskletData.pOobBuf[gEduTaskletData.oobRead], 16); */ gEduTaskletData.oobRead += this->eccOobSize; } //Increment values for next call to this function: thislen = min_t(int, gEduTaskletData.len - gEduTaskletData.dataRead, gEduTaskletData.pMtd->eccsize); gEduTaskletData.dataRead += thislen; gEduTaskletData.offset += thislen; gEduTaskletData.winslice++; gEduTaskletData.retries = 5; //Reset retries to 5 - mfi 04/26/2009 } //If dataRead is still lower than len: if (gEduTaskletData.dataRead < gEduTaskletData.len) { //Trig next read: edu_tasklet_trig_read_intr(); } else { goto op_complete; } } else { goto op_complete;
/*
 * Early boot-time platform initialization for BCM63xx SoCs.
 *
 * Detects the CPU, stops the watchdog, gates all non-essential
 * hardware clocks, brings up the GPIO chip and board code, and
 * finally prepares the second CPU for SMP where supported.
 */
void __init prom_init(void)
{
	u32 val;
	u32 clk_mask = 0;

	bcm63xx_cpu_init();

	/* Stop any watchdog the bootloader may have left running. */
	bcm_wdt_writel(WDT_STOP_1, WDT_CTL_REG);
	bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);

	/* Pick the per-SoC "safe to disable" clock mask (0 if unknown). */
	if (BCMCPU_IS_3368())
		clk_mask = CKCTL_3368_ALL_SAFE_EN;
	else if (BCMCPU_IS_6328())
		clk_mask = CKCTL_6328_ALL_SAFE_EN;
	else if (BCMCPU_IS_6338())
		clk_mask = CKCTL_6338_ALL_SAFE_EN;
	else if (BCMCPU_IS_6345())
		clk_mask = CKCTL_6345_ALL_SAFE_EN;
	else if (BCMCPU_IS_6348())
		clk_mask = CKCTL_6348_ALL_SAFE_EN;
	else if (BCMCPU_IS_6358())
		clk_mask = CKCTL_6358_ALL_SAFE_EN;
	else if (BCMCPU_IS_6362())
		clk_mask = CKCTL_6362_ALL_SAFE_EN;
	else if (BCMCPU_IS_6368())
		clk_mask = CKCTL_6368_ALL_SAFE_EN;

	/* Gate all hardware block clocks in the safe mask for now. */
	val = bcm_perf_readl(PERF_CKCTL_REG);
	val &= ~clk_mask;
	bcm_perf_writel(val, PERF_CKCTL_REG);

	/* Register the gpiochip. */
	bcm63xx_gpio_init();

	/* Low-level, board-specific setup. */
	board_prom_init();

	/* Set up SMP. */
	if (!register_bmips_smp_ops()) {
		/*
		 * BCM6328 might not have its second CPU enabled, while
		 * BCM3368 and BCM6358 need special handling for their
		 * shared TLB, so disable SMP for now.
		 */
		if (BCMCPU_IS_6328()) {
			val = bcm_readl(BCM_6328_OTP_BASE +
					OTP_USER_BITS_6328_REG(3));
			if (val & OTP_6328_REG3_TP1_DISABLED)
				bmips_smp_enabled = 0;
		} else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
			bmips_smp_enabled = 0;
		}

		if (!bmips_smp_enabled)
			return;

		/*
		 * The bootloader has set up the CPU1 reset vector at
		 * 0xa000_0200.
		 * This conflicts with the special interrupt vector (IV).
		 * The bootloader has also set up CPU1 to respond to the
		 * wrong IPI interrupt.
		 * Here we will start up CPU1 in the background and ask it
		 * to reconfigure itself then go back to sleep.
		 */
		memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
		__sync();
		set_c0_cause(C_SW0);
		cpumask_set_cpu(1, &bmips_booted_mask);

		/*
		 * FIXME: we really should have some sort of hazard
		 * barrier here
		 */
	}
}
/*
 * Read the crystal-input counter register from the system block.
 *
 * The register lives in uncached KSEG1 space; the __sync() orders the
 * volatile read before any subsequent memory traffic.
 */
unsigned long __getxtal(void)
{
	volatile unsigned long *xtal_reg =
		(volatile unsigned long *)KSEG1ADDR(REG_BASE_system_block +
						    SYS_xtal_in_cnt);
	unsigned long count = *xtal_reg;

	__sync();

	return count;
}