/*
 * mt_devapc_set_permission: set module permission on device apc.
 * @module: index of the module to set permission for
 * @domain_num: domain index number
 * @permission: permission attribute to apply
 * Returns 0 on success, -1 for an out-of-range module, -2 for an
 * unknown domain.
 *
 * NOTE(review): base is an unsigned int *, so "+ (module / 16) * 4"
 * advances by 16 BYTES per register group; the sibling set_module_apc()
 * adds the same value as a byte offset via uintptr_t. One of the two is
 * presumably wrong — confirm against the DEVAPC register map.
 */
int mt_devapc_set_permission(unsigned int module, E_MASK_DOM domain_num, APC_ATTR permission)
{
    volatile unsigned int* base;
    /* each APC register packs 16 modules, 2 permission bits per module */
    unsigned int clr_bit = 0x3 << ((module % 16) * 2);
    unsigned int set_bit = permission << ((module % 16) * 2);

    if(module >= DEVAPC_DEVICE_NUMBER)
    {
        pr_warn("[DEVAPC] ERROR, device number %d exceeds the max number!\n", module);
        return -1;
    }

    /* select the APC register bank for the requested domain */
    if (DEVAPC_DOMAIN_AP == domain_num)
    {
        base = DEVAPC0_D0_APC_0 + (module / 16) * 4;
    }
    else if (DEVAPC_DOMAIN_MD == domain_num)
    {
        base = DEVAPC0_D1_APC_0 + (module / 16) * 4;
    }
    else if (DEVAPC_DOMAIN_CONN == domain_num)
    {
        base = DEVAPC0_D2_APC_0 + (module / 16) * 4;
    }
    else if (DEVAPC_DOMAIN_MM == domain_num)
    {
        base = DEVAPC0_D3_APC_0 + (module / 16) * 4;
    }
    else
    {
        pr_warn("[DEVAPC] ERROR, domain number %d exceeds the max number!\n", domain_num);
        return -2;
    }

    /* read-modify-write: clear the 2-bit field, then set the new value */
    mt_reg_sync_writel(readl(base) & ~clr_bit, base);
    mt_reg_sync_writel(readl(base) | set_bit, base);

    return 0;
}
int MET_BM_SetIDSelect(const unsigned int counter_num, const unsigned int id, const unsigned int enable) { unsigned int value, addr, shift_num; if ((counter_num < 1 || counter_num > BM_COUNTER_MAX) || (id > 0x1FFF) || (enable > 1)) { return BM_ERR_WRONG_REQ; } addr = EMI_BMID0 + (counter_num - 1) / 2 * 4; /* field's offset in the target EMI_BMIDx register */ shift_num = ((counter_num - 1) % 2) * 16; /* clear SELx_ID field */ value = readl(IOMEM(ADDR_EMI+addr)) & ~(0x1FFF << shift_num); /* set SELx_ID field */ value |= id << shift_num; mt_reg_sync_writel(value, ADDR_EMI+addr); value = (readl(IOMEM(ADDR_EMI+EMI_BMEN2)) & ~(1 << (counter_num - 1))) | (enable << (counter_num - 1)); mt_reg_sync_writel(value, ADDR_EMI+EMI_BMEN2); return BM_REQ_OK; }
/*
 * set_module_apc: program a module's 2-bit permission field in the
 * device-APC register bank of the given domain.
 * @module: index of the module to configure
 * @domain_num: which domain's register bank to write
 * @permission_control: permission attribute to apply
 * Invalid arguments are logged and ignored.
 */
static void set_module_apc(unsigned int module, E_MASK_DOM domain_num , APC_ATTR permission_control)
{
    volatile unsigned int *apc_reg = 0;
    /* each APC register packs 16 modules, 2 permission bits apiece */
    const unsigned int field_shift = (module % 16) * 2;
    const unsigned int clear_mask = 0x3 << field_shift;
    const unsigned int set_mask = permission_control << field_shift;
    unsigned int byte_off;

    if (module >= DEVAPC_DEVICE_NUMBER) {
        pr_warn("set_module_apc : device number %d exceeds the max number!\n", module);
        return;
    }

    byte_off = (module / 16) * 4;
    switch (domain_num) {
    case DEVAPC_DOMAIN_AP:
        apc_reg = (unsigned int*)((uintptr_t)DEVAPC0_D0_APC_0 + byte_off);
        break;
    case DEVAPC_DOMAIN_MD:
        apc_reg = (unsigned int*)((uintptr_t)DEVAPC0_D1_APC_0 + byte_off);
        break;
    case DEVAPC_DOMAIN_CONN:
        apc_reg = (unsigned int*)((uintptr_t)DEVAPC0_D2_APC_0 + byte_off);
        break;
    case DEVAPC_DOMAIN_MM:
        apc_reg = (unsigned int*)((uintptr_t)DEVAPC0_D3_APC_0 + byte_off);
        break;
    default:
        pr_warn("set_module_apc : domain number %d exceeds the max number!\n", domain_num);
        return;
    }

    /* read-modify-write: clear the field first, then set the new value */
    mt_reg_sync_writel(readl(apc_reg) & ~clear_mask, apc_reg);
    mt_reg_sync_writel(readl(apc_reg) | set_mask, apc_reg);
}
/*
 * init_devpac: basic device-APC initialization.
 * Clears any pending violation debug status, then clears bit 2 of both
 * the always-on and power-down APC control registers.
 * (The "devpac" spelling is kept — it is the function's interface.)
 */
static void init_devpac(void)
{
    /* clear violation — presumably write-1-to-clear; confirm vs. spec */
    mt_reg_sync_writel(0x80000000, DEVAPC0_VIO_DBG0);

    /* clear bit 2 of both APC control registers, preserving other bits */
    mt_reg_sync_writel(readl(DEVAPC0_APC_CON) & (0xFFFFFFFF ^ (1<<2)), DEVAPC0_APC_CON);
    mt_reg_sync_writel(readl(DEVAPC0_PD_APC_CON) & (0xFFFFFFFF ^ (1<<2)), DEVAPC0_PD_APC_CON);
}
/*
 * __gpt_set_cmp: program a GPT timer's compare value.
 * @dev: the GPT device
 * @cmpl: low 32 bits of the compare value
 * @cmph: high 32 bits (only written on 64-bit capable timers)
 *
 * Writes the hardware compare register(s) and mirrors the programmed
 * values in dev->cmp[] for software readback.
 */
static void __gpt_set_cmp(struct gpt_device *dev, unsigned int cmpl, unsigned int cmph)
{
    mt_reg_sync_writel(cmpl, dev->base_addr + GPT_CMP);
    dev->cmp[0] = cmpl;

    if (dev->features & GPT_FEAT_64_BIT) {
        mt_reg_sync_writel(cmph, dev->base_addr + GPT_CMPH);
        /* BUGFIX: previously cached cmpl here, so the software mirror
         * disagreed with the value written to GPT_CMPH. */
        dev->cmp[1] = cmph;
    }
}
/*
 * __gpt_reset: put a GPT timer back into a known idle state.
 * Stops the timer, disables and acks its interrupt, resets the clock
 * selection, writes 0x2 to CON, and zeroes the compare register(s).
 * The statement order matters: the timer is stopped before any other
 * configuration is touched.
 */
static void __gpt_reset(struct gpt_device *dev)
{
    /* stop the timer first */
    mt_reg_sync_writel(0x0, dev->base_addr + GPT_CON);
    __gpt_disable_irq(dev);
    __gpt_ack_irq(dev);
    /* reset clock source/divider selection */
    mt_reg_sync_writel(0x0, dev->base_addr + GPT_CLK);
    /* bit 1 of CON — presumably the counter-clear bit; confirm vs. spec */
    mt_reg_sync_writel(0x2, dev->base_addr + GPT_CON);
    mt_reg_sync_writel(0x0, dev->base_addr + GPT_CMP);
    if (dev->features & GPT_FEAT_64_BIT)
        mt_reg_sync_writel(0, dev->base_addr + GPT_CMPH);
}
/*
 * mt_devapc_emi_initial: enable the EMI violation source in device APC.
 * Remaps the DEVAPC registers, clears bit 2 of both APC control
 * registers, acks any pending EMI violation, and unmasks the EMI
 * violation bit. Always returns 0.
 */
int mt_devapc_emi_initial(void)
{
    pr_warn("EMI_DAPC Init start \n");

    devapc_ioremap();

    /* clear bit 2 of both APC control registers, preserving other bits */
    mt_reg_sync_writel(readl(IOMEM(DEVAPC0_APC_CON)) & (0xFFFFFFFF ^ (1 << 2)), DEVAPC0_APC_CON);
    mt_reg_sync_writel(readl(IOMEM(DEVAPC0_PD_APC_CON)) & (0xFFFFFFFF ^ (1 << 2)), DEVAPC0_PD_APC_CON);

    /* ack any pending EMI violation (presumably write-1-to-clear),
     * then clear the EMI bit in the violation mask to unmask it */
    mt_reg_sync_writel(ABORT_EMI, DEVAPC0_D0_VIO_STA_3);
    mt_reg_sync_writel(readl(IOMEM(DEVAPC0_D0_VIO_MASK_3)) & (0xFFFFFFFF ^ (ABORT_EMI)), DEVAPC0_D0_VIO_MASK_3);

    pr_warn("EMI_DAPC Init done \n");

    return 0;
}
/*
 * mt_start_gdma: start the DMA transfer for the specified GDMA channel
 * @channel: GDMA channel to start
 * Return 0 for success; return negative error code for failure.
 *
 * NOTE(review): DMA_INT_FLAG and DMA_START do not take @channel here —
 * presumably the macros resolve to the right per-channel registers via
 * external state, or the engine is single-channel; confirm against the
 * register definitions.
 */
int mt_start_gdma(int channel)
{
    if ((channel < GDMA_START) || (channel >= (GDMA_START + NR_GDMA_CHANNEL))) {
        return -DMA_ERR_INVALID_CH;
    } else if (dma_ctrl[channel].in_use == 0) {
        /* channel must be claimed before it can be started */
        return -DMA_ERR_CH_FREE;
    }

    /* clear any stale interrupt flag before kicking off the transfer */
    mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG);
    mt_reg_sync_writel(DMA_START_BIT, DMA_START);

    return 0;
}
/* * mtk_mem_bw_ctrl: set EMI bandwidth limiter for memory bandwidth control * @sce: concurrency scenario ID * @op: either ENABLE_CON_SCE or DISABLE_CON_SCE * Return 0 for success; return negative values for failure. */ int mtk_mem_bw_ctrl(int sce, int op) { int i, highest; if (sce >= NR_CON_SCE) { return -1; } if (op != ENABLE_CON_SCE && op != DISABLE_CON_SCE) { return -1; } if (in_interrupt()) { return -1; } down(&emi_bwl_sem); if (op == ENABLE_CON_SCE) { ctrl_tbl[sce].ref_cnt++; } else if (op == DISABLE_CON_SCE) { if (ctrl_tbl[sce].ref_cnt != 0) { ctrl_tbl[sce].ref_cnt--; } } /* find the scenario with the highest priority */ highest = -1; for (i = 0; i < NR_CON_SCE; i++) { if (ctrl_tbl[i].ref_cnt != 0) { highest = i; break; } } if (highest == -1) { highest = CON_SCE_NORMAL; } /* set new EMI bandwidth limiter value */ if (highest != cur_con_sce) { mt_reg_sync_writel(emi_arba_lpddr3_1600_val[highest], EMI_ARBA); mt_reg_sync_writel(emi_arbb_lpddr3_1600_val[highest], EMI_ARBB); mt_reg_sync_writel(emi_arbc_lpddr3_1600_val[highest], EMI_ARBC); mt_reg_sync_writel(emi_arbd_lpddr3_1600_val[highest], EMI_ARBD); mt_reg_sync_writel(emi_arbe_lpddr3_1600_val[highest], EMI_ARBE); mt_reg_sync_writel(emi_arbf_lpddr3_1600_val[highest], EMI_ARBF); mt_reg_sync_writel(emi_arbg_lpddr3_1600_val[highest], EMI_ARBG); mt_reg_sync_writel(emi_arbh_lpddr3_1600_val[highest], EMI_ARBH); cur_con_sce = highest; } up(&emi_bwl_sem); return 0; }
int MET_BM_SetMaster(const unsigned int counter_num, const unsigned int master) { unsigned int value, addr; const unsigned int iMask = 0x7F; if (counter_num < 1 || counter_num > BM_COUNTER_MAX) { return BM_ERR_WRONG_REQ; } if (counter_num == 1) { addr = EMI_BMEN; value = (readl(IOMEM(ADDR_EMI+addr)) & ~(iMask << 16)) | ((master & iMask) << 16); } else { addr = (counter_num <= 3) ? EMI_MSEL : (EMI_MSEL2 + (counter_num / 2 - 2) * 8); /* clear master and transaction type fields */ value = readl(IOMEM(ADDR_EMI+addr)) & ~(iMask << ((counter_num % 2) * 16)); /* set master and transaction type fields */ value |= ((master & iMask) << ((counter_num % 2) * 16)); } mt_reg_sync_writel(value, ADDR_EMI+addr); return BM_REQ_OK; }
int MET_BM_SetMonitorCounter(const unsigned int counter_num, const unsigned int master, const unsigned int trans_type) { unsigned int value, addr; const unsigned int iMask = (MASK_TRANS_TYPE << 8) | MASK_MASTER; if (counter_num < 1 || counter_num > BM_COUNTER_MAX) { return BM_ERR_WRONG_REQ; } if (counter_num == 1) { addr = EMI_BMEN; value = (readl(IOMEM(ADDR_EMI+addr)) & ~(iMask << 16)) | ((trans_type & MASK_TRANS_TYPE) << 24) | ((master & MASK_MASTER) << 16); } else { addr = (counter_num <= 3) ? EMI_MSEL : (EMI_MSEL2 + (counter_num / 2 - 2) * 8); /* clear master and transaction type fields */ value = readl(IOMEM(ADDR_EMI+addr)) & ~(iMask << ((counter_num % 2) * 16)); /* set master and transaction type fields */ value |= (((trans_type & MASK_TRANS_TYPE) << 8) | (master & MASK_MASTER)) << ((counter_num % 2) * 16); } mt_reg_sync_writel(value, ADDR_EMI+addr); return BM_REQ_OK; }
/*
 * Afe_Set_Reg: read-modify-write an AFE (audio front end) register.
 * @offset: register offset from the AFE base
 * @value: new bit values
 * @mask: which bits of the register to update
 *
 * NOTE(review): this variant performs the read-modify-write without any
 * locking, so concurrent callers can lose each other's updates; compare
 * the spinlocked Afe_Set_Reg variant that guards the same sequence with
 * afe_set_reg_lock.
 */
void Afe_Set_Reg(uint32 offset, uint32 value, uint32 mask)
{
    extern void *AFE_BASE_ADDRESS; /* io-remapped AFE base (AUDIO_MEM_IOREMAP builds) */
    volatile long address;
    volatile uint32 *AFE_Register;
    volatile uint32 val_tmp;

    /* reject offsets outside the valid AFE register window */
    if (CheckOffset(offset) == false) {
        return;
    }

#ifdef AUDIO_MEM_IOREMAP
    PRINTK_AUDDRV("Afe_Set_Reg AUDIO_MEM_IOREMAP AFE_BASE_ADDRESS = %p\n",AFE_BASE_ADDRESS);
    address = (long)((char *)AFE_BASE_ADDRESS + offset);
#else
    printk("%s check \n", __func__);
    address = (long)(AFE_BASE + offset);
#endif

    AFE_Register = (volatile uint32 *)address;
    PRINTK_AFE_REG("Afe_Set_Reg offset=%x, value=%x, mask=%x \n",offset,value,mask);

    /* keep unmasked bits, replace masked bits with the new value */
    val_tmp = Afe_Get_Reg(offset);
    val_tmp &= (~mask);
    val_tmp |= (value & mask);
    mt_reg_sync_writel(val_tmp, AFE_Register);
}
/*
 * MET_BM_Enable: start or stop the EMI bus monitor.
 * @enable: non-zero starts monitoring, zero stops it.
 * The pause bit is always cleared; the enable bit reflects @enable.
 */
void MET_BM_Enable(const unsigned int enable)
{
    unsigned int bmen = readl(IOMEM(ADDR_EMI+EMI_BMEN));

    bmen &= ~(BUS_MON_PAUSE | BUS_MON_EN);
    if (enable)
        bmen |= BUS_MON_EN;

    mt_reg_sync_writel(bmen, ADDR_EMI+EMI_BMEN);
}
/*
 * Afe_Set_Reg: read-modify-write an AFE (audio front end) register.
 * @offset: register offset from the AFE base
 * @value: new bit values
 * @mask: which bits of the register to update
 *
 * The read-modify-write sequence is serialized with afe_set_reg_lock so
 * concurrent callers cannot lose each other's field updates.
 */
void Afe_Set_Reg(uint32 offset, uint32 value, uint32 mask)
{
    volatile long address;
    volatile uint32 *AFE_Register;
    volatile uint32 val_tmp;
    unsigned long flags = 0;

    /* reject offsets outside the valid AFE register window */
    if (CheckOffset(offset) == false)
        return;

#ifdef AUDIO_MEM_IOREMAP
    address = (long)((char *)AFE_BASE_ADDRESS + offset);
#else
    address = (long)(AFE_BASE + offset);
#endif

    AFE_Register = (volatile uint32 *)address;

    spin_lock_irqsave(&afe_set_reg_lock, flags);
    /* keep unmasked bits, replace masked bits with the new value */
    val_tmp = Afe_Get_Reg(offset);
    val_tmp &= (~mask);
    val_tmp |= (value & mask);
    mt_reg_sync_writel(val_tmp, AFE_Register);
    spin_unlock_irqrestore(&afe_set_reg_lock, flags);
}
/*
 * mt_devapc_clear_emi_violation: acknowledge a pending EMI violation.
 * If the EMI bit is set in the violation status register, write it back
 * to clear it; otherwise do nothing.
 */
void mt_devapc_clear_emi_violation(void)
{
    unsigned int vio_sta = readl(IOMEM(DEVAPC0_D0_VIO_STA_4));

    if (vio_sta & ABORT_EMI)
        mt_reg_sync_writel(ABORT_EMI, DEVAPC0_D0_VIO_STA_4);
}
/*
 * gdma1_irq_handler: general DMA channel 1 interrupt service routine.
 * @irq: DMA IRQ number
 * @dev_id: request_irq cookie (unused here)
 * Return IRQ_HANDLED when the interrupt was ours, IRQ_NONE otherwise.
 */
static irqreturn_t gdma1_irq_handler(int irq, void *dev_id)
{
    volatile unsigned glbsta = readl(DMA_INT_FLAG);

    dbgmsg(KERN_DEBUG"DMA Module - %s ISR Start\n", __func__);
    dbgmsg(KERN_DEBUG"DMA Module - GLBSTA = 0x%x\n", glbsta);

    if (glbsta & 0x1){
        /* dispatch to the registered per-channel callback, if any */
        if (dma_ctrl[G_DMA_1].isr_cb) {
            dma_ctrl[G_DMA_1].isr_cb(dma_ctrl[G_DMA_1].data);
        }
        /* acknowledge the interrupt only after the callback has run */
        mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG);
#if(DMA_DEBUG == 1)
        glbsta = readl(DMA_INT_FLAG);
        printk(KERN_DEBUG"DMA Module - GLBSTA after ack = 0x%x\n", glbsta);
#endif
    } else {
        /* flag bit 0 clear: this interrupt was not raised by channel 1 */
        printk("[CQDMA] discard interrupt\n");
        return IRQ_NONE;
    }

    dbgmsg(KERN_DEBUG"DMA Module - %s ISR END\n", __func__);

    return IRQ_HANDLED;
}
/*
 * AudDrv_Clk_Power_On: write the audio power-top register.
 * Writes the constant 0xd to the register returned by
 * Get_Afe_Powertop_Pointer().
 * NOTE(review): the meaning of the 0xd bit pattern is not visible here —
 * confirm against the power-top register map before changing it.
 */
void AudDrv_Clk_Power_On(void)
{
    volatile uint32 *AFE_Register = (volatile uint32 *)Get_Afe_Powertop_Pointer();
    volatile uint32 val_tmp;

    printk("%s", __func__);

    val_tmp = 0xd;
    mt_reg_sync_writel(val_tmp, AFE_Register);
}
/*
 * restore_dbg_regs: restore CPU debug registers from a saved array.
 * @data: data[0] = DSCR; watchpoint value/control pairs at data[i*2+1]
 *        and data[i*2+2]; breakpoint pairs at data[i*2+9] and
 *        data[i*2+10] (layout implied by the indices below).
 *
 * Per the original implementation notes, only cpu0 executes this, so
 * the per-cpu (cpu_id * 0x2000) offsets of the old code were dropped.
 *
 * NOTE(review): the register stride is sizeof(unsigned int *) — 4 on a
 * 32-bit kernel but 8 on 64-bit. If these are 32-bit debug registers,
 * sizeof(unsigned int) may have been intended; confirm.
 */
void restore_dbg_regs(unsigned int data[])
{
    int i;

    /* unlock the debug register interface before writing */
    mt_reg_sync_writel(UNLOCK_KEY, DBGLAR);
    mt_reg_sync_writel(~UNLOCK_KEY, DBGOSLAR);

    /* restore the debug status and control register */
    mt_reg_sync_writel(data[0], DBGDSCR);

    /* restore watchpoint value/control register pairs */
    for(i = 0; i < MAX_NR_WATCH_POINT; i++) {
        mt_reg_sync_writel(data[i*2+1], DBGWVR_BASE + i * sizeof(unsigned int *));
        mt_reg_sync_writel(data[i*2+2], DBGWCR_BASE + i * sizeof(unsigned int *));
    }

    /* restore breakpoint value/control register pairs */
    for(i = 0; i < MAX_NR_BREAK_POINT; i++) {
        mt_reg_sync_writel(data[i*2+9], DBGBVR_BASE + i * sizeof(unsigned int *));
        mt_reg_sync_writel(data[i*2+10], DBGBCR_BASE + i * sizeof(unsigned int *));
    }
}
/*
 * mt_devapc_emi_initial: enable the EMI violation source in device APC.
 * Maps the DEVAPC AO/PD register banks from the devicetree on first use,
 * clears bit 2 of both APC control registers, acks any pending EMI
 * violation, and unmasks the EMI violation bit.
 * Returns 0 on success, -1 when a devicetree node cannot be found.
 *
 * NOTE(review): the bases are printed with %x but hold iomapped
 * pointers; %p would be the correct conversion on 64-bit kernels.
 */
int mt_devapc_emi_initial(void)
{
    /*IO remap*/
    struct device_node *node = NULL;

    xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ," AO_ADDRESS %x, \n",DEVAPC0_AO_BASE);
    xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ," PD_ADDRESS %x \n",DEVAPC0_PD_BASE);

    /* map both banks lazily the first time either base is still NULL */
    if( DEVAPC0_AO_BASE == 0 || DEVAPC0_PD_BASE == 0 )
    {
        xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ," DPAC driver not initial yet \n");
        node = of_find_compatible_node(NULL, NULL, "mediatek,DEVAPC_AO");
        if(node){
            DEVAPC0_AO_BASE = of_iomap(node, 0);
            xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ," AO_ADDRESS %x \n",DEVAPC0_AO_BASE );
        }
        else{
            xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ," can't find DAPC_AO compatible node \n");
            return -1;
        }
        node = of_find_compatible_node(NULL, NULL, "mediatek,DEVAPC");
        if(node){
            DEVAPC0_PD_BASE = of_iomap(node, 0);
            xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ," PD_ADDRESS %x \n",DEVAPC0_PD_BASE );
        }
        else{
            xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ," can't find DAPC_PD compatible node \n");
            return -1;
        }
    }

    /* clear bit 2 of both APC control registers, preserving other bits */
    mt_reg_sync_writel(readl(IOMEM(DEVAPC0_APC_CON)) & (0xFFFFFFFF ^ (1<<2)), DEVAPC0_APC_CON);
    mt_reg_sync_writel(readl(IOMEM(DEVAPC0_PD_APC_CON)) & (0xFFFFFFFF ^ (1<<2)), DEVAPC0_PD_APC_CON);

    /* ack any pending EMI violation, then unmask the EMI violation bit */
    mt_reg_sync_writel(ABORT_EMI, DEVAPC0_D0_VIO_STA_4);
    mt_reg_sync_writel(readl(IOMEM(DEVAPC0_D0_VIO_MASK_4)) & (0xFFFFFFFF ^ (ABORT_EMI)), DEVAPC0_D0_VIO_MASK_4);

    xlog_printk(ANDROID_LOG_ERROR, EMIMPU_TAG ,"EMI_DAPC Init done \n");

    return 0;
}
/*
 * __gpt_set_clk: select a GPT timer's clock source and divider.
 * @dev: the GPT device
 * @clksrc: clock source index, placed at GPT_CLKSRC_OFFSET
 * @clkdiv: clock divider, placed in the low bits
 * The programmed values are mirrored in dev->clksrc/dev->clkdiv.
 */
static void __gpt_set_clk(struct gpt_device *dev, unsigned int clksrc, unsigned int clkdiv)
{
    mt_reg_sync_writel((clksrc << GPT_CLKSRC_OFFSET) | clkdiv,
                       dev->base_addr + GPT_CLK);
    dev->clksrc = clksrc;
    dev->clkdiv = clkdiv;
}
/*
 * MET_BM_SetEmiDcm: write the DCM setting byte into EMI_CONM[31:24],
 * leaving the lower 24 bits untouched. Always returns BM_REQ_OK.
 */
int MET_BM_SetEmiDcm(const unsigned int setting)
{
    const unsigned int conm = readl(IOMEM(ADDR_EMI+EMI_CONM));

    mt_reg_sync_writel((conm & 0x00FFFFFF) | (setting << 24), ADDR_EMI+EMI_CONM);

    return BM_REQ_OK;
}
/*
 * mt_devapc_set_permission: set module permission on device apc.
 * @module: index of the module whose permission is changed
 * @domain_num: domain index number
 * @permission_control: permission attribute to apply
 * No return value; invalid arguments are logged and ignored.
 */
void mt_devapc_set_permission(unsigned int module, E_MASK_DOM domain_num, APC_ATTR permission_control)
{
    unsigned long irq_flag;
    volatile unsigned int* base;
    /* each APC register packs 16 modules, 2 permission bits apiece */
    const unsigned int field_shift = (module % 16) * 2;
    unsigned int clr_bit = 0x3 << field_shift;
    unsigned int set_bit = permission_control << field_shift;

    if (module >= DEVAPC_DEVICE_NUMBER) {
        printk(KERN_WARNING "[DEVAPC] ERROR, device number %d exceeds the max number!\n", module);
        return;
    }

    /* pick the APC register bank of the requested domain */
    switch (domain_num) {
    case DEVAPC_DOMAIN_AP:
        base = DEVAPC0_D0_APC_0 + (module / 16) * 4;
        break;
    case DEVAPC_DOMAIN_MD:
        base = DEVAPC0_D1_APC_0 + (module / 16) * 4;
        break;
    case DEVAPC_DOMAIN_CONN:
        base = DEVAPC0_D2_APC_0 + (module / 16) * 4;
        break;
    case DEVAPC_DOMAIN_MM:
        base = DEVAPC0_D3_APC_0 + (module / 16) * 4;
        break;
    default:
        printk(KERN_WARNING "[DEVAPC] ERROR, domain number %d exceeds the max number!\n", domain_num);
        return;
    }

    /* serialize the two-step read-modify-write against other callers */
    spin_lock_irqsave(&g_mt_devapc_lock, irq_flag);
    mt_reg_sync_writel(readl(base) & ~clr_bit, base);
    mt_reg_sync_writel(readl(base) | set_bit, base);
    spin_unlock_irqrestore(&g_mt_devapc_lock, irq_flag);
}
/*
 * mt_smp_prepare_cpus: platform SMP bring-up hook.
 * @max_cpus: number of CPUs to mark present (non-devicetree builds only)
 * Configures the secondary-CPU boot path (boot address registers,
 * AArch32 mode, bootrom power-down) unless PSCI handles CPU bring-up,
 * then initializes the spm_mtcmos memory map.
 */
void __init mt_smp_prepare_cpus(unsigned int max_cpus)
{
#if !defined (CONFIG_ARM_PSCI)
    /*
     * 20140512 marc.huang
     * 1. only need to get core count if !defined(CONFIG_OF)
     * 2. only set possible cpumask in mt_smp_init_cpus() if !defined(CONFIG_OF)
     * 3. only set present cpumask in mt_smp_prepare_cpus() if !defined(CONFIG_OF)
     */
#if !defined(CONFIG_OF)
    int i;

    /* without devicetree, mark the first max_cpus cores present */
    for (i = 0; i < max_cpus; i++)
        set_cpu_present(i, true);
#endif //#if !defined(CONFIG_OF)

#ifdef CONFIG_MTK_FPGA
    /* write the address of slave startup into the system-wide flags register */
    mt_reg_sync_writel(virt_to_phys(mt_secondary_startup), SLAVE_JUMP_REG);
#endif

    /* Set all cpus into AArch32 */
    mcusys_smc_write(MP0_MISC_CONFIG3, REG_READ(MP0_MISC_CONFIG3) & 0xFFFF0FFF);
//    mcusys_smc_write(MP1_MISC_CONFIG3, REG_READ(MP1_MISC_CONFIG3) & 0xFFFF0FFF);

//#ifndef CONFIG_MTK_FPGA
    /* enable bootrom power down mode */
    REG_WRITE(BOOTROM_SEC_CTRL, REG_READ(BOOTROM_SEC_CTRL) | SW_ROM_PD);
//#endif

    /* write the address of slave startup into boot address register for bootrom power down mode */
#if defined (MT_SMP_VIRTUAL_BOOT_ADDR)
    mt_reg_sync_writel(virt_to_phys(mt_smp_boot), BOOTROM_BOOT_ADDR);
#else
    mt_reg_sync_writel(virt_to_phys(mt_secondary_startup), BOOTROM_BOOT_ADDR);
#endif
#endif //#if !defined (CONFIG_ARM_PSCI)

    /* initial spm_mtcmos memory map */
    spm_mtcmos_cpu_init();
}
/*
 * SetInfraCfg: read-modify-write an infra-cfg register.
 * @offset: register offset from AFE_INFRA_ADDRESS
 * @value: new bit values
 * @mask: which bits of the register to update
 */
void SetInfraCfg(uint32 offset, uint32 value, uint32 mask)
{
    volatile uint32 *reg = (volatile uint32 *)(long)((char *)AFE_INFRA_ADDRESS + offset);
    uint32 merged;

    /* keep bits outside the mask, substitute the masked bits */
    merged = (GetInfraCfg(offset) & ~mask) | (value & mask);
    mt_reg_sync_writel(merged, reg);
}
/*
 * MET_BM_SetReadWriteType: select which transaction directions the EMI
 * bus monitor counts, via EMI_BMEN[5:4].
 * @ReadWriteType: 00 or 11 = both reads and writes,
 *                 01 = reads only, 10 = writes only.
 */
void MET_BM_SetReadWriteType(const unsigned int ReadWriteType)
{
    const unsigned int bmen = readl(IOMEM(ADDR_EMI+EMI_BMEN));

    /* replace bits [5:4], leaving the rest of EMI_BMEN untouched */
    mt_reg_sync_writel((bmen & 0xFFFFFFCF) | (ReadWriteType << 4), ADDR_EMI+EMI_BMEN);
}
/*
 * SetpllCfg: read-modify-write an APLL configuration register.
 * @offset: register offset from APLL_BASE
 * @value: new bit values
 * @mask: which bits of the register to update
 */
void SetpllCfg(uint32 offset, uint32 value, uint32 mask)
{
    volatile uint32 *reg = (volatile uint32 *)(long)(APLL_BASE + offset);
    uint32 merged;

    /* keep bits outside the mask, substitute the masked bits */
    merged = (GetpllCfg(offset) & ~mask) | (value & mask);
    mt_reg_sync_writel(merged, reg);
}
/*
 * SetApmixedCfg: read-modify-write an APMIXEDSYS register.
 * @offset: register offset from APMIXEDSYS_ADDRESS
 * @value: new bit values
 * @mask: which bits of the register to update
 */
void SetApmixedCfg(uint32 offset, uint32 value, uint32 mask)
{
    volatile uint32 *reg = (volatile uint32 *)(long)((char *)APMIXEDSYS_ADDRESS + offset);
    uint32 merged;

    /* keep bits outside the mask, substitute the masked bits */
    merged = (GetApmixedCfg(offset) & ~mask) | (value & mask);
    mt_reg_sync_writel(merged, reg);
}
/* * mt_hard_reset_gdma: hard reset the specified GDMA channel * @channel: GDMA channel to hard reset * Return 0 for success; return negative errot code for failure. */ int mt_hard_reset_gdma(int channel) { if (channel < GDMA_START) { return -DMA_ERR_INVALID_CH; } if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) { return -DMA_ERR_INVALID_CH; } if (dma_ctrl[channel].in_use == 0) { return -DMA_ERR_CH_FREE; } printk(KERN_ERR "GDMA_%d Hard Reset !!\n", channel); mt_reg_sync_writel(DMA_HARD_RST_BIT, DMA_RESET); mt_reg_sync_writel(DMA_HARD_RST_CLR_BIT, DMA_RESET); return 0; }
/*
 * mt_stop_gdma: stop the DMA transfer for the specified GDMA channel
 * @channel: GDMA channel to stop
 * Return 0 for success; return negative error code for failure.
 *
 * NOTE(review): the wait for DMA_START to clear is an unbounded busy
 * loop — if the engine never goes idle this spins forever. A bounded
 * timeout would be safer.
 */
int mt_stop_gdma(int channel)
{
    if (channel < GDMA_START) {
        return -DMA_ERR_INVALID_CH;
    }
    if (channel >= (GDMA_START + NR_GDMA_CHANNEL)) {
        return -DMA_ERR_INVALID_CH;
    }
    if (dma_ctrl[channel].in_use == 0) {
        /* channel must be claimed before it can be stopped */
        return -DMA_ERR_CH_FREE;
    }

    /* request a flush and busy-wait for the engine to go idle */
    mt_reg_sync_writel(DMA_FLUSH_BIT, DMA_FLUSH);
    while (readl(DMA_START));

    /* clear the flush request and any pending interrupt flag */
    mt_reg_sync_writel(DMA_FLUSH_CLR_BIT, DMA_FLUSH);
    mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG);

    return 0;
}
/*
 * gpt_check_and_ack_irq: test whether GPT @id has a pending interrupt
 * and acknowledge it if so.
 * @id: GPT index (bit position in the IRQ status/ack registers)
 * Returns 1 when an interrupt was pending and acked, 0 otherwise.
 */
int gpt_check_and_ack_irq(unsigned int id)
{
    const unsigned int irq_bit = 0x1 << id;

    if (!(__raw_readl(GPT_IRQSTA) & irq_bit))
        return 0;

    mt_reg_sync_writel(irq_bit, GPT_IRQACK);
    return 1;
}