/*
 * CPTLogEx() - forward a CPT profiling event to the MMProfile backend.
 * Maps the CPT log type onto the matching MMProfile flag and emits the
 * event through MMProfileLogEx(). Does nothing (but still reports
 * success) before gbInit is set or for an unknown log type.
 * Always returns true.
 */
bool CPTLogEx(CPT_Event event, CPT_LogType type, unsigned int data1, unsigned int data2)
{
	if (!gbInit) {
		return true;
	}

	MMP_LogType mmpFlag;
	switch (type) {
	case CPTFlagStart:
		mmpFlag = MMProfileFlagStart;
		break;
	case CPTFlagEnd:
		mmpFlag = MMProfileFlagEnd;
		break;
	case CPTFlagPulse:
		mmpFlag = MMProfileFlagPulse;
		break;
	case CPTFlagSeparator:
		mmpFlag = MMProfileFlagEventSeparator;
		break;
	default:
		/* unknown type: nothing to emit */
		return true;
	}

	MMProfileLogEx(gMMPEvent[event], mmpFlag, data1, data2);
	return true;
}
/*
 * dprec_to_mmp() - forward a display-record log event to MMProfile.
 * type_logsrc is translated to an MMProfile event id by
 * dprec_mmp_event_spy(); ids of 0xffff and above mean "no matching
 * MMProfile event" and are dropped.
 *
 * Fix: dprec_mmp_event_spy() returns a signed int, so a negative
 * "invalid" return previously passed the (MMP_Event < 0xffff) test and
 * got logged with a bogus event id; negative ids are now rejected too.
 */
void dprec_to_mmp(unsigned int type_logsrc, MMP_LogType mmp_log, unsigned int data1, unsigned int data2)
{
	int MMP_Event = dprec_mmp_event_spy(type_logsrc);

	if (MMP_Event >= 0 && MMP_Event < 0xffff)
		MMProfileLogEx(MMP_Event, mmp_log, data1, data2);
}
/*
 * ovl_set_status() - record a new OVL1 cascade state.
 * Logs the old -> new transition to the kernel log and MMProfile, then
 * stores the new state in the global ovl1_status.
 * NOTE(review): the store is a plain assignment — the original code's
 * "atomic operation" remark is an assumption, not enforced here; confirm
 * callers serialize access.
 */
void ovl_set_status(DISP_OVL1_STATUS status)
{
	DISP_OVL1_STATUS prev = ovl1_status;

	DDPMSG("cascade, set_ovl1 from %s to %s!\n",
	       ovl_get_status_name(prev), ovl_get_status_name(status));
	MMProfileLogEx(ddp_mmp_get_events()->ovl1_status, MMProfileFlagPulse, prev, status);
	ovl1_status = status;
}
int ext_disp_trigger(int blocking, void *callback, unsigned int userdata) { int ret = 0; EXT_DISP_FUNC(); if((is_hdmi_active() == false)|| pgc->state == EXTD_DEINIT || pgc->state == EXTD_SUSPEND || pgc->need_trigger_overlay < 1) { EXT_DISP_LOG("trigger ext display is already sleeped\n"); MMProfileLogEx(ddp_mmp_get_events()->Extd_ErrorInfo, MMProfileFlagPulse, Trigger, 0); return -1; } _ext_disp_path_lock(); if(_should_trigger_interface()) { _trigger_display_interface(blocking, callback, userdata); } else { _trigger_overlay_engine(); } pgc->state = EXTD_RESUME; _ext_disp_path_unlock(); EXT_DISP_LOG("ext_disp_trigger done \n"); return ret; }
/*
 * _DPI_InterruptHandler() - ISR for the DPI (parallel display) block.
 * Snapshots INT_STATUS, logs it to MMProfile, clears it, forwards VSYNC
 * to the registered callback, wakes the software-vsync waiter, then
 * records the refresh rate.
 *
 * NOTE(review): "counter" was evidently meant to count FIFO_EMPTY
 * events, but the "if (status.FIFO_EMPTY)" guard is commented out, so
 * counter now increments on EVERY interrupt and the "FIFO is empty"
 * error prints on any VSYNC once counter != 0 — confirm intent.
 * NOTE(review): INT_STATUS is written to 0 both before and after the
 * VSYNC handling; presumably the second clear catches bits raised while
 * the handler runs — confirm against the DPI programming guide.
 */
static irqreturn_t _DPI_InterruptHandler(int irq, void *dev_id)
{
	/* persists across interrupts: occurrences since last report */
	static int counter = 0;
	DPI_REG_INTERRUPT status = DPI_REG->INT_STATUS;

	MMProfileLogEx(DDP_MMP_Events.ROT_IRQ, MMProfileFlagPulse, AS_UINT32(&status), 0);

	// if (status.FIFO_EMPTY)
	++ counter;

	OUTREG32(&DPI_REG->INT_STATUS, 0);

	if(status.VSYNC)
	{
		if(dpiIntCallback)
			dpiIntCallback(DISP_DPI_VSYNC_INT);
#ifndef BUILD_UBOOT
		if(wait_dpi_vsync){
			/* a cancellable vsync hrtimer means a waiter is pending:
			 * cancel the timeout path and wake it */
			if(-1 != hrtimer_try_to_cancel(&hrtimer_vsync_dpi)){
				dpi_vsync = true;
				// hrtimer_try_to_cancel(&hrtimer_vsync_dpi);
				wake_up_interruptible(&_vsync_wait_queue_dpi);
			}
		}
#endif
	}

	if (status.VSYNC && counter)
	{
		DISP_LOG_PRINT(ANDROID_LOG_ERROR, "DPI", "[Error] DPI FIFO is empty, " "received %d times interrupt !!!\n", counter);
		counter = 0;
	}

	_DPI_LogRefreshRate(status);
	OUTREG32(&DPI_REG->INT_STATUS, 0);
	return IRQ_HANDLED;
}
/*
 * soidle_handler() - try to enter Screen-On IDLE (SODI) low-power state.
 * When the SODI idle switch is on and soidle_can_enter() approves for
 * this cpu, runs the pre handler, enters SODI through SPM, runs the post
 * handler and returns 1 (state entered). Returns 0 when SODI was not
 * attempted. Optional APXGPT timestamps bracket the sequence when
 * SPM_SODI_PROFILE_TIME is defined; MMProfile start/end events bracket
 * the SPM call when DEFAULT_MMP_ENABLE is defined.
 */
static inline int soidle_handler(int cpu)
{
	if (idle_switch[IDLE_TYPE_SO]) {
#ifdef SPM_SODI_PROFILE_TIME
		gpt_get_cnt(SPM_SODI_PROFILE_APXGPT,&soidle_profile[0]);
#endif
		if (soidle_can_enter(cpu)) {
			soidle_pre_handler();
#ifdef DEFAULT_MMP_ENABLE
			MMProfileLogEx(sodi_mmp_get_events()->sodi_enable, MMProfileFlagStart, 0, 0);
#endif //DEFAULT_MMP_ENABLE
			spm_go_to_sodi(slp_spm_SODI_flags, 0);
#ifdef DEFAULT_MMP_ENABLE
			/* data2 records the PASR/DPD state read back after wakeup */
			MMProfileLogEx(sodi_mmp_get_events()->sodi_enable, MMProfileFlagEnd, 0, spm_read(SPM_PCM_PASR_DPD_3));
#endif //DEFAULT_MMP_ENABLE
			soidle_post_handler();
#if 0 //removed unused log
#ifdef CONFIG_SMP
			idle_ver("SO:timer_left=%d, timer_left2=%d, delta=%d\n", soidle_timer_left, soidle_timer_left2, soidle_timer_left-soidle_timer_left2);
#else
			idle_ver("SO:timer_left=%d, timer_left2=%d, delta=%d,timeout val=%d\n", soidle_timer_left, soidle_timer_left2, soidle_timer_left2-soidle_timer_left,soidle_timer_cmp-soidle_timer_left);
#endif
#endif
#if 0 //for DVT test only
			idle_switch[IDLE_TYPE_SO] = 0;
#endif
#ifdef SPM_SODI_PROFILE_TIME
			gpt_get_cnt(SPM_SODI_PROFILE_APXGPT,&soidle_profile[3]);
			idle_ver("SODI: cpu_freq:%u, 1=>2:%u, 2=>3:%u, 3=>4:%u\n", mt_cpufreq_get_cur_freq(0), soidle_profile[1]-soidle_profile[0], soidle_profile[2]-soidle_profile[1], soidle_profile[3]-soidle_profile[2]);
#endif
			return 1;
		}
	}
	return 0;
}
/*
 * dprec_reg_op() - trace a display register write issued via CPU or CMDQ.
 * Emits an MMProfile pulse for CPU writes outside irq context, prints a
 * HW-op / debug line describing module, handle, register, value and
 * optional mask, and — when _control.overall_switch is set — formats a
 * "[DPREC]" record into dprec_string_buffer and prints it, optionally
 * followed by a Trace32 CMM "D.S SD:..." command when _control.cmm_dump
 * is enabled.
 *
 * NOTE(review): this chunk appears truncated — the function's closing
 * brace is not visible here.
 * NOTE(review): printk(dprec_string_buffer) passes a non-literal format
 * string (-Wformat-security hazard if the buffer ever contains '%');
 * printk("%s", dprec_string_buffer) would be safer.
 * NOTE(review): "%\LE %\LONG" in the [CMM] format is Trace32 CMM script
 * syntax embedded in the printk format — presumably intentional; confirm
 * printk passes it through as expected.
 */
void dprec_reg_op(void* cmdq, unsigned int reg, unsigned int val, unsigned int mask) { int len = 0; if(!cmdq && !in_interrupt()) MMProfileLogEx(ddp_mmp_get_events()->dprec_cpu_write_reg, MMProfileFlagPulse, reg, val); if(cmdq) { if(mask) { DISPPR_HWOP("%s/0x%08x/0x%08x=0x%08x&0x%08x\n", _find_module_by_reg_addr(reg), (unsigned int)cmdq, reg, val ,mask); } else { DISPPR_HWOP("%s/0x%08x/0x%08x=0x%08x\n", _find_module_by_reg_addr(reg), (unsigned int)cmdq, reg, val); } } else { if(!in_interrupt()) { if(mask) { DISPDBG("%s/%08x=%08x&%08x\n", _find_module_by_reg_addr(reg), reg, val ,mask); } else { DISPDBG("%s/%08x=%08x\n", _find_module_by_reg_addr(reg), reg, val); } } } if(_control.overall_switch == 0) { return; } len += scnprintf(dprec_string_buffer+len, dprec_string_max_length - len, "[DPREC]"); len += scnprintf(dprec_string_buffer+len, dprec_string_max_length - len, "[%s]", _find_module_by_reg_addr(reg)); len += scnprintf(dprec_string_buffer+len, dprec_string_max_length - len, "[%s]", cmdq?"CMDQ":"CPU"); if(cmdq) len += scnprintf(dprec_string_buffer+len, dprec_string_max_length - len, "[0x%08x]", cmdq); len += scnprintf(dprec_string_buffer+len, dprec_string_max_length - len, "0x%08x=0x%08x", reg, val); if(mask) len += scnprintf(dprec_string_buffer+len, dprec_string_max_length - len, "&0x%08x", mask); len += scnprintf(dprec_string_buffer+len, dprec_string_max_length - len, "\n"); printk(dprec_string_buffer); if(_control.cmm_dump) { printk("[CMM]D.S SD:0x%08x %\LE %\LONG 0x%08x; write %s\n", (_control.cmm_dump_use_va)?reg:(reg&0x1fffffff), mask?(val|mask):val,_find_module_by_reg_addr(reg)); }
/*
 * Scoped CPT profiling helper: the constructor emits an MMProfile
 * "start" event for the given CPT event (the matching "end" is emitted
 * by the destructor). Event and payload words are captured regardless of
 * gbInit; the log itself is skipped until initialization has happened.
 */
AutoCPTLog::AutoCPTLog(CPT_Event event, unsigned int data1, unsigned int data2)
	: mEvent(event), mData1(data1), mData2(data2)
{
	if (!gbInit)
		return;
	MMProfileLogEx(gMMPEvent[mEvent], MMProfileFlagStart, mData1, mData2);
}
/*
 * soidle_handler() - try to enter Screen-On IDLE (SODI) low-power state.
 * (Variant without the SPM_SODI_PROFILE_TIME instrumentation.)
 * When the SODI idle switch is on and soidle_can_enter() approves for
 * this cpu, runs the pre handler, enters SODI through SPM, runs the post
 * handler and returns 1 (state entered). Returns 0 when SODI was not
 * attempted. MMProfile start/end events bracket the SPM call when
 * DEFAULT_MMP_ENABLE is defined.
 */
static inline int soidle_handler(int cpu)
{
	if (idle_switch[IDLE_TYPE_SO]) {
		if (soidle_can_enter(cpu)) {
			soidle_pre_handler();
#ifdef DEFAULT_MMP_ENABLE
			MMProfileLogEx(sodi_mmp_get_events()->sodi_enable, MMProfileFlagStart, 0, 0);
#endif //DEFAULT_MMP_ENABLE
			spm_go_to_sodi(slp_spm_SODI_flags, 0);
#ifdef DEFAULT_MMP_ENABLE
			/* data2 records the PASR/DPD state read back after wakeup */
			MMProfileLogEx(sodi_mmp_get_events()->sodi_enable, MMProfileFlagEnd, 0, spm_read(SPM_PCM_PASR_DPD_3));
#endif //DEFAULT_MMP_ENABLE
			soidle_post_handler();
#if 0 //removed unused log
#ifdef CONFIG_SMP
			idle_ver("SO:timer_left=%d, timer_left2=%d, delta=%d\n", soidle_timer_left, soidle_timer_left2, soidle_timer_left-soidle_timer_left2);
#else
			idle_ver("SO:timer_left=%d, timer_left2=%d, delta=%d,timeout val=%d\n", soidle_timer_left, soidle_timer_left2, soidle_timer_left2-soidle_timer_left,soidle_timer_cmp-soidle_timer_left);
#endif
#endif
#if 0 //for DVT test only
			idle_switch[IDLE_TYPE_SO] = 0;
#endif
			return 1;
		}
	}
	return 0;
}
/*
 * dprec_done() - close out a dprec logger event.
 * Depending on the event's level mask: emits the MMProfile "end" event,
 * updates the per-event frame-period statistics (min/max/total/count)
 * under the dprec spinlock, prints a UART log line, and ends the kernel
 * systrace slice opened by dprec_start().
 */
void dprec_done(dprec_logger_event *event, unsigned int val1, unsigned int val2)
{
	if (!event)
		return;

	if (event->level & DPREC_LOGGER_LEVEL_MMP)
		MMProfileLogEx(event->mmp, MMProfileFlagEnd, val1, val2);

	if (event->level & DPREC_LOGGER_LEVEL_LOGGER) {
		unsigned long flags = 0;

		spin_lock_irqsave(&gdprec_logger_spinlock, flags);
		dprec_logger *l = &event->logger;
		unsigned long long now = get_current_time_us();
		/* NOTE(review): guards on ts_start but measures from ts_trigger
		 * (set by dprec_start) — presumably intentional; confirm. */
		if (l->ts_start != 0) {
			l->period_frame = now - l->ts_trigger;
			if (l->period_frame > l->period_max_frame)
				l->period_max_frame = l->period_frame;
			if (l->period_frame < l->period_min_frame)
				l->period_min_frame = l->period_frame;
			l->ts_trigger = 0;
			l->period_total += l->period_frame;
			l->count++;
		}
		spin_unlock_irqrestore(&gdprec_logger_spinlock, flags);
	}

	if (event->level & DPREC_LOGGER_LEVEL_MOBILE_LOG) {
		/* mobile-log output intentionally disabled */
	}

	if (event->level & DPREC_LOGGER_LEVEL_UART_LOG)
		printk("DISP/%s done,0x%08x,0x%08x\n", event->name, val1, val2);

#ifdef CONFIG_TRACING
	if (event->level & DPREC_LOGGER_LEVEL_SYSTRACE && _control.systrace)
		mmp_kernel_trace_end();
#endif
}
/*
 * dprec_start() - open a dprec logger event.
 * Depending on the event's level mask: emits the MMProfile "start"
 * event, timestamps the trigger (and, on the first use, the series
 * start + min-period sentinel) under the dprec spinlock, prints a UART
 * log line, and begins a named kernel systrace slice.
 */
void dprec_start(dprec_logger_event *event, unsigned int val1, unsigned int val2)
{
	if (!event)
		return;

	if (event->level & DPREC_LOGGER_LEVEL_MMP)
		MMProfileLogEx(event->mmp, MMProfileFlagStart, val1, val2);

	if (event->level & DPREC_LOGGER_LEVEL_LOGGER) {
		unsigned long flags = 0;

		spin_lock_irqsave(&gdprec_logger_spinlock, flags);
		dprec_logger *l = &event->logger;
		unsigned long long now = get_current_time_us();
		if (l->count == 0) {
			l->ts_start = now;
			/* sentinel so the first measured period becomes the min */
			l->period_min_frame = 0xffffffffffffffff;
		}
		l->ts_trigger = now;
		spin_unlock_irqrestore(&gdprec_logger_spinlock, flags);
	}

	if (event->level & DPREC_LOGGER_LEVEL_MOBILE_LOG) {
		/* mobile-log output intentionally disabled */
	}

	if (event->level & DPREC_LOGGER_LEVEL_UART_LOG)
		printk("DISP/%s start,0x%08x,0x%08x\n", event->name, val1, val2);

#ifdef CONFIG_TRACING
	if (event->level & DPREC_LOGGER_LEVEL_SYSTRACE && _control.systrace) {
		char name[256];
		scnprintf(name, sizeof(name)/sizeof(name[0]), "K_%s_0x%x_0x%x",
			  event->name, val1, val2);
		mmp_kernel_trace_begin(name);
	}
#endif
}
/*
 * dprec_submit() - record a one-shot (pulse) dprec logger event.
 * Depending on the event's level mask: emits an MMProfile pulse and/or
 * prints a UART log line. No period statistics are kept for pulses.
 */
void dprec_submit(dprec_logger_event *event, unsigned int val1, unsigned int val2)
{
	if (!event)
		return;

	if (event->level & DPREC_LOGGER_LEVEL_MMP)
		MMProfileLogEx(event->mmp, MMProfileFlagPulse, val1, val2);

	/* DPREC_LOGGER_LEVEL_LOGGER and MOBILE_LOG: nothing to do for pulses */

	if (event->level & DPREC_LOGGER_LEVEL_UART_LOG)
		printk("DISP/%s trigger,0x%08x,0x%08x\n", event->name, val1, val2);
}
/*
 * __m4u_get_user_pages() - pin user pages for M4U (multimedia IOMMU) use.
 * A get_user_pages()-style walker over [start, start + nr_pages*PAGE_SIZE):
 * validates each VMA, faults pages in as needed (honouring FOLL_WRITE /
 * FOLL_FORCE), mlocks each page and counts the lock in pMlock_cnt[pfn],
 * storing page/vma pointers into the optional output arrays. Progress and
 * stalls are bracketed with M4U MMProfile events. Returns the number of
 * pages pinned so far, or a -errno when the first page already failed.
 *
 * NOTE(review): this chunk ends at the outer do/while — the final return
 * and closing brace are outside the visible region.
 */
int __m4u_get_user_pages(int eModuleID, struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned long vm_flags;
	int trycnt;
	if (nr_pages <= 0) return 0;
	//VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
	/* soft variant of the VM_BUG_ON above: log instead of panicking */
	if(!!pages != !!(gup_flags & FOLL_GET)) {
		M4UMSG(" error: __m4u_get_user_pages !!pages != !!(gup_flags & FOLL_GET), pages=0x%x, gup_flags & FOLL_GET=0x%x \n", (unsigned int)pages, gup_flags & FOLL_GET);
	}
	/* * Require read or write permissions. * If FOLL_FORCE is set, we only require the "MAY" flags. */
	vm_flags = (gup_flags & FOLL_WRITE) ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;
	M4UDBG("Trying to get_user_pages from start vaddr 0x%08x with %d pages\n", start, nr_pages);
	do { /* outer loop: one iteration per VMA */
		struct vm_area_struct *vma;
		M4UDBG("For a new vma area from 0x%08x\n", start);
		vma = find_extend_vma(mm, start);
		if (!vma) {
			M4UMSG("error: the vma is not found, start=0x%x, module=%d \n", (unsigned int)start, eModuleID);
			return i ? i : -EFAULT;
		}
		/* all of VM_IO|VM_PFNMAP|VM_SHARED|VM_WRITE set: treat as pmem garbage */
		if( ((~vma->vm_flags) & (VM_IO|VM_PFNMAP|VM_SHARED|VM_WRITE)) == 0 ) {
			M4UMSG("error: m4u_get_pages(): bypass pmem garbage pages! vma->vm_flags=0x%x, start=0x%x, module=%d \n", (unsigned int)(vma->vm_flags), (unsigned int)start, eModuleID);
			return i ? i : -EFAULT;; /* NOTE(review): stray second ';' is harmless */
		}
		if(vma->vm_flags & VM_IO) {
			M4UDBG("warning: vma is marked as VM_IO \n");
		}
		if(vma->vm_flags & VM_PFNMAP) {
			M4UMSG("error: vma permission is not correct, vma->vm_flags=0x%x, start=0x%x, module=%d \n", (unsigned int)(vma->vm_flags), (unsigned int)start, eModuleID);
			M4UMSG("hint: maybe the memory is remapped with un-permitted vma->vm_flags! \n");
			//m4u_dump_maps(start);
			return i ?
 i : -EFAULT;;
		}
		if(!(vm_flags & vma->vm_flags)) {
			M4UMSG("error: vm_flags invalid, vm_flags=0x%x, vma->vm_flags=0x%x, start=0x%x, module=%d \n", (unsigned int)vm_flags, (unsigned int)(vma->vm_flags), (unsigned int)start, eModuleID);
			//m4u_dump_maps(start);
			return i ? : -EFAULT; /* NOTE(review): GNU "?:" extension — returns i when non-zero */
		}
		do { /* inner loop: one iteration per page inside this VMA */
			struct page *page;
			unsigned int foll_flags = gup_flags;
			/* * If we have a pending SIGKILL, don't keep faulting * pages and potentially allocating memory. */
			if (unlikely(fatal_signal_pending(current))) return i ? i : -ERESTARTSYS;
			MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagStart, eModuleID, start&(~0xFFF));
			page = follow_page(vma, start, foll_flags);
			MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagEnd, eModuleID, 0x1000);
			while (!page) { /* not mapped yet: fault it in, then retry */
				int ret;
				M4UDBG("Trying to allocate for %dth page(vaddr: 0x%08x)\n", i, start);
				MMProfileLogEx(M4U_MMP_Events[PROFILE_FORCE_PAGING], MMProfileFlagStart, eModuleID, start&(~0xFFF));
				ret = handle_mm_fault(mm, vma, start, (foll_flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
				MMProfileLogEx(M4U_MMP_Events[PROFILE_FORCE_PAGING], MMProfileFlagEnd, eModuleID, 0x1000);
				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM) {
						M4UMSG("handle_mm_fault() error: no memory, aaddr:0x%08lx (%d pages are allocated), module=%d\n", start, i, eModuleID);
						//m4u_dump_maps(start);
						return i ? i : -ENOMEM;
					}
					if (ret & (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) {
						M4UMSG("handle_mm_fault() error: invalide memory address, vaddr:0x%lx (%d pages are allocated), module=%d\n", start, i, eModuleID);
						//m4u_dump_maps(start);
						return i ? i : -EFAULT;
					}
					BUG();
				}
				if (ret & VM_FAULT_MAJOR) tsk->maj_flt++;
				else tsk->min_flt++;
				/* * The VM_FAULT_WRITE bit tells us that * do_wp_page has broken COW when necessary, * even if maybe_mkwrite decided not to set * pte_write. We can thus safely do subsequent * page lookups as if they were reads.
 But only * do so when looping for pte_write is futile: * in some cases userspace may also be wanting * to write to the gotten user page, which a * read fault here might prevent (a readonly * page might get reCOWed by userspace write). */
				if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) foll_flags &= ~FOLL_WRITE;
				MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagStart, eModuleID, start&(~0xFFF));
				page = follow_page(vma, start, foll_flags);
				MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagEnd, eModuleID, 0x1000);
			}
			if (IS_ERR(page)) {
				M4UMSG("handle_mm_fault() error: faulty page is returned, vaddr:0x%lx (%d pages are allocated), module=%d \n", start, i, eModuleID);
				//m4u_dump_maps(start);
				return i ? i : PTR_ERR(page);
			}
			if (pages) {
				pages[i] = page;
				MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagStart, eModuleID, start&(~0xFFF));
				/* Use retry version to guarantee it will succeed in getting the lock */
				trycnt = 3000;
				do {
					if (trylock_page(page)) {
						mlock_vma_page(page);
						unlock_page(page);
						//make sure hw pte is not 0
						{
							/* NOTE(review): this inner 'i' shadows the page
							 * counter above — intentional but easy to misread */
							int i;
							for(i=0; i<3000; i++) {
								if(!m4u_user_v2p(start)) {
									handle_mm_fault(mm, vma, start, (foll_flags & FOLL_WRITE)? FAULT_FLAG_WRITE : 0);
									cond_resched();
								} else break;
							}
							if(i==3000) M4UMSG("error: cannot handle_mm_fault to get hw pte: va=0x%x\n", start);
						}
						break;
					}
				} while (trycnt-- > 0);
				if(PageMlocked(page)==0) {
					M4UMSG("Can't mlock page\n");
					dump_page(page);
				} else {
					unsigned int pfn = page_to_pfn(page);
					if(pfn < mlock_cnt_size) {
						pMlock_cnt[page_to_pfn(page)]++;
					} else {
						M4UERR("mlock_cnt_size is too small: pfn=%d, size=%d\n", pfn, mlock_cnt_size);
					}
					//M4UMSG("lock page:\n");
					//dump_page(page);
				}
				MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagEnd, eModuleID, 0x1000);
			}
			if (vmas) vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
/*
 * Emits the matching MMProfile "end" event for the "start" logged by the
 * constructor.
 * Fix: mirror the constructor's (and CPTLogEx's) gbInit guard — without
 * it, a destructor running before initialization logged through an
 * uninitialized gMMPEvent[] slot even though the paired "start" event
 * was never emitted.
 */
AutoCPTLog::~AutoCPTLog()
{
	if (!gbInit) {
		return;
	}
	MMProfileLogEx(gMMPEvent[mEvent], MMProfileFlagEnd, mData1, mData2);
}
///TODO: move each irq to module driver irqreturn_t disp_irq_handler(int irq, void *dev_id) { DISP_MODULE_ENUM module = DISP_MODULE_UNKNOWN; unsigned int reg_val = 0; unsigned int index = 0; unsigned int mutexID = 0; //MMProfileLogEx(ddp_mmp_get_events()->DDP_IRQ, MMProfileFlagStart, irq, 0); if(irq==dispsys_irq[DISP_REG_DSI0] || irq==dispsys_irq[DISP_REG_DSI1]) { index = (irq == dispsys_irq[DISP_REG_DSI0]) ? 0 : 1; module = (irq == dispsys_irq[DISP_REG_DSI0]) ? DISP_MODULE_DSI0 : DISP_MODULE_DSI1; reg_val = DISP_REG_GET(DDP_REG_BASE_DSI0+0xC + index * DISP_DSI_INDEX_OFFSET) & 0xff; DISP_CPU_REG_SET(DDP_REG_BASE_DSI0+0xC + index * DISP_DSI_INDEX_OFFSET, ~reg_val); DDPIRQ("IRQ: DSI%d 0x%x!\n", index, reg_val); //MMProfileLogEx(ddp_mmp_get_events()->DSI_IRQ[index], MMProfileFlagPulse, reg_val, 0); } else if(irq==dispsys_irq[DISP_REG_OVL0] || irq==dispsys_irq[DISP_REG_OVL1]) { index = (irq == dispsys_irq[DISP_REG_OVL0]) ? 0 : 1; module = (irq == dispsys_irq[DISP_REG_OVL0]) ? DISP_MODULE_OVL0 : DISP_MODULE_OVL1; reg_val = DISP_REG_GET(DISP_REG_OVL_INTSTA + index * DISP_OVL_INDEX_OFFSET); if(reg_val&(1<<1)) DDPIRQ("IRQ: OVL%d frame done!\n", index); if (reg_val & (1 << 2)) { DDPERR("IRQ: OVL%d frame underrun! 
cnt=%d\n", index, cnt_ovl_underflow[index]++); disp_irq_log_module |= 1<<module; } if (reg_val & (1 << 3)) { DDPIRQ("IRQ: OVL%d sw reset done\n", index); } if (reg_val & (1 << 4)) { DDPIRQ("IRQ: OVL%d hw reset done\n", index); } if (reg_val & (1 << 5)) { DDPERR("IRQ: OVL%d-RDMA0 not complete untill EOF!\n", index); disp_irq_log_module |= 1 << module; } if (reg_val & (1 << 6)) { DDPERR("IRQ: OVL%d-RDMA1 not complete untill EOF!\n", index); disp_irq_log_module |= 1 << module; } if (reg_val & (1 << 7)) { DDPERR("IRQ: OVL%d-RDMA2 not complete untill EOF!\n", index); disp_irq_log_module |= 1 << module; } if (reg_val & (1 << 8)) { DDPERR("IRQ: OVL%d-RDMA3 not complete untill EOF!\n", index); disp_irq_log_module |= 1 << module; } if (reg_val & (1 << 9)) { DDPERR("IRQ: OVL%d-RDMA0 fifo underflow!\n", index); disp_irq_log_module |= 1 << module; } if (reg_val & (1 << 10)) { DDPERR("IRQ: OVL%d-RDMA1 fifo underflow!\n", index); disp_irq_log_module |= 1 << module; } if (reg_val & (1 << 11)) { DDPERR("IRQ: OVL%d-RDMA2 fifo underflow!\n", index); disp_irq_log_module |= 1 << module; } if (reg_val & (1 << 12)) { DDPERR("IRQ: OVL%d-RDMA3 fifo underflow!\n", index); disp_irq_log_module |= 1 << module; } DISP_CPU_REG_SET(DISP_REG_OVL_INTSTA + index * DISP_OVL_INDEX_OFFSET, ~reg_val); MMProfileLogEx(ddp_mmp_get_events()->OVL_IRQ[index], MMProfileFlagPulse, reg_val, DISP_REG_GET(DISP_REG_OVL_INTSTA+index*DISP_OVL_INDEX_OFFSET)); } else if(irq==dispsys_irq[DISP_REG_WDMA0] || irq==dispsys_irq[DISP_REG_WDMA1]) { index = (irq==dispsys_irq[DISP_REG_WDMA0]) ? 0 : 1; module =(irq==dispsys_irq[DISP_REG_WDMA0]) ? DISP_MODULE_WDMA0 : DISP_MODULE_WDMA1; reg_val = DISP_REG_GET(DISP_REG_WDMA_INTSTA+index*DISP_WDMA_INDEX_OFFSET); if (reg_val & (1 << 0)) { DDPIRQ("IRQ: WDMA%d frame done!\n", index); } if (reg_val & (1 << 1)) { DDPERR("IRQ: WDMA%d underrun! 
cnt=%d\n", index, cnt_wdma_underflow[index]++); disp_irq_log_module |= 1 << module; } DISP_CPU_REG_SET(DISP_REG_WDMA_INTSTA + index * DISP_WDMA_INDEX_OFFSET, ~reg_val); MMProfileLogEx(ddp_mmp_get_events()->WDMA_IRQ[index], MMProfileFlagPulse, reg_val, DISP_REG_GET(DISP_REG_WDMA_CLIP_SIZE)); } else if(irq==dispsys_irq[DISP_REG_RDMA0] || irq==dispsys_irq[DISP_REG_RDMA1] || irq==dispsys_irq[DISP_REG_RDMA2] ) { if(dispsys_irq[DISP_REG_RDMA0]==irq) { index = 0; module = DISP_MODULE_RDMA0; } else if(dispsys_irq[DISP_REG_RDMA1]==irq) { index = 1; module = DISP_MODULE_RDMA1; } else if (dispsys_irq[DISP_REG_RDMA2] == irq) { index = 2; module = DISP_MODULE_RDMA2; } reg_val = DISP_REG_GET(DISP_REG_RDMA_INT_STATUS+index*DISP_RDMA_INDEX_OFFSET); if (reg_val & (1 << 0)) { DDPIRQ("IRQ: RDMA%d reg update done!\n", index); } /* deal with end first */ if (reg_val & (1 << 2)) { MMProfileLogEx(ddp_mmp_get_events()->SCREEN_UPDATE[index], MMProfileFlagEnd, reg_val, 0); rdma_end_time[index] = sched_clock(); DDPIRQ("IRQ: RDMA%d frame done!\n", index); } if (reg_val & (1 << 1)) { MMProfileLogEx(ddp_mmp_get_events()->SCREEN_UPDATE[index], MMProfileFlagStart, reg_val, 0); MMProfileLogEx(ddp_mmp_get_events()->layer[0], MMProfileFlagPulse, DISP_REG_GET(DISP_REG_OVL_L0_ADDR), DISP_REG_GET(DISP_REG_OVL_SRC_CON) & 0x1); MMProfileLogEx(ddp_mmp_get_events()->layer[1], MMProfileFlagPulse, DISP_REG_GET(DISP_REG_OVL_L1_ADDR), DISP_REG_GET(DISP_REG_OVL_SRC_CON) & 0x2); MMProfileLogEx(ddp_mmp_get_events()->layer[2], MMProfileFlagPulse, DISP_REG_GET(DISP_REG_OVL_L2_ADDR), DISP_REG_GET(DISP_REG_OVL_SRC_CON) & 0x4); MMProfileLogEx(ddp_mmp_get_events()->layer[3], MMProfileFlagPulse, DISP_REG_GET(DISP_REG_OVL_L3_ADDR), DISP_REG_GET(DISP_REG_OVL_SRC_CON) & 0x8); rdma_start_time[index] = sched_clock(); DDPIRQ("IRQ: RDMA%d frame start!\n", index); } if (reg_val & (1 << 3)) { DDPERR("IRQ: RDMA%d abnormal! 
cnt=%d\n", index, cnt_rdma_abnormal[index]++); disp_irq_log_module |= 1 << module; MMProfileLogEx(ddp_mmp_get_events()->SCREEN_UPDATE[index], MMProfileFlagPulse, reg_val, 0); } if (reg_val & (1 << 4)) { MMProfileLogEx(ddp_mmp_get_events()->rdma_underflow, MMProfileFlagPulse,cnt_rdma_underflow, 0); MMProfileLogEx(ddp_mmp_get_events()->SCREEN_UPDATE[index], MMProfileFlagPulse, reg_val, 0); DDPERR("IRQ: RDMA%d underflow! cnt=%d dsi0_cur(%d,%d)\n", index, cnt_rdma_underflow[index]++, DISP_REG_GET(DDP_REG_BASE_DSI0+0x168), DISP_REG_GET(DDP_REG_BASE_DSI0+0x16C)); disp_irq_log_module |= module; } if (reg_val & (1 << 5)) { DDPIRQ("IRQ: RDMA%d target line!\n", index); } /* clear intr */ DISP_CPU_REG_SET(DISP_REG_RDMA_INT_STATUS + index * DISP_RDMA_INDEX_OFFSET, ~reg_val); } else if(irq==dispsys_irq[DISP_REG_COLOR0] || irq==dispsys_irq[DISP_REG_COLOR1]) { index = (irq == dispsys_irq[DISP_REG_COLOR0]) ? 0 : 1; module = (irq == dispsys_irq[DISP_REG_COLOR0]) ? DISP_MODULE_COLOR0 : DISP_MODULE_COLOR1; reg_val = 0; } else if(irq==dispsys_irq[DISP_REG_MM_MUTEX]) { module = DISP_MODULE_MUTEX; reg_val = DISP_REG_GET(DISP_REG_CONFIG_MUTEX_INTSTA) & 0x7C1F; for (mutexID = 0; mutexID < 5; mutexID++) { if (reg_val & (0x1 << mutexID)) { DDPIRQ("IRQ: mutex%d sof!\n", mutexID); MMProfileLogEx(ddp_mmp_get_events()->MUTEX_IRQ[mutexID], MMProfileFlagPulse, reg_val, 0); } } if (reg_val & (0x1 << (mutexID + DISP_MUTEX_TOTAL))) { DDPIRQ("IRQ: mutex%d eof!\n", mutexID); MMProfileLogEx(ddp_mmp_get_events()->MUTEX_IRQ[mutexID], MMProfileFlagPulse, reg_val, 1); } DISP_CPU_REG_SET(DISP_REG_CONFIG_MUTEX_INTSTA, ~reg_val); } else if(irq==dispsys_irq[DISP_REG_AAL]) { module = DISP_MODULE_AAL; reg_val = DISP_REG_GET(DISP_AAL_INTSTA); disp_aal_on_end_of_frame(); } else { module = DISP_MODULE_UNKNOWN; reg_val = 0; DDPERR("invalid irq=%d\n ", irq); } disp_invoke_irq_callbacks(module, reg_val); if (disp_irq_log_module != 0) { wake_up_interruptible(&disp_irq_log_wq); } 
//MMProfileLogEx(ddp_mmp_get_events()->DDP_IRQ, MMProfileFlagEnd, irq, reg_val); return IRQ_HANDLED; }
/*
 * ddp_mmp_ovl_layer() - dump one OVL layer's frame buffer into MMProfile.
 * session: 1 = primary, 2 = external, 3 = memory (only 1 and 2 emit the
 * parent start/end markers or the dump itself).
 * Known pixel formats are logged as MMProfile bitmaps (down-sampled by
 * down_sample_x/y); unknown formats fall back to a raw byte dump of
 * src_pitch * src_h bytes. The buffer MVA is temporarily mapped into
 * kernel space with m4u_mva_map_kernel() and unmapped afterwards.
 * NOTE(review): the bitmap path sizes with dst_w/dst_h while the raw
 * path uses src_h — confirm both are intended.
 * NOTE(review): the switch's default case sets raw=1 with no break —
 * fine today only because it is the last case.
 */
void ddp_mmp_ovl_layer(OVL_CONFIG_STRUCT* pLayer,unsigned int down_sample_x,unsigned int down_sample_y,unsigned int session/*1:primary, 2:external, 3:memory*/)
{
	MMP_MetaDataBitmap_t Bitmap;
	MMP_MetaData_t meta;
	int raw = 0;
	if(session == 1)
		MMProfileLogEx(DDP_MMP_Events.layer_dump_parent,MMProfileFlagStart, pLayer->layer, pLayer->layer_en);
	else if(session == 2)
		MMProfileLogEx(DDP_MMP_Events.Extd_layer_dump_parent,MMProfileFlagStart, pLayer->layer, pLayer->layer_en);
	if (pLayer->layer_en)
	{
		Bitmap.data1 = pLayer->vaddr;
		Bitmap.width = pLayer->dst_w;
		Bitmap.height = pLayer->dst_h;
		switch (pLayer->fmt)
		{
		case eRGB565:
		case eBGR565:
			Bitmap.format = MMProfileBitmapRGB565;
			Bitmap.bpp = 16;
			break;
		case eRGB888:
			Bitmap.format = MMProfileBitmapRGB888;
			Bitmap.bpp = 24;
			break;
		case eBGRA8888:
			Bitmap.format = MMProfileBitmapBGRA8888;
			Bitmap.bpp = 32;
			break;
		case eBGR888:
			Bitmap.format = MMProfileBitmapBGR888;
			Bitmap.bpp = 24;
			break;
		case eRGBA8888:
			Bitmap.format = MMProfileBitmapRGBA8888;
			Bitmap.bpp = 32;
			break;
		default:
			DDPERR("ddp_mmp_ovl_layer(), unknow fmt=%d, dump raw\n", pLayer->fmt);
			raw = 1;
		}
		if(!raw)
		{
			/* bitmap dump path: map the MVA, emit, unmap */
			Bitmap.start_pos =0;
			Bitmap.pitch = pLayer->src_pitch;
			Bitmap.data_size = Bitmap.pitch * Bitmap.height;
			Bitmap.down_sample_x = down_sample_x;
			Bitmap.down_sample_y = down_sample_y;
			if (m4u_mva_map_kernel(pLayer->addr, Bitmap.data_size,(unsigned long*)&Bitmap.pData, &Bitmap.data_size)==0)
			{
				if(session == 1)
					MMProfileLogMetaBitmap(DDP_MMP_Events.layer_dump[pLayer->layer], MMProfileFlagPulse, &Bitmap);
				else if(session == 2)
					MMProfileLogMetaBitmap(DDP_MMP_Events.ovl1layer_dump[pLayer->layer], MMProfileFlagPulse, &Bitmap);
				m4u_mva_unmap_kernel(pLayer->addr, Bitmap.data_size, (unsigned long)Bitmap.pData);
			}
			else
			{
				DDPERR("ddp_mmp_ovl_layer(),fail to dump rgb(0x%x)\n", pLayer->fmt);
			}
		}
		else
		{
			/* raw dump path for unrecognized pixel formats */
			meta.data_type = MMProfileMetaRaw;
			meta.size = pLayer->src_pitch * pLayer->src_h;
			if(m4u_mva_map_kernel(pLayer->addr, meta.size, (unsigned long*)&meta.pData, &meta.size)==0)
			{
				if(session == 1)
					MMProfileLogMeta(DDP_MMP_Events.layer_dump[pLayer->layer], MMProfileFlagPulse, &meta);
				else if(session == 2)
					MMProfileLogMeta(DDP_MMP_Events.ovl1layer_dump[pLayer->layer], MMProfileFlagPulse, &meta);
				m4u_mva_unmap_kernel(pLayer->addr, meta.size, (unsigned long)meta.pData);
			}
			else
			{
				DDPERR("ddp_mmp_ovl_layer(),fail to dump raw(0x%x)\n", pLayer->fmt);
			}
		}
	}
	if(session == 1)
		MMProfileLogEx(DDP_MMP_Events.layer_dump_parent,MMProfileFlagEnd, pLayer->fmt, pLayer->addr);
	else if(session == 2)
		MMProfileLogEx(DDP_MMP_Events.Extd_layer_dump_parent,MMProfileFlagEnd, pLayer->fmt, pLayer->addr);
	return ;
}
/*
 * _trigger_display_interface() - flush the prepared external-display
 * frame out to the interface (DSI/DPI) path.
 * Waits for the previous frame when required, pushes an LCM update,
 * (re)starts the dpmgr path on the first trigger, triggers the path,
 * marks the CMDQ handle dirty and finally flushes/resets the CMDQ
 * config handle. blocking/callback/userdata are forwarded to the CMDQ
 * flush. Always returns 0.
 *
 * NOTE(review): the "{" after the commented-out
 * "///if(reg_flush == false)" was reconstructed as a bare block so the
 * braces balance — confirm against the original file.
 */
static int _trigger_display_interface(int blocking, void *callback, unsigned int userdata)
{
	///EXT_DISP_FUNC();
	int i = 0; /* NOTE(review): unused in the visible code */
	bool reg_flush = false;
	if(_should_wait_path_idle())
	{
		dpmgr_wait_event_timeout(pgc->dpmgr_handle, DISP_PATH_EVENT_FRAME_DONE, HZ/2);
	}
	if(_should_update_lcm())
	{
		extd_drv_update(pgc->plcm, 0, 0, pgc->plcm->params->width, pgc->plcm->params->height, 0);
	}
	if(_should_start_path())
	{
		reg_flush = true;
		dpmgr_path_start(pgc->dpmgr_handle, ext_disp_cmdq_enabled());
		MMProfileLogEx(ddp_mmp_get_events()->Extd_State, MMProfileFlagPulse, Trigger, 1);
	}
	if(_should_trigger_path())
	{
		// trigger_loop_handle is used only for build trigger loop, which should always be NULL for config thread
		dpmgr_path_trigger(pgc->dpmgr_handle, NULL, ext_disp_cmdq_enabled());
	}
	if(_should_set_cmdq_dirty())
	{
		_cmdq_set_config_handle_dirty();
	}
	///if(reg_flush == false)
	{
#if 0
	if(reg_flush == false)
	{
		if(_should_insert_wait_frame_done_token())
		{
			_cmdq_insert_wait_frame_done_token();
		}
	}
	if(_should_flush_cmdq_config_handle())
	{
		_cmdq_flush_config_handle(reg_flush);
	}
	if(_should_reset_cmdq_config_handle())
	{
		_cmdq_reset_config_handle();
	}
	if(reg_flush == true)
	{
		if(_should_insert_wait_frame_done_token())
		{
			_cmdq_insert_wait_frame_done_token();
		}
	}
	///cmdqRecDumpCommand(cmdqRecHandle handle)
#else
	if(_should_flush_cmdq_config_handle())
	{
		if(reg_flush)
		{
			MMProfileLogEx(ddp_mmp_get_events()->Extd_State, MMProfileFlagPulse, Trigger, 2);
		}
		if(_should_start_path())
		{
			EXT_DISP_LOG("Wait Main Display Vsync\n");
			disp_session_vsync_config vsync_config;
			primary_display_wait_for_vsync(&vsync_config);
		}
		_cmdq_flush_config_handle(blocking, callback, userdata);
	}
	if(_should_reset_cmdq_config_handle())
	{
		_cmdq_reset_config_handle();
	}
	if(_should_insert_wait_frame_done_token())
	{
		_cmdq_insert_wait_frame_done_token();
	}
#endif
	}
	return 0;
}
/*
 * ext_disp_config_input_multiple() - stage one frame's worth of layer
 * configuration for the external display path.
 * Rejects the request (returning 0, with an MMProfile error pulse) when
 * HDMI is inactive or the path is not resumed. In OVL mode every dirty
 * entry of input[] is converted into the cached path config's
 * ovl_config; otherwise input is converted to an RDMA config. The staged
 * config is then applied with dpmgr_path_config() (through CMDQ when
 * enabled) and need_trigger_overlay is raised for decouple mode.
 * Returns the last converter/config call's result.
 *
 * NOTE(review): ovl_dirty is set inside the per-layer loop even for
 * entries whose .dirty flag is false — confirm intended.
 */
int ext_disp_config_input_multiple(ext_disp_input_config* input, int idx)
{
	int ret = 0;
	int i=0;
	int layer =0; /* NOTE(review): unused in the visible code */
	///DISPFUNC();
	disp_ddp_path_config *data_config;
	if((is_hdmi_active() == false) || (pgc->state != EXTD_RESUME) )
	{
		DISPMSG("config ext disp is already sleeped\n");
		MMProfileLogEx(ddp_mmp_get_events()->Extd_ErrorInfo, MMProfileFlagPulse, Config, idx );
		return 0;
	}
	_ext_disp_path_lock();
	// all dirty should be cleared in dpmgr_path_get_last_config()
	data_config = dpmgr_path_get_last_config(pgc->dpmgr_handle);
	data_config->dst_dirty = 0;
	data_config->ovl_dirty = 0;
	data_config->rdma_dirty = 0;
	data_config->wdma_dirty = 0;
	// hope we can use only 1 input struct for input config, just set layer number
	if(_should_config_ovl_input())
	{
		for(i = 0;i<HW_OVERLAY_COUNT;i++)
		{
			///dprec_logger_start(DPREC_LOGGER_PRIMARY_CONFIG, input->layer|(input->layer_en<<16), input->addr);
			if(input[i].dirty)
			{
				dprec_mmp_dump_ovl_layer(&(data_config->ovl_config[input[i].layer]), input[i].layer, 2);
				ret = _convert_disp_input_to_ovl(&(data_config->ovl_config[input[i].layer]), &input[i]);
			}
			/* else { data_config->ovl_config[input[i].layer].layer_en = input[i].layer_en; data_config->ovl_config[input[i].layer].layer = input[i].layer; } */
			data_config->ovl_dirty = 1;
			///dprec_logger_done(DPREC_LOGGER_PRIMARY_CONFIG, input->src_x, input->src_y);
		}
	}
	else
	{
		ret = _convert_disp_input_to_rdma(&(data_config->rdma_config), input);
		data_config->rdma_dirty= 1;
	}
	if(_should_wait_path_idle())
	{
		dpmgr_wait_event_timeout(pgc->dpmgr_handle, DISP_PATH_EVENT_FRAME_DONE, HZ/2);
	}
	memcpy(&(data_config->dispif_config), &(extd_dpi_params.dispif_config), sizeof(LCM_PARAMS));
	ret = dpmgr_path_config(pgc->dpmgr_handle, data_config, ext_disp_cmdq_enabled()? pgc->cmdq_handle_config : NULL);
	// this is used for decouple mode, to indicate whether we need to trigger ovl
	pgc->need_trigger_overlay = 1;
	_ext_disp_path_unlock();
	DISPMSG("config_input_multiple idx %x -w %d, h %d\n", idx ,data_config->ovl_config[0].src_w, data_config->ovl_config[0].src_h);
	return ret;
}
/* Track a frame's life cycle (CONFIG -> TRIGGER -> WDMA0_EOF -> RDMA0_SOF ->
 * RDMA0_EOF) through the ring buffers frm_update_sequence[] (primary path)
 * and frm_update_sequence_mem[] (memory path), emitting an MMProfile pulse
 * at each transition and driving the dprec frame-sequence logger.
 *
 * session_id:  display session; DISP_SESSION_TYPE() selects the ring.
 * addr:        buffer MVA used to match WDMA/RDMA events to a tracked frame.
 * addr_offset: max address offset stored with the frame (matching by offset
 *              is currently commented out below).
 * seq:         frame sequence number; 0 means "keep the previous one".
 * state:       the life-cycle stage being reported.
 *
 * Fix vs. previous revision: the guard used to read
 *   if(seq < 0 || session_id < 0 || state > FRM_RDMA0_EOF)
 * but seq and session_id are unsigned int, so both "< 0" comparisons were
 * always false (dead code). They are removed; behavior is unchanged.
 */
void update_frm_seq_info(unsigned int session_id, unsigned int addr, unsigned int addr_offset,unsigned int seq, DISP_FRM_SEQ_STATE state)
{
	int i= 0;
	unsigned device_type = DISP_SESSION_TYPE(session_id);

	//printk("update_frm_seq_info, 0x%x/0x%x/0x%x/%d/%s/%d\n", session_id, addr, addr_offset, seq, get_frame_seq_state_string(state), frm_update_cnt);

	/* Only states up to FRM_RDMA0_EOF are tracked. */
	if(state > FRM_RDMA0_EOF)
		return ;

	if(device_type > DISP_SESSION_MEMORY)
	{
		printk("seq_end session_id(0x%x) , seq(%d) \n",session_id, seq);
		return;
	}

	if(device_type == DISP_SESSION_PRIMARY || device_type == DISP_SESSION_PRIMARY - 1)
	{
		if(FRM_CONFIG == state)
		{
			/* New frame enters the ring at the current write slot. */
			frm_update_sequence[frm_update_cnt].state = state;
			frm_update_sequence[frm_update_cnt].session_type = device_type;
			frm_update_sequence[frm_update_cnt].mva= addr;
			frm_update_sequence[frm_update_cnt].max_offset= addr_offset;
			if(seq > 0)
				frm_update_sequence[frm_update_cnt].seq= seq;
			MMProfileLogEx(MTKFB_MMP_Events.primary_seq_config, MMProfileFlagPulse, addr, seq);
		}
		else if(FRM_TRIGGER == state)
		{
			frm_update_sequence[frm_update_cnt].state = FRM_TRIGGER;
			MMProfileLogEx(MTKFB_MMP_Events.primary_seq_trigger, MMProfileFlagPulse, addr, seq);
			dprec_logger_frame_seq_begin(device_type, frm_update_sequence[frm_update_cnt].seq);
			/* Advance the ring write pointer (wraps at cache size). */
			frm_update_cnt++;
			frm_update_cnt%=FRM_UPDATE_SEQ_CACHE_NUM;
		}
		else if(FRM_WDMA0_EOF == state)
		{
			/* Promote every triggered frame to WDMA0_EOF (address-based
			 * matching is disabled, see commented condition). */
			for(i= 0; i< FRM_UPDATE_SEQ_CACHE_NUM; i++)
			{
				// if((absabs(addr -frm_update_sequence[i].mva) <= frm_update_sequence[i].max_offset) && (frm_update_sequence[i].state == FRM_TRIGGER))
				if(frm_update_sequence[i].state == FRM_TRIGGER)
				{
					frm_update_sequence[i].state = FRM_WDMA0_EOF;
					frm_update_sequence[i].mva = addr;
					MMProfileLogEx(MTKFB_MMP_Events.primary_seq_wdma0_efo, MMProfileFlagPulse, frm_update_sequence[i].mva, frm_update_sequence[i].seq);
					///break;
				}
			}
		}
		else if(FRM_RDMA0_SOF == state)
		{
			for(i= 0; i< FRM_UPDATE_SEQ_CACHE_NUM; i++)
			{
				if(FRM_WDMA0_EOF == frm_update_sequence[i].state && device_type == DISP_SESSION_PRIMARY && frm_update_sequence[i].mva == addr)
				{
					frm_update_sequence[i].state = FRM_RDMA0_SOF;
					/* End the WDMA-side span and start the RDMA-side one. */
					dprec_logger_frame_seq_end(device_type, frm_update_sequence[i].seq);
					dprec_logger_frame_seq_begin(DISP_SESSION_PRIMARY-1, frm_update_sequence[i].seq);
					MMProfileLogEx(MTKFB_MMP_Events.primary_seq_rdma0_sof, MMProfileFlagPulse, frm_update_sequence[i].mva, frm_update_sequence[i].seq);
				}
			}
		}
		else if(FRM_RDMA0_EOF == state)
		{
			for(i= 0; i< FRM_UPDATE_SEQ_CACHE_NUM; i++)
			{
				if(FRM_RDMA0_SOF == frm_update_sequence[i].state && device_type == DISP_SESSION_PRIMARY && frm_update_sequence[i].mva == addr)
				{
					frm_update_sequence[i].state = FRM_RDMA0_EOF;
					dprec_logger_frame_seq_end(DISP_SESSION_PRIMARY-1, frm_update_sequence[i].seq );
					MMProfileLogEx(MTKFB_MMP_Events.primary_seq_rdma0_eof, MMProfileFlagPulse, frm_update_sequence[i].mva, frm_update_sequence[i].seq);
				}
			}
		}
	}
	else if(device_type == DISP_SESSION_MEMORY)
	{
		/* Memory-session mirror of the primary path, tracked only up to
		 * WDMA0_EOF in its own ring. */
		if(FRM_CONFIG == state)
		{
			frm_update_sequence_mem[frm_update_cnt_mem].state = state;
			frm_update_sequence_mem[frm_update_cnt_mem].session_type = device_type;
			frm_update_sequence_mem[frm_update_cnt_mem].mva= addr;
			frm_update_sequence_mem[frm_update_cnt_mem].max_offset= addr_offset;
			if(seq > 0)
				frm_update_sequence_mem[frm_update_cnt_mem].seq= seq;
			MMProfileLogEx(MTKFB_MMP_Events.external_seq_config, MMProfileFlagPulse, addr, seq);
		}
		else if(FRM_TRIGGER == state)
		{
			frm_update_sequence_mem[frm_update_cnt_mem].state = FRM_TRIGGER;
			MMProfileLogEx(MTKFB_MMP_Events.external_seq_trigger, MMProfileFlagPulse, addr, seq);
			dprec_logger_frame_seq_begin(device_type, frm_update_sequence_mem[frm_update_cnt_mem].seq);
			frm_update_cnt_mem++;
			frm_update_cnt_mem%=FRM_UPDATE_SEQ_CACHE_NUM;
		}
		else if(FRM_WDMA0_EOF == state)
		{
			for(i= 0; i< FRM_UPDATE_SEQ_CACHE_NUM; i++)
			{
				// if((absabs(addr -frm_update_sequence_mem[i].mva) <= frm_update_sequence_mem[i].max_offset) && (frm_update_sequence_mem[i].state == FRM_TRIGGER))
				if(frm_update_sequence_mem[i].state == FRM_TRIGGER)
				{
					MMProfileLogEx(MTKFB_MMP_Events.external_seq_wdma0_efo, MMProfileFlagPulse, frm_update_sequence_mem[i].mva, frm_update_sequence_mem[i].seq);
					frm_update_sequence_mem[i].state = FRM_WDMA0_EOF;
					frm_update_sequence_mem[i].mva = addr;
					dprec_logger_frame_seq_end(device_type, frm_update_sequence_mem[i].seq);
					///break;
				}
			}
		}
	}
}
irqreturn_t disp_irq_handler(int irq, void *dev_id) { DISP_MODULE_ENUM module = DISP_MODULE_UNKNOWN; unsigned long reg_val = 0; unsigned int index = 0; unsigned int mutexID = 0; unsigned long reg_temp_val = 0; DDPDBG("disp_irq_handler, irq=%d, module=%s \n", irq, disp_irq_module(irq)); MMProfileLogEx(ddp_mmp_get_events()->DDP_IRQ, MMProfileFlagStart, irq, 0); //switch(irq) { if(irq==dispsys_irq[DISP_REG_DSI0]) { module = DISP_MODULE_DSI0; reg_val = (DISP_REG_GET(dsi_reg_va + 0xC) & 0xff); if(atomic_read(&ESDCheck_byCPU) == 0) { reg_temp_val=reg_val&0xfffe;//rd_rdy don't clear and wait for ESD & Read LCM will clear the bit. DISP_CPU_REG_SET(dsi_reg_va + 0xC, ~reg_temp_val); } else { DISP_CPU_REG_SET(dsi_reg_va + 0xC, ~reg_val); } MMProfileLogEx(ddp_mmp_get_events()->DSI_IRQ[0], MMProfileFlagPulse, reg_val, 0); } else if(irq==dispsys_irq[DISP_REG_OVL0] || irq==dispsys_irq[DISP_REG_OVL1]) { index = (irq==dispsys_irq[DISP_REG_OVL0]) ? 0 : 1; module= (irq==dispsys_irq[DISP_REG_OVL0]) ? DISP_MODULE_OVL0 : DISP_MODULE_OVL1; reg_val = DISP_REG_GET(DISP_REG_OVL_INTSTA+index*DISP_OVL_INDEX_OFFSET); if(reg_val&(1<<1)) { DDPIRQ("IRQ: OVL%d frame done! \n",index); ovl_complete_irq_cnt[index]++; // update OVL addr { unsigned int i = 0; if(index==0) { for(i=0;i<4;i++) { if(DISP_REG_GET(DISP_REG_OVL_SRC_CON)&(0x1<<i)) MMProfileLogEx(ddp_mmp_get_events()->layer[i], MMProfileFlagPulse, DISP_REG_GET(DISP_REG_OVL_L0_ADDR+i*0x20), 0); } } if(index==1) { for(i=0;i<4;i++) { if(DISP_REG_GET(DISP_REG_OVL_SRC_CON+DISP_OVL_INDEX_OFFSET)&(0x1<<i)) MMProfileLogEx(ddp_mmp_get_events()->ovl1_layer[i], MMProfileFlagPulse, DISP_REG_GET(DISP_REG_OVL_L0_ADDR+DISP_OVL_INDEX_OFFSET+i*0x20), 0); } } } } if(reg_val&(1<<2)) { //DDPERR("IRQ: OVL%d frame underrun! 
cnt=%d \n",index, cnt_ovl_underflow[index]++); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<3)) { DDPIRQ("IRQ: OVL%d sw reset done\n",index); } if(reg_val&(1<<4)) { DDPIRQ("IRQ: OVL%d hw reset done\n",index); } if(reg_val&(1<<5)) { DDPERR("IRQ: OVL%d-L0 not complete untill EOF!\n",index); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<6)) { DDPERR("IRQ: OVL%d-L1 not complete untill EOF!\n",index); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<7)) { DDPERR("IRQ: OVL%d-L2 not complete untill EOF!\n",index); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<8)) { DDPERR("IRQ: OVL%d-L3 not complete untill EOF!\n",index); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<9)) { //DDPERR("IRQ: OVL%d-L0 fifo underflow!\n",index); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<10)) { //DDPERR("IRQ: OVL%d-L1 fifo underflow!\n",index); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<11)) { //DDPERR("IRQ: OVL%d-L2 fifo underflow!\n",index); //disp_irq_log_module |= 1<<module; } if(reg_val&(1<<12)) { //DDPERR("IRQ: OVL%d-L3 fifo underflow!\n",index); //disp_irq_log_module |= 1<<module; } //clear intr if(reg_val&(0xf<<5)) { ddp_dump_analysis(DISP_MODULE_CONFIG); if(index==0) { ddp_dump_analysis(DISP_MODULE_OVL1); ddp_dump_analysis(DISP_MODULE_OVL0); ddp_dump_analysis(DISP_MODULE_COLOR0); ddp_dump_analysis(DISP_MODULE_AAL); ddp_dump_analysis(DISP_MODULE_RDMA0); } else { ddp_dump_analysis(DISP_MODULE_OVL1); ddp_dump_analysis(DISP_MODULE_RDMA1); ddp_dump_reg(DISP_MODULE_CONFIG); } } DISP_CPU_REG_SET(DISP_REG_OVL_INTSTA+index*DISP_OVL_INDEX_OFFSET, ~reg_val); MMProfileLogEx(ddp_mmp_get_events()->OVL_IRQ[index], MMProfileFlagPulse, reg_val, 0); if(reg_val&0x1e0) { MMProfileLogEx(ddp_mmp_get_events()->ddp_abnormal_irq, MMProfileFlagPulse, (index<<16)|reg_val, module<<24); } } else if(irq==dispsys_irq[DISP_REG_WDMA0] || irq==dispsys_irq[DISP_REG_WDMA1]) { index = (irq==dispsys_irq[DISP_REG_WDMA0]) ? 
0 : 1; module =(irq==dispsys_irq[DISP_REG_WDMA0]) ? DISP_MODULE_WDMA0 : DISP_MODULE_WDMA1; reg_val = DISP_REG_GET(DISP_REG_WDMA_INTSTA+index*DISP_WDMA_INDEX_OFFSET); if(reg_val&(1<<0)) { DDPIRQ("IRQ: WDMA%d frame done!\n",index); } if(reg_val&(1<<1)) { DDPERR("IRQ: WDMA%d underrun! cnt=%d\n",index,cnt_wdma_underflow[index]++); disp_irq_log_module |= 1<<module; } //clear intr DISP_CPU_REG_SET(DISP_REG_WDMA_INTSTA+index*DISP_WDMA_INDEX_OFFSET,~reg_val); MMProfileLogEx(ddp_mmp_get_events()->WDMA_IRQ[index], MMProfileFlagPulse, reg_val, DISP_REG_GET(DISP_REG_WDMA_CLIP_SIZE)); if(reg_val&0x2) { MMProfileLogEx(ddp_mmp_get_events()->ddp_abnormal_irq, MMProfileFlagPulse, (index<<16)|reg_val, cnt_wdma_underflow[index]|(module<<24)); } } else if(irq==dispsys_irq[DISP_REG_RDMA0] || irq==dispsys_irq[DISP_REG_RDMA1]) { if(dispsys_irq[DISP_REG_RDMA0]==irq) { index = 0; module = DISP_MODULE_RDMA0; } else if(dispsys_irq[DISP_REG_RDMA1]==irq) { index = 1; module = DISP_MODULE_RDMA1; } reg_val = DISP_REG_GET(DISP_REG_RDMA_INT_STATUS+index*DISP_RDMA_INDEX_OFFSET); if(reg_val&(1<<0)) { DDPIRQ("IRQ: RDMA%d reg update done! \n",index); } if(reg_val&(1<<1)) { MMProfileLogEx(ddp_mmp_get_events()->SCREEN_UPDATE[index], MMProfileFlagStart, reg_val, DISP_REG_GET(DISP_REG_RDMA_MEM_START_ADDR)); rdma_start_time[index]= sched_clock(); DDPIRQ("IRQ: RDMA%d frame start! 
\n",index); rdma_start_irq_cnt[index]++; // rdma start/end irq should equal, else need reset ovl if(gResetRDMAEnable == 1 && is_hwc_enabled == 1 && index ==0 && primary_display_is_video_mode()==1 && rdma_start_irq_cnt[0] > rdma_done_irq_cnt[0]+3) { ovl_reset(DISP_MODULE_OVL0, NULL); if(ovl_get_status()!=DDP_OVL1_STATUS_SUB) { ovl_reset(DISP_MODULE_OVL1, NULL); } rdma_done_irq_cnt[0] = rdma_start_irq_cnt[0]; DDPERR("warning: reset ovl!\n"); } #ifdef CONFIG_MTK_SEGMENT_TEST if(record_rdma_end_interval == 1) { if(rdma_end_begin_time == 0) { rdma_end_begin_time = sched_clock(); //printk("[display_test]====RDMA frame end time1:%lld\n",rdma_end_begin_time); } else { unsigned long long time_now = sched_clock(); //printk("[display_test]====RDMA frame end time2:%lld\n",time_now); //printk("[display_test]====RDMA frame end time3:this=%lld,max=%lld,min=%lld\n",time_now - rdma_end_begin_time,rdma_end_max_interval,rdma_end_min_interval); if((time_now - rdma_end_begin_time) > rdma_end_max_interval) { rdma_end_max_interval = time_now - rdma_end_begin_time; } if((time_now - rdma_end_begin_time) < rdma_end_min_interval) { rdma_end_min_interval = time_now - rdma_end_begin_time; } rdma_end_begin_time = time_now; } } #endif } if(reg_val&(1<<2)) { MMProfileLogEx(ddp_mmp_get_events()->SCREEN_UPDATE[index], MMProfileFlagEnd, reg_val, 0); rdma_end_time[index]= sched_clock(); DDPIRQ("IRQ: RDMA%d frame done! \n",index); //rdma_done_irq_cnt[index] ++; rdma_done_irq_cnt[index] = rdma_start_irq_cnt[index]; } if(reg_val&(1<<3)) { DDPERR("IRQ: RDMA%d abnormal! cnt=%d \n",index, cnt_rdma_abnormal[index]++); disp_irq_log_module |= 1<<module; } if(reg_val&(1<<4)) { DDPERR("IRQ: RDMA%d underflow! cnt=%d \n",index, cnt_rdma_underflow[index]++); disp_irq_log_module |= 1<<module; rdma_underflow_irq_cnt[index]++; } if(reg_val&(1<<5)) { DDPIRQ("IRQ: RDMA%d target line! 
\n",index); rdma_targetline_irq_cnt[index]++; } //clear intr DISP_CPU_REG_SET(DISP_REG_RDMA_INT_STATUS+index*DISP_RDMA_INDEX_OFFSET,~reg_val); MMProfileLogEx(ddp_mmp_get_events()->RDMA_IRQ[index], MMProfileFlagPulse, reg_val, 0); if(reg_val&0x18) { MMProfileLogEx(ddp_mmp_get_events()->ddp_abnormal_irq, MMProfileFlagPulse, (index<<16)|reg_val, rdma_underflow_irq_cnt[index]|(cnt_rdma_abnormal[index]<<8)||(module<<24)); } } else if(irq==dispsys_irq[DISP_REG_COLOR]) { } else if(irq==dispsys_irq[DISP_REG_MUTEX]) { // mutex0: perimary disp // mutex1: sub disp // mutex2: aal module = DISP_MODULE_MUTEX; reg_val = DISP_REG_GET(DISP_REG_CONFIG_MUTEX_INTSTA) & 0x7C1F; for(mutexID = 0; mutexID<5; mutexID++) { if(reg_val & (0x1<<mutexID)) { DDPIRQ("IRQ: mutex%d sof!\n",mutexID); MMProfileLogEx(ddp_mmp_get_events()->MUTEX_IRQ[mutexID], MMProfileFlagPulse, reg_val, 0); } if(reg_val & (0x1<<(mutexID+DISP_MUTEX_TOTAL))) { DDPIRQ("IRQ: mutex%d eof!\n",mutexID); MMProfileLogEx(ddp_mmp_get_events()->MUTEX_IRQ[mutexID], MMProfileFlagPulse, reg_val, 1); } } DISP_CPU_REG_SET(DISP_REG_CONFIG_MUTEX_INTSTA, ~reg_val); } else if(irq==dispsys_irq[DISP_REG_AAL]) { module = DISP_MODULE_AAL; reg_val = DISP_REG_GET(DISP_AAL_INTSTA); disp_aal_on_end_of_frame(); } else if(irq==dispsys_irq[DISP_REG_CONFIG]) // MMSYS error intr { reg_val = DISP_REG_GET(DISP_REG_CONFIG_MMSYS_INTSTA) & 0x7; if(reg_val&(1<<0)) { DDPERR("MMSYS to MFG APB TX Error, MMSYS clock off but MFG clock on! \n"); } if(reg_val&(1<<1)) { DDPERR("MMSYS to MJC APB TX Error, MMSYS clock off but MJC clock on! \n"); } if(reg_val&(1<<2)) { DDPERR("PWM APB TX Error! \n"); } DISP_CPU_REG_SET(DISP_REG_CONFIG_MMSYS_INTSTA, ~reg_val); } else { module = DISP_MODULE_UNKNOWN; reg_val = 0; DDPERR("invalid irq=%d \n ", irq); } } disp_invoke_irq_callbacks(module, reg_val); if(disp_irq_log_module!=0) { wake_up_interruptible(&disp_irq_log_wq); } MMProfileLogEx(ddp_mmp_get_events()->DDP_IRQ, MMProfileFlagEnd, irq, reg_val); return IRQ_HANDLED; }
/* Forward a display-path event raised by a module to its owning path.
 * Looks up the path handle for the module, records an MMProfile pulse,
 * and returns the result of signalling the event on that handle. */
static int dpmgr_module_notify(DISP_MODULE_ENUM module, DISP_PATH_EVENT event)
{
	ddp_path_handle path_handle;

	path_handle = find_handle_by_module(module);
	MMProfileLogEx(ddp_mmp_get_events()->primary_display_aalod_trigger, MMProfileFlagPulse, module, 0);

	return dpmgr_signal_event(path_handle, event);
}
/* Configure the external display's input layers (OVL mode) or its RDMA
 * input (decouple mode) and commit the config to the display-path manager.
 *
 * input: array of HW_OVERLAY_COUNT per-layer configs; entries with .dirty
 *        set are converted and applied.
 * idx:   caller-supplied index, used for logging/profiling only.
 *
 * Returns -2 when HDMI is inactive or the path is neither EXTD_INIT nor
 * EXTD_RESUME; otherwise the result of the last conversion / path-config
 * call. On the first call after (re)init (init_roi == 1) it also programs
 * the destination ROI from the LCM parameters.
 */
int ext_disp_config_input_multiple(ext_disp_input_config* input, int idx)
{
	int ret = 0;
	int i=0;
	int layer =0;
	///EXT_DISP_FUNC();
	disp_ddp_path_config *data_config;

	/* Reject configuration while the external path is not active. */
	if((is_hdmi_active() == false) || (pgc->state != EXTD_INIT && pgc->state != EXTD_RESUME) )
	{
		EXT_DISP_LOG("[Donglei]config ext disp is already sleeped, hdmi_active:%d, state:%d\n", is_hdmi_active(), pgc->state);
		MMProfileLogEx(ddp_mmp_get_events()->Extd_ErrorInfo, MMProfileFlagPulse, Config, idx );
		return -2;
	}

	_ext_disp_path_lock();

	// all dirty should be cleared in dpmgr_path_get_last_config()
	data_config = dpmgr_path_get_last_config(pgc->dpmgr_handle);

	// hope we can use only 1 input struct for input config, just set layer number
	if(_should_config_ovl_input())
	{
		for(i = 0;i<HW_OVERLAY_COUNT;i++)
		{
			if(input[i].dirty)
			{
				ret = _convert_disp_input_to_ovl(&(data_config->ovl_config[input[i].layer]), &input[i]);
				dprec_mmp_dump_ovl_layer(&(data_config->ovl_config[input[i].layer]), input[i].layer, 2);
			}
			/* First config after init: refresh the destination ROI from
			 * the LCM parameters and clear any stale RDMA address. */
			if (init_roi == 1)
			{
				LCM_PARAMS *lcm_param = extd_drv_get_params(pgc->plcm);
				memcpy(&(data_config->dispif_config), &(extd_dpi_params.dispif_config), sizeof(LCM_PARAMS));
				if(lcm_param != NULL)
				{
					EXT_DISP_LOG("set dest w:%d, h:%d\n", lcm_param->width, lcm_param->height);
					data_config->dst_w = lcm_param->width;
					data_config->dst_h = lcm_param->height;
				}
				data_config->dst_dirty = 1;
				data_config->rdma_config.address = 0;
			}
			data_config->ovl_dirty = 1;
		}
	}
	else
	{
		/* Decouple mode: dump the layer for debug, then program RDMA.
		 * rdma_dirty is only raised when a valid address was produced. */
		OVL_CONFIG_STRUCT ovl_config;
		_convert_disp_input_to_ovl(&ovl_config, input);
		dprec_mmp_dump_ovl_layer(&ovl_config, input->layer, 2);

		ret = _convert_disp_input_to_rdma(&(data_config->rdma_config), input);
		if (data_config->rdma_config.address)
		{
			data_config->rdma_dirty = 1;
		}
	}

	/* Bounded wait (HZ/2) for the previous frame when the path must be idle. */
	if(_should_wait_path_idle())
	{
		dpmgr_wait_event_timeout(pgc->dpmgr_handle, DISP_PATH_EVENT_FRAME_DONE, HZ/2);
	}

	memcpy(&(data_config->dispif_config), &(extd_dpi_params.dispif_config), sizeof(LCM_PARAMS));

	ret = dpmgr_path_config(pgc->dpmgr_handle, data_config, ext_disp_cmdq_enabled()? pgc->cmdq_handle_config : NULL);

	// this is used for decouple mode, to indicate whether we need to trigger ovl
	pgc->need_trigger_overlay = 1;

	init_roi = 0;

	_ext_disp_path_unlock();

	if (data_config->ovl_dirty)
	{
		EXT_DISP_LOG("config_input_multiple idx:%d -w:%d, h:%d, pitch:%d\n", idx ,data_config->ovl_config[0].src_w, data_config->ovl_config[0].src_h, data_config->ovl_config[0].src_pitch);
	}else{
		EXT_DISP_LOG("config_input_multiple idx:%d -w:%d, h:%d, pitch:%d, mva:%p\n", idx ,data_config->rdma_config.width, data_config->rdma_config.height, data_config->rdma_config.pitch, data_config->rdma_config.address);
	}
	return ret;
}
/* Perform a CPU cache maintenance operation (clean / invalidate / flush)
 * on an ION buffer, either over the buffer's kernel mapping or — when
 * __ION_CACHE_SYNC_USER_VA_EN__ is set and a *_USE_VA type is passed —
 * directly over the caller-supplied virtual range.
 *
 * NOTE(review): this chunk shows only the by-range branch
 * (sync_type < ION_CACHE_CLEAN_ALL); the whole-cache branch and the
 * function epilogue continue beyond this view.
 */
static long ion_sys_cache_sync(struct ion_client *client, ion_sys_cache_sync_param_t* pParam, int from_kernel)
{
	ION_FUNC_ENTER;
	if (pParam->sync_type < ION_CACHE_CLEAN_ALL)
	{
		// By range operation
		unsigned int start;
		size_t size;
		unsigned int end, page_num, page_start;
		struct ion_handle *kernel_handle;

		kernel_handle = ion_drv_get_kernel_handle(client, pParam->handle, from_kernel);
		if(IS_ERR(kernel_handle))
		{
			IONMSG("ion cache sync fail! \n");
			return -EINVAL;
		}

		/* Decide which VA range to operate on: a temporary kernel
		 * mapping of the buffer, or the user-supplied va/size. */
#ifdef __ION_CACHE_SYNC_USER_VA_EN__
		if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#else
		if(1)
#endif
		{
			start = (unsigned int) ion_map_kernel(client, kernel_handle);
			if(IS_ERR_OR_NULL((void*)start))
			{
				IONMSG("cannot do cachesync, unable to map_kernel: ret=%d\n", start);
				return -EFAULT;
			}
			size = ion_handle_buffer(kernel_handle)->size;
		}
		else
		{
			start = pParam->va;
			size = pParam->size;
		}

		// Cache line align
		end = start + size;
		start = (start / L1_CACHE_BYTES * L1_CACHE_BYTES);
		size = (end - start + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES * L1_CACHE_BYTES;
		page_num = ((start&(~PAGE_MASK))+size+(~PAGE_MASK))>>PAGE_ORDER;
		page_start = start & PAGE_MASK;

		// L1 cache sync
		if((pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE) || (pParam->sync_type==ION_CACHE_CLEAN_BY_RANGE_USE_VA))
		{
			MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagStart, size, 0);
			//printk("[ion_sys_cache_sync]: ION cache clean by range. start=0x%08X size=0x%08X\n", start, size);
			dmac_map_area((void*)start, size, DMA_TO_DEVICE);
		}
		else if ((pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)||(pParam->sync_type == ION_CACHE_INVALID_BY_RANGE_USE_VA))
		{
			MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagStart, size, 0);
			//printk("[ion_sys_cache_sync]: ION cache invalid by range. start=0x%08X size=0x%08X\n", start, size);
			dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
		}
		else if ((pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)||(pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE_USE_VA))
		{
			MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagStart, size, 0);
			//printk("[ion_sys_cache_sync]: ION cache flush by range. start=0x%08X size=0x%08X\n", start, size);
			dmac_flush_range((void*)start, (void*)(start+size-1));
		}

#if 0
		// L2 cache sync
		//printk("[ion_sys_cache_sync]: page_start=0x%08X, page_num=%d\n", page_start, page_num);
		for (i=0; i<page_num; i++, page_start+=DEFAULT_PAGE_SIZE)
		{
			phys_addr_t phys_addr;

			if (page_start>=VMALLOC_START && page_start<=VMALLOC_END)
			{
				ppage = vmalloc_to_page((void*)page_start);
				if (!ppage)
				{
					printk("[ion_sys_cache_sync]: Cannot get vmalloc page. addr=0x%08X\n", page_start);
					ion_unmap_kernel(client, pParam->handle);
					return -EFAULT;
				}
				phys_addr = page_to_phys(ppage);
			}
			else
				phys_addr = virt_to_phys((void*)page_start);
			if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
				outer_clean_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
			else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
				outer_inv_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
			else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
				outer_flush_range(phys_addr, phys_addr+DEFAULT_PAGE_SIZE);
		}
#endif

		/* Drop the temporary kernel mapping when one was created above. */
#ifdef __ION_CACHE_SYNC_USER_VA_EN__
		if(pParam->sync_type < ION_CACHE_CLEAN_BY_RANGE_USE_VA)
#endif
		{
			ion_unmap_kernel(client, kernel_handle);
		}

		if (pParam->sync_type == ION_CACHE_CLEAN_BY_RANGE)
			MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_CLEAN_RANGE], MMProfileFlagEnd, size, 0);
		else if (pParam->sync_type == ION_CACHE_INVALID_BY_RANGE)
			MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_INVALID_RANGE], MMProfileFlagEnd, size, 0);
		else if (pParam->sync_type == ION_CACHE_FLUSH_BY_RANGE)
			MMProfileLogEx(ION_MMP_Events[PROFILE_DMA_FLUSH_RANGE], MMProfileFlagEnd, size, 0);
	}
/* Clear the Display Assertion Layer (AEE red screen): reset the text
 * cursor, repaint the DAL framebuffer, disable the assert layer on the
 * primary display and trigger a refresh.
 *
 * Returns DAL_STATUS_NOT_READY when the text renderer (mfc_handle) has not
 * been opened yet, otherwise the status of the last operation.
 */
DAL_STATUS DAL_Clean(void)
{
	/* NOTE(review): BG_COLOR is computed but never used in this function. */
	const UINT32 BG_COLOR = MAKE_TWO_RGB565_COLOR(DAL_BG_COLOR, DAL_BG_COLOR);
	DAL_STATUS ret = DAL_STATUS_OK;
	static int dal_clean_cnt = 0;  /* counts AEE disable transitions, for the log below */
	MFC_CONTEXT *ctxt = (MFC_CONTEXT *)mfc_handle;

	printk("[MTKFB_DAL] DAL_Clean\n");
	if (NULL == mfc_handle)
		return DAL_STATUS_NOT_READY;

	// if (LCD_STATE_POWER_OFF == LCD_GetState())
	//	return DAL_STATUS_LCD_IN_SUSPEND;

	MMProfileLogEx(ddp_mmp_get_events()->dal_clean, MMProfileFlagStart, 0, 0);
	DAL_LOCK();
	DAL_CHECK_MFC_RET(MFC_ResetCursor(mfc_handle));
	/* Force DAL_SetScreenColor to actually repaint by clearing the cached color. */
	ctxt->screen_color=0;
	DAL_SetScreenColor(DAL_COLOR_RED);

	/*
	if (LCD_STATE_POWER_OFF == LCD_GetState())
	{
		DISP_LOG_PRINT(ANDROID_LOG_INFO, "DAL", "dal_clean in power off\n");
		dal_disable_when_resume = TRUE;
		ret = DAL_STATUS_LCD_IN_SUSPEND;
		goto End;
	}
	*/

	//xuecheng, for debug
#if 0
	if(is_early_suspended){
		up(&sem_early_suspend);
		DISP_LOG_PRINT(ANDROID_LOG_INFO, "DAL", "dal_clean in power off\n");
		goto End;
	}
#endif

	//TODO: if dal_shown=false, and 3D enabled, mtkfb may disable UI layer, please modify 3D driver
	if(isAEEEnabled==1)
	{
		/* Disable the assert layer (layer_en = 0) on the primary display. */
		primary_disp_input_config input;
		memset((void*)&input, 0, sizeof(primary_disp_input_config));
		input.addr = (unsigned long)dal_fb_pa;
		input.layer = primary_display_get_option("ASSERT_LAYER");
		input.layer_en = 0;
		input.src_x = 0;
		input.src_y = 0;
		input.src_w = DAL_WIDTH;
		input.src_h = DAL_HEIGHT;
		input.dst_x = 0;
		input.dst_y = 0;
		input.dst_w = DAL_WIDTH;
		input.dst_h = DAL_HEIGHT;
		input.alpha = 0x80;
		input.aen = 1;
		input.buff_idx = -1;
		input.src_pitch = DAL_WIDTH * DAL_BPP;
		input.isDirty = 1;
		input.fmt = DAL_FORMAT;
		ret = primary_display_config_input(&input);

		// DAL disable, switch UI layer to default layer 3
		printk("[DDP]* isAEEEnabled from 1 to 0, %d \n", dal_clean_cnt++);
		isAEEEnabled = 0;
		DAL_Dynamic_Change_FB_Layer(isAEEEnabled); // restore UI layer to DEFAULT_UI_LAYER
	}

	dal_shown = FALSE;
	dal_disable_when_resume = FALSE;

	primary_display_trigger(0, NULL, 0);

/* NOTE(review): only reachable via goto in the commented/#if 0 paths above. */
End:
	DAL_UNLOCK();
	MMProfileLogEx(ddp_mmp_get_events()->dal_clean, MMProfileFlagEnd, 0, 0);
	return ret;
}
DAL_STATUS DAL_Printf(const char *fmt, ...) { va_list args; uint i; DAL_STATUS ret = DAL_STATUS_OK; printk("%s", __func__); //printk("[MTKFB_DAL] DAL_Printf mfc_handle=0x%08X, fmt=0x%08X\n", mfc_handle, fmt); if (NULL == mfc_handle) return DAL_STATUS_NOT_READY; if (NULL == fmt) return DAL_STATUS_INVALID_ARGUMENT; MMProfileLogEx(ddp_mmp_get_events()->dal_printf, MMProfileFlagStart, 0, 0); DAL_LOCK(); if(isAEEEnabled==0) { printk("[DDP] isAEEEnabled from 0 to 1, ASSERT_LAYER=%d, dal_fb_pa %lx\n", ASSERT_LAYER, dal_fb_pa); isAEEEnabled = 1; DAL_Dynamic_Change_FB_Layer(isAEEEnabled); // default_ui_ layer coniig to changed_ui_layer DAL_CHECK_MFC_RET(MFC_Open(&mfc_handle, dal_fb_addr, DAL_WIDTH, DAL_HEIGHT, DAL_BPP, DAL_FG_COLOR, DAL_BG_COLOR)); //DAL_Clean(); primary_disp_input_config input; memset((void*)&input, 0, sizeof(primary_disp_input_config)); input.addr = (unsigned long)dal_fb_pa; input.layer = primary_display_get_option("ASSERT_LAYER"); input.layer_en = 1; input.src_x = 0; input.src_y = 0; input.src_w = DAL_WIDTH; input.src_h = DAL_HEIGHT; input.dst_x = 0; input.dst_y = 0; input.dst_w = DAL_WIDTH; input.dst_h = DAL_HEIGHT; input.alpha = 0x80; input.aen = 1; input.buff_idx = -1; input.src_pitch = DAL_WIDTH * DAL_BPP; input.isDirty = 1; input.fmt = DAL_FORMAT; ret = primary_display_config_input(&input); } va_start (args, fmt); i = vsprintf(dal_print_buffer, fmt, args); BUG_ON(i>=ARRAY_SIZE(dal_print_buffer)); va_end (args); DAL_CHECK_MFC_RET(MFC_Print(mfc_handle, dal_print_buffer)); flush_cache_all(); /* if (LCD_STATE_POWER_OFF == LCD_GetState()) { ret = DAL_STATUS_LCD_IN_SUSPEND; dal_enable_when_resume = TRUE; goto End; } */ #if 0 if(is_early_suspended){ up(&sem_early_suspend); DISP_LOG_PRINT(ANDROID_LOG_INFO, "DAL", "DAL_Printf in power off\n"); goto End; } #endif if (!dal_shown) { dal_shown = TRUE; } ret = primary_display_trigger(0, NULL, 0); End: DAL_UNLOCK(); MMProfileLogEx(ddp_mmp_get_events()->dal_printf, MMProfileFlagEnd, 0, 0); return ret; }
/* JNI bridge: forward a Java-side MMProfile log request to the native
 * MMProfileLogEx, casting the jint arguments to their native types.
 * env and thiz are required by the JNI calling convention but unused. */
void MMProfileLogEx_J(JNIEnv *env, jobject thiz, jint event, jint type, jint data1, jint data2)
{
    MMP_Event native_event = (MMP_Event)event;
    MMP_LogType native_type = (MMP_LogType)type;

    MMProfileLogEx(native_event, native_type, (unsigned int)data1, (unsigned int)data2);
}
/* Allocate an MVA (M4U virtual address) region large enough to map
 * [va, va+size) and associate it with priv.
 *
 * mvaGraph[] encodes free/busy runs of MVA blocks: each run stores its
 * block count at both its first and last index, with MVA_BUSY_MASK set on
 * allocated runs. First-fit search, protected by gMvaGraph_lock.
 *
 * Returns the MVA (region start << block order, plus va's in-page offset),
 * or 0 when size is 0 or no sufficiently large free run exists.
 */
unsigned int m4u_do_mva_alloc(unsigned long va, unsigned int size, void *priv)
{
	short s,end;
	short new_start, new_end;
	short nr = 0;
	unsigned int mvaRegionStart;
	unsigned long startRequire, endRequire, sizeRequire;
	unsigned long irq_flags;

	if(size == 0)
		return 0;

	///-----------------------------------------------------
	///calculate mva block number
	/* Round the VA range out to page boundaries, then up to whole blocks. */
	startRequire = va & (~M4U_PAGE_MASK);
	endRequire = (va+size-1)| M4U_PAGE_MASK;
	sizeRequire = endRequire-startRequire+1;
	nr = (sizeRequire+MVA_BLOCK_ALIGN_MASK)>>MVA_BLOCK_SIZE_ORDER; //(sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0);

	spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
	///-----------------------------------------------
	///find first match free region
	/* Hop run-to-run (free runs have mvaGraph[s] >= nr only when large
	 * enough; busy runs never satisfy mvaGraph[s] < nr's negation because
	 * of MVA_BUSY_MASK handling in the stride). */
	for(s=1; (s<(MVA_MAX_BLOCK_NR+1))&&(mvaGraph[s]<nr); s+=(mvaGraph[s]&MVA_BLOCK_NR_MASK))
		;
	if(s > MVA_MAX_BLOCK_NR)
	{
		spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
		M4UMSG("mva_alloc error: no available MVA region for %d blocks!\n", nr);
		MMProfileLogEx(M4U_MMP_Events[M4U_MMP_M4U_ERROR], MMProfileFlagPulse, size, s);
		return 0;
	}
	///-----------------------------------------------
	///alloc a mva region
	end = s + mvaGraph[s] - 1;
	if(unlikely(nr == mvaGraph[s]))
	{
		/* Exact fit: mark the existing run busy at both endpoints. */
		MVA_SET_BUSY(s);
		MVA_SET_BUSY(end);
		mvaInfoGraph[s] = priv;
		mvaInfoGraph[end] = priv;
	}
	else
	{
		/* Split the free run: [s, new_end] becomes busy,
		 * [new_start, end] stays free with the remaining count. */
		new_end = s + nr - 1;
		new_start = new_end + 1;
		//note: new_start may equals to end
		mvaGraph[new_start] = (mvaGraph[s]-nr);
		mvaGraph[new_end] = nr | MVA_BUSY_MASK;
		mvaGraph[s] = mvaGraph[new_end];
		mvaGraph[end] = mvaGraph[new_start];
		mvaInfoGraph[s] = priv;
		mvaInfoGraph[new_end] = priv;
	}
	spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
	mvaRegionStart = (unsigned int)s;
	return (mvaRegionStart<<MVA_BLOCK_SIZE_ORDER) + mva_pageOffset(va);
}