Example #1
int vAllocate_Deallocate_basic()
{
    TEST_START();
    unsigned int BufSize = 1024*1024;
    unsigned int BufAddr = (unsigned int)new unsigned char[BufSize];
    unsigned int BufMVA;

    ///> -------------------- allocate MVA
    MTKM4UDrv CM4u;

    CM4u.m4u_enable_m4u_func(M4U_CLNTMOD_RDMA);
    
    // allocate MVA buffer for the RDMA module
    CM4u.m4u_alloc_mva(M4U_CLNTMOD_RDMA,     // Module ID
                       BufAddr,              // buffer virtual start address
                       BufSize,              // buffer size
                       0,
                       0,
                       &BufMVA);             // return MVA address

    M4UDBG(" after m4u_alloc_mva(), BufMVA=0x%x \r\n", BufMVA);

    CM4u.m4u_dump_info(M4U_CLNTMOD_RDMA);

    CM4u.m4u_dealloc_mva(M4U_CLNTMOD_RDMA, BufAddr, BufSize, BufMVA);
    M4UDBG(" after m4u_dealloc_mva() \r\n");
    CM4u.m4u_dump_info(M4U_CLNTMOD_RDMA);

    // release the heap buffer that backed the mapping
    delete[] (unsigned char*)BufAddr;

    TEST_END();

    return 0;
}
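The basic test above ignores the driver's return codes and leaks the buffer on an early failure. A minimal hedged variant, assuming the common MTK convention that these wrappers return 0 on success (the listing itself does not confirm the return type), would look like:

// Sketch only: assumes m4u_alloc_mva()/m4u_dealloc_mva() return 0 on
// success, which this listing does not confirm.
int vAllocate_Deallocate_checked()
{
    const unsigned int BufSize = 1024*1024;
    unsigned char *pBuf = new unsigned char[BufSize];
    unsigned int BufAddr = (unsigned int)pBuf;
    unsigned int BufMVA = 0;

    MTKM4UDrv CM4u;
    CM4u.m4u_enable_m4u_func(M4U_CLNTMOD_RDMA);

    if (CM4u.m4u_alloc_mva(M4U_CLNTMOD_RDMA, BufAddr, BufSize, 0, 0, &BufMVA) != 0)
    {
        M4UDBG("m4u_alloc_mva failed \r\n");
        delete[] pBuf;
        return -1;
    }

    // ... program the RDMA engine with BufMVA here ...

    CM4u.m4u_dealloc_mva(M4U_CLNTMOD_RDMA, BufAddr, BufSize, BufMVA);
    delete[] pBuf;
    return 0;
}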
Example #2
int main (int argc, char *argv[])
{
    M4UDBG("enter m4u_ut main \n");
    vAllocate_Deallocate_basic();


#if 0  /* disabled: full LCDC MVA round-trip test, kept for reference */
    
    int i;
    M4UDBG("argc=%d \n", argc);
    for(i=0; i<argc; i++)
    {
        M4UDBG("argv[%d]=%s \n", i, argv[i]);
    }

    int* pDataIn = new int[argc];
    for(i=0; i<argc; i++)
    {
        sscanf(argv[i], "%d", pDataIn+i);
    }
    for(i=0; i<argc; i++)
    {
        M4UDBG("pDataIn[%d]=%d \n", i, *(pDataIn+i));
    }
    delete[] pDataIn;
        
    ///> --------------------1. allocate memory as LCD's source buffer
    unsigned int BufSize = 390*210*2;
    int pmem_fd_src, pmem_fd_dst;
    
#ifdef M4U_MEM_USE_PMEM
    unsigned int BufAddr = (unsigned int)pmem_alloc_sync(BufSize, &pmem_fd_src);
    if(BufAddr==0)
    {
        M4UDBG("alloc pmem failed! \n");
    }
    else
    {
        M4UDBG("alloc pmem success! BufAddr=0x%x\n", BufAddr);
    }
    unsigned int BufAddrPA = (unsigned int)pmem_get_phys(pmem_fd_src);
    M4UDBG("src use pmem, BufAddr=0x%x, BufAddrPA=0x%x \n", BufAddr, BufAddrPA);
#else
    unsigned int BufAddr = (unsigned int)new unsigned char[BufSize];
    M4UDBG("src use new, BufAddr=0x%x \n", BufAddr);
#endif    


    unsigned int BufMVA;
    FILE *fp;
    fp = fopen("/system/bin/data_rgb565_720x480.bin", "rb");
    if(NULL==fp)
    {
        M4UDBG("open /system/bin/data_rgb565_720x480.bin failed! \n");
        // fall back to the built-in test pattern
        memcpy((unsigned char*)BufAddr, rgb565_390x210, BufSize);
    }
    else
    {
        fread((unsigned char*)BufAddr, 1, BufSize, fp);
        fclose(fp);
    }

    // save the source image for offline inspection
    {
        char pFile[32];
        sprintf(pFile, "/data/source_rgb_%d.bin", 1);
        fp = fopen(pFile, "wb");
        if(fp != NULL)
        {
            fwrite((unsigned char*)BufAddr, 1, BufSize, fp);
            fclose(fp);
        }
    }
    
    unsigned int BufSizeDst = 390*210*2;

#ifdef M4U_MEM_USE_PMEM
    unsigned int BufAddrDstPA;
    unsigned int BufAddrDst = (unsigned int)pmem_alloc_sync(BufSizeDst, &pmem_fd_dst);
    if(BufAddrDst==0)
    {
        M4UDBG("alloc pmem failed! \n");
    }
    else
    {
        M4UDBG("alloc pmem success! BufAddrDst=0x%x\n", BufAddrDst);
    }
    BufAddrDstPA = (unsigned int)pmem_get_phys(pmem_fd_dst);
    M4UDBG("dst use pmem, BufAddrDst=0x%x, BufAddrDstPA=0x%x \n", BufAddrDst, BufAddrDstPA);
#else
    unsigned int BufAddrDst = (unsigned int)new unsigned char[BufSizeDst];
#endif
    
    memset((unsigned char*)BufAddrDst, 0x55, BufSizeDst);
    M4UDBG("src addr=0x%x, dst addr=0x%x \r\n", BufAddr, BufAddrDst);

    ///> --------------------2. allocate MVA
    MTKM4UDrv CM4u;
    CM4u.m4u_power_on(M4U_CLNTMOD_LCDC);
    // allocate MVA buffer for LCDC module (same signature as in Example #1)
    CM4u.m4u_alloc_mva(M4U_CLNTMOD_LCDC,     // Module ID
                       BufAddr,              // buffer virtual start address
                       BufSize,              // buffer size
                       0,
                       0,
                       &BufMVA);             // return MVA address
    M4UDBG(" after m4u_alloc_mva(), BufMVA=0x%x \r\n", BufMVA);
    
    ///> --------------------3. insert tlb range and tlb entry
    // manual insert MVA start page address
    CM4u.m4u_manual_insert_entry(M4U_CLNTMOD_LCDC, 
                                 BufMVA,   // MVA address
                                 true);    // lock the entry for circular access

    // insert TLB uni-update range
    CM4u.m4u_insert_tlb_range(M4U_CLNTMOD_LCDC, 
                              BufMVA,                // range start MVA
                              BufMVA + BufSize - 1,  // range end MVA
                              RT_RANGE_HIGH_PRIORITY,
                              1);
    M4UDBG(" after m4u_manual_insert_entry() and m4u_insert_tlb_range() \r\n");
    CM4u.m4u_dump_reg(M4U_CLNTMOD_LCDC);
    CM4u.m4u_dump_info(M4U_CLNTMOD_LCDC);

    ///> --------------------4. config LCD port                       
    // config LCD port 
    M4U_PORT_STRUCT M4uPort;
    M4uPort.ePortID = M4U_PORT_LCD_R;
    M4uPort.Virtuality = 1;
    M4uPort.Security = 0;
    M4uPort.Distance = 1;
    M4uPort.Direction = 0;
    CM4u.m4u_config_port(&M4uPort);
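    /* Assumption, not confirmed by this listing: Virtuality=1 routes
     * M4U_PORT_LCD_R through the M4U so the engine fetches via the MVA
     * allocated above; Security/Distance/Direction appear to be secure-mode
     * and prefetch hints, but their exact semantics are undocumented here. */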

/*
    M4uPort.ePortID = M4U_PORT_LCD_W;
    M4uPort.Virtuality = 1;
    M4uPort.Security = 0;
    M4uPort.Distance = 1;
    M4uPort.Direction = 0;
    CM4u.m4u_config_port(&M4uPort);
*/

/*
    CM4u.m4u_dump_reg(M4U_CLNTMOD_LCDC);
    CM4u.m4u_dump_info(M4U_CLNTMOD_LCDC);

    ///> --------------------5. start hardware engine
    // ...
    CM4u.m4u_monitor_start(M4U_PORT_LCD_R);
    int fp_fb;
    struct fb_overlay_layer ut_layer;
    fp_fb = open("/dev/graphics/fb0",O_RDONLY);
    if(fp_fb<0) return 0;
    	
    ut_layer.src_fmt = MTK_FB_FORMAT_RGB565;
    ut_layer.layer_id = 0;
    ut_layer.layer_enable = 1;
    ut_layer.src_base_addr = (void*)BufMVA;
    ut_layer.src_phy_addr = (void*)BufAddrDstPA;  // destination physical address
    ut_layer.src_direct_link = 0;
    ut_layer.src_offset_x = ut_layer.src_offset_y = 0;
    ut_layer.tgt_offset_x = ut_layer.tgt_offset_y = 0;
    ut_layer.tgt_height = ut_layer.src_height = 390;
    ut_layer.tgt_width = ut_layer.src_width = ut_layer.src_pitch = 210;
    ut_layer.src_color_key = 0;
    ut_layer.layer_rotation = MTK_FB_ORIENTATION_0;
    
    ioctl(fp_fb, MTKFB_M4U_UT, &ut_layer);
    CM4u.m4u_monitor_stop(M4U_PORT_LCD_R);
*/

    M4UDBG("src_va=0x%x, dst_va=0x%x, *dst_va=0x%x \n", 
        BufAddr, BufAddrDst, *(unsigned char*)BufAddrDst);
    
    // save the result image for offline inspection
    {
        char pFile[32];
        sprintf(pFile, "/data/result_rgb_%d.bin", 1);
        fp = fopen(pFile, "wb");
        if(fp != NULL)
        {
            fwrite((unsigned char*)BufAddrDst, 1, BufSizeDst, fp);
            fclose(fp);
        }
    }

    ///> --------------------6. de-allocate MVA and release tlb resource
    CM4u.m4u_invalid_tlb_range(M4U_CLNTMOD_LCDC, BufMVA, BufMVA+BufSize-1);
    CM4u.m4u_dealloc_mva(M4U_CLNTMOD_LCDC, BufAddr, BufSize, BufMVA);
    M4UDBG(" after m4u_dealloc_mva() \r\n");
    CM4u.m4u_dump_reg(M4U_CLNTMOD_LCDC);
    CM4u.m4u_dump_info(M4U_CLNTMOD_LCDC);
    CM4u.m4u_power_off(M4U_CLNTMOD_LCDC);
    
    int cnt=0;
    //while(1)  // enable to soak-test
    {
        sleep(2);
        M4UDBG("m4u_ut sleep! %d\n", cnt++);
    }

#ifdef M4U_MEM_USE_PMEM
    // pmem buffers are reclaimed when pmem_fd_src/pmem_fd_dst are closed on exit
#else
    delete[] (unsigned char*)BufAddr;
    delete[] (unsigned char*)BufAddrDst;
#endif

#endif  // #if 0


    return 0;
}
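The two save-image blocks in main() are copy-paste duplicates. A small helper makes the pattern reusable; a minimal sketch (the helper name is mine, not part of the original m4u_ut source):

#include <stdio.h>

// Hypothetical helper, not in the original source: dump a raw buffer
// to a file for offline inspection.
static int save_buffer(const char *path, const unsigned char *buf, unsigned int size)
{
    FILE *fp = fopen(path, "wb");
    if (fp == NULL)
        return -1;
    fwrite(buf, 1, size, fp);
    fclose(fp);
    return 0;
}

// usage: save_buffer("/data/source_rgb_1.bin", (unsigned char*)BufAddr, BufSize);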
Example #3
unsigned int m4u_user_v2p(unsigned int va)
{
    unsigned int pageOffset = (va & (PAGE_SIZE - 1));
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned int pa;
    //M4UMSG("Enter m4u_user_v2p()! \n", va);

    if(NULL==current)
    {
    	  M4UMSG("warning: m4u_user_v2p, current is NULL! \n");
    	  return 0;
    }
    if(NULL==current->mm)
    {
    	  M4UMSG("warning: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
    	  return 0;
    }
        
    pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
    if(pgd_none(*pgd)||pgd_bad(*pgd))
    {
        M4UMSG("m4u_user_v2p(), va=0x%x, pgd invalid! \n", va);
        return 0;
    }

    pud = pud_offset(pgd, va);
    if(pud_none(*pud)||pud_bad(*pud))
    {
        M4UDBG("m4u_user_v2p(), va=0x%x, pud invalid! \n", va);
        return 0;
    }
    
    pmd = pmd_offset(pud, va);
    if(pmd_none(*pmd)||pmd_bad(*pmd))
    {
        M4UDBG("m4u_user_v2p(), va=0x%x, pmd invalid! \n", va);
        return 0;
    }
        
    pte = pte_offset_map(pmd, va);
    if(pte_present(*pte)) 
    { 
/*
        if((long long)pte_val(pte[PTE_HWTABLE_PTRS]) == (long long)0)
        {
        	M4UMSG("user_v2p, va=0x%x, *ppte=%08llx", va,
        	       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
            pte_unmap(pte);
            return 0;
        }
*/        
        pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; 
        pte_unmap(pte);
        return pa; 
    }   

    pte_unmap(pte);

    M4UDBG("m4u_user_v2p(), va=0x%x, pte invalid! \n", va);
    // m4u_dump_maps(va);
    
    return 0;
}
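m4u_user_v2p() translates one address at a time and must run in the context of the owning process, since it walks current->mm. A caller covering a whole user buffer would translate it page by page; a minimal sketch (the helper name and the pa_list output array are illustrative, not from the driver):

/* Sketch: translate every page of a user buffer with m4u_user_v2p().
 * Must run in the context of the owning process (uses current->mm).
 * pa_list/nr are illustrative names, not part of the M4U driver. */
static int m4u_translate_buffer(unsigned int va, unsigned int size,
                                unsigned int *pa_list, unsigned int nr)
{
    unsigned int page_num = ((va & (PAGE_SIZE - 1)) + size + PAGE_SIZE - 1) / PAGE_SIZE;
    unsigned int i;

    if (page_num > nr)
        return -1;

    for (i = 0; i < page_num; i++) {
        pa_list[i] = m4u_user_v2p((va & PAGE_MASK) + i * PAGE_SIZE);
        if (pa_list[i] == 0)   /* not present: caller must fault it in first */
            return -1;
    }
    return 0;
}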
Example #4
int __m4u_get_user_pages(int eModuleID, struct task_struct *tsk, struct mm_struct *mm, 
                     unsigned long start, int nr_pages, unsigned int gup_flags,
                     struct page **pages, struct vm_area_struct **vmas)
{
        int i;
        unsigned long vm_flags;
        int trycnt;

        if (nr_pages <= 0)
                return 0;

        //VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
        if(!!pages != !!(gup_flags & FOLL_GET)) {
            M4UMSG(" error: __m4u_get_user_pages !!pages != !!(gup_flags & FOLL_GET), pages=0x%x, gup_flags & FOLL_GET=0x%x \n",
                    (unsigned int)pages, gup_flags & FOLL_GET);
        }

        /*   
         * Require read or write permissions.
         * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
        vm_flags  = (gup_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (gup_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        i = 0; 

        M4UDBG("Trying to get_user_pages from start vaddr 0x%08x with %d pages\n", start, nr_pages);

        do { 
                struct vm_area_struct *vma;
                M4UDBG("For a new vma area from 0x%08x\n", start);
                vma = find_extend_vma(mm, start);

                if (!vma)
                {
                    M4UMSG("error: the vma is not found, start=0x%x, module=%d \n", 
                           (unsigned int)start, eModuleID);
                    return i ? i : -EFAULT;
                } 
                if( ((~vma->vm_flags) & (VM_IO|VM_PFNMAP|VM_SHARED|VM_WRITE)) == 0 )
                {
                    M4UMSG("error: m4u_get_pages(): bypass pmem garbage pages! vma->vm_flags=0x%x, start=0x%x, module=%d \n", 
                            (unsigned int)(vma->vm_flags), (unsigned int)start, eModuleID);
                    return i ? i : -EFAULT;
                }                     
                if(vma->vm_flags & VM_IO)
                {
                	  M4UDBG("warning: vma is marked as VM_IO \n");
                }
                if(vma->vm_flags & VM_PFNMAP)
                {
                    M4UMSG("error: vma permission is not correct, vma->vm_flags=0x%x, start=0x%x, module=%d \n", 
                            (unsigned int)(vma->vm_flags), (unsigned int)start, eModuleID);
                    M4UMSG("hint: maybe the memory is remapped with un-permitted vma->vm_flags! \n");          
                    //m4u_dump_maps(start);
                    return i ? i : -EFAULT;
                }
                if(!(vm_flags & vma->vm_flags)) 
                {
                    M4UMSG("error: vm_flags invalid, vm_flags=0x%x, vma->vm_flags=0x%x, start=0x%x, module=%d \n", 
                           (unsigned int)vm_flags,
                           (unsigned int)(vma->vm_flags), 
                           (unsigned int)start,
                            eModuleID);
                    //m4u_dump_maps(start);                  
                    return i ? i : -EFAULT;
                }

                do {
                        struct page *page;
                        unsigned int foll_flags = gup_flags;
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
                         * pages and potentially allocating memory.
                         */
                        if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
                        MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagStart, eModuleID, start&(~0xFFF));
                        page = follow_page(vma, start, foll_flags);
                        MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagEnd, eModuleID, 0x1000);
                        while (!page) {
                                int ret;

                                M4UDBG("Trying to allocate for %dth page(vaddr: 0x%08x)\n", i, start);
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FORCE_PAGING], MMProfileFlagStart, eModuleID, start&(~0xFFF));
                                ret = handle_mm_fault(mm, vma, start,
                                        (foll_flags & FOLL_WRITE) ?
                                        FAULT_FLAG_WRITE : 0);
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FORCE_PAGING], MMProfileFlagEnd, eModuleID, 0x1000);
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM) {
                                                M4UMSG("handle_mm_fault() error: no memory, aaddr:0x%08lx (%d pages are allocated), module=%d\n", 
                                                start, i, eModuleID);
                                                //m4u_dump_maps(start);
                                                return i ? i : -ENOMEM;
					                    }
                                        if (ret &
                                            (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) {
                                                M4UMSG("handle_mm_fault() error: invalide memory address, vaddr:0x%lx (%d pages are allocated), module=%d\n", 
                                                start, i, eModuleID);
                                                //m4u_dump_maps(start);
                                                return i ? i : -EFAULT;
					                    }
                                        BUG();
                                }
                                if (ret & VM_FAULT_MAJOR)
                                        tsk->maj_flt++;
                                else
                                        tsk->min_flt++;

                                /*
                                 * The VM_FAULT_WRITE bit tells us that
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
                                 * page lookups as if they were reads. But only
                                 * do so when looping for pte_write is futile:
                                 * in some cases userspace may also be wanting
                                 * to write to the gotten user page, which a
                                 * read fault here might prevent (a readonly
                                 * page might get reCOWed by userspace write).
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagStart, eModuleID, start&(~0xFFF));
                                page = follow_page(vma, start, foll_flags);
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_FOLLOW_PAGE], MMProfileFlagEnd, eModuleID, 0x1000);
                        }
                        if (IS_ERR(page)) {
                                M4UMSG("handle_mm_fault() error: faulty page is returned, vaddr:0x%lx (%d pages are allocated), module=%d \n",
                                        start, i, eModuleID);
                                //m4u_dump_maps(start);
                                return i ? i : PTR_ERR(page);
                        }
                        if (pages) {
                                pages[i] = page;
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagStart, eModuleID, start&(~0xFFF));
				
                                /* Retry so the page lock is (almost) guaranteed to be acquired */
                                trycnt = 3000;
                                do {
                                        if (trylock_page(page)) {
                                                mlock_vma_page(page);
                                                unlock_page(page);

                                                /* make sure the hw pte is not 0 */
                                                {
                                                        int j;  /* distinct from the outer page counter 'i' */
                                                        for (j = 0; j < 3000; j++) {
                                                                if (!m4u_user_v2p(start)) {
                                                                        handle_mm_fault(mm, vma, start,
                                                                                (foll_flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
                                                                        cond_resched();
                                                                } else {
                                                                        break;
                                                                }
                                                        }
                                                        if (j == 3000)
                                                                M4UMSG("error: cannot handle_mm_fault to get hw pte: va=0x%x\n", start);
                                                }
                                                break;
                                        }
                                } while (trycnt-- > 0);

                                if(PageMlocked(page)==0)
                                {
                                    M4UMSG("Can't mlock page\n");
                                    dump_page(page);
                                }
                                else
                                {
                                    unsigned int pfn = page_to_pfn(page);
                                    if(pfn < mlock_cnt_size)
                                    {
                                        pMlock_cnt[page_to_pfn(page)]++;
                                    }
                                    else
                                    {
                                        M4UERR("mlock_cnt_size is too small: pfn=%d, size=%d\n", pfn, mlock_cnt_size);
                                    }
                                    
                                    //M4UMSG("lock page:\n");
                                    //dump_page(page);
                                }
                                MMProfileLogEx(M4U_MMP_Events[PROFILE_MLOCK], MMProfileFlagEnd, eModuleID, 0x1000);

                        }
                        if (vmas)
                                vmas[i] = vma;
                        i++;
                        start += PAGE_SIZE;
                        nr_pages--;
                } while (nr_pages && start < vma->vm_end);
        } while (nr_pages);

        return i;
}
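A caller of __m4u_get_user_pages() pins the pages, programs the hardware, then drops the references. A minimal hedged sketch of that flow (the wrapper name is illustrative; the mmap_sem locking and put_page() calls mirror get_user_pages() semantics of this kernel era, and the real driver presumably also munlocks the pages it mlocked):

/* Sketch: pin nr user pages starting at va, then release them.
 * Wrapper name and error handling are illustrative, not from the driver. */
static int m4u_pin_range_example(int eModuleID, unsigned long va,
                                 int nr, struct page **pages)
{
        int got;

        down_read(&current->mm->mmap_sem);
        got = __m4u_get_user_pages(eModuleID, current, current->mm,
                                   va, nr, FOLL_GET | FOLL_WRITE,
                                   pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (got < 0)
                return got;

        /* ... map the pages into the M4U and kick the hardware ... */

        while (got--)
                put_page(pages[got]);   /* drop the FOLL_GET references */
        return 0;
}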
Example #5
unsigned int m4u_user_v2p(unsigned int va)
{
    unsigned int pmdOffset = (va & (PMD_SIZE - 1));
    unsigned int pageOffset = (va & (PAGE_SIZE - 1));
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *pte;
    unsigned int pa;
    
    if(NULL==current)
    {
        M4UMSG("error: m4u_user_v2p, current is NULL! \n");
        return 0;
    }
    if(NULL==current->mm)
    {
        M4UMSG("error: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
        return 0;
    }

    pgd = pgd_offset(current->mm, va); /* walk the current process's page tables */
    M4UDBG("m4u_user_v2p(), pgd 0x%x\n", (unsigned int)pgd);
    M4UDBG("pgd_none=%d, pgd_bad=%d\n", pgd_none(*pgd), pgd_bad(*pgd));
    
    if(pgd_none(*pgd)||pgd_bad(*pgd))
    {
        M4UMSG("warning: m4u_user_v2p(), va=0x%x, pgd invalid! \n", va);
        return 0;
    }
    
    pmd = pmd_offset(pgd, va);
    M4UDBG("m4u_user_v2p(), pmd 0x%x\n", (unsigned int)pmd);
    M4UDBG("pmd_none=%d, pmd_bad=%d, pmd_val=0x%x\n", pmd_none(*pmd), pmd_bad(*pmd), pmd_val(*pmd));
   
    /* If this is a page table entry, keep on walking to the next level */ 
    if (( (unsigned int)pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
    {
        if(pmd_none(*pmd)||pmd_bad(*pmd))
        {
            M4UDBG("warning: m4u_user_v2p(), va=0x%x, pmd invalid! \n", va);
            return 0;
        }
        
        // we have seen cases where the pte is not present; root cause unknown
        pte = pte_offset_map(pmd, va);
        if(pte_present(*pte))
        {
            pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset;
            pte_unmap(pte);
            M4UDBG("PA = 0x%8x\n", pa);
            return pa;
        }
        pte_unmap(pte);
    }
    else /* section mapping: only one level of translation */
    {
        if(pmd_none(*pmd))
        {
            M4UDBG("error: m4u_user_v2p(), virtual addr 0x%x, pmd invalid! \n", va);
            return 0;
        }
        pa=(pmd_val(*pmd) & (PMD_MASK)) | pmdOffset;
        M4UDBG("PA = 0x%8x\n", pa);
        return pa;
    }
    
    M4UDBG("warning: m4u_user_v2p(), pte invalid! \n");
    // m4u_dump_maps(va);
    
    return 0;
}
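The PMD_TYPE_MASK test above is the ARM short-descriptor first-level decode: bits [1:0] of the entry select 0b01 = page table (walk on to the second level) or 0b10 = 1MB section (the PA comes straight from the pmd plus pmdOffset). A one-line helper makes the intent explicit (the helper name is mine, not from the driver):

/* Sketch: classify an ARM short-descriptor first-level entry the way
 * m4u_user_v2p() does. PMD_TYPE_TABLE means a second-level page table
 * hangs off this entry; otherwise it maps a section directly. */
static int m4u_pmd_is_table(pmd_t *pmd)
{
    return ((unsigned int)pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE;
}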