void msm_gem_purge(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); WARN_ON(!mutex_is_locked(&dev->struct_mutex)); WARN_ON(!is_purgeable(msm_obj)); WARN_ON(obj->import_attach); put_iova(obj); msm_gem_vunmap(obj); put_pages(obj); msm_obj->madv = __MSM_MADV_PURGED; drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); drm_gem_free_mmap_offset(obj); /* Our goal here is to return as much of the memory as * is possible back to the system as we are called from OOM. * To do this we must instruct the shmfs to drop all of its * backing pages, *now*. */ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); }
/*
 * Tear down and free a GEM object.  Caller must hold dev->struct_mutex,
 * and the object must already be idle (not on the active list).
 */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	/* Unmap the object from every MMU it has an iova in.  This must
	 * happen before the backing pages are released below.
	 */
	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		/* Imported dma-buf: undo our vmap of the exporter's buffer. */
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		/* Locally-allocated object: drop kernel mapping and pages. */
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	/* Only finalize the reservation object if it is the embedded one
	 * (an external resv is owned elsewhere).
	 */
	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}
void msm_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); WARN_ON(!mutex_is_locked(&dev->struct_mutex)); /* object should not be on active list: */ WARN_ON(is_active(msm_obj)); list_del(&msm_obj->mm_list); put_iova(obj); if (obj->import_attach) { if (msm_obj->vaddr) dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: */ if (msm_obj->pages) drm_free_large(msm_obj->pages); drm_prime_gem_destroy(obj, msm_obj->sgt); } else { msm_gem_vunmap(obj); put_pages(obj); } if (msm_obj->resv == &msm_obj->_resv) reservation_object_fini(msm_obj->resv); drm_gem_object_release(obj); kfree(msm_obj); }
/* Release the pages backing a mapping's page table and page directory.
 * NOTE(review): order (table before directory) preserved from the
 * original — presumably intentional; confirm put_pages has no ordering
 * requirement before changing.
 */
void free_map(struct mapping *map)
{
	void *pgtbl = (void *)map->m_pgtable;
	void *pgdir = (void *)map->m_pgdir;

	put_pages(pgtbl);
	put_pages(pgdir);
}
// slot_size MUST be a 4k multiple // Creates a system service, based on the initfs image. // Returns address of first page assigned to the task. ADDR create_service(UINT16 task, UINT16 thread, INT32 invoke_level, UINT32 size, BOOL low_mem, BOOL load_all, char *image_name) { struct pm_task *ptask = NULL; struct pm_thread *pthread = NULL; UINT32 psize = 0, first_page, i = 0; char *path = NULL; struct vmm_page_table *ptbl = NULL; struct thread mk_thread; BOOL isld = FALSE; if(strcmp(image_name,"ld")) { ld_task = task; isld = TRUE; } while(image_name[psize] != '\0'){ psize++; } path = kmalloc(psize); if(path == NULL) pman_print_and_stop("Could not allocate memory for path task: %s", image_name); while(image_name[i] != '\0'){ path[i] = image_name[i]; i++; } path[i] = '\0'; // Create a service task ptask = tsk_create(task); if(ptask == NULL) pman_print_and_stop("Error allocating task for %s", image_name); if(loader_create_task(ptask, path, psize, 0, 1, LOADER_CTASK_TYPE_SYS) != PM_OK) pman_print_and_stop("Error creating task for %s", image_name); /* Create task gave us a page directory, the first page table, and initialized task structure as a service. But since sysservice is TRUE, it did not begin fetching from FS. 
*/ ptask->flags = 0; ptask->flags |= TSK_FLAG_SYS_SERVICE; if(low_mem) ptask->flags |= TSK_LOW_MEM; /* Setup the task */ ptask->creator_task = 0xFFFF; ptask->creator_task_port = 0xFFFF; ptask->command_inf.command_req_id = 0; ptask->command_inf.command_sender_id = 0xFFFFFFFF; /* Parse elf */ if(elf_begin(ptask, pminit_elf_read, pminit_elf_seek) == -1) pman_print_and_stop("Elf parsing failed for %s", image_name); /* Put pages for the Service */ UINT32 max_addr = put_pages(ptask, !load_all, low_mem, isld); /* Get first page */ ptbl = (struct vmm_page_table*)PHYSICAL2LINEAR(PG_ADDRESS(ptask->vmm_info.page_directory->tables[PM_LINEAR_TO_DIR(SARTORIS_PROCBASE_LINEAR)].b)); first_page = PG_ADDRESS(ptbl->pages[PM_LINEAR_TO_TAB(SARTORIS_PROCBASE_LINEAR)].entry.phy_page_addr); /* Setup first thread */ if(!isld) { pthread = thr_create(thread, ptask); pthread->state = THR_WAITING; /* Create microkernel thread */ mk_thread.task_num = task; mk_thread.invoke_mode = PRIV_LEVEL_ONLY; mk_thread.invoke_level = 0; mk_thread.ep = (ADDR)ptask->loader_inf.elf_header.e_entry; mk_thread.stack = pthread->stack_addr = (ADDR)STACK_ADDR(PMAN_THREAD_STACK_BASE); if(create_thread(thread, &mk_thread)) pman_print_and_stop("Could not create thread for %s", image_name); /* Schedule and activate thread */ sch_add(pthread); sch_activate(pthread); } else { ld_size = max_addr; ptask->vmm_info.max_addr = max_addr; } ptask->state = TSK_NORMAL; return (ADDR)first_page; }