void low_memory_killer(unsigned int uiRequsetBlock, unsigned int uiMemMask,unsigned int id, int need_safe) { alloc_info *s_info = alloc_info_head; alloc_info *k_info = NULL; unsigned int kill_id; int loop_i; int chunk_num; // printk("low_memory_killer uiRequsetBlock=%d uiMemMask=0x%x\n",uiRequsetBlock, uiMemMask); if(!s_info) { printk("surfaceflinger's memory not found\n"); return; } if(need_safe) chunk_num = G3D_SAFE_CHUNK_NUM; else chunk_num = G3D_CHUNK_NUM; s_info=s_info->next; // the first s_info is surfaceflinger while(s_info) { for(loop_i = 0; loop_i < chunk_num - (uiRequsetBlock -1) ; loop_i++ ) { if ((s_info->uiAllocedMemMap & (uiMemMask << loop_i)) == (uiMemMask << loop_i)){ k_info=s_info; break; } } if(k_info) break; s_info=s_info->next; } if(!k_info || k_info->file_desc_id==id) { if(k_info==alloc_info_tail) printk("k_info==self\n"); else printk("k_info==NULL\n"); printk("oldest 3D process's memory not found\n"); return; } kill_id = k_info->file_desc_id; // printk("low momory killer : kill the oldest process(0x%x)\n",kill_id); for(loop_i = 0; loop_i < G3D_CHUNK_NUM; loop_i++ ){ if((g3d_bootm[loop_i].file_desc_id) == (unsigned int)kill_id){ if (g3d_bootm[loop_i].in_used == G3D_CHUCNK_RESERVED){ s3c_g3d_release_chunk(g3d_bootm[loop_i].phy_addr, g3d_bootm[loop_i].size); } g3d_bootm[loop_i].file_desc_id = 0; } } }
void garbageCollect(int* newid) { int loop_i; // printk("====Garbage Collect is executed====\n"); mutex_lock(&mem_alloc_lock); for(loop_i = 0; loop_i < G3D_CHUNK_NUM; loop_i++ ){ if((g3d_bootm[loop_i].file_desc_id) == (unsigned int)newid){ if (g3d_bootm[loop_i].in_used == G3D_CHUCNK_RESERVED){ s3c_g3d_release_chunk(g3d_bootm[loop_i].phy_addr, g3d_bootm[loop_i].size); } g3d_bootm[loop_i].file_desc_id = 0; } } mutex_unlock(&mem_alloc_lock); }
static int s3c_g3d_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { u32 val; DMA_BLOCK_STRUCT dma_block; s3c_3d_dma_info dma_info; DECLARE_COMPLETION_ONSTACK(complete); struct mm_struct *mm = current->mm; struct s3c_3d_mem_alloc param; struct s3c_3d_pm_status param_pm; unsigned int timer; switch (cmd) { case WAIT_FOR_FLUSH: //if fifo has already been flushed, return; val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE); //printk("read pipestate = 0x%x\n",val); if((val & arg) ==0) break; // enable interrupt interrupt_already_recevied = 0; __raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK); __raw_writel(1,s3c_g3d_base+FGGB_INTMASK); //printk("wait for flush (arg=0x%lx)\n",arg); timer = 1000000; while(timer) { wait_event_interruptible_timeout(waitq, (interrupt_already_recevied>0), 1*HZ); __raw_writel(0,s3c_g3d_base+FGGB_INTMASK); interrupt_already_recevied = 0; //if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq); val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE); //printk("in while read pipestate = 0x%x\n",val); if(val & arg){ } else{ break; } __raw_writel(1,s3c_g3d_base+FGGB_INTMASK); timer --; } break; case GET_CONFIG: if (copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT))) { printk("G3D: copy_to_user failed to get g3d_config\n"); return -EFAULT; } break; case START_DMA_BLOCK: if (copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT))) { printk("G3D: copy_to_user failed to get dma_block\n"); return -EFAULT; } if (dma_block.offset%4!=0) { printk("G3D: dma offset is not aligned by word\n"); return -EINVAL; } if (dma_block.size%4!=0) { printk("G3D: dma size is not aligned by word\n"); return -EINVAL; } if (dma_block.offset+dma_block.size >g3d_config.dma_buffer_size) { printk("G3D: offset+size exceeds dam buffer\n"); return -EINVAL; } dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset; dma_info.len = dma_block.size; dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE; DEBUG(" dma src=0x%x\n", 
dma_info.src); DEBUG(" dma len =%u\n", dma_info.len); DEBUG(" dma dst = 0x%x\n", dma_info.dst); dma_3d_done = &complete; if (s3c2410_dma_request(DMACH_3D_M2M, &s3c6410_3d_dma_client, NULL)) { printk(KERN_WARNING "Unable to get DMA channel(DMACH_3D_M2M).\n"); return -EFAULT; } s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_g3d_dma_finish); s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, (u_long) dma_info.src); s3c2410_dma_config(DMACH_3D_M2M, 4, 4); s3c2410_dma_setflags(DMACH_3D_M2M, S3C2410_DMAF_AUTOSTART); //consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE); // s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len); s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) dma_info.dst, dma_info.len); // printk("wait for end of dma operation\n"); wait_for_completion(&complete); // printk("dma operation is performed\n"); s3c2410_dma_free(DMACH_3D_M2M, &s3c6410_3d_dma_client); break; case S3C_3D_MEM_ALLOC: mutex_lock(&mem_alloc_lock); if(copy_from_user(¶m, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){ mutex_unlock(&mem_alloc_lock); return -EFAULT; } flag = MEM_ALLOC; param.size = s3c_g3d_available_chunk_size(param.size,(unsigned int)file->private_data); if (param.size == 0){ printk("S3C_3D_MEM_ALLOC FAILED because there is no block memory bigger than you request\n"); flag = 0; mutex_unlock(&mem_alloc_lock); return -EFAULT; } param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0); DEBUG("param.vir_addr = %08x\n", param.vir_addr); if(param.vir_addr == -EINVAL) { printk("S3C_3D_MEM_ALLOC FAILED\n"); flag = 0; mutex_unlock(&mem_alloc_lock); return -EFAULT; } param.phy_addr = physical_address; // printk("alloc %d\n", param.size); DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr); if(copy_to_user((struct s3c_3d_mem_alloc *)arg, ¶m, sizeof(struct s3c_3d_mem_alloc))){ flag = 0; 
mutex_unlock(&mem_alloc_lock); return -EFAULT; } flag = 0; // printk("\n\n====Success the malloc from kernel=====\n"); mutex_unlock(&mem_alloc_lock); break; case S3C_3D_MEM_FREE: mutex_lock(&mem_free_lock); if(copy_from_user(¶m, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){ mutex_unlock(&mem_free_lock); return -EFAULT; } DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr); /* if (do_munmap(mm, param.vir_addr, param.size) < 0) { printk("do_munmap() failed !!\n"); mutex_unlock(&mem_free_lock); return -EINVAL; } */ s3c_g3d_release_chunk(param.phy_addr, param.size); //printk("KERNEL : virt_addr = 0x%X\n", virt_addr); //printk("free %d\n", param.size); param.size = 0; DEBUG("do_munmap() succeed !!\n"); if(copy_to_user((struct s3c_3d_mem_alloc *)arg, ¶m, sizeof(struct s3c_3d_mem_alloc))){ mutex_unlock(&mem_free_lock); return -EFAULT; } mutex_unlock(&mem_free_lock); break; case S3C_3D_SFR_LOCK: mutex_lock(&mem_sfr_lock); mutex_lock_processID = (unsigned int)file->private_data; DEBUG("s3c_g3d_ioctl() : You got a muxtex lock !!\n"); break; case S3C_3D_SFR_UNLOCK: mutex_lock_processID = 0; mutex_unlock(&mem_sfr_lock); DEBUG("s3c_g3d_ioctl() : The muxtex unlock called !!\n"); break; case S3C_3D_MEM_ALLOC_SHARE: mutex_lock(&mem_alloc_share_lock); if(copy_from_user(¶m, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){ mutex_unlock(&mem_alloc_share_lock); return -EFAULT; } flag = MEM_ALLOC_SHARE; physical_address = param.phy_addr; DEBUG("param.phy_addr = %08x\n", physical_address); param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0); DEBUG("param.vir_addr = %08x\n", param.vir_addr); if(param.vir_addr == -EINVAL) { printk("S3C_3D_MEM_ALLOC_SHARE FAILED\n"); flag = 0; mutex_unlock(&mem_alloc_share_lock); return -EFAULT; } DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, 
param.vir_addr); if(copy_to_user((struct s3c_3d_mem_alloc *)arg, ¶m, sizeof(struct s3c_3d_mem_alloc))){ flag = 0; mutex_unlock(&mem_alloc_share_lock); return -EFAULT; } flag = 0; mutex_unlock(&mem_alloc_share_lock); break; case S3C_3D_MEM_SHARE_FREE: mutex_lock(&mem_share_free_lock); if(copy_from_user(¶m, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){ mutex_unlock(&mem_share_free_lock); return -EFAULT; } DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr); if (do_munmap(mm, param.vir_addr, param.size) < 0) { printk("do_munmap() failed - MEM_SHARE_FREE!!\n"); mutex_unlock(&mem_share_free_lock); return -EINVAL; } param.vir_addr = 0; DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n"); if(copy_to_user((struct s3c_3d_mem_alloc *)arg, ¶m, sizeof(struct s3c_3d_mem_alloc))){ mutex_unlock(&mem_share_free_lock); return -EFAULT; } mutex_unlock(&mem_share_free_lock); break; case S3C_3D_CACHE_INVALID: mutex_lock(&cache_invalid_lock); if(copy_from_user(¶m, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){ printk("ERR: Invalid Cache Error\n"); mutex_unlock(&cache_invalid_lock); return -EFAULT; } dmac_inv_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size); mutex_unlock(&cache_invalid_lock); break; case S3C_3D_CACHE_CLEAN: mutex_lock(&cache_clean_lock); if(copy_from_user(¶m, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){ printk("ERR: Invalid Cache Error\n"); mutex_unlock(&cache_clean_lock); return -EFAULT; } dmac_clean_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size); mutex_unlock(&cache_clean_lock); break; case S3C_3D_CACHE_CLEAN_INVALID: mutex_lock(&cache_clean_invalid_lock); if(copy_from_user(¶m, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){ mutex_unlock(&cache_clean_invalid_lock); printk("ERR: Invalid Cache Error\n"); return -EFAULT; } dmac_flush_range((unsigned 
int) param.vir_addr,(unsigned int)param.vir_addr + param.size); mutex_unlock(&cache_clean_invalid_lock); break; case S3C_3D_POWER_INIT: if(copy_from_user(¶m_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){ printk("ERR: Invalid Cache Error\n"); return -EFAULT; } break; case S3C_3D_CRITICAL_SECTION: #ifdef USE_G3D_DOMAIN_GATING mutex_lock(&pm_critical_section_lock); if(copy_from_user(¶m_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){ printk("ERR: Invalid Cache Error\n"); mutex_unlock(&pm_critical_section_lock); return -EFAULT; } // param_pm.memStatus = check_memStatus((unsigned int)file->private_data); if(param_pm.criticalSection) g_G3D_CriticalFlag++; else g_G3D_CriticalFlag--; if(g_G3D_CriticalFlag==0) {/*kick power off*/ /*power off*/ /*kick timer*/ mod_timer(&g3d_pm_timer, jiffies + TIMER_INTERVAL); } else if(g_G3D_CriticalFlag>0) {/*kick power on*/ if(domain_off_check(S3C64XX_DOMAIN_G)) {/*if powered off*/ if(g_G3D_SelfPowerOFF) {/*powered off by 3D PM or by Resume*/ /*power on*/ s3c_set_normal_cfg(S3C64XX_DOMAIN_G, S3C64XX_ACTIVE_MODE, S3C64XX_3D); if(s3c_wait_blk_pwr_ready(S3C64XX_BLK_G)) { printk("[3D] s3c_wait_blk_pwr_ready err\n"); mutex_unlock(&pm_critical_section_lock); return -EFAULT; } clk_g3d_enable(); /*Need here??*/ softReset_g3d(); // printk("[3D] Power on\n"); } else { /*powered off by the system :: error*/ printk("Error on the system :: app tries to work during sleep\n"); mutex_unlock(&pm_critical_section_lock); return -EFAULT; } } else { /*already powered on : nothing to do*/ //g_G3D_SelfPowerOFF=0; } } else if(g_G3D_CriticalFlag < 0) { printk("Error on the system :: g_G3D_CriticalFlag < 0\n"); } // printk("S3C_3D_CRITICAL_SECTION: param_pm.criticalSection=%d\n",param_pm.criticalSection); if (copy_to_user((void *)arg,¶m_pm,sizeof(struct s3c_3d_pm_status))) { printk("G3D: copy_to_user failed to get s3c_3d_pm_status\n"); mutex_unlock(&pm_critical_section_lock); return -EFAULT; } 
mutex_unlock(&pm_critical_section_lock); #endif /* USE_G3D_DOMAIN_GATING */ break; default: DEBUG("s3c_g3d_ioctl() : default !!\n"); return -EINVAL; } return 0; }
/*
 * low_memory_killer() - reclaim 3D chunk memory from another client
 * (LMK_PRC_KILL-aware variant with a 64-bit allocation mask).
 *
 * NOTE(review): this file also contains a 4-argument low_memory_killer();
 * the two are presumably selected by a preprocessor conditional not visible
 * in this chunk — confirm, otherwise this is a duplicate definition.
 *
 * Without LMK_PRC_KILL: walks the allocation-info list (skipping the head,
 * which belongs to surfaceflinger) for a client whose uiAllocedMemMap
 * matches @uiMemMask shifted across the non-UI chunk window
 * [G3D_UI_CHUNK_NUM, G3D_CHUNK_NUM), then frees that victim's chunks.
 * With LMK_PRC_KILL: simply takes the first non-surfaceflinger client and
 * additionally sends it SIGTERM via kill_pid().
 *
 * @uiRequsetBlock: number of contiguous chunks the caller needs
 * @uiMemMask:      64-bit bitmask describing the requested contiguous run
 * @id:             file_desc_id of the requesting client (never killed)
 */
void low_memory_killer(unsigned int uiRequsetBlock, u64 uiMemMask,unsigned int id) { alloc_info *s_info = alloc_info_head; alloc_info *k_info = NULL; unsigned int kill_id; int loop_i; int chunk_start_num; int chunk_end_num; /* victim search is confined to the non-UI chunk range */ if(!s_info) { printk("surfaceflinger's memory not found\n"); return; } chunk_start_num = G3D_UI_CHUNK_NUM; chunk_end_num = G3D_CHUNK_NUM; s_info=s_info->next; // the first s_info is surfaceflinger; never a victim
#ifndef LMK_PRC_KILL
/* Scan each client for a contiguous run matching the shifted mask. */ while(s_info) { for(loop_i = chunk_start_num; loop_i < chunk_end_num - (uiRequsetBlock -1) ; loop_i++ ) { if ((s_info->uiAllocedMemMap & (uiMemMask << loop_i)) == (uiMemMask << loop_i)){ k_info=s_info; break; } } if(k_info) break; s_info=s_info->next; }
#else
/* Process-kill mode: victim is simply the oldest non-surfaceflinger client. */ k_info = s_info;
#endif
/* No victim, or the only candidate is the requester itself: bail out. */ if(!k_info || k_info->file_desc_id==id) {
#ifndef LMK_PRC_KILL
/* NOTE(review): tail-comparison is used as a proxy for "victim == self";
 * it misreports when k_info is NULL — diagnostic only, behavior kept. */ if(k_info==alloc_info_tail) printk("k_info==self\n"); else printk("k_info==NULL\n"); //printk("oldest 3D process's memory not found\n");
#else
if(!k_info) printk("k_info==NULL\n");
#endif
return; } kill_id = k_info->file_desc_id;
#ifdef LMK_PRC_KILL
/* Terminate the victim process before reclaiming its chunks. */ kill_pid(k_info->pId , SIGTERM, 1);
#endif
/* Release every reserved non-UI chunk still owned by the victim. */ for(loop_i = G3D_UI_CHUNK_NUM ; loop_i < G3D_CHUNK_NUM; loop_i++ ){ if((g3d_bootm[loop_i].file_desc_id) == (unsigned int)kill_id){ if (g3d_bootm[loop_i].in_used == G3D_CHUCNK_RESERVED){ s3c_g3d_release_chunk(g3d_bootm[loop_i].phy_addr, g3d_bootm[loop_i].size); } g3d_bootm[loop_i].file_desc_id = 0; } } }