static int shm_driver_release(struct inode *inode, struct file *filp) { shm_device_t *pDevice; pid_t taskid; struct shm_device_priv_data *priv_data = (struct shm_device_priv_data*)filp->private_data; if (NULL == priv_data) { shm_error("shm_driver_release private data is NULL\n"); return 0; } pDevice = priv_data->m_device; taskid = priv_data->m_taskid; /* free private data */ kfree(priv_data); filp->private_data = NULL; shm_debug("shm_driver_release for pid:%d\n",taskid); if (NULL == pDevice) { shm_error("shm_driver_release device NULL\n"); return 0; } /* once the fd is released, free any cache and non-cache memory still allocated to this task to avoid an shm leak */ if ((pDevice == shm_device) || (pDevice == shm_device_noncache)) { shm_device_release_by_taskid(pDevice, taskid); } shm_debug("after shm_driver_release OK for pid:%d\n",taskid); return 0; }
int memory_engine_show_stat(memory_engine_t *engine, struct seq_file *file) { int len = 0, i = 0; int res = 0; struct shm_stat_info *stat_info = &gshm_stat; shm_debug("memory_engine_show_stat enter. \n"); if ((NULL == engine) || (NULL == file)) return -EINVAL; memset(stat_info,0,sizeof(struct shm_stat_info)); res = memory_engine_get_stat(engine, stat_info); if (0 != res) { shm_error("memory_engine_show_stat fail to get stat info\n"); goto err_exit; } len += seq_printf(file,"total size : %d \nused : %d \ntask count : %d\n", stat_info->m_size,stat_info->m_used,stat_info->m_count); len += seq_printf(file," No | task id( name ) | oom_adj | alloc | use |\n"); len += seq_printf(file,"-----------------------------------------------------------------------\n"); for (i = 0 ; i < stat_info->m_count ; i++) len += seq_printf(file, " %3d | %8d(%16s) | %3d | %10d | %10d |\n", i, stat_info->m_nodes[i].m_taskid, stat_info->m_nodes[i].m_taskname, stat_info->m_nodes[i].m_oom_adj, stat_info->m_nodes[i].m_size_alloc, stat_info->m_nodes[i].m_size_use); shm_debug("memory_engine_show_stat OK.\n"); return len; err_exit: shm_error("memory_engine_show_stat fail\n"); return res; }
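/*
 * Illustrative sketch (not part of the original source): how a seq_file show
 * routine such as memory_engine_show_stat() is typically exposed through a
 * proc entry via single_open(). The wrapper names and the m_engine accessor
 * on the global cache device are assumptions; <linux/fs.h>, <linux/seq_file.h>
 * and <linux/proc_fs.h> are assumed to be included.
 */
static int shm_stat_proc_show(struct seq_file *file, void *v)
{
	/* hypothetical accessor: the engine owned by the global cache device */
	memory_engine_show_stat(shm_device->m_engine, file);
	return 0;
}

static int shm_stat_proc_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, shm_stat_proc_show, NULL);
}

static const struct file_operations shm_stat_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = shm_stat_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};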
int memory_engine_dump_stat(memory_engine_t *engine) { struct shm_stat_info *stat_info = NULL; int res = 0; int i = 0; shm_debug("memory_engine_dump_stat enter. \n"); if (NULL == engine) { shm_error("memory_engine_dump_stat invalid param\n"); return -EINVAL; } stat_info = kzalloc(sizeof(struct shm_stat_info),GFP_KERNEL); if (NULL == stat_info) { shm_error("memory_engine_dump_stat kzalloc fail\n"); return -ENOMEM; } res = memory_engine_get_stat(engine,stat_info); if (0 != res) { shm_error("memory_engine_dump_stat fail to get stat info\n"); goto err_exit; } printk("total size : %d \nused : %d \ntask count : %d\n", stat_info->m_size,stat_info->m_used,stat_info->m_count); printk(" No | task id( name ) | oom_adj | alloc | use |\n"); printk("-----------------------------------------------------------------------\n"); for (i = 0 ; i < stat_info->m_count ; i++) printk(" %3d | %8d(%16s) | %3d | %10d | %10d |\n", i, stat_info->m_nodes[i].m_taskid, stat_info->m_nodes[i].m_taskname, stat_info->m_nodes[i].m_oom_adj, stat_info->m_nodes[i].m_size_alloc, stat_info->m_nodes[i].m_size_use); shm_debug("memory_engine_dump_stat OK.\n"); if (NULL != stat_info) { kfree(stat_info); stat_info = NULL; } return 0; err_exit: if (NULL != stat_info) { kfree(stat_info); stat_info = NULL; } shm_error("memory_engine_dump_stat fail\n"); return res; }
int memory_engine_gothrough(memory_engine_t *engine, char *buffer, int count) { int len = 0, i = 0; memory_node_t *new_node; char tag_used[] = " Yes "; char tag_free[] = " "; char *ptr_tag; shm_debug("memory_engine_gothrough start. ( 0x%08X, %d)\n", (size_t)buffer, count); if ((engine == NULL) || (buffer == NULL) || (count <= 0)) return -EINVAL; len += sprintf(buffer + len, " No | Alloc | Node | Addr (Aligned) | Offset | Size (Aligned) | Align |\n"); len += sprintf(buffer + len, "---------------------------------------------------------------------------------------------------\n"); down(&(engine->m_mutex)); /* Walk all nodes until we have reached the root. (root.m_size == 0) */ for (new_node = engine->m_root.m_next; new_node->m_size != 0; new_node = new_node->m_next) { /* check buffer length to avoid buffer overflow */ if (len > count - 102) { shm_debug("memory_engine_gothrough buffer is full !!!\n"); goto done; } if (new_node->m_next_free != NULL) ptr_tag = tag_free; else ptr_tag = tag_used; len += sprintf(buffer + len, " %3d | %5s | 0x%08X | 0x%08X (0x%08X) | %8d | %9d (%9d) | %8d |\n", ++i, ptr_tag, (size_t)new_node, new_node->m_addr, MEMNODE_ALIGN_ADDR(new_node), (MEMNODE_ALIGN_ADDR(new_node) - engine->m_base), new_node->m_size, MEMNODE_ALIGN_SIZE(new_node), new_node->m_alignment); } done: up(&(engine->m_mutex)); shm_debug("memory_engine_gothrough OK. (node = %d, len = %d)\n", i, len); return len; }
static int read_proc_baseinfo(char *page, char **start, off_t offset, int count, int *eof, void *data) { int len = 0, res; MV_SHM_BaseInfo_t baseinfo; res = MV_SHM_GetBaseInfo(&baseinfo); if (res != 0) { shm_error("shm_device_get_baseinfo failed. (%d)\n", res); len += sprintf(page, "shm_device_get_baseinfo failed. (%d)\n", res); goto done; } /* !!!!! avoid buffer overflow !!! */ len += sprintf(page + len, "%20s : %10u %s\n", "memory size", baseinfo.m_size, "Bytes"); len += sprintf(page + len, "%20s : %10u %s\n", "threshold", baseinfo.m_threshold, "Bytes"); len += sprintf(page + len, "------------ physical address ------------\n"); len += sprintf(page + len, "%20s : 0x%08X\n", "cache physaddr", baseinfo.m_base_physaddr_cache); len += sprintf(page + len, "%20s : 0x%08X\n", "non-cache physaddr", baseinfo.m_base_physaddr_nocache); len += sprintf(page + len, "-------- kernel virtual address ----------\n"); len += sprintf(page + len, "%20s : 0x%08X\n", "cache virtaddr", baseinfo.m_base_virtaddr_cache); len += sprintf(page + len, "%20s : 0x%08X\n", "non-cache virtaddr", baseinfo.m_base_virtaddr_nocache); shm_debug("read_proc_baseinfo OK. (%d / %d)\n", len, count); done: *eof = 1; return ((count < len) ? count : len); }
static int read_proc_meminfo(char *page, char **start, off_t offset, int count, int *eof, void *data) { int len = 0, res; MV_SHM_MemInfo_t meminfo; res = shm_device_get_meminfo(shm_device, &meminfo); if (res != 0) { shm_error("shm_driver_read_proc_meminfo failed. (%d)\n", res); len += sprintf(page, "shm_driver_read_proc_meminfo failed. (%d)\n", res); goto done; } /* !!!!! avoid buffer overflow !!! */ len += sprintf(page + len, "%20s : %10u %s\n", "total mem", meminfo.m_totalmem, "Bytes"); len += sprintf(page + len, "%20s : %10u %s\n", "free mem", meminfo.m_freemem, "Bytes"); len += sprintf(page + len, "%20s : %10u %s\n", "used mem", meminfo.m_usedmem, "Bytes"); len += sprintf(page + len, "%20s : %10u %s\n", "peak used mem", meminfo.m_peak_usedmem, "Bytes"); /* len += sprintf(page + len, "%20s : %10u %s\n", "max free block", meminfo.m_max_freeblock, "Bytes"); len += sprintf(page + len, "%20s : %10u %s\n", "min free block", meminfo.m_min_freeblock, "Bytes"); len += sprintf(page + len, "%20s : %10u %s\n", "max used block", meminfo.m_max_usedblock, "Bytes"); len += sprintf(page + len, "%20s : %10u %s\n", "min used block", meminfo.m_min_usedblock, "Bytes"); */ len += sprintf(page + len, "%20s : %10u %s\n", "num free block", meminfo.m_num_freeblock, "Blocks"); len += sprintf(page + len, "%20s : %10u %s\n", "num used block", meminfo.m_num_usedblock, "Blocks"); shm_debug("read_proc_meminfo OK. (%d / %d)\n", len, count); done: *eof = 1; return ((count < len) ? count : len); }
static int shm_driver_mmap_cache(struct file *filp, struct vm_area_struct *vma) { unsigned long pfn, vsize; shm_device_t *pDevice; struct shm_device_priv_data *priv_data = (struct shm_device_priv_data*)filp->private_data; if (NULL == priv_data) { shm_error("shm_driver_mmap_cache NULL private data\n"); return -ENOTTY; } pDevice = (shm_device_t*)priv_data->m_device; if (NULL == pDevice) { shm_error("shm_driver_mmap_cache NULL shm device\n"); return -ENOTTY; } pfn = pDevice->m_base >> PAGE_SHIFT; vsize = vma->vm_end - vma->vm_start; shm_debug("shm_driver_mmap_cache size = 0x%08lX(0x%x, 0x%x), base:0x%x\n", vsize, shm_size_cache, pDevice->m_size, pDevice->m_base); if (vsize > shm_size_cache) return -EINVAL; vma->vm_pgoff = 0; /* skip offset */ if (remap_pfn_range(vma, vma->vm_start, pfn, vsize, vma->vm_page_prot)) return -EAGAIN; return 0; }
static int shm_driver_release(struct inode *inode, struct file *filp) { filp->private_data = NULL; shm_debug("shm_driver_release ok\n"); return 0; }
static int shm_driver_open (struct inode *inode, struct file *filp) { filp->private_data = shm_device; shm_debug("shm_driver_open ok\n"); return 0; }
static void __exit shm_driver_exit(void) { int res, i; task_free_unregister(&shm_task_nb); /* destroy shm kernel API */ res = MV_SHM_Exit(); if (res != 0) shm_error("MV_SHM_Exit failed !!!\n"); /* remove shm device proc file */ remove_proc_entry("meminfo", shm_driver_procdir); remove_proc_entry("baseinfo", shm_driver_procdir); remove_proc_entry("detail", shm_driver_procdir); remove_proc_entry("stat", shm_driver_procdir); remove_proc_entry(SHM_DEVICE_NAME, NULL); if (shm_device_destroy(&shm_device) != 0) shm_error("shm_device_destroy cache mem failed.\n"); if (shm_device_destroy(&shm_device_noncache) != 0) shm_error("shm_device_destroy non-cache mem failed.\n"); /* del sysfs entries */ for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { device_destroy(shm_dev_class, MKDEV(GALOIS_SHM_MAJOR, shm_driver_dev_list[i].minor)); shm_debug("delete device sysfs [%s]\n", shm_driver_dev_list[i].name); } class_destroy(shm_dev_class); /* del cdev */ for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { cdev_del(shm_driver_dev_list[i].cdev); shm_debug("delete cdevs device minor [%d]\n", shm_driver_dev_list[i].minor); } unregister_chrdev_region(MKDEV(GALOIS_SHM_MAJOR, 0), GALOIS_SHM_MINORS); shm_debug("unregister cdev device major [%d]\n", GALOIS_SHM_MAJOR); shm_trace("shm_driver_exit OK\n"); }
int memory_engine_takeover(memory_engine_t *engine,int alignaddr) { int res = 0; memory_node_t *node = NULL; struct task_struct *grptask = NULL; shm_debug("memory_engine_takeover start, (0x%08X)\n",alignaddr); if ((NULL == engine) || (0 == alignaddr)) return -EINVAL; down(&(engine->m_mutex)); /* find alignaddr */ res = _FindNode_alignaddr(engine, alignaddr, &node); if (0 != res) goto err_exit; /* if the node found is a free one, there could be invalid operations */ if (NULL != node->m_next_free) { shm_error("node(%#.8x) already freed\n",alignaddr); res = -EFAULT; goto err_exit; } /* change usrtaskid to current */ node->m_usrtaskid = task_tgid_vnr(current); grptask = pid_task(task_tgid(current),PIDTYPE_PID); if (NULL == grptask) memset(node->m_usrtaskname,0,16); else strncpy(node->m_usrtaskname,grptask->comm,16); up(&(engine->m_mutex)); shm_debug("memory_engine_takeover OK.\n"); return 0; err_exit: up(&(engine->m_mutex)); shm_error("memory_engine_takeover failed!!! (0x%08X)\n", alignaddr); return res; }
int _FindNode_alignaddr(memory_engine_t *engine, size_t alignaddr, memory_node_t **node) { memory_node_t *new_node; shm_debug("memory_engine_find_alignaddr start. (0x%08X)\n", alignaddr); /* Walk all nodes until we find one whose aligned address equals alignaddr, or we reach the root. (root.m_size == 0) */ for (new_node = engine->m_root.m_next; new_node->m_size != 0; new_node = new_node->m_next) { if (MEMNODE_ALIGN_ADDR(new_node) == alignaddr) { *node = new_node; shm_debug("memory_engine_find_alignaddr OK.\n"); return 0; } } *node = NULL; return -EFAULT; }
int memory_engine_giveup(memory_engine_t *engine, int alignaddr) { int res = 0; memory_node_t *node = NULL; shm_debug("memory_engine_giveup start, (0x%08X)\n",alignaddr); if ((NULL == engine) || (0 == alignaddr)) return -EINVAL; down(&(engine->m_mutex)); /* find the node by alignaddr */ res = _FindNode_alignaddr(engine, alignaddr, &node); if (0 != res) goto err_exit; /* if the node found is a free one, there could be invalid operations */ if ((NULL == node) || (NULL != node->m_next_free)) { shm_error("node(%#.8x) does not exist\n",alignaddr); res = -EFAULT; goto err_exit; } /* change usrtaskid to taskid */ node->m_usrtaskid = node->m_taskid; strncpy(node->m_usrtaskname,node->m_taskname,16); up(&(engine->m_mutex)); shm_debug("memory_engine_giveup OK.\n"); return 0; err_exit: up(&(engine->m_mutex)); shm_error("memory_engine_giveup failed!!! (0x%08X)\n", alignaddr); return res; }
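/*
 * Illustrative sketch (not from the original source) of the intended
 * takeover/giveup flow: the allocating task owns a block by default,
 * memory_engine_takeover() accounts it to the calling (consumer) task, and
 * memory_engine_giveup() hands the accounting back to the allocator. The
 * engine pointer is a placeholder; in the driver, takeover and giveup are
 * invoked from the consumer task's context.
 */
static void shm_ownership_example(memory_engine_t *engine)
{
	memory_node_t *node = NULL;
	int addr;

	/* producer: allocate 4 KB aligned to 32 bytes */
	if (memory_engine_allocate(engine, 4096, 32, &node) != 0)
		return;
	addr = MEMNODE_ALIGN_ADDR(node);

	/* consumer task: account the block's usage to itself */
	memory_engine_takeover(engine, addr);

	/* consumer done: return the accounting to the allocating task */
	memory_engine_giveup(engine, addr);

	memory_engine_free(engine, addr);
}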
int memory_engine_destroy(memory_engine_t **engine) { int res; memory_node_t *node, *next; shm_debug("memory_engine_destroy start.\n"); if (engine == NULL) { res = -EINVAL; goto err_del_engine; } /* Walk all the nodes until we reach the root. (root.m_size == 0) */ for (node = (*engine)->m_root.m_next; node->m_size != 0; node = next) { /* Save pointer to the next node. */ next = node->m_next; /* Free the node. */ kfree(node); } /* The semaphore needs no explicit cleanup. */ /* Free the engine object. */ kfree(*engine); *engine = NULL; shm_debug("memory_engine_destroy OK.\n"); return 0; err_del_engine: shm_error("memory_engine_destroy failed !!!\n"); return res; }
static int shm_driver_open_noncache(struct inode *inode, struct file *filp) { /* Save the device and the opening task id in private_data; later calls from user space operate on this specific device, since there are separate cache and non-cache devices. */ struct shm_device_priv_data *priv_data = kzalloc(sizeof(struct shm_device_priv_data), GFP_KERNEL); if (NULL == priv_data) { shm_error("shm_driver_open_noncache fail to allocate memory\n"); return -ENOMEM; } priv_data->m_taskid = task_tgid_vnr(current); priv_data->m_device = shm_device_noncache; filp->private_data = priv_data; shm_debug("shm_driver_open_noncache ok\n"); return 0; }
static int read_proc_detail(char *page, char **start, off_t offset, int count, int *eof, void *data) { int len = 0; len = shm_device_get_detail(shm_device, page, count); if (len <= 0) { shm_error("read_proc_detail failed. (%d)\n", len); len += sprintf(page, "read_proc_detail failed. (%d)\n", len); goto done; } shm_debug("read_proc_detail OK. (%d / %d)\n", len, count); done: *eof = 1; return ((count < len) ? count : len); }
int memory_engine_show(memory_engine_t *engine, struct seq_file *file) { int len = 0, i = 0; memory_node_t *new_node; char tag_used[] = " Yes "; char tag_free[] = " "; char *ptr_tag; if ((engine == NULL) || (file == NULL)) return -EINVAL; len += seq_printf(file, " No | Alloc | Node | Addr (Aligned) | Offset | Size (Aligned) | Align | thread id(name) \n"); len += seq_printf(file, "---------------------------------------------------------------------------------------------------\n"); down(&(engine->m_mutex)); /* Walk all nodes until we have reached the root. (root.m_size == 0) */ for (new_node = engine->m_root.m_next; new_node->m_size != 0; new_node = new_node->m_next) { if (new_node->m_next_free != NULL) ptr_tag = tag_free; else ptr_tag = tag_used; len += seq_printf(file, " %3d | %5s | 0x%08X | 0x%08X (0x%08X) | %8d | %9d (%9d) | %8d | 0x%08X(%s) \n", ++i, ptr_tag, (size_t)new_node, new_node->m_addr, MEMNODE_ALIGN_ADDR(new_node), (MEMNODE_ALIGN_ADDR(new_node) - engine->m_base), new_node->m_size, MEMNODE_ALIGN_SIZE(new_node), new_node->m_alignment, new_node->m_threadid, new_node->m_threadname); } up(&(engine->m_mutex)); shm_debug("memory_engine_show OK. (node = %d, len = %d)\n", i, len); return len; }
static int shm_driver_mmap_cache(struct file *filp, struct vm_area_struct *vma) { unsigned long pfn; unsigned long vsize; if (filp->private_data == NULL) return -ENOTTY; pfn = ((shm_device_t *)(filp->private_data))->m_base_cache >> PAGE_SHIFT; vsize = vma->vm_end - vma->vm_start; shm_debug("shm_driver_mmap_cache size = 0x%08lX\n", vsize); if (vsize > shm_size) return -EINVAL; vma->vm_pgoff = 0; /* skip offset */ if (remap_pfn_range(vma, vma->vm_start, pfn, vsize, vma->vm_page_prot)) return -EAGAIN; return 0; }
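/*
 * Illustrative user-space sketch (not part of the driver): mapping the shared
 * memory pool. The device path and UAPI header name are assumptions;
 * MV_SHM_BaseInfo_t, its m_size field and SHM_DEVICE_CMD_GET_BASEINFO come
 * from the driver code in this section. The requested length must not exceed
 * the pool size or shm_driver_mmap_cache() rejects it with -EINVAL, and the
 * driver ignores the file offset (vm_pgoff is forced to 0).
 */
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "shm_device.h"                 /* hypothetical header exporting the SHM UAPI */

void *shm_map_pool(size_t *mapped_size)
{
	MV_SHM_BaseInfo_t info;
	void *base;
	int fd = open("/dev/galois_shm", O_RDWR);   /* assumed device node name */

	if (fd < 0)
		return NULL;
	if (ioctl(fd, SHM_DEVICE_CMD_GET_BASEINFO, &info) != 0) {
		close(fd);
		return NULL;
	}
	base = mmap(NULL, info.m_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);                          /* the mapping stays valid after close */
	if (base == MAP_FAILED)
		return NULL;
	*mapped_size = info.m_size;
	return base;
}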
int memory_engine_get_stat(memory_engine_t *engine, struct shm_stat_info *stat) { memory_node_t *mem_node = NULL; struct shm_usr_node *usr_node = NULL; int i = 0; shm_debug("memory_engine_get_stat enter. \n"); if (NULL == engine || NULL == stat) return -EINVAL; down(&(engine->m_mutex)); stat->m_size = engine->m_size; stat->m_used = engine->m_size_used; for (mem_node = engine->m_root.m_next ; mem_node->m_size != 0 ; mem_node = mem_node->m_next) { pid_t taskid; pid_t usrtaskid; /* free node */ if (NULL != mem_node->m_next_free) continue; taskid = mem_node->m_taskid; usrtaskid = mem_node->m_usrtaskid; /* update allocated size */ for (i = 0 ; i < MAX_SHM_USR_NODE_NUM ; i++) { usr_node = &(stat->m_nodes[i]); /* find a usr_node with the same taskid */ if (taskid == usr_node->m_taskid) break; /* occupy a usr_node for taskid */ if (0 == usr_node->m_taskid) { stat->m_count++; usr_node->m_taskid = taskid; strncpy(usr_node->m_taskname,mem_node->m_taskname,16); usr_node->m_oom_adj = shm_find_oomadj_by_pid(usr_node->m_taskid); break; } } /* by default the total number of usr_node entries for shm users should not exceed MAX_SHM_USR_NODE_NUM */ if (i >= MAX_SHM_USR_NODE_NUM) { shm_error("%s,%d stat out of memory\n",__FUNCTION__,__LINE__); goto err_exit; } usr_node->m_size_alloc += mem_node->m_size; /* if taskid equals usrtaskid, take the shortcut */ if (taskid == usrtaskid) { usr_node->m_size_use += mem_node->m_size; continue; } usr_node = NULL; for (i = 0 ; i < MAX_SHM_USR_NODE_NUM ; i++) { usr_node = &(stat->m_nodes[i]); if (usrtaskid == usr_node->m_taskid) break; if (0 == usr_node->m_taskid) { stat->m_count++; usr_node->m_taskid = usrtaskid; strncpy(usr_node->m_taskname,mem_node->m_usrtaskname,16); usr_node->m_oom_adj = shm_find_oomadj_by_pid(usr_node->m_taskid); break; } } if (i >= MAX_SHM_USR_NODE_NUM ) { shm_error("%s,%d stat out of memory\n",__FUNCTION__,__LINE__); goto err_exit; } usr_node->m_size_use += mem_node->m_size; } up(&(engine->m_mutex)); shm_debug("memory_engine_get_stat OK.\n"); return 0; err_exit: up(&(engine->m_mutex)); shm_error("memory_engine_get_stat fail\n"); return -EINVAL; }
static int __init shm_driver_init(void) { int res, i; struct device_node *np; struct resource r; struct proc_dir_entry *pent = NULL; struct proc_dir_entry *pstat = NULL; np = of_find_compatible_node(NULL, NULL, "mrvl,berlin-shm"); if (!np) { res = -ENODEV; goto err_node; } res = of_address_to_resource(np, 0, &r); if (res) goto err_reg_device; shm_base_cache = r.start; shm_size_cache = resource_size(&r); res = of_address_to_resource(np, 1, &r); if (res) goto err_reg_device; shm_base_noncache = r.start; shm_size_noncache = resource_size(&r); of_node_put(np); np = NULL; /* avoid a double of_node_put in the error path */ /* Figure out our device number. */ res = register_chrdev_region(MKDEV(GALOIS_SHM_MAJOR, 0), GALOIS_SHM_MINORS, SHM_DEVICE_NAME); if (res < 0) { shm_error("unable to get shm device major [%d]\n", GALOIS_SHM_MAJOR); goto err_reg_device; } shm_debug("register cdev device major [%d]\n", GALOIS_SHM_MAJOR); /* Now setup cdevs. */ for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { res = shm_driver_setup_cdev(shm_driver_dev_list[i].cdev, GALOIS_SHM_MAJOR, shm_driver_dev_list[i].minor, shm_driver_dev_list[i].fops); if (res) { shm_error("shm_driver_setup_cdev failed in [%d].\n", i); goto err_add_device; } shm_debug("setup cdevs device minor [%d]\n", shm_driver_dev_list[i].minor); } /* add shm devices to sysfs */ shm_dev_class = class_create(THIS_MODULE, SHM_DEVICE_NAME); if (IS_ERR(shm_dev_class)) { shm_error("class_create failed.\n"); res = -ENODEV; goto err_add_device; } for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { device_create(shm_dev_class, NULL, MKDEV(GALOIS_SHM_MAJOR, shm_driver_dev_list[i].minor), NULL, shm_driver_dev_list[i].name); shm_debug("create device sysfs [%s]\n", shm_driver_dev_list[i].name); } /* create shm cache device */ res = shm_device_create(&shm_device, shm_base_cache, shm_size_cache, SHM_DEVICE_THRESHOLD); if (res != 0) { shm_error("shm_device_create failed.\n"); goto err_add_device; } /* init shrinker */ shm_device->m_shrinker = shm_lowmem_shrink_killer; /* create shm non-cache device */ res = shm_device_create(&shm_device_noncache, shm_base_noncache, shm_size_noncache, SHM_DEVICE_THRESHOLD); if (res != 0) { shm_error("shm_device_create failed.\n"); goto err_add_device; } /* init shrinker */ shm_device_noncache->m_shrinker = NULL; /* create the shm kernel API; it needs mappings for both the non-cache and cache devices */ res = MV_SHM_Init(shm_device_noncache, shm_device); if (res != 0) { shm_error("MV_SHM_Init failed !!!\n"); goto err_SHM_Init; } /* create shm device proc file */ shm_driver_procdir = proc_mkdir(SHM_DEVICE_NAME, NULL); if (!shm_driver_procdir) { shm_error("Failed to mkdir /proc/%s\n", SHM_DEVICE_NAME); return 0; } proc_create("meminfo", 0, shm_driver_procdir, &meminfo_proc_fops); proc_create("baseinfo", 0, shm_driver_procdir, &baseinfo_proc_fops); pent = create_proc_entry("detail", 0, shm_driver_procdir); if (pent) pent->proc_fops = &detail_proc_ops; pstat = create_proc_entry("stat", 0, shm_driver_procdir); if (pstat) pstat->proc_fops = &shm_stat_file_ops; task_free_register(&shm_task_nb); shm_trace("shm_driver_init OK\n"); return 0; err_SHM_Init: shm_trace("shm_driver_init Undo ...\n"); shm_device_destroy(&shm_device); shm_device_destroy(&shm_device_noncache); /* del sysfs entries */ for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { device_destroy(shm_dev_class, MKDEV(GALOIS_SHM_MAJOR, shm_driver_dev_list[i].minor)); shm_debug("delete device sysfs [%s]\n", shm_driver_dev_list[i].name); } class_destroy(shm_dev_class); err_add_device: for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { cdev_del(shm_driver_dev_list[i].cdev); } unregister_chrdev_region(MKDEV(GALOIS_SHM_MAJOR, 0), GALOIS_SHM_MINORS); err_reg_device: of_node_put(np); err_node: shm_trace("shm_driver_init failed !!! (%d)\n", res); return res; }
static long shm_driver_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int res = 0; shm_driver_operation_t op; shm_device_t *shm_dev; MV_SHM_MemInfo_t meminfo; MV_SHM_BaseInfo_t baseinfo; struct shm_device_priv_data *priv_data = (struct shm_device_priv_data*)filp->private_data; if (NULL == priv_data) { shm_error("shm_driver_ioctl NULL private data\n"); return -ENOTTY; } shm_dev = (shm_device_t*)priv_data->m_device; if (NULL == shm_dev) { shm_error("shm_driver_ioctl NULL shm device\n"); return -ENOTTY; } shm_debug("shm_driver_ioctl cmd = 0x%08x, base:0x%08X size:0x%08X\n", cmd, shm_dev->m_base, shm_dev->m_size); switch (cmd) { case SHM_DEVICE_CMD_GET_MEMINFO: { res = shm_device_get_meminfo(shm_dev, &meminfo); if (res == 0) res = copy_to_user((void __user *)arg, &meminfo, sizeof(meminfo)); break; } case SHM_DEVICE_CMD_GET_DEVINFO: { res = shm_device_get_baseinfo(shm_dev, &baseinfo); if (res == 0) res = copy_to_user((void __user *)arg, &baseinfo, sizeof(baseinfo)); break; } case SHM_DEVICE_CMD_ALLOCATE: { res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if (res != 0) break; op.m_param1 = shm_device_allocate(shm_dev, op.m_param1, op.m_param2); res = copy_to_user((void __user *)arg, &op, sizeof(op)); break; } case SHM_DEVICE_CMD_FREE: { res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if (res != 0) break; op.m_param1 = shm_device_free(shm_dev, op.m_param1); res = copy_to_user((void __user *)arg, &op, sizeof(op)); break; } case SHM_DEVICE_CMD_INVALIDATE: case SHM_DEVICE_CMD_CLEAN: case SHM_DEVICE_CMD_CLEANANDINVALIDATE: { res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if (res == 0) { res = shm_device_cache(shm_dev, cmd, op); } break; } default: res = -ENOTTY; } shm_debug("shm_driver_ioctl res = %d\n", res); return res; }
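/*
 * Illustrative user-space sketch (not part of the driver): allocating and
 * freeing a block through the ioctl interface above. The device path and
 * UAPI header name are assumptions; shm_driver_operation_t with its
 * m_param1/m_param2 fields and the SHM_DEVICE_CMD_* codes appear in the
 * driver code. On return from SHM_DEVICE_CMD_ALLOCATE, m_param1 carries the
 * value reported by shm_device_allocate() (the aligned address on success).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "shm_device.h"                 /* hypothetical header exporting the SHM UAPI */

int shm_alloc_free_example(void)
{
	shm_driver_operation_t op;
	int fd = open("/dev/shm_cache", O_RDWR);    /* assumed cache device node */

	if (fd < 0)
		return -1;
	op.m_param1 = 4096;                 /* requested size in bytes */
	op.m_param2 = 32;                   /* requested alignment */
	if (ioctl(fd, SHM_DEVICE_CMD_ALLOCATE, &op) == 0)
		printf("allocated block at 0x%08x\n", op.m_param1);
	/* m_param1 now holds the aligned address; hand it back to the driver */
	ioctl(fd, SHM_DEVICE_CMD_FREE, &op);
	close(fd);
	return 0;
}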
static int shm_driver_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { shm_driver_operation_t op; int res = 0; shm_debug("shm_driver_ioctl cmd = 0x%08x\n", cmd); switch(cmd) { case SHM_DEVICE_CMD_GET_MEMINFO: { MV_SHM_MemInfo_t meminfo; res = shm_device_get_meminfo(shm_device, &meminfo); if (res == 0) res = copy_to_user((void __user *)arg, &meminfo, sizeof(meminfo)); break; } case SHM_DEVICE_CMD_GET_BASEINFO: { MV_SHM_BaseInfo_t baseinfo; res = shm_device_get_baseinfo(shm_device, &baseinfo); if (res == 0) res = copy_to_user((void __user *)arg, &baseinfo, sizeof(baseinfo)); break; } case SHM_DEVICE_CMD_ALLOCATE: res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if (res != 0) break; op.m_param1 = shm_device_allocate(shm_device, op.m_param1, op.m_param2); res = copy_to_user((void __user *)arg, &op, sizeof(op)); break; case SHM_DEVICE_CMD_FREE: res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if (res != 0) break; res = shm_device_free(shm_device, op.m_param1); break; case SHM_DEVICE_CMD_INVALIDATE: res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if ( res == 0) res = shm_device_cache_invalidate((void *)op.m_param1, op.m_param2); break; case SHM_DEVICE_CMD_CLEAN: res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if ( res == 0) res = shm_device_cache_clean((void *)op.m_param1, op.m_param2); break; case SHM_DEVICE_CMD_CLEANANDINVALIDATE: res = copy_from_user(&op, (int __user *)arg, sizeof(op)); if ( res == 0) res = shm_device_cache_clean_and_invalidate((void *)op.m_param1, op.m_param2); break; default: res = -ENOTTY; } shm_debug("shm_driver_ioctl res = %d\n", res); return res; }
int memory_engine_create(memory_engine_t **engine, size_t base, size_t size, size_t threshold) { int res; memory_node_t * new_node; memory_engine_t *new_engine; shm_debug("memory_engine_create start. (0x%08X, %u, %u) \n", base, size, threshold); if (engine == NULL) { res = -EINVAL; goto err_add_engine; } new_engine = kmalloc(sizeof(memory_engine_t), GFP_KERNEL); if (new_engine == NULL) { res = -ENOMEM; goto err_add_engine; } memset(new_engine, 0 , sizeof(memory_engine_t)); new_engine->m_base = base; new_engine->m_size = new_engine->m_size_free = size; new_engine->m_size_used = 0; new_engine->m_threshold = threshold; new_engine->m_peak_usedmem = new_engine->m_max_usedblock = new_engine->m_min_usedblock = new_engine->m_size_used; new_engine->m_max_freeblock = new_engine->m_min_freeblock = new_engine->m_size_free; new_engine->m_num_freeblock = 1; new_engine->m_num_usedblock = 0; /* Allocate a new node object */ new_node = kmalloc(sizeof(memory_node_t), GFP_KERNEL); if (new_node == NULL) { res = -ENOMEM; goto err_add_node; } memset(new_node, 0, sizeof(memory_node_t)); new_node->m_next = new_node->m_prev = new_node->m_next_free = new_node->m_prev_free = &(new_engine->m_root); new_node->m_addr = new_engine->m_base; new_node->m_size = new_engine->m_size_free; new_node->m_alignment = 0; new_node->m_offset = 0; /* Initialize the linked list of nodes. */ new_engine->m_root.m_next = new_engine->m_root.m_prev = new_engine->m_root.m_next_free = new_engine->m_root.m_prev_free = new_node; new_engine->m_root.m_size = 0; new_engine->m_shm_root = RB_ROOT; /* Initialize the semaphore, come up in unlocked state. */ sema_init(&(new_engine->m_mutex), 1); *engine = new_engine; shm_debug("memory_engine_create OK.\n"); return 0; err_add_node: kfree(new_engine); err_add_engine: shm_error("memory_engine_create failed !!! (0x%08X, %u, %u) \n", base, size, threshold); return res; }
/* release memory allocated by process pid */ int memory_engine_release_by_taskid(memory_engine_t *engine, pid_t taskid) { size_t alignaddr = 0 ; memory_node_t *new_node = NULL; int rlsCnt = 0 ; int idx = 0; int totalSize = 0 ; int *release_array = NULL; int static_release_array[64] = {0}; int *dynamic_release_array = NULL; if (NULL == engine) return -EINVAL; release_array = static_release_array; /* Walk all nodes until we have reached the root. (root.m_size == 0) */ down(&(engine->m_mutex)); for (new_node = engine->m_root.m_next ; new_node->m_size != 0 ; new_node = new_node->m_next) { if (NULL != new_node->m_next_free) continue;/* free node */ if (taskid == new_node->m_taskid) { release_array[rlsCnt++] = MEMNODE_ALIGN_ADDR(new_node); totalSize += new_node->m_size; } else continue; /* For berlin_avservice the shm node count can exceed 64, so switch to a dynamically allocated array */ if (64 == rlsCnt) { printk("%s,%d memory node count for pid(%d) exceeds 64\n", __FUNCTION__,__LINE__,taskid); dynamic_release_array = kzalloc(1024 * sizeof(int), GFP_KERNEL); if (NULL == dynamic_release_array) { shm_error("%s,%d fail to allocate memory for dynamic_release_array\n", __FUNCTION__,__LINE__); break; } else { memcpy(dynamic_release_array, release_array, rlsCnt * sizeof(int)); release_array = dynamic_release_array; } } /* in practice the number of shm memory nodes will not exceed 1024 */ if (rlsCnt >= 1024) { shm_error("%s,%d, unreleased node count exceeds the 1024 max, ignoring the rest!!!\n",__FUNCTION__,__LINE__); break; } } up(&(engine->m_mutex)); for (idx = 0 ; idx < rlsCnt ; idx++) { alignaddr = release_array[idx]; if (0 != memory_engine_free(engine,alignaddr)) { shm_error("%s,%d fail to release shm alignaddr %#.8x for taskid:%d\n", __FUNCTION__, __LINE__, alignaddr, taskid); } } if (NULL != dynamic_release_array) kfree(dynamic_release_array); shm_debug("%s,%d released %d shm blocks (total size %d) for taskid %d\n", __FUNCTION__,__LINE__,rlsCnt,totalSize,taskid); return 0; }
static int __init shm_driver_init(void) { int res, i; /* Figure out our device number. */ res = register_chrdev_region(MKDEV(GALOIS_SHM_MAJOR, 0), GALOIS_SHM_MINORS, SHM_DEVICE_NAME); if (res < 0) { shm_error("unable to get shm device major [%d]\n", GALOIS_SHM_MAJOR); goto err_reg_device; } shm_debug("register cdev device major [%d]\n", GALOIS_SHM_MAJOR); /* Now setup cdevs. */ for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { res = shm_driver_setup_cdev(shm_driver_dev_list[i].cdev, GALOIS_SHM_MAJOR, shm_driver_dev_list[i].minor, shm_driver_dev_list[i].fops); if (res) { shm_error("shm_driver_setup_cdev failed in [%d].\n", i); goto err_add_device; } shm_debug("setup cdevs device minor [%d]\n", shm_driver_dev_list[i].minor); } /* add shm devices to sysfs */ shm_dev_class = class_create(THIS_MODULE, SHM_DEVICE_NAME); if (IS_ERR(shm_dev_class)) { shm_error("class_create failed.\n"); res = -ENODEV; goto err_add_device; } for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { device_create(shm_dev_class, NULL, MKDEV(GALOIS_SHM_MAJOR, shm_driver_dev_list[i].minor), NULL, shm_driver_dev_list[i].name); shm_debug("create device sysfs [%s]\n", shm_driver_dev_list[i].name); } /* create shm device*/ res = shm_device_create(&shm_device, shm_base, shm_size, SHM_DEVICE_THRESHOLD); if (res != 0) { shm_error("shm_device_create failed.\n"); goto err_add_device; } /* create shm kernel API */ res = MV_SHM_Init(shm_device); if (res != 0) { shm_error("MV_SHM_Init failed !!!\n"); goto err_SHM_Init; } /* create shm device proc file*/ shm_driver_procdir = proc_mkdir(SHM_DEVICE_NAME, NULL); shm_driver_procdir->owner = THIS_MODULE; create_proc_read_entry(SHM_DEVICE_PROCFILE_MEMINFO, 0, shm_driver_procdir, read_proc_meminfo, NULL); create_proc_read_entry(SHM_DEVICE_PROCFILE_BASEINFO, 0, shm_driver_procdir, read_proc_baseinfo, NULL); create_proc_read_entry(SHM_DEVICE_PROCFILE_DETAIL, 0, shm_driver_procdir, read_proc_detail, NULL); shm_trace("shm_driver_init OK\n"); return 0; err_SHM_Init: shm_trace("shm_driver_init Undo ...\n"); shm_device_destroy(&shm_device); /* del sysfs entries */ for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { device_destroy(shm_dev_class, MKDEV(GALOIS_SHM_MAJOR, shm_driver_dev_list[i].minor)); shm_debug("delete device sysfs [%s]\n", shm_driver_dev_list[i].name); } class_destroy(shm_dev_class); err_add_device: for (i = 0; i < ARRAY_SIZE(shm_driver_dev_list); i++) { cdev_del(shm_driver_dev_list[i].cdev); } unregister_chrdev_region(MKDEV(GALOIS_SHM_MAJOR, 0), GALOIS_SHM_MINORS); err_reg_device: shm_trace("shm_driver_init failed !!! (%d)\n", res); return res; }
int memory_engine_allocate(memory_engine_t *engine, size_t size, size_t alignment, memory_node_t **node) { int res = 0; memory_node_t *new_node = NULL; struct task_struct *grptask = NULL; shm_debug("memory_engine_allocate start. (%d, %d)\n", size, alignment); if ((engine == NULL) || (node == NULL)) return -EINVAL;
#ifdef SHM_GUARD_BYTES_ENABLE
/* add guard bytes */ if (engine->m_cache_or_noncache == SHM_CACHE){ size += SHM_GUARD_BYTES; }
#endif
down(&(engine->m_mutex)); if (size > engine->m_size_free) { shm_error("heap does not have enough memory: (%u) bytes free for a request of (%u) bytes\n", engine->m_size_free, size); res = -ENOMEM; goto err_exit; } /* Find a free node in heap */ new_node = _FindNode_size(engine, size, alignment); if (new_node == NULL) { memory_node_t *pLastNode = NULL; pLastNode = engine->m_root.m_prev_free; if (pLastNode) shm_error("heap does not have enough linear memory for (%u) bytes, free blocks:%u (max free block:%u)\n", size, engine->m_num_freeblock, pLastNode->m_size); else shm_error("heap does not have enough linear memory, no free blocks!!!\n"); res = -ENOMEM; goto err_exit; } /* Do we have enough memory after the allocation to split it? */ if (MEMNODE_ALIGN_SIZE(new_node) - size > engine->m_threshold) _Split(engine, new_node, size + new_node->m_offset);/* Adjust the node size. */ else engine->m_num_freeblock--; engine->m_num_usedblock++; /* Remove the node from the free list. */ new_node->m_prev_free->m_next_free = new_node->m_next_free; new_node->m_next_free->m_prev_free = new_node->m_prev_free; new_node->m_next_free = new_node->m_prev_free = NULL; /* Fill in the information. */ new_node->m_alignment = alignment; /* record pid/thread name in node info, for debug usage */ new_node->m_threadid = task_pid_vnr(current);/*(current)->pid;*/ /* qzhang@marvell: record the creating task id and the user task id; by default the user task id is the creating task id until memory_engine_takeover is invoked */ new_node->m_taskid = new_node->m_usrtaskid= task_tgid_vnr(current); strncpy(new_node->m_threadname, current->comm, 16); grptask = pid_task(task_tgid(current),PIDTYPE_PID); if (NULL != grptask) { strncpy(new_node->m_taskname,grptask->comm,16); strncpy(new_node->m_usrtaskname,grptask->comm,16); } else { memset(new_node->m_taskname,0,16); memset(new_node->m_usrtaskname,0,16); } new_node->m_phyaddress = MEMNODE_ALIGN_ADDR(new_node); memory_engine_insert_shm_node(&(engine->m_shm_root), new_node); /* Adjust the number of free bytes. */ engine->m_size_free -= new_node->m_size; engine->m_size_used += new_node->m_size; engine->m_peak_usedmem = max(engine->m_peak_usedmem, engine->m_size_used); /* Return the pointer to the node. */ *node = new_node;
#ifdef SHM_GUARD_BYTES_ENABLE
/* fill guard bytes with SHM_GUARD_DATA */ if (engine->m_cache_or_noncache == SHM_CACHE) { memset((void *)(MEMNODE_ALIGN_ADDR(new_node)- engine->m_base + engine->m_virt_base + MEMNODE_ALIGN_SIZE(new_node) - SHM_GUARD_BYTES), SHM_GUARD_DATA, SHM_GUARD_BYTES); }
#endif
up(&(engine->m_mutex)); shm_debug("Allocated %u (%u) bytes @ 0x%08X (0x%08X) for align (%u)\n", MEMNODE_ALIGN_SIZE(new_node), new_node->m_size, MEMNODE_ALIGN_ADDR(new_node), new_node->m_addr, new_node->m_alignment); shm_debug("memory_engine_allocate OK.\n"); return 0; err_exit: up(&(engine->m_mutex)); *node = NULL; shm_error("memory_engine_allocate failed !!! (%d, %d) (%d %s)\n", size, alignment, current->pid, current->comm); return res; }
int memory_engine_free(memory_engine_t *engine, int alignaddr) { int res = 0;
#ifdef SHM_GUARD_BYTES_ENABLE
int flag = 0;
#endif
memory_node_t *new_node, *node; shm_debug("memory_engine_free start. (0x%08X)\n", alignaddr); if ((engine == NULL) || (alignaddr == 0)) return -EINVAL; down(&(engine->m_mutex)); /* find alignaddr */ node = memory_engine_lookup_shm_node(&(engine->m_shm_root), alignaddr); if (node == NULL) { printk("memory_engine_lookup_shm_node Error alignaddr[%x]\n", alignaddr); res = -EFAULT; goto err_exit; } memory_engine_delete_shm_node(&(engine->m_shm_root), node); /* if the node to be freed is a free one, there could be invalid operations */ if (node->m_next_free != NULL) { res = -EFAULT; goto err_exit; }
#ifdef SHM_GUARD_BYTES_ENABLE
/* check the guard bytes */ if (engine->m_cache_or_noncache == SHM_CACHE) { if (_Check_guard_data(engine, node) != 0) { _Check_guard_data_all_node(engine); flag = -1; } }
#endif
/* clean node */ node->m_offset = 0; node->m_alignment = 0; /* Update the number of free bytes. */ engine->m_size_free += node->m_size; engine->m_size_used -= node->m_size; /* Find the next free node (go through the node list, find the first node which is free). */ for (new_node = node->m_next; new_node->m_next_free == NULL; new_node = new_node->m_next) ; /* Insert this node in the free list. */ node->m_next_free = new_node; node->m_prev_free = new_node->m_prev_free; node->m_prev_free->m_next_free = new_node->m_prev_free = node; engine->m_num_usedblock--; engine->m_num_freeblock++; /* Is the next node a free node and not the root? */ if ((node->m_next == node->m_next_free) && (node->m_next->m_size != 0)) { /* Merge this node with the next node. */ new_node = node; res = _Merge(engine, new_node); if((new_node->m_next_free == new_node) || (new_node->m_prev_free == new_node) || (res != 0)) { /* Error. */ shm_error("_Merge next node failed.\n"); goto err_exit; } engine->m_num_freeblock--; shm_debug("_Merge next node OK.\n"); } /* Is the previous node a free node and not the root? */ if ((node->m_prev == node->m_prev_free) && (node->m_prev->m_size != 0)) { /* Merge this node with the previous node. */ new_node = node->m_prev; res = _Merge(engine, new_node); if((new_node->m_next_free == new_node) || (new_node->m_prev_free == new_node) || (res != 0)) { /* Error. */ shm_error("_Merge previous node failed.\n"); goto err_exit; } engine->m_num_freeblock--; shm_debug("_Merge previous node OK.\n"); } up(&(engine->m_mutex)); shm_debug("memory_engine_free OK.\n");
#ifdef SHM_GUARD_BYTES_ENABLE
if (flag != 0) return flag;
#endif
return 0; err_exit: up(&(engine->m_mutex)); if (shm_lowmem_debug_level > 2) { shm_error("memory_engine_free failed !!! (0x%08X)\n", alignaddr); dump_stack(); } return res; }
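/*
 * Illustrative kernel-side sketch (not from the original source) tying the
 * memory_engine API together: create an engine over a physical range,
 * allocate a block, dump the per-task statistics, then free and destroy.
 * The base address, size and threshold values are placeholders.
 */
static int shm_engine_selftest(void)
{
	memory_engine_t *engine = NULL;
	memory_node_t *node = NULL;
	int res;

	/* placeholder range: 16 MB at 0x20000000, split threshold 4 KB */
	res = memory_engine_create(&engine, 0x20000000, 16 * 1024 * 1024, 4096);
	if (res != 0)
		return res;

	res = memory_engine_allocate(engine, 8192, 64, &node);
	if (res == 0) {
		memory_engine_dump_stat(engine);            /* print the per-task table */
		memory_engine_free(engine, MEMNODE_ALIGN_ADDR(node));
	}

	memory_engine_destroy(&engine);
	return res;
}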