static memory_node_t *memory_engine_lookup_shm_node_for_cache( struct rb_root *shm_root, const uint phyaddress, const uint size) { struct rb_node *n = shm_root->rb_node; memory_node_t *tmp_node; while (n) { tmp_node = rb_entry(n, memory_node_t, __rb_node); if (phyaddress < tmp_node->m_phyaddress) n = n->rb_left; else if (phyaddress > tmp_node->m_phyaddress) if ((phyaddress + size) <= (MEMNODE_ALIGN_ADDR(tmp_node) + MEMNODE_ALIGN_SIZE(tmp_node))) { return tmp_node; } else { n = n->rb_right; } else { if (size <= MEMNODE_ALIGN_SIZE(tmp_node)) { return tmp_node; } else { return NULL; } } } return NULL; }
/*
 * Verify the SHM_GUARD_BYTES trailing guard bytes of an allocated node.
 *
 * Returns 0 when every guard byte still holds SHM_GUARD_DATA, -1 on the
 * first corrupted byte (after dumping diagnostics via printk).
 */
static int _Check_guard_data(memory_engine_t *engine, memory_node_t *node)
{
	int i = 0;
	int res = 0;
	/* Translate the node's physical address into the engine's virtual
	 * mapping (phys - m_base + m_virt_base), then seek to the last
	 * SHM_GUARD_BYTES of the aligned block, where the guard pattern
	 * was written at allocation time. */
	unsigned char* check_addr = (unsigned char*)
		(MEMNODE_ALIGN_ADDR(node) - engine->m_base +
		 engine->m_virt_base + MEMNODE_ALIGN_SIZE(node) -
		 SHM_GUARD_BYTES);

	for (i = 0; i < SHM_GUARD_BYTES; i++) {
		if (check_addr[i] != SHM_GUARD_DATA) {
			printk("SHM _Check_guard_data Error\n");
			printk("|Addr 0x%08X|Offset 0x%8x|Size 0x%9x|Align %d"
				"|threadid 0x%d(%s)|\n",
				MEMNODE_ALIGN_ADDR(node),
				(MEMNODE_ALIGN_ADDR(node) - engine->m_base),
				MEMNODE_ALIGN_SIZE(node),
				node->m_alignment,
				node->m_threadid,
				node->m_threadname);
			/* Dump the whole guard area 8 bytes per line.
			 * Reusing the outer loop index `i` is safe here
			 * because we break out immediately afterwards.
			 * NOTE(review): assumes SHM_GUARD_BYTES is a
			 * multiple of 8 — confirm against its definition. */
			for(i = 0; i < SHM_GUARD_BYTES; i += 8)
				printk("%02x %02x %02x %02x %02x "
					"%02x %02x %02x\n",
					check_addr[i+0], check_addr[i+1],
					check_addr[i+2], check_addr[i+3],
					check_addr[i+4], check_addr[i+5],
					check_addr[i+6], check_addr[i+7]);
			res = -1;
			break;
		}
	}
	return res;
}
/*
 * Render the full node chain of the engine into `buffer` as a table
 * (one row per node: index, alloc flag, node pointer, address, offset,
 * sizes, alignment).
 *
 * Returns the number of bytes written, or -EINVAL on bad arguments.
 * Stops early (without error) when fewer than ~102 bytes remain in the
 * caller-supplied buffer, to avoid overflowing it with the next row.
 */
int memory_engine_gothrough(memory_engine_t *engine, char *buffer, int count)
{
	memory_node_t *node;
	int written = 0;
	int idx = 0;
	char tag_used[] = " Yes ";
	char tag_free[] = " ";

	shm_debug("memory_engine_gothrough start. ( 0x%08X, %d)\n",
		(size_t)buffer, count);

	if ((engine == NULL) || (buffer == NULL) || (count <= 0))
		return -EINVAL;

	written += sprintf(buffer + written, " No | Alloc | Node | Addr (Aligned) | Offset | Size (Aligned) | Align |\n");
	written += sprintf(buffer + written, "---------------------------------------------------------------------------------------------------\n");

	down(&(engine->m_mutex));

	/* Walk the chain; the root node terminates it with m_size == 0. */
	for (node = engine->m_root.m_next; node->m_size != 0;
			node = node->m_next) {
		char *tag;

		/* Bail out before the next row could overflow the buffer. */
		if (written > count - 102) {
			shm_debug("memory_engine_gothrough buffer is full !!!\n");
			goto done;
		}

		/* A non-NULL m_next_free means the node sits on the free
		 * list, i.e. it is not allocated. */
		tag = (node->m_next_free != NULL) ? tag_free : tag_used;

		written += sprintf(buffer + written,
			" %3d | %5s | 0x%08X | 0x%08X (0x%08X) | %8d | %9d (%9d) | %8d |\n",
			++idx, tag, (size_t)node, node->m_addr,
			MEMNODE_ALIGN_ADDR(node),
			(MEMNODE_ALIGN_ADDR(node) - engine->m_base),
			node->m_size, MEMNODE_ALIGN_SIZE(node),
			node->m_alignment);
	}

done:
	up(&(engine->m_mutex));
	shm_debug("memory_engine_gothrough OK. (node = %d, len = %d)\n",
		idx, written);
	return written;
}
/*
 * seq_file variant of the node-chain dump: prints one row per node,
 * including the allocating thread id/name, into `file`.
 *
 * Returns the accumulated seq_printf results, or -EINVAL on bad args.
 *
 * NOTE(review): seq_printf historically returns 0 on success / -1 on
 * overflow (and void on recent kernels), not a byte count, so `len`
 * does not measure output length — verify what callers expect from the
 * return value before changing it.
 *
 * Fix: the final debug message named memory_engine_gothrough (copy and
 * paste from that function); it now names this function.
 */
int memory_engine_show(memory_engine_t *engine, struct seq_file *file)
{
	int len = 0, i = 0;
	memory_node_t *new_node;
	char tag_used[] = " Yes ";
	char tag_free[] = " ";
	char *ptr_tag;

	if ((engine == NULL) || (file == NULL))
		return -EINVAL;

	len += seq_printf(file, " No | Alloc | Node | Addr (Aligned) | Offset | Size (Aligned) | Align | thread id(name) \n");
	len += seq_printf(file, "---------------------------------------------------------------------------------------------------\n");

	down(&(engine->m_mutex));

	/* Walk all nodes until we have reached the root. (root.m_size == 0) */
	for (new_node = engine->m_root.m_next; new_node->m_size != 0;
			new_node = new_node->m_next) {
		/* On the free list => not allocated. */
		if (new_node->m_next_free != NULL)
			ptr_tag = tag_free;
		else
			ptr_tag = tag_used;

		len += seq_printf(file,
			" %3d | %5s | 0x%08X | 0x%08X (0x%08X) | %8d | %9d (%9d) | %8d | 0x%08X(%s) \n",
			++i, ptr_tag, (size_t)new_node, new_node->m_addr,
			MEMNODE_ALIGN_ADDR(new_node),
			(MEMNODE_ALIGN_ADDR(new_node) - engine->m_base),
			new_node->m_size, MEMNODE_ALIGN_SIZE(new_node),
			new_node->m_alignment, new_node->m_threadid,
			new_node->m_threadname);
	}

	up(&(engine->m_mutex));
	shm_debug("memory_engine_show OK. (node = %d, len = %d)\n", i, len);
	return len;
}
/*
 * printk variant of the node-chain dump, for debugging without a
 * seq_file.  Prints one row per node including thread id/name.
 *
 * Returns 0, or -EINVAL when engine is NULL.
 *
 * Fix: the thread id was printed with "0x%08d" (decimal digits behind a
 * hex prefix); memory_engine_show prints the same field with "0x%08X",
 * so use that here too.
 *
 * NOTE(review): unlike memory_engine_show, this walk does not take
 * engine->m_mutex — confirm all callers hold it already.
 */
static int memory_engine_show_debug(memory_engine_t *engine)
{
	int i = 0;
	memory_node_t *new_node;
	char tag_used[] = " Yes ";
	char tag_free[] = " ";
	char *ptr_tag;

	if (engine == NULL)
		return -EINVAL;

	printk(" No | Alloc | Node | Addr (Aligned)"
		" | Offset | Size (Aligned) | Align"
		" | thread id(name)\n");
	printk("--------------------------------------------------"
		"--------------------------------------------------"
		"-----------------------\n");

	/* Walk all nodes until we have reached the root. (root.m_size == 0) */
	for (new_node = engine->m_root.m_next; new_node->m_size != 0;
			new_node = new_node->m_next) {
		/* On the free list => not allocated. */
		if (new_node->m_next_free != NULL)
			ptr_tag = tag_free;
		else
			ptr_tag = tag_used;

		printk(" %3d | %5s | 0x%08X | 0x%08X (0x%08X) | %8x "
			"| %9x (%9x) | %8d | 0x%08X(%s)\n",
			++i, ptr_tag, (size_t)new_node, new_node->m_addr,
			MEMNODE_ALIGN_ADDR(new_node),
			(MEMNODE_ALIGN_ADDR(new_node) - engine->m_base),
			new_node->m_size, MEMNODE_ALIGN_SIZE(new_node),
			new_node->m_alignment, new_node->m_threadid,
			new_node->m_threadname);
	}

	return 0;
}
/*
 * Allocate `size` bytes with the given alignment from the engine's heap.
 *
 * On success returns 0 and stores the new node in *node; on failure
 * returns -EINVAL (bad arguments) or -ENOMEM (not enough free space or
 * no free block large enough) and sets *node to NULL.
 *
 * Side effects: records the calling thread/task identity in the node,
 * inserts it into the engine's shm rb-tree, updates free/used byte and
 * block accounting, and (for cached engines with guard bytes enabled)
 * stamps the guard pattern at the end of the block.
 */
int memory_engine_allocate(memory_engine_t *engine, size_t size,
			size_t alignment, memory_node_t **node)
{
	int res = 0;
	memory_node_t *new_node = NULL;
	struct task_struct *grptask = NULL;

	shm_debug("memory_engine_allocate start. (%d, %d)\n", size, alignment);

	if ((engine == NULL) || (node == NULL))
		return -EINVAL;

#ifdef SHM_GUARD_BYTES_ENABLE
	/* Reserve room for the trailing guard bytes (cached heaps only);
	 * they are checked later by _Check_guard_data. */
	if (engine->m_cache_or_noncache == SHM_CACHE){
		size += SHM_GUARD_BYTES;
	}
#endif
	down(&(engine->m_mutex));

	/* Fast rejection: total free bytes are insufficient regardless of
	 * fragmentation. */
	if (size > engine->m_size_free) {
		shm_error("heap has not enough (%u) bytes for (%u) bytes\n",
			engine->m_size_free, size);
		res = -ENOMEM;
		goto err_exit;
	}

	/* Find a free node in heap */
	new_node = _FindNode_size(engine, size, alignment);
	if (new_node == NULL) {
		/* Enough bytes in total, but no single contiguous block is
		 * large enough — report fragmentation details.
		 * NOTE(review): this assumes m_prev_free of the root points
		 * at the largest free block — confirm the free-list order. */
		memory_node_t *pLastNode = NULL;
		pLastNode = engine->m_root.m_prev_free;
		if (pLastNode)
			shm_error("heap has not enough liner memory for (%u) bytes, free blocks:%u(max free block:%u)\n",
				size, engine->m_num_freeblock,
				pLastNode->m_size);
		else
			shm_error("heap has not enough liner memory, no free blocks!!!\n");
		res = -ENOMEM;
		goto err_exit;
	}

	/* Do we have enough memory after the allocation to split it? */
	if (MEMNODE_ALIGN_SIZE(new_node) - size > engine->m_threshold)
		_Split(engine, new_node, size + new_node->m_offset);/* Adjust the node size. */
	else
		engine->m_num_freeblock--;

	engine->m_num_usedblock++;

	/* Remove the node from the free list. */
	new_node->m_prev_free->m_next_free = new_node->m_next_free;
	new_node->m_next_free->m_prev_free = new_node->m_prev_free;
	/* NULL free-links mark the node as allocated (see the dump
	 * functions, which test m_next_free). */
	new_node->m_next_free = new_node->m_prev_free = NULL;

	/* Fill in the information. */
	new_node->m_alignment = alignment;

	/* Record pid/thread name in node info, for debug usage. */
	new_node->m_threadid = task_pid_vnr(current);/*(current)->pid;*/
	/* qzhang@marvell
	 * record creating task id,user task id
	 * by default user task id is creating task id
	 * until memory_engine_lock invoked
	 */
	new_node->m_taskid = new_node->m_usrtaskid= task_tgid_vnr(current);
	strncpy(new_node->m_threadname, current->comm, 16);
	grptask = pid_task(task_tgid(current),PIDTYPE_PID);
	if (NULL != grptask) {
		strncpy(new_node->m_taskname,grptask->comm,16);
		strncpy(new_node->m_usrtaskname,grptask->comm,16);
	} else {
		memset(new_node->m_taskname,0,16);
		memset(new_node->m_usrtaskname,0,16);
	}

	/* Index the node by its aligned physical address for the shm
	 * rb-tree lookups. */
	new_node->m_phyaddress = MEMNODE_ALIGN_ADDR(new_node);
	memory_engine_insert_shm_node(&(engine->m_shm_root), new_node);

	/* Adjust the number of free bytes. */
	engine->m_size_free -= new_node->m_size;
	engine->m_size_used += new_node->m_size;
	engine->m_peak_usedmem = max(engine->m_peak_usedmem,
					engine->m_size_used);

	/* Return the pointer to the node. */
	*node = new_node;

#ifdef SHM_GUARD_BYTES_ENABLE
	/* Fill guard bytes with SHM_GUARD_DATA at the block's tail (in the
	 * engine's virtual mapping of the physical range). */
	if (engine->m_cache_or_noncache == SHM_CACHE) {
		memset((void *)(MEMNODE_ALIGN_ADDR(new_node)- engine->m_base
			+ engine->m_virt_base + MEMNODE_ALIGN_SIZE(new_node)
			- SHM_GUARD_BYTES),
			SHM_GUARD_DATA, SHM_GUARD_BYTES);
	}
#endif

	up(&(engine->m_mutex));

	shm_debug("Allocated %u (%u) bytes @ 0x%08X (0x%08X) for align (%u)\n",
		MEMNODE_ALIGN_SIZE(new_node), new_node->m_size,
		MEMNODE_ALIGN_ADDR(new_node), new_node->m_addr,
		new_node->m_alignment);
	shm_debug("memory_engine_allocate OK.\n");

	return 0;

err_exit:
	up(&(engine->m_mutex));
	*node = NULL;
	shm_error("memory_engine_allocate failed !!! (%d, %d) (%d %s)\n",
		size, alignment, current->pid, current->comm);
	return res;
}
/*
 * Translate a device-relative offset into a kernel virtual address,
 * creating or refreshing a cached ioremap mapping as needed.
 *
 * Flow: validate the offset, look up the owning allocation node under
 * the engine mutex, then (under the device rwsem) find or build an
 * address_node that caches the ioremap of that allocation, and return
 * the virtual address corresponding to the requested offset.
 *
 * Returns the virtual address, or NULL on any failure (bad arguments,
 * unknown offset, kmalloc or ioremap failure).
 *
 * NOTE(review): the engine mutex is dropped before `tmp` is used below;
 * this assumes the node cannot be freed concurrently — confirm against
 * the free path's locking.
 */
static void *MV_SHM_Base_GetVirtAddr(shm_device_t *shm_dev, size_t Offset,
		int mem_type)
{
	shm_address_t *address_node;
	memory_node_t *tmp;
	size_t physaddress = 0;

	if (shm_dev == NULL) {
		shm_error("kernel MV_SHM_Base_GetVirtAddr parameters"
			" error shm_dev is NULL");
		return NULL;
	}

	if (Offset >= shm_dev->m_size) {
		shm_error("kernel MV_SHM_Base_GetVirtAddr parameters error"
			" shm_dev[%p] Offset[%08x] > shm_size[%08x] mem_type[%d]"
			"\n", shm_dev, Offset, shm_dev->m_size, mem_type);
		return NULL;
	}

	/* Offsets are relative to the device's physical base. */
	physaddress = Offset + shm_dev->m_base;

	/* Find the allocation node that covers this physical address. */
	mutex_lock(&(shm_dev->m_engine->m_mutex));
	tmp = memory_engine_lookup_shm_node_for_size(
		&(shm_dev->m_engine->m_shm_root), physaddress);
	mutex_unlock(&(shm_dev->m_engine->m_mutex));
	if (tmp == NULL) {
		shm_error("kernel MV_SHM_Base_GetVirtAddr"
			" memory_engine_lookup_shm_node_for_size"
			" offset[%08x] physaddress[%08x] mem_type[%d]\n",
			Offset, (physaddress), mem_type);
		return NULL;
	}

	down_write(&shm_dev->m_rwsem);
	/* Is there already a cached mapping for this physical address? */
	address_node = MV_SHM_lookup_phyaddress_node(
		&(shm_dev->m_phyaddr_root), physaddress);
	if (address_node == NULL) {
		/* No cached mapping: create one covering the whole
		 * allocation node and register it in both lookup trees. */
		address_node = kmalloc(sizeof(shm_address_t), GFP_KERNEL);
		if(address_node == NULL) {
			up_write(&shm_dev->m_rwsem);
			shm_error("kernel MV_SHM_Base_GetVirtAddr"
				" kmalloc fail offset[%08x] mem_type[%d]\n",
				Offset, mem_type);
			return NULL;
		}
		address_node->m_phyaddress = tmp->m_phyaddress;
		address_node->m_size = MEMNODE_ALIGN_SIZE(tmp);
		address_node->m_virtaddress = (size_t)MV_SHM_Base_ioremap(
			address_node->m_phyaddress,
			address_node->m_size, mem_type);
		if (address_node->m_virtaddress == (size_t)NULL) {
			kfree(address_node);
			up_write(&shm_dev->m_rwsem);
			shm_error("kernel MV_SHM_Base_GetVirtAddr"
				" MV_SHM_ioremap fail offset[%08x]"
				" mem_type[%d]\n",Offset, mem_type);
			return NULL;
		}
		MV_SHM_insert_phyaddress_node(
			&(shm_dev->m_phyaddr_root), address_node);
		MV_SHM_insert_virtaddress_node(
			&(shm_dev->m_virtaddr_root), address_node);
		up_write(&shm_dev->m_rwsem);
		/* Offset into the mapping = requested phys - mapping base. */
		return (void *)(address_node->m_virtaddress + ((Offset +
			shm_dev->m_base) - address_node->m_phyaddress));
	} else {
		if((address_node->m_phyaddress == tmp->m_phyaddress)
			&& (address_node->m_size == MEMNODE_ALIGN_SIZE(tmp))) {
			/* Cached mapping still matches the current
			 * allocation node — reuse it directly. */
			up_write(&shm_dev->m_rwsem);
			return (void *)(address_node->m_virtaddress +
				((Offset + shm_dev->m_base) -
				address_node->m_phyaddress));
		} else {
			/* Stale mapping (the underlying allocation changed):
			 * unregister, unmap, and rebuild it for the current
			 * node before reinserting. */
			MV_SHM_delete_phyaddress_node(
				&(shm_dev->m_phyaddr_root), address_node);
			MV_SHM_delete_virtaddress_node(
				&(shm_dev->m_virtaddr_root),address_node);
			iounmap((void *)(address_node->m_virtaddress));
			address_node->m_phyaddress = tmp->m_phyaddress;
			address_node->m_size = MEMNODE_ALIGN_SIZE(tmp);
			address_node->m_virtaddress =
				(size_t)MV_SHM_Base_ioremap(
					address_node->m_phyaddress,
					address_node->m_size, mem_type);
			if (address_node->m_virtaddress == (size_t)NULL) {
				kfree(address_node);
				up_write(&shm_dev->m_rwsem);
				shm_error("kernel MV_SHM_Base_GetVirtAddr"
					" MV_SHM_ioremap fail offset[%08x]"
					" mem_type[%d]\n", Offset, mem_type);
				return NULL;
			}
			MV_SHM_insert_phyaddress_node(
				&(shm_dev->m_phyaddr_root), address_node);
			MV_SHM_insert_virtaddress_node(
				&(shm_dev->m_virtaddr_root), address_node);
			up_write(&shm_dev->m_rwsem);
			return (void *)(address_node->m_virtaddress +
				((Offset + shm_dev->m_base) -
				address_node->m_phyaddress));
		}
	}
}