/**
 * @brief Allocate a memory block from chunkmem.
 * @param id   [in] ownerID for the chunkmem block.
 * @param size [in] requested size of the block in bytes.
 * @return success: start address (kernel_addr) of the allocated block,
 *         fail: NULL (no chunkmem pool, zero size, interrupted wait, or
 *         allocator exhaustion).
 * @see dlMalloc
 */
void* gp_chunk_malloc(unsigned int id, unsigned int size)
{
    void *mem = NULL;

    /* Reject calls before the pool exists and zero-sized requests. */
    if ((chunkmem == NULL) || (size == 0)) {
        return NULL;
    }
    /* Serialize all allocator access; bail out if a signal interrupts us. */
    if (down_interruptible(&chunkmem->sem) != 0) {
        return NULL;
    }
#if DEBUG_ALLOC_FREE
    DIAG_DEBUG("++++++++++++++++++++++++ ALLOC ++++++++++++++++++++++++++\n");
    DIAG_DEBUG("!!!!size = %08X\n", size);
    dlMalloc_Status(NULL);
#endif
#if USE_DLMALLOC_EX
    mem = dlMallocEx(id, size);
#else
    mem = dlMalloc(id, size);
#endif
#if DEBUG_ALLOC_FREE
    dlMalloc_Status(NULL);
    DIAG_DEBUG("------------------------ ALLOC --------------------------\n");
#endif
    up(&chunkmem->sem);
    return mem;
}
/*
 * RX callback: log each received packet and optionally queue it for a
 * consumer task.
 *
 * Returns BCM_RX_HANDLED when the packet was only inspected (SDK keeps the
 * buffer), or BCM_RX_HANDLED_OWNED when the packet buffer was chained onto
 * pkt_free_queue[unit] and ownership transferred to us.
 *
 * NOTE(review): assumes this runs in the SDK RX thread context where
 * sal_mutex_take may block — confirm against RX registration flags.
 */
STATIC bcm_rx_t rx_cb_handler(int unit, bcm_pkt_t *info, void *cookie)
{
    int count;

    COMPILER_REFERENCE(cookie); /* cookie unused; silence the compiler */

    /* Global packet counter; presumably only incremented here — not atomic,
     * so the count is best-effort if multiple RX channels run concurrently. */
    count = ++rx_cb_count;

    DIAG_DEBUG(DIAG_DBG_RX, ("RX packet %d: unit=%d len=%d rx_port=%d reason=%d cos=%d\n",
                             count, unit, info->tot_len, info->rx_port, info->rx_reason, info->cos));
#ifdef BCM_XGS_SUPPORT
    /* Fabric devices carry a HiGig header in front of the payload; dump it
     * when RX debugging is enabled. */
    if (SOC_IS_XGS12_FABRIC(unit)) {
        if (DIAG_DEBUG_CHECK(DIAG_DBG_RX)) {
            soc_higig_dump(unit, "HG HEADER: ", (soc_higig_hdr_t *)BCM_PKT_HG_HDR(info));
        }
    }
#endif /* BCM_XGS_SUPPORT */
    DIAG_DEBUG(DIAG_DBG_RX, ("Parsed packet info:\n"));
    DIAG_DEBUG(DIAG_DBG_RX, ("  src mod=%d. src port=%d. op=%d.\n",
                             info->src_mod, info->src_port, info->opcode));
    DIAG_DEBUG(DIAG_DBG_RX, ("  dest mod=%d. dest port=%d. chan=%d.\n",
                             info->dest_mod, info->dest_port, info->dma_channel));
    if (DIAG_DEBUG_CHECK(DIAG_DBG_RX)) {
        soc_dma_dump_pkt(unit, "Data: ", BCM_PKT_DMAC(info), info->tot_len, TRUE);
    }

    /* If a consumer asked for packets, take ownership and enqueue. */
    if (enqueue_pkts[unit] > 0) {
        sal_mutex_take(pkt_queue_lock[unit], sal_mutex_FOREVER);
        /* Push the raw buffer onto a singly linked free queue: the first
         * word of alloc_ptr is reused as the 'next' link. */
        *(uint32 **)(info->alloc_ptr) = (uint32 *)pkt_free_queue[unit];
        pkt_free_queue[unit] = info->alloc_ptr;
        rx_pkt_count[unit]++;
        /* Wake the waiter once the requested number of packets is queued. */
        if (rx_pkt_count[unit] >= enqueue_pkts[unit]) {
            sal_sem_give(pkts_are_ready[unit]);
        }
        sal_mutex_give(pkt_queue_lock[unit]);
#if defined(BCM_RXP_DEBUG)
        bcm_rx_pool_own(info->alloc_ptr, "rxmon"); /* tag buffer owner for RX-pool debugging */
#endif
        return BCM_RX_HANDLED_OWNED; /* we now own the buffer */
    }
    return BCM_RX_HANDLED; /* inspected only; SDK reclaims the buffer */
}
/**
 * @brief Free a chunkmem block.
 * @param addr [in] start address (kernel_addr) of the chunkmem block to free.
 * @return None
 * @see dlFree
 *
 * Fix: the original declared 'ret' inside the verbose-diagnostics #if block
 * but referenced it from an unguarded DIAG_VERB() after up(); whenever the
 * guard is false while DIAG_VERB still expands its arguments, 'ret' is
 * undeclared and the file fails to compile. The verbose logging is now fully
 * contained inside the same guarded region that declares 'ret'.
 */
void gp_chunk_free(void *addr)
{
    if (chunkmem != NULL) {
        if (down_interruptible(&chunkmem->sem) == 0) {
#if DEBUG_ALLOC_FREE
            DIAG_DEBUG("++++++++++++++++++++++++ FREE ++++++++++++++++++++++++++\n");
            DIAG_DEBUG("!!!!addr = %08X\n", (unsigned long)addr);
            dlMalloc_Status(NULL);
#endif
#if (DIAG_LEVEL >= DIAG_LVL_VERB) && !defined(DIAG_VERB_OFF)
            {
                int ret = dlFree(addr);
                DIAG_VERB("dlFree: %d\n", ret);
            }
#else
            dlFree(addr);
#endif
#if DEBUG_ALLOC_FREE
            dlMalloc_Status(NULL);
            DIAG_DEBUG("------------------------ FREE --------------------------\n");
#endif
            up(&chunkmem->sem);
        }
    }
}
void my_save_data(unsigned long addr, unsigned long size) { void *va; data_block_t *block = kmalloc(sizeof(data_block_t) + size, GFP_KERNEL); if (block == NULL) { DIAG_ERROR("save data error: out of memory! %p %08X\n", addr, size); return; } va = gp_chunk_va(addr); if (va == NULL) { va = __va(addr); } memcpy(&block->data, va, size); block->addr = va; DIAG_DEBUG("save data: %08X(%p) %08X\n", addr, va, size); block->size = size; block->next = blocks; blocks = block; }
/** * @brief Chunkmem device ioctl function */ static long chunkmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { chunk_block_t block; void *ka; /* kernel_addr */ unsigned int va; /* user_addr */ unsigned int pa; /* phy_addr*/ long ret = 0; unsigned int offset = 0; switch (cmd) { case CHUNK_MEM_ALLOC: case CHUNK_MEM_SHARE: case CHUNK_MEM_MMAP: { if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { ret = -EFAULT; break; } /* alloc|share|mmap memory */ if (cmd == CHUNK_MEM_MMAP) { DIAG_VERB("CHUNK_MEM_MMAP:\n"); ka = gp_chunk_va(block.phy_addr); if (ka == NULL) { DIAG_ERROR("CHUNK_MEM_MMAP: bad address! (%s:%08X)\n", current->comm, block.phy_addr); ret = -EFAULT; /* mmap fail */ break; } /* page alignment */ offset = block.phy_addr & ~PAGE_MASK; ka = (void *)((unsigned long)ka & PAGE_MASK); DIAG_VERB("CHUNK_MEM_MMAP: phy_addr = %08X\n", block.phy_addr); DIAG_VERB("CHUNK_MEM_MMAP: size = %08X\n", block.size); DIAG_VERB("CHUNK_MEM_MMAP: ka = %08X\n", (unsigned int)ka); DIAG_VERB("CHUNK_MEM_MMAP: offset = %08X\n", offset); DIAG_VERB("CHUNK_MEM_MMAP: PAGE_ALIGN(size + offset) = %08X\n", PAGE_ALIGN(block.size + offset)); } else { if (cmd == CHUNK_MEM_ALLOC) { DIAG_VERB("CHUNK_MEM_ALLOC:\n"); DIAG_VERB("size = %08X (%d)\n", block.size, block.size); ka = gp_chunk_malloc(current->tgid, block.size); DIAG_VERB("gp_chunk_malloc return ka=%08X\n", ka); if (ka == NULL) { DIAG_ERROR("CHUNK_MEM_ALLOC: out of memory! (%s:%08X)\n", current->comm, block.size); dlMalloc_Status(NULL); ret = -ENOMEM; break; } block.phy_addr = gp_chunk_pa(ka); } else { /* CHUNK_MEM_SHARE */ DIAG_VERB("CHUNK_MEM_SHARE:\n"); ka = gp_chunk_va(block.phy_addr); if ((ka == NULL) || (dlShare(ka) == 0)) { DIAG_ERROR("CHUNK_MEM_SHARE: bad address! 
(%s:%08X)\n", current->comm, block.phy_addr); ret = -EFAULT; /* share fail */ break; } } block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK; /* actual allocated size */ DIAG_VERB("actual size = %08X (%d)\n", block.size, block.size); DIAG_VERB("ka = %08X\n", (unsigned int)ka); } /* mmap to userspace */ down(&chunkmem->sem); down_write(¤t->mm->mmap_sem); chunkmem->mmap_enable = 1; /* enable mmap in CHUNK_MEM_ALLOC */ va = do_mmap_pgoff( file, 0, PAGE_ALIGN(block.size + offset), PROT_READ|PROT_WRITE, MAP_SHARED, (ka - chunkmem->vbase) >> PAGE_SHIFT); chunkmem->mmap_enable = 0; /* disable it */ up_write(¤t->mm->mmap_sem); up(&chunkmem->sem); if (IS_ERR_VALUE(va)) { ret = va; /* errcode */ DIAG_ERROR("%s: chunkmem mmap fail(%d)! (%s)\n", (cmd == CHUNK_MEM_MMAP) ? "CHUNK_MEM_MMAP" : ((cmd == CHUNK_MEM_ALLOC) ? "CHUNK_MEM_ALLOC" : "CHUNK_MEM_SHARE"), ret, current->comm); break; } va += offset; block.addr = (void *)va; DIAG_VERB("va = %08X\n\n", va); if (copy_to_user((void __user*)arg, &block, sizeof(block))) { ret = -EFAULT; break; } } break; case CHUNK_MEM_FREE: { if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { ret = -EFAULT; break; } /* translate user_va to ka */ DIAG_VERB("CHUNK_MEM_FREE:\n"); DIAG_VERB("va = %08X\n", (unsigned int)block.addr); pa = gp_user_va_to_pa(block.addr); /* user_addr to phy_addr */ if (pa == 0) { DIAG_ERROR("CHUNK_MEM_FREE: chunkmem user_va_to_pa fail! (%s:%08X)\n", current->comm, block.addr); ret = -EFAULT; break; } DIAG_VERB("pa = %08X\n", pa); ka = gp_chunk_va(pa); /* phy_addr to kernel_addr */ if (ka == NULL) { DIAG_ERROR("CHUNK_MEM_FREE: not a chunkmem address! 
(%s:%08X)\n", current->comm, pa); ret = -EFAULT; break; } block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK; DIAG_VERB("ka = %08X\n", (unsigned int)ka); DIAG_VERB("actual size = %08X (%d)\n\n", block.size, block.size); /* munmap memory */ down_write(¤t->mm->mmap_sem); do_munmap(current->mm, (unsigned int)block.addr, block.size); up_write(¤t->mm->mmap_sem); /* free memory */ gp_chunk_free(ka); #if (DIAG_LEVEL >= DIAG_LVL_VERB) && !defined(DIAG_VERB_OFF) dlMalloc_Status(NULL); #endif } break; case CHUNK_MEM_INFO: { chunk_info_t info; if (copy_from_user(&info, (void __user*)arg, sizeof(info))) { ret = -EFAULT; break; } if (info.pid == (unsigned int)(-1)) { info.pid = current->tgid; } #if CHUNK_SUSPEND_TEST if (info.pid) { dlMalloc_Status(NULL); } else { gp_chunk_suspend(my_save_data); memset(chunkmem->vbase, 0, chunkmem->size); /* restore */ while (blocks != NULL) { data_block_t *block = blocks; blocks = block->next; DIAG_DEBUG("restore data: %p %08X\n", block->addr, block->size); memcpy(block->addr, &block->data, block->size); kfree(block); } } #else down(&chunkmem->sem); dlMalloc_Status((mem_info_t *)&info); up(&chunkmem->sem); #endif if (copy_to_user((void __user*)arg, &info, sizeof(info))) { ret = -EFAULT; break; } } break; case CHUNK_MEM_VA2PA: { ret = -EFAULT; if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { break; } pa = gp_user_va_to_pa(block.addr); /* user_addr to phy_addr */ if (pa != 0) { ka = gp_chunk_va(pa); /* phy_addr to kernel_addr */ if (ka != NULL) { block.phy_addr = pa; if (copy_to_user((void __user*)arg, &block, sizeof(block)) == 0) { ret = 0; } } } } break; case CHUNK_MEM_MUNMAP: { if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { ret = -EFAULT; break; } va = (unsigned int)block.addr; /* page alignment */ offset = va & ~PAGE_MASK; va &= PAGE_MASK; /* munmap memory */ down_write(¤t->mm->mmap_sem); do_munmap(current->mm, va, PAGE_ALIGN(block.size + offset)); up_write(¤t->mm->mmap_sem); } break; case CHUNK_MEM_FREEALL: 
gp_chunk_free_all((unsigned int)arg); printk(KERN_WARNING "CHUNK_MEM_FREEALL(%ld)\n", arg); break; case CHUNK_MEM_DUMP: dlMalloc_Status(0); break; default: ret = -ENOTTY; /* Inappropriate ioctl for device */ break; } return ret; }