void my_save_data(unsigned long addr, unsigned long size) { void *va; data_block_t *block = kmalloc(sizeof(data_block_t) + size, GFP_KERNEL); if (block == NULL) { DIAG_ERROR("save data error: out of memory! %p %08X\n", addr, size); return; } va = gp_chunk_va(addr); if (va == NULL) { va = __va(addr); } memcpy(&block->data, va, size); block->addr = va; DIAG_DEBUG("save data: %08X(%p) %08X\n", addr, va, size); block->size = size; block->next = blocks; blocks = block; }
/**
 * @brief   PPU text number ram ptr set function.
 * @param   p_register_set [in]: PPU register set.
 * @param   text_index [in]: 0:TEXT0, 1:TEXT1, 2:TEXT2, 3:TEXT3.
 * @param   value [in]: 32-bit user-space address of the number array.
 * @return  SUCCESS/ERROR_ID (0, or negative errno).
 */
signed int gp_ppu_text_number_array_set_ptr(
	PPU_REGISTER_SETS *p_register_set,
	unsigned int text_index,
	unsigned int value
)
{
	unsigned int pa;
	unsigned int va;

	if (!p_register_set || text_index > C_PPU_TEXT4) {
		return -ENOIOCTLCMD;
	}

	/* translate user VA -> physical, then physical -> chunkmem kernel VA */
	pa = (unsigned int)gp_user_va_to_pa((unsigned short *)value);
	if (pa == 0) {
		/* not a mapped user address: don't write a NULL pointer into the registers */
		return -EFAULT;
	}
	va = (unsigned int)gp_chunk_va(pa);
	if (va == 0) {
		/* address is not inside the chunkmem region */
		return -EFAULT;
	}

	p_register_set->text[text_index].n_ptr = va;
	p_register_set->text[text_index].n_ptr_pa = pa;

	/* Notify PPU driver to update text registers */
	gp_ppu_text_set_update_reg_flag(p_register_set, text_index);

	return 0;
}
/** * @brief Chunkmem device ioctl function */ static long chunkmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { chunk_block_t block; void *ka; /* kernel_addr */ unsigned int va; /* user_addr */ unsigned int pa; /* phy_addr*/ long ret = 0; unsigned int offset = 0; switch (cmd) { case CHUNK_MEM_ALLOC: case CHUNK_MEM_SHARE: case CHUNK_MEM_MMAP: { if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { ret = -EFAULT; break; } /* alloc|share|mmap memory */ if (cmd == CHUNK_MEM_MMAP) { DIAG_VERB("CHUNK_MEM_MMAP:\n"); ka = gp_chunk_va(block.phy_addr); if (ka == NULL) { DIAG_ERROR("CHUNK_MEM_MMAP: bad address! (%s:%08X)\n", current->comm, block.phy_addr); ret = -EFAULT; /* mmap fail */ break; } /* page alignment */ offset = block.phy_addr & ~PAGE_MASK; ka = (void *)((unsigned long)ka & PAGE_MASK); DIAG_VERB("CHUNK_MEM_MMAP: phy_addr = %08X\n", block.phy_addr); DIAG_VERB("CHUNK_MEM_MMAP: size = %08X\n", block.size); DIAG_VERB("CHUNK_MEM_MMAP: ka = %08X\n", (unsigned int)ka); DIAG_VERB("CHUNK_MEM_MMAP: offset = %08X\n", offset); DIAG_VERB("CHUNK_MEM_MMAP: PAGE_ALIGN(size + offset) = %08X\n", PAGE_ALIGN(block.size + offset)); } else { if (cmd == CHUNK_MEM_ALLOC) { DIAG_VERB("CHUNK_MEM_ALLOC:\n"); DIAG_VERB("size = %08X (%d)\n", block.size, block.size); ka = gp_chunk_malloc(current->tgid, block.size); DIAG_VERB("gp_chunk_malloc return ka=%08X\n", ka); if (ka == NULL) { DIAG_ERROR("CHUNK_MEM_ALLOC: out of memory! (%s:%08X)\n", current->comm, block.size); dlMalloc_Status(NULL); ret = -ENOMEM; break; } block.phy_addr = gp_chunk_pa(ka); } else { /* CHUNK_MEM_SHARE */ DIAG_VERB("CHUNK_MEM_SHARE:\n"); ka = gp_chunk_va(block.phy_addr); if ((ka == NULL) || (dlShare(ka) == 0)) { DIAG_ERROR("CHUNK_MEM_SHARE: bad address! 
(%s:%08X)\n", current->comm, block.phy_addr); ret = -EFAULT; /* share fail */ break; } } block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK; /* actual allocated size */ DIAG_VERB("actual size = %08X (%d)\n", block.size, block.size); DIAG_VERB("ka = %08X\n", (unsigned int)ka); } /* mmap to userspace */ down(&chunkmem->sem); down_write(¤t->mm->mmap_sem); chunkmem->mmap_enable = 1; /* enable mmap in CHUNK_MEM_ALLOC */ va = do_mmap_pgoff( file, 0, PAGE_ALIGN(block.size + offset), PROT_READ|PROT_WRITE, MAP_SHARED, (ka - chunkmem->vbase) >> PAGE_SHIFT); chunkmem->mmap_enable = 0; /* disable it */ up_write(¤t->mm->mmap_sem); up(&chunkmem->sem); if (IS_ERR_VALUE(va)) { ret = va; /* errcode */ DIAG_ERROR("%s: chunkmem mmap fail(%d)! (%s)\n", (cmd == CHUNK_MEM_MMAP) ? "CHUNK_MEM_MMAP" : ((cmd == CHUNK_MEM_ALLOC) ? "CHUNK_MEM_ALLOC" : "CHUNK_MEM_SHARE"), ret, current->comm); break; } va += offset; block.addr = (void *)va; DIAG_VERB("va = %08X\n\n", va); if (copy_to_user((void __user*)arg, &block, sizeof(block))) { ret = -EFAULT; break; } } break; case CHUNK_MEM_FREE: { if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { ret = -EFAULT; break; } /* translate user_va to ka */ DIAG_VERB("CHUNK_MEM_FREE:\n"); DIAG_VERB("va = %08X\n", (unsigned int)block.addr); pa = gp_user_va_to_pa(block.addr); /* user_addr to phy_addr */ if (pa == 0) { DIAG_ERROR("CHUNK_MEM_FREE: chunkmem user_va_to_pa fail! (%s:%08X)\n", current->comm, block.addr); ret = -EFAULT; break; } DIAG_VERB("pa = %08X\n", pa); ka = gp_chunk_va(pa); /* phy_addr to kernel_addr */ if (ka == NULL) { DIAG_ERROR("CHUNK_MEM_FREE: not a chunkmem address! 
(%s:%08X)\n", current->comm, pa); ret = -EFAULT; break; } block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK; DIAG_VERB("ka = %08X\n", (unsigned int)ka); DIAG_VERB("actual size = %08X (%d)\n\n", block.size, block.size); /* munmap memory */ down_write(¤t->mm->mmap_sem); do_munmap(current->mm, (unsigned int)block.addr, block.size); up_write(¤t->mm->mmap_sem); /* free memory */ gp_chunk_free(ka); #if (DIAG_LEVEL >= DIAG_LVL_VERB) && !defined(DIAG_VERB_OFF) dlMalloc_Status(NULL); #endif } break; case CHUNK_MEM_INFO: { chunk_info_t info; if (copy_from_user(&info, (void __user*)arg, sizeof(info))) { ret = -EFAULT; break; } if (info.pid == (unsigned int)(-1)) { info.pid = current->tgid; } #if CHUNK_SUSPEND_TEST if (info.pid) { dlMalloc_Status(NULL); } else { gp_chunk_suspend(my_save_data); memset(chunkmem->vbase, 0, chunkmem->size); /* restore */ while (blocks != NULL) { data_block_t *block = blocks; blocks = block->next; DIAG_DEBUG("restore data: %p %08X\n", block->addr, block->size); memcpy(block->addr, &block->data, block->size); kfree(block); } } #else down(&chunkmem->sem); dlMalloc_Status((mem_info_t *)&info); up(&chunkmem->sem); #endif if (copy_to_user((void __user*)arg, &info, sizeof(info))) { ret = -EFAULT; break; } } break; case CHUNK_MEM_VA2PA: { ret = -EFAULT; if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { break; } pa = gp_user_va_to_pa(block.addr); /* user_addr to phy_addr */ if (pa != 0) { ka = gp_chunk_va(pa); /* phy_addr to kernel_addr */ if (ka != NULL) { block.phy_addr = pa; if (copy_to_user((void __user*)arg, &block, sizeof(block)) == 0) { ret = 0; } } } } break; case CHUNK_MEM_MUNMAP: { if (copy_from_user(&block, (void __user*)arg, sizeof(block))) { ret = -EFAULT; break; } va = (unsigned int)block.addr; /* page alignment */ offset = va & ~PAGE_MASK; va &= PAGE_MASK; /* munmap memory */ down_write(¤t->mm->mmap_sem); do_munmap(current->mm, va, PAGE_ALIGN(block.size + offset)); up_write(¤t->mm->mmap_sem); } break; case CHUNK_MEM_FREEALL: 
gp_chunk_free_all((unsigned int)arg); printk(KERN_WARNING "CHUNK_MEM_FREEALL(%ld)\n", arg); break; case CHUNK_MEM_DUMP: dlMalloc_Status(0); break; default: ret = -ENOTTY; /* Inappropriate ioctl for device */ break; } return ret; }