void nvrm_munmap(uint32_t id, uint64_t mmap_addr, uint64_t len, uint64_t mmap_offset) { struct gpu_object *obj; struct cpu_mapping *cpu_mapping; for (obj = gpu_objects; obj != NULL; obj = obj->next) for (cpu_mapping = obj->cpu_mappings; cpu_mapping != NULL; cpu_mapping = cpu_mapping->next) if (cpu_mapping->cpu_addr == mmap_addr) { if (dump_sys_munmap) { if (cpu_mapping->fdtype == FDNVIDIA) { mmt_log_cont(", cid: 0x%08x, handle: 0x%08x", obj->cid, obj->handle); describe_nvrm_object(obj->cid, obj->handle, ""); } } disconnect_cpu_mapping_from_gpu_object(cpu_mapping); break; } if (dump_sys_munmap) mmt_log_cont_nl(); buffer_munmap(id); }
static void dump_object_tree(struct gpu_object *obj, int level, struct gpu_object *highlight) { int i, indent_level = level * 4 + 2; mmt_log("%s", ""); if (obj == highlight) { mmt_log_cont("%s", "*"); indent_level--; } for (i = 0; i < indent_level; ++i) mmt_log_cont("%s", " "); // mmt_log_cont("cid: 0x%08x, ", obj->cid); mmt_log_cont("handle: 0x%08x", obj->handle); describe_nvrm_object(obj->cid, obj->handle, ""); if (obj->class_ == NVRM_DEVICE_0) { struct nvrm_device *d = nvrm_dev(obj); if (d) mmt_log_cont(", chipset: 0x%x", d->chipset); } mmt_log_cont("%s\n", ""); for (i = 0; i < obj->children_space; ++i) if (obj->children_objects[i]) dump_object_tree(obj->children_objects[i], level + 1, highlight); }
/*
 * nvrm_mmap - associate an mmap() call with a previously registered
 * GPU-object mapping.
 *
 * Scans all cpu_mappings of all GPU objects for one whose recorded
 * mmap_offset matches this mmap's file offset (the fd itself cannot be
 * validated here — see the inline comment).  On a match it records the
 * CPU address and trace id, registers the mapping under that id, and
 * optionally logs the owning object.  If no object claims the offset,
 * the mmap is handed to the plain buffer tracker; for nvidia fds a
 * lookup failure is also reported as an error.
 *
 * NOTE(review): a second, extended definition of nvrm_mmap appears
 * later in this file (with mapping-reuse detection and zero-offset
 * clobbering); only one of the two can be linked — confirm which
 * version is intended to survive.
 */
void nvrm_mmap(uint32_t id, uint32_t fd, uint64_t mmap_addr, uint64_t len, uint64_t mmap_offset)
{
	struct gpu_object *obj;
	struct cpu_mapping *cpu_mapping;

	for (obj = gpu_objects; obj != NULL; obj = obj->next)
		for (cpu_mapping = obj->cpu_mappings; cpu_mapping != NULL; cpu_mapping = cpu_mapping->next)
			//can't validate fd
			if (cpu_mapping->mmap_offset == mmap_offset)
			{
				/* Match: bind this mmap to the tracked mapping. */
				cpu_mapping->cpu_addr = mmap_addr;
				cpu_mapping->id = id;
				set_cpu_mapping(id, cpu_mapping);
				if (dump_sys_mmap)
				{
					/* Only nvidia-fd mappings have an object worth describing. */
					if (cpu_mapping->fdtype == FDNVIDIA)
					{
						mmt_log_cont(", cid: 0x%08x, handle: 0x%08x", obj->cid, obj->handle);
						describe_nvrm_object(obj->cid, obj->handle, "");
					}
					mmt_log_cont_nl();
				}
				return;
			}

	/* No tracked object covers this offset: finish the pending log
	 * line, complain if it looked like an nvidia fd, and fall back to
	 * generic buffer tracking. */
	if (dump_sys_mmap)
		mmt_log_cont_nl();
	if (demmt_get_fdtype(fd) == FDNVIDIA)
		mmt_error("nvrm_mmap: couldn't find object/space offset: 0x%016" PRIx64 "\n", mmap_offset);
	buffer_mmap(id, fd, mmap_addr, len, mmap_offset);
}
void nvrm_mmap(uint32_t id, uint32_t fd, uint64_t mmap_addr, uint64_t len, uint64_t mmap_offset) { struct gpu_object *obj; struct cpu_mapping *cpu_mapping; for (obj = gpu_objects; obj != NULL; obj = obj->next) for (cpu_mapping = obj->cpu_mappings; cpu_mapping != NULL; cpu_mapping = cpu_mapping->next) //can't validate fd if (cpu_mapping->mmap_offset == mmap_offset) { cpu_mapping->cpu_addr = mmap_addr; uint32_t old_id = cpu_mapping->id; cpu_mapping->id = id; set_cpu_mapping(id, cpu_mapping); if (dump_sys_mmap) { if (cpu_mapping->fdtype == FDNVIDIA) { mmt_log_cont(", cid: 0x%08x, handle: 0x%08x", obj->cid, obj->handle); describe_nvrm_object(obj->cid, obj->handle, ""); } mmt_log_cont_nl(); } if (old_id) mmt_error("%d -> %d, mapping reuse, expect crash soon\n", old_id, id); /* * On newer blob, where mmap_offset is 0 for * all mappings (WTF?), clobber the value to * prevent the next nvrm_mmap from finding this * mapping. */ if (cpu_mapping->mmap_offset == 0) cpu_mapping->mmap_offset = -1; return; } if (dump_sys_mmap) mmt_log_cont_nl(); if (demmt_get_fdtype(fd) == FDNVIDIA) mmt_error("nvrm_mmap: couldn't find object/space offset: 0x%016" PRIx64 "\n", mmap_offset); buffer_mmap(id, fd, mmap_addr, len, mmap_offset); }