static void handle_nvrm_ioctl_vspace_map(struct nvrm_ioctl_vspace_map *s) { struct buffer *buf; int found = 0; for (buf = buffers_list; buf != NULL; buf = buf->next) if (buf->data1 == s->dev && buf->data2 == s->handle && buf->length == s->size) { buf->gpu_start = s->addr; mmt_debug("setting gpu address for buffer %d to 0x%08lx\n", buf->id, buf->gpu_start); found = 1; break; } if (!found) { struct unk_map *tmp; for (tmp = unk_maps; tmp != NULL; tmp = tmp->next) { if (tmp->data1 == s->dev && tmp->data2 == s->handle) { mmt_log("TODO: unk buffer found, demmt_nv_gpu_map needs to be updated!%s\n", ""); break; } } register_gpu_only_buffer(s->addr, s->size, 0, s->dev, s->handle); } }
/* Handler for the nvidia-specific "mmap2" trace record: optionally log it,
 * then hand the mapping over to buffer_mmap (data1/data2 passed by address
 * so buffer_mmap can consume them). */
void demmt_nv_mmap2(struct mmt_nvidia_mmap2 *mm, void *state)
{
	if (dump_sys_mmap)
		mmt_log("mmap: address: %p, length: 0x%08lx, id: %d, offset: 0x%08lx, data1: 0x%08lx, data2: 0x%08lx, fd: %d\n",
				(void *)mm->start, mm->len, mm->id, mm->offset,
				mm->data1, mm->data2, mm->fd);

	buffer_mmap(mm->id, mm->start, mm->len, mm->offset, &mm->data1, &mm->data2);
}
static void handle_nvrm_ioctl_vspace_unmap(struct nvrm_ioctl_vspace_unmap *s) { struct buffer *buf; int found = 0; for (buf = buffers_list; buf != NULL; buf = buf->next) if (buf->data1 == s->dev && buf->data2 == s->handle && buf->gpu_start == s->addr) { mmt_debug("clearing gpu address for buffer %d (was: 0x%08lx)\n", buf->id, buf->gpu_start); buf->gpu_start = 0; found = 1; break; } if (!found) { for (buf = gpu_only_buffers_list; buf != NULL; buf = buf->next) { if (buf->data1 == s->dev && buf->data2 == s->handle && buf->gpu_start == s->addr) { mmt_debug("deregistering gpu only buffer of size %ld\n", buf->length); buffer_free(buf); found = 1; break; } } if (!found) mmt_log("gpu only buffer not found%s\n", ""); } }
static void dump_object_tree(struct gpu_object *obj, int level, struct gpu_object *highlight) { int i, indent_level = level * 4 + 2; mmt_log("%s", ""); if (obj == highlight) { mmt_log_cont("%s", "*"); indent_level--; } for (i = 0; i < indent_level; ++i) mmt_log_cont("%s", " "); // mmt_log_cont("cid: 0x%08x, ", obj->cid); mmt_log_cont("handle: 0x%08x", obj->handle); describe_nvrm_object(obj->cid, obj->handle, ""); if (obj->class_ == NVRM_DEVICE_0) { struct nvrm_device *d = nvrm_dev(obj); if (d) mmt_log_cont(", chipset: 0x%x", d->chipset); } mmt_log_cont("%s\n", ""); for (i = 0; i < obj->children_space; ++i) if (obj->children_objects[i]) dump_object_tree(obj->children_objects[i], level + 1, highlight); }
static void __demmt_ioctl_post(uint32_t fd, uint32_t id, struct mmt_buf *data, uint64_t ret, uint64_t err, void *state, struct mmt_memory_dump *args, int argc) { uint8_t dir, type, nr; uint16_t size; decode_ioctl_id(id, &dir, &type, &nr, &size); int print_raw = 0; enum mmt_fd_type fdtype = demmt_get_fdtype(fd); if (fdtype == FDDRM) print_raw = demmt_drm_ioctl_post(fd, id, dir, nr, size, data, ret, err, state, args, argc); else if (fdtype == FDNVIDIA) print_raw = nvrm_ioctl_post(fd, id, dir, nr, size, data, ret, err, state, args, argc); else if (fdtype == FDFGLRX) print_raw = fglrx_ioctl_post(fd, id, dir, nr, size, data, ret, err, state, args, argc); else mmt_error("ioctl 0x%x called for unknown type of file [%d, %d]\n", id, fd, fdtype); print_raw = print_raw || dump_raw_ioctl_data; if (print_raw) { mmt_log("ioctl post 0x%02x (0x%08x), fd: %d, dir: %2s, size: %4d", nr, id, fd, dir_desc[dir], size); if (ret) mmt_log_cont(", ret: 0x%" PRIx64 "", ret); if (err) mmt_log_cont(", err: 0x%" PRIx64 "", err); if (size != data->len) mmt_log_cont(", data.len: %d", data->len); ioctl_data_print(data); mmt_log_cont_nl(); } }
/* Handler for plain mmap trace records (no fd information): optionally log
 * the mapping, then register it with the nvrm layer (fd unknown → -1). */
void __demmt_mmap(uint64_t start, uint64_t len, uint32_t id, uint64_t offset, void *state)
{
	if (dump_sys_mmap)
		/* bug fix: the log line was missing its trailing newline, so the next
		 * log entry ran together with this one (siblings like demmt_nv_mmap2
		 * and demmt_mremap terminate theirs) */
		mmt_log("mmap: address: 0x%" PRIx64 ", length: 0x%08" PRIx64 ", id: %d, offset: 0x%08" PRIx64 "\n",
				start, len, id, offset);

	nvrm_mmap(id, -1, start, len, offset);
}
void dump_regions(struct regions *regions) { struct region *cur = regions->head; while (cur) { mmt_log("<0x%08x, 0x%08x>\n", cur->start, cur->end); cur = cur->next; } }
/* Dump a raw trace message when message dumping is enabled. */
static void demmt_msg(uint8_t *data, unsigned int len, void *state)
{
	if (!dump_msg)
		return;

	mmt_log("MSG: %s", "");
	/* the body is raw bytes, not a NUL-terminated string */
	fwrite(data, 1, len, stdout);
	mmt_log_cont_nl();
}
void demmt_memory_dump(struct mmt_memory_dump_prefix *d, struct mmt_buf *b, void *state) { // dead code, because memory dumps are passed to ioctl_pre / ioctl_post handlers int i; mmt_log("memory dump, addr: 0x%016lx, txt: \"%s\", data.len: %d, data:", d->addr, d->str.data, b->len); for (i = 0; i < b->len / 4; ++i) mmt_log_cont(" 0x%08x", ((uint32_t *)b->data)[i]); mmt_log_cont("%s", "\n"); }
static void demmt_dup_syscall(struct mmt_dup_syscall *o, void *state) { if (o->newfd < MAX_FD && o->oldfd < MAX_FD) { open_files[o->newfd].path = open_files[o->oldfd].path; open_files[o->newfd].type = open_files[o->oldfd].type; } if (dump_sys_open) mmt_log("sys_dup: old: %d, new: %d\n", o->oldfd, o->newfd); }
/* Handler for mremap trace records: the id must already be a known CPU
 * mapping; log the move if requested, then update the buffer. */
static void demmt_mremap(struct mmt_mremap *mm, void *state)
{
	if (!get_cpu_mapping(mm->id))
	{
		mmt_error("invalid buffer id: %d\n", mm->id);
		demmt_abort();
	}

	if (dump_sys_mremap)
		mmt_log("mremap: old_address: 0x%" PRIx64 ", new_address: 0x%" PRIx64 ", old_length: 0x%08" PRIx64 ", new_length: 0x%08" PRIx64 ", id: %d, offset: 0x%08" PRIx64 "\n",
				mm->old_start, mm->start, mm->old_len, mm->len, mm->id, mm->offset);

	buffer_mremap(mm);
}
/* Handler for munmap trace records: the id must already be a known CPU
 * mapping; log the unmap if requested, then notify the nvrm layer. */
static void demmt_munmap(struct mmt_unmap *mm, void *state)
{
	if (get_cpu_mapping(mm->id) == NULL)
	{
		mmt_error("invalid buffer id: %d\n", mm->id);
		demmt_abort();
	}

	if (dump_sys_munmap)
		/* bug fix: added the missing trailing newline — the equivalent log in
		 * demmt_mremap terminates its line, this one did not, so subsequent
		 * log output was appended to it */
		mmt_log("munmap: address: 0x%" PRIx64 ", length: 0x%08" PRIx64 ", id: %d, offset: 0x%08" PRIx64 "\n",
				mm->start, mm->len, mm->id, mm->offset);

	nvrm_munmap(mm->id, mm->start, mm->len, mm->offset);
}
/* Attach (allocating on first use) per-device state to "dev" and record its
 * chipset, logging only when the value actually changes. */
void nvrm_device_set_chipset(struct gpu_object *dev, int chipset)
{
	struct nvrm_device *d = nvrm_dev(dev);

	if (!d)
	{
		d = calloc(1, sizeof(*d));
		/* bug fix: calloc result was dereferenced unchecked (CERT MEM32-C);
		 * a failed allocation would have crashed on d->chipset below */
		if (!d)
		{
			mmt_error("out of memory%s\n", "");
			abort();
		}
		dev->class_data = d;
		dev->class_data_destroy = device_destroy;
	}

	if (chipset != d->chipset)
	{
		d->chipset = chipset;
		mmt_log("Chipset: NV%02X\n", chipset);
	}
}
static void decode_nvrm_ioctl_card_info3(struct nvrm_ioctl_card_info3 *s) { int nl = 0; int i, j; for (i = 0; i < 32; ++i) { int valid = 0; for (j = 0; j < sizeof(s->card[i]); ++j) if (((unsigned char *)&s->card[i])[j] != 0) { valid = 1; break; } if (valid) { if (!nl) { mmt_log_cont_nl(); nl = 1; } mmt_log(" %d: ", i); nvrm_reset_pfx(); nvrm_print_x32(&s->card[i], flags); nvrm_print_x32(&s->card[i], domain); nvrm_print_d32_align(&s->card[i], bus, 3); nvrm_print_d32_align(&s->card[i], slot, 3); nvrm_print_d32_align(&s->card[i], function, 3); nvrm_print_pad_x8(&s->card[i], _pad0); nvrm_print_x16(&s->card[i], vendor_id); nvrm_print_x16(&s->card[i], device_id); nvrm_print_pad_x32(&s->card[i], _pad1); nvrm_print_x32(&s->card[i], gpu_id); nvrm_print_x32(&s->card[i], interrupt); nvrm_print_pad_x32(&s->card[i], _pad2); nvrm_print_x64(&s->card[i], reg_address); nvrm_print_x64(&s->card[i], reg_size); nvrm_print_x64(&s->card[i], fb_address); nvrm_print_x64(&s->card[i], fb_size); nvrm_print_d32(&s->card[i], index); nvrm_print_pad_x32(&s->card[i], _pad3); nvrm_print_pad_x32(&s->card[i], _pad4); nvrm_print_pad_x32(&s->card[i], _pad5); mmt_log_cont_nl(); } } }
/* Register a newly created nvrm object, optionally dump the resulting
 * object tree, and keep the owning device's fifo count in sync. */
static struct gpu_object *nvrm_add_object(uint32_t fd, uint32_t cid, uint32_t parent, uint32_t handle, uint32_t class_)
{
	struct gpu_object *obj = gpu_object_add(fd, cid, parent, handle, class_);

	if (dump_object_tree_on_create_destroy)
	{
		mmt_log("Object tree after create: %s\n", "");
		dump_object_trees(obj);
	}

	/* fifo objects bump the per-device fifo counter */
	if (is_fifo_ib_class(class_) || is_fifo_dma_class(class_))
	{
		struct gpu_object *dev = nvrm_get_device(obj);
		if (dev && dev->class_data)
			nvrm_dev(dev)->fifos++;
	}

	return obj;
}
/* Dump all object trees: roots are objects whose parent handle equals their
 * own handle; objects with an unresolved parent are listed separately as
 * orphans. */
static void dump_object_trees(struct gpu_object *highlight)
{
	struct gpu_object *obj;
	int have_orphans = 0;

	for (obj = gpu_objects; obj; obj = obj->next)
	{
		if (obj->parent == obj->handle)
			dump_object_tree(obj, 0, highlight);
		else if (!obj->parent_object)
			have_orphans = 1;
	}

	if (!have_orphans)
		return;

	mmt_log("Orphaned objects: %s\n", "");
	for (obj = gpu_objects; obj; obj = obj->next)
		if (obj->parent != obj->handle && !obj->parent_object)
			dump_object_tree(obj, 0, highlight);
}
/* Handle object destruction: optionally dump the tree before it goes away,
 * decrement the owning device's fifo count for fifo classes, and tear the
 * object down. Unknown handles from a known-offender class list are ignored
 * silently. */
static void nvrm_destroy_gpu_object(uint32_t fd, uint32_t cid, uint32_t parent, uint32_t handle)
{
	struct gpu_object *obj = gpu_object_find(cid, handle);

	if (!obj)
	{
		// userspace deletes objects which it didn't create and kernel returns SUCCESS :/
		// just ignore known offenders
		switch (handle & 0xffff)
		{
			case 0x0014:
			case 0x0202:
			case 0x0301:
			case 0x0308:
			case 0x0360:
			case 0x0371:
			case 0x1e00:
			case 0x1e01:
			case 0x1e10:
			case 0x1e20:
				break;
			default:
				mmt_error("trying to destroy object 0x%08x / 0x%08x which does not exist!\n", cid, handle);
		}
		return;
	}

	if (dump_object_tree_on_create_destroy)
	{
		mmt_log("Object tree before destroy: %s\n", "");
		dump_object_trees(obj);
	}

	if (is_fifo_ib_class(obj->class_) || is_fifo_dma_class(obj->class_))
	{
		struct gpu_object *dev = nvrm_get_device(obj);
		if (dev && dev->class_data)
			nvrm_dev(dev)->fifos--;
	}

	gpu_object_destroy(obj);
}
static void demmt_open(struct mmt_open *o, void *state) { if (o->ret < MAX_FD) { struct open_file *f = &open_files[o->ret]; f->path = strdup((void *)o->path.data); if (strstr(f->path, "/dev/nvidia")) f->type = FDNVIDIA; else if (strstr(f->path, "/dev/ati/")) f->type = FDFGLRX; else if (strstr(f->path, "/dev/dri/card")) f->type = FDDRM; else f->type = FDUNK; } if (dump_sys_open) mmt_log("sys_open: %s, flags: 0x%x, mode: 0x%x, ret: %d\n", o->path.data, o->flags, o->mode, o->ret); }
/* Dispatch an ioctl-entry trace record to the decoder matching the fd's
 * type. If the fd type was never learned at open() time, guess it from the
 * ioctl "type" byte and cache the guess globally. Optionally dump the raw
 * payload afterwards. */
static void __demmt_ioctl_pre(uint32_t fd, uint32_t id, struct mmt_buf *data, void *state, struct mmt_memory_dump *args, int argc)
{
	uint8_t dir, type, nr;
	uint16_t size;
	decode_ioctl_id(id, &dir, &type, &nr, &size);

	int print_raw = 1;

	enum mmt_fd_type fdtype = demmt_get_fdtype(fd);
	if (fdtype == FDUNK)
	{
		if (type == 0x64) // DRM
			fdtype = undetected_fdtype = FDDRM;
		else if (type == 0x46) // nvidia
			fdtype = undetected_fdtype = FDNVIDIA;
	}

	switch (fdtype)
	{
		case FDDRM:
			print_raw = demmt_drm_ioctl_pre(fd, id, dir, nr, size, data, state, args, argc);
			break;
		case FDNVIDIA:
			print_raw = nvrm_ioctl_pre(fd, id, dir, nr, size, data, state, args, argc);
			break;
		case FDFGLRX:
			print_raw = fglrx_ioctl_pre(fd, id, dir, nr, size, data, state, args, argc);
			break;
		default:
			mmt_error("ioctl 0x%x called for unknown type of file [%d, %d]\n", id, fd, fdtype);
	}

	if (print_raw || dump_raw_ioctl_data)
	{
		mmt_log("ioctl pre 0x%02x (0x%08x), fd: %d, dir: %2s, size: %4d", nr, id, fd, dir_desc[dir], size);
		/* flag payloads whose length disagrees with the encoded ioctl size */
		if (size != data->len)
			mmt_log_cont(", data.len: %d", data->len);
		ioctl_data_print(data);
		mmt_log_cont_nl();
	}
}
/* Handle NVRM_IOCTL_CREATE_VSPACE: when the kernel returned an mmap offset
 * (foffset), attach (parent, handle) to the CPU mapping with that offset, or
 * remember the association in unk_maps until the mmap record shows up.
 * Finally, register the new object. */
static void handle_nvrm_ioctl_create_vspace(struct nvrm_ioctl_create_vspace *s)
{
	if (s->foffset != 0)
	{
		struct buffer *buf;
		int found = 0;
		/* look for the CPU mapping whose mmap offset matches foffset */
		for (buf = buffers_list; buf != NULL; buf = buf->next)
			if (buf->mmap_offset == s->foffset)
			{
				buf->data1 = s->parent;
				buf->data2 = s->handle;
				found = 1;
				break;
			}

		if (!found)
		{
			for (buf = gpu_only_buffers_list; buf != NULL; buf = buf->next)
			{
				if (buf->data1 == s->parent && buf->data2 == s->handle)
				{
					mmt_log("TODO: gpu only buffer found (0x%016lx), NVRM_IOCTL_CREATE_VSPACE handling needs to be updated!\n", buf->gpu_start);
					break;
				}
			}

			/* no mapping yet: queue it so a later mmap can resolve it */
			struct unk_map *m = malloc(sizeof(struct unk_map));
			// NOTE(review): malloc result is used unchecked — crashes on OOM
			m->data1 = s->parent;
			m->data2 = s->handle;
			m->mmap_offset = s->foffset;
			m->next = unk_maps;
			unk_maps = m;
		}
	}

	// NOTE(review): nvrm_add_object as defined in this file takes fd as its
	// first argument, but this call passes only 4 arguments — confirm the
	// intended prototype / whether an fd parameter was dropped here.
	nvrm_add_object(s->cid, s->parent, s->handle, s->cls);
}
/* Handler for extended mmap trace records (with fd/prot/flags): optionally
 * log the mapping — printing prot/flags only when they differ from the usual
 * PROT_READ|PROT_WRITE / MAP_SHARED, unless details were requested — then
 * register it with the nvrm layer. */
void __demmt_mmap2(uint64_t start, uint64_t len, uint32_t id, uint64_t offset, uint32_t fd, uint32_t prot, uint32_t flags, void *state)
{
	if (dump_sys_mmap)
	{
		mmt_log("mmap: address: 0x%" PRIx64 ", length: 0x%08" PRIx64 ", id: %d, offset: 0x%08" PRIx64 ", fd: %d",
				start, len, id, offset, fd);

		if (dump_sys_mmap_details || prot != (PROT_READ | PROT_WRITE))
		{
			mmt_log_cont(", prot: %s", "");
			decode_mmap_prot(prot);
		}
		if (dump_sys_mmap_details || flags != MAP_SHARED)
		{
			mmt_log_cont(", flags: %s", "");
			decode_mmap_flags(flags);
		}

		/* bug fix: the log line was never terminated — the whole record was
		 * built with mmt_log/mmt_log_cont but no trailing newline, so the
		 * next log entry ran together with it */
		mmt_log_cont_nl();
	}

	nvrm_mmap(id, fd, start, len, offset);
}
/* Handler for the nvidia "ioctl 4d" trace record: log its embedded string. */
void demmt_nv_ioctl_4d(struct mmt_nvidia_ioctl_4d *ctl, void *state) { mmt_log("ioctl4d: %s\n", ctl->str.data); }