Ejemplo n.º 1
0
//------------------------------------------------------------------------------
static void write_addr(void** where, void* to_write)
{
    struct region_info_t region_info;

    get_region_info(where, &region_info);
    set_region_write_state(&region_info, 1);

    if (!write_vm(current_proc(), where, &to_write, sizeof(to_write)))
    {
        LOG_INFO("VM write to %p failed (err = %d)", where, GetLastError());
    }

    set_region_write_state(&region_info, 0);
}
Ejemplo n.º 2
0
//------------------------------------------------------------------------------
// Install a jmp-style hook: redirect 'to_hook' to 'hook' and return a
// trampoline that runs the displaced original instruction (usable to call the
// unhooked function).  Returns NULL on failure.
// NOTE(review): on the failure paths below, the page obtained from
// alloc_trampoline() is not released here — confirm trampoline pages are
// reclaimed elsewhere.
static void* hook_jmp_impl(void* to_hook, void* hook)
{
    struct region_info_t region_info;
    char* trampoline;
    char* write;
    int inst_len;

    LOG_INFO("Attempting to hook at %p with %p", to_hook, hook);

    // Resolve any jump thunk so the hook is applied to the real target.
    to_hook = follow_jump(to_hook);

    // Work out the length of the first instruction. It will be copied into
    // the trampoline.
    inst_len = get_instruction_length(to_hook);
    if (inst_len <= 0)
    {
        LOG_INFO("Unable to match instruction %08X", *(int*)(to_hook));
        return NULL;
    }

    // Prepare
    trampoline = write = alloc_trampoline(to_hook);
    if (trampoline == NULL)
    {
        LOG_INFO("Failed to allocate a page for trampolines.");
        return NULL;
    }

    // In: copy the displaced instruction plus a jump back to the original.
    write = write_trampoline_in(trampoline, to_hook, inst_len);
    if (write == NULL)
    {
        LOG_INFO("Failed to write trampoline in.");
        return NULL;
    }

    // Out: patch the target itself, so its region must be made writable for
    // the duration of the write and then restored.
    get_region_info(to_hook, &region_info);
    set_region_write_state(&region_info, 1);
    write = write_trampoline_out(write, to_hook, hook);
    set_region_write_state(&region_info, 0);
    if (write == NULL)
    {
        LOG_INFO("Failed to write trampoline out.");
        return NULL;
    }

    return trampoline;
}
/*===========================================================================*
 *                              do_info                                      *
 *===========================================================================*/
/* Service a VM information request.  m_lsys_vm_info.what selects the kind:
 * VMIW_STATS (overall memory statistics), VMIW_USAGE (per-process usage),
 * or VMIW_REGION (a batch of region descriptors).  The reply data is copied
 * out to the caller's buffer at m_lsys_vm_info.ptr.  Returns OK or an errno.
 */
int do_info(message *m)
{
	struct vm_stats_info vsi;
	struct vm_usage_info vui;
	static struct vm_region_info vri[MAX_VRI_COUNT];
	struct vmproc *vmp;
	vir_bytes addr, size, next, ptr;
	int r, pr, dummy, count, free_pages, largest_contig;

	/* The caller endpoint must map to a valid process slot. */
	if (vm_isokendpt(m->m_source, &pr) != OK)
		return EINVAL;
	vmp = &vmproc[pr];

	/* Destination buffer in the caller's address space. */
	ptr = (vir_bytes) m->m_lsys_vm_info.ptr;

	switch(m->m_lsys_vm_info.what) {
	case VMIW_STATS:
		vsi.vsi_pagesize = VM_PAGE_SIZE;
		vsi.vsi_total = total_pages;
		memstats(&dummy, &free_pages, &largest_contig);
		vsi.vsi_free = free_pages;
		vsi.vsi_largest = largest_contig;

		get_stats_info(&vsi);

		addr = (vir_bytes) &vsi;
		size = sizeof(vsi);

		break;

	case VMIW_USAGE:
		/* A negative endpoint selects kernel usage information. */
		if(m->m_lsys_vm_info.ep < 0)
			get_usage_info_kernel(&vui);
		else if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;
		else get_usage_info(&vmproc[pr], &vui);

		addr = (vir_bytes) &vui;
		size = sizeof(vui);

		break;

	case VMIW_REGION:
		/* SELF means the requesting process itself. */
		if(m->m_lsys_vm_info.ep == SELF) {
			m->m_lsys_vm_info.ep = m->m_source;
		}
		if (vm_isokendpt(m->m_lsys_vm_info.ep, &pr) != OK)
			return EINVAL;

		/* Cap the request to the static buffer; 'next' is a resume
		 * cursor so large address spaces can be fetched in batches.
		 */
		count = MIN(m->m_lsys_vm_info.count, MAX_VRI_COUNT);
		next = m->m_lsys_vm_info.next;

		count = get_region_info(&vmproc[pr], vri, count, &next);

		m->m_lsys_vm_info.count = count;
		m->m_lsys_vm_info.next = next;

		addr = (vir_bytes) vri;
		size = sizeof(vri[0]) * count;

		break;

	default:
		return EINVAL;
	}

	/* Nothing to copy out. */
	if (size == 0)
		return OK;

	/* Make sure that no page faults can occur while copying out. A page
	 * fault would cause the kernel to send a notify to us, while we would
	 * be waiting for the result of the copy system call, resulting in a
	 * deadlock. Note that no memory mapping can be undone without the
	 * involvement of VM, so we are safe until we're done.
	 */
	r = handle_memory_once(vmp, ptr, size, 1 /*wrflag*/);
	if (r != OK) return r;

	/* Now that we know the copy out will succeed, perform the actual copy
	 * operation.
	 */
	return sys_datacopy(SELF, addr,
		(vir_bytes) vmp->vm_endpoint, ptr, size);
}
Ejemplo n.º 4
0
/*
 * Build the memory map of the process designated by 'acc': enumerate its
 * address-space mappings via /proc/<pid>/as and record one bt_mem_region_t
 * per shared ELF mapping (shared-object text segments) in 'memmap'.
 *
 * Returns 0 on success, in which case memmap->region is a heap-allocated
 * array of memmap->count entries owned by the caller (each entry's 'name'
 * is strdup'd).  Returns -1 with errno set on failure, leaving 'memmap'
 * empty (count == 0, region == 0).
 */
int
bt_load_memmap (bt_accessor_t *acc, bt_memmap_t *memmap)
{
	pid_t           pid;
	char            path[64];
	int             fd = -1;	/* -1 marks "open() not (yet) succeeded" for the error path */
	int             err = EOK;
	procfs_info     info;
	int             query_nmaps, nmaps;
	bt_mem_region_t *reg;
	procfs_mapinfo  *maps=0;
	int             i, ret;
	union {
		procfs_debuginfo    i;
		char                path[1024];
	} debug_info;
	mem_reader_t    rdr;

	if (acc==0 || memmap==0) {
		errno=EINVAL;
		return -1;
	}

	memmap->count=0;
	memmap->region=0;

	if (acc->type == BT_SELF)
		pid = getpid();
	else
		pid = acc->pid;

	/* Bounded formatting: a pid cannot realistically overflow path[64],
	 * but snprintf makes that guarantee explicit. */
	snprintf(path, sizeof(path), "/proc/%d/as", pid);

	if ((fd = open(path, O_RDONLY)) == -1) {
		err=errno;
		goto load_memmap_err;
	}

	if ((err=devctl(fd, DCMD_PROC_INFO, &info, sizeof(info), 0)) != EOK) {
		goto load_memmap_err;
	}
	/* First query only the number of mappings, then fetch them all. */
	if ((err=devctl(fd, DCMD_PROC_MAPINFO, NULL, 0, &query_nmaps)) != EOK) {
		goto load_memmap_err;
	}
	maps = malloc(query_nmaps * sizeof(*maps));
	if (maps == NULL) {
		err = ENOMEM;
		goto load_memmap_err;
	}
	if ((err=devctl(fd, DCMD_PROC_MAPINFO, maps,
					query_nmaps*sizeof(*maps), &nmaps)) != EOK) {
		goto load_memmap_err;
	}

	/* The map count may have changed between the two queries. */
	nmaps = min(query_nmaps, nmaps);
	memmap->region = calloc(nmaps, sizeof(bt_mem_region_t));
	if (memmap->region == 0) {
		err = ENOMEM;
		goto load_memmap_err;
	}

	if (pid == getpid()) {
		_bt_mem_reader_init(&rdr,
							_bt_read_mem_direct_safe,
							_bt_read_mem_direct,
							fd,
							0/*cache*/
							);
	} else {
		_bt_mem_reader_init(&rdr,
							_bt_read_mem_indirect_safe,
							_bt_read_mem_indirect,
							fd,
							alloca(MEM_RDR_CACHE_SZ)/*cache*/
							);
	}

	for (i = 0; i < nmaps; i++) {
		reg = &(memmap->region[memmap->count]);

		/*
		 * shared libs are mapped twice: once for the text segments,
		 * and once for the data segments.  So, the only maps that are
		 * interesting for backtrace are the shared elf (i.e. text segments).
		 * Skip anything else.
		 */
		if (!((maps[i].flags & MAP_ELF) && (maps[i].flags & MAP_SHARED))) {
			continue;
		}

		debug_info.i.vaddr = maps[i].vaddr;
		debug_info.i.path[0]=0;
		err = devctl(fd, DCMD_PROC_MAPDEBUG, &debug_info,
					 sizeof(debug_info), 0);
		if (err != EOK) goto load_memmap_err;

		ret=get_region_info(&rdr, reg,
							maps[i].vaddr, maps[i].vaddr-debug_info.i.vaddr);
		if (ret == -1) {
			err=errno;
			goto load_memmap_err;
		}
		if (ret == 1) {
			reg->index=memmap->count;
			reg->name = strdup(debug_info.i.path);
			if (reg->name == 0) {
				err = ENOMEM;
				goto load_memmap_err;
			}
			memmap->count ++;
		}
	}

	if (memmap->count == 0) {
		free(memmap->region);
		memmap->region = 0;	/* don't hand back a dangling pointer */
	} else if (memmap->count < nmaps) {
		// Resize the memory containing the regions, since not all maps
		// will be elf.  Keep the original block if the (shrinking)
		// realloc unexpectedly fails, instead of leaking it by
		// overwriting the only pointer to it.
		bt_mem_region_t *shrunk =
			realloc(memmap->region,
					sizeof(bt_mem_region_t)*memmap->count);
		if (shrunk != 0)
			memmap->region = shrunk;
	}

	close(fd);
	free(maps);
	return 0;

  load_memmap_err:
	if (fd != -1) close(fd);	/* open() itself may be what failed */
	free(maps);			/* free(NULL) is a no-op */
	/* Release any region names strdup'd before the failure. */
	for (i = 0; i < memmap->count; i++)
		free(memmap->region[i].name);
	memmap->count=0;
	if (memmap->region) { free(memmap->region); memmap->region = 0; }
	errno=err;				 /* in case close+free change the errno */
	return -1;
}