/*
 * xc_map_foreign_pages - map an array of foreign-domain frames into our
 * address space as one contiguous region.
 * @xc_handle: open libxc interface handle
 * @dom:       domain whose frames are being mapped
 * @prot:      mmap protection bits (PROT_READ, PROT_WRITE, ...)
 * @arr:       caller-owned array of @num frame numbers (not modified)
 * @num:       number of frames in @arr
 *
 * Returns the mapped region on success, or NULL with errno set on failure.
 * The caller unmaps the region with munmap(res, num * PAGE_SIZE).
 */
void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
                           const xen_pfn_t *arr, int num)
{
    xen_pfn_t *pfn;
    void *res;
    int i;

    /*
     * Fix: reject negative counts and counts whose byte size would wrap
     * size_t.  Previously a negative or huge num could make
     * num * sizeof(*pfn) overflow (notably on 32-bit hosts), yielding a
     * short allocation and a heap overflow in the memcpy below.
     */
    if (num < 0 || (size_t)num > (size_t)-1 / sizeof(*pfn)) {
        errno = EINVAL;
        return NULL;
    }

    /* Work on a private copy: xc_map_foreign_batch() writes error flags
     * back into the pfn array, and our input is const. */
    pfn = malloc((size_t)num * sizeof(*pfn));
    if (!pfn)
        return NULL;
    memcpy(pfn, arr, (size_t)num * sizeof(*pfn));

    res = xc_map_foreign_batch(xc_handle, dom, prot, pfn, num);
    if (res) {
        /* A top nibble of 0xF in a returned entry marks a per-frame
         * mapping failure; any such frame invalidates the whole region. */
        for (i = 0; i < num; i++) {
            if ((pfn[i] & 0xF0000000UL) == 0xF0000000UL) {
                /*
                 * xc_map_foreign_batch() doesn't give us an error
                 * code, so we have to make one up.  May not be the
                 * appropriate one.
                 */
                errno = EINVAL;
                munmap(res, num * PAGE_SIZE);
                res = NULL;
                break;
            }
        }
    }

    free(pfn);
    return res;
}
/** * map_tbufs - memory map Xen trace buffers into user space * @tbufs_mfn: mfn of the trace buffers * @num: number of trace buffers to map * @size: size of each trace buffer * * Maps the Xen trace buffers them into process address space. */ static struct t_struct *map_tbufs(unsigned long tbufs_mfn, unsigned int num, unsigned long tinfo_size) { int xc_handle; static struct t_struct tbufs = { 0 }; int i; xc_handle = xc_interface_open(); if ( xc_handle < 0 ) { exit(EXIT_FAILURE); } /* Map t_info metadata structure */ tbufs.t_info = xc_map_foreign_range(xc_handle, DOMID_XEN, tinfo_size, PROT_READ, tbufs_mfn); if ( tbufs.t_info == 0 ) { PERROR("Failed to mmap trace buffers"); exit(EXIT_FAILURE); } if ( tbufs.t_info->tbuf_size == 0 ) { fprintf(stderr, "%s: tbuf_size 0!\n", __func__); exit(EXIT_FAILURE); } /* Map per-cpu buffers */ tbufs.meta = (struct t_buf **)calloc(num, sizeof(struct t_buf *)); tbufs.data = (unsigned char **)calloc(num, sizeof(unsigned char *)); if ( tbufs.meta == NULL || tbufs.data == NULL ) { PERROR( "Failed to allocate memory for buffer pointers\n"); exit(EXIT_FAILURE); } for(i=0; i<num; i++) { const uint32_t *mfn_list = (const uint32_t *)tbufs.t_info + tbufs.t_info->mfn_offset[i]; int j; xen_pfn_t pfn_list[tbufs.t_info->tbuf_size]; for ( j=0; j<tbufs.t_info->tbuf_size; j++) pfn_list[j] = (xen_pfn_t)mfn_list[j]; tbufs.meta[i] = xc_map_foreign_batch(xc_handle, DOMID_XEN, PROT_READ | PROT_WRITE, pfn_list, tbufs.t_info->tbuf_size); if ( tbufs.meta[i] == NULL ) { PERROR("Failed to map cpu buffer!"); exit(EXIT_FAILURE); } tbufs.data[i] = (unsigned char *)(tbufs.meta[i]+1); } xc_interface_close(xc_handle); return &tbufs; }
int main(int argc, char **argv) { int fd = -1; int domid = -1; char buf[20]; // This should be enough by now char *cptr; unsigned long init_task_vaddr = 0, init_task_tsk = 0; int i = 0; unsigned long offset, pfn; unsigned long nextp = 0; xc_handle = xc_interface_open(); if (xc_handle == -1) exit(-1); buf[0] = buf[19] = 0; fd = open("config", O_RDONLY); if (fd == -1) { printf("Unable to read config file.\n"); exit(-1); } read(fd, buf, 19); if (buf[0] == 0) { printf("Unable to get virtual address of init_task.\n" "Check init_task_vaddr\n"); exit(0); } cptr = buf; domid = strtoul(cptr, &cptr, 10); init_task_vaddr = strtoul(cptr, NULL, 16); nr_pages_to_map = vmi_get_domain_max_mem(xc_handle, domid); munmap(shinfo, getpagesize()); // mapping of p2m table shinfo_mfn = vmi_get_domain_shinfo(xc_handle, domid); shinfo = (shared_info_t*)vmi_map_range_ro(xc_handle, domid, getpagesize(), shinfo_mfn); if (shinfo == NULL) { printf("Unable to map shared info pages\n"); exit(-1); } arch_shinfo = &(shinfo->arch); /* This ONLY works for x86 platform */ max_pfn = arch_shinfo->max_pfn; p2m_list_list = vmi_map_range_ro(xc_handle, domid, getpagesize(), arch_shinfo->pfn_to_mfn_frame_list_list); if (p2m_list_list == NULL) { printf("Unable to map p2m list list page\n"); exit(-1); } p2m_list = xc_map_foreign_batch(xc_handle, domid, PROT_READ, p2m_list_list, (max_pfn+(FPP*FPP)-1)/(FPP*FPP)); if (p2m_list == NULL) { printf("Unable to map p2m list pages\n"); exit(-1); } p2m_table = xc_map_foreign_batch(xc_handle, domid, PROT_READ, p2m_list, (max_pfn+FPP-1)/FPP); if (p2m_table == NULL) { printf("Unable to map p2m table pages\n"); exit(-1); } // done mapping p2m table init_task_tsk = init_task_vaddr + TSK_OFFSET; pfn = (init_task_vaddr - KERNEL_OFFSET) >> 12; region = vmi_map_range_ro(xc_handle, domid, getpagesize(), ((xen_pfn_t*)p2m_table)[pfn]); if (region == NULL) { printf("Unable to map region pages\n"); exit(-1); } offset = (init_task_vaddr - KERNEL_OFFSET) & ~PAGE_MASK; printf("%s\n", 
((char*)region+offset+COMM_OFFSET) ); memcpy((void*)&nextp, (const void*)(region+offset+TSK_OFFSET), 4); munmap(region, getpagesize()); while (1) { pfn = (nextp - KERNEL_OFFSET) >> 12; offset = (nextp - KERNEL_OFFSET) & ~PAGE_MASK; region = vmi_map_range_ro(xc_handle, domid, getpagesize(), ((xen_pfn_t*)p2m_table)[pfn]); if (region == NULL) { printf("Unable to map region pages\n"); exit(-1); } printf("%s\n", ((char*)region+offset+COMM_TO_TSK_OFFSET)); memcpy(&nextp, region+offset, 4); if (nextp == init_task_tsk) break; munmap(region, getpagesize()); } if (region) munmap(region, getpagesize()); /* clean up */ close(xc_handle); return 0; }