/* Allocate memory suitable for sharing: the block is aligned to the
 * system page size via qemu_memalign(). */
void *qemu_vmalloc(size_t size)
{
    void *mem = qemu_memalign(getpagesize(), size);

    trace_qemu_vmalloc(size, mem);
    return mem;
}
void *qemu_vmalloc(size_t size)
{
    void *mem;

    /* FIXME: not an optimal solution — VirtualAlloc works at 64Kb
     * allocation granularity — but it does guarantee that the returned
     * memory is page aligned. */
    if (size == 0) {
        abort();
    }
    mem = qemu_oom_check(VirtualAlloc(NULL, size, MEM_COMMIT,
                                      PAGE_READWRITE));
    trace_qemu_vmalloc(size, mem);
    return mem;
}
/* alloc shared memory pages */
void *qemu_vmalloc(size_t size)
{
    void *ptr;
    /* Default to the large QEMU_VMALLOC_ALIGN alignment — presumably so
     * big allocations can be backed by huge pages; confirm against the
     * macro's definition, which is outside this chunk. */
    size_t align = QEMU_VMALLOC_ALIGN;

#if defined(CONFIG_VALGRIND)
    if (running_on_valgrind < 0) {
        /* First call, test whether we are running on Valgrind.
           This is a substitute for RUNNING_ON_VALGRIND from
           valgrind.h. */
        const char *ld = getenv("LD_PRELOAD");
        running_on_valgrind = (ld != NULL && strstr(ld, "vgpreload"));
    }
#endif

    /* Fall back to plain page alignment when the request is smaller than
     * the large alignment, or when running under Valgrind.
     * NOTE(review): running_on_valgrind is referenced here even when
     * CONFIG_VALGRIND is undefined — its declaration (or a stub macro)
     * must exist at file scope outside this chunk; verify it is defined
     * as 0 in the non-Valgrind build. */
    if (size < align || running_on_valgrind) {
        align = getpagesize();
    }
    ptr = qemu_memalign(align, size);
    trace_qemu_vmalloc(size, ptr);
    return ptr;
}