Example #1
void flush_last_data(const char *log_dir)
{
	allocation_event_buffer *event_buffer = open_or_create_allocation_event_buffer(log_dir);
	if (event_buffer == NULL) {
		return;
	}
	
	if (has_event_in_buffer(event_buffer) == false) {
		__malloc_printf("has no event in buffer");
		close_allocation_event_buffer(event_buffer);
		return;
	}
	
	stack_frames_db *stack_frames_writer = open_or_create_stack_frames_db(log_dir);
	allocation_event_db *allocation_event_writer = open_or_create_allocation_event_db(log_dir);
	
	if (stack_frames_writer != NULL && allocation_event_writer != NULL) {
		while (has_event_in_buffer(event_buffer)) {
			int64_t next_index = 0;
			memory_logging_event *curr_event = get_event_from_buffer(event_buffer, &next_index);
			
			if (is_next_index_valid(event_buffer, next_index) == false) {
				__malloc_printf("next index not valid");
				break;
			}
			
			if (curr_event->event_type == EventType_Alloc) {
				uint32_t object_type = 0;
				uint32_t stack_identifier = 0;
				if (curr_event->stack_size > 0) {
					stack_identifier = add_stack_frames_in_table(stack_frames_writer, curr_event->stacks + curr_event->num_hot_to_skip, curr_event->stack_size - curr_event->num_hot_to_skip); // dedup: returns the identifier of this unique stack
				} else {
					__malloc_printf("Data corrupted!");
					report_error(MS_ERRC_DATA_CORRUPTED2);
					break;
				}
				// Try to get the VM memory type (user tag) from type_flags
				if (object_type == 0) {
					VM_GET_FLAGS_ALIAS(curr_event->type_flags, object_type);
				}
				add_allocation_event(allocation_event_writer, curr_event->address, curr_event->type_flags, object_type, curr_event->argument, stack_identifier, curr_event->t_id);
			} else if (curr_event->event_type == EventType_Free) {
				del_allocation_event(allocation_event_writer, curr_event->address, curr_event->type_flags);
			} else if (curr_event->event_type == EventType_Update) {
				update_allocation_event_object_type(allocation_event_writer, curr_event->address, curr_event->argument);
			}
			
			update_read_index(event_buffer, next_index);
		}
	}
	
	__malloc_printf("done");

	close_stack_frames_db(stack_frames_writer);
	close_allocation_event_db(allocation_event_writer);
	close_allocation_event_buffer(event_buffer);
}
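A minimal caller sketch for the flush above, assuming it runs once at startup to drain events persisted by a previous session; get_current_log_dir() is a hypothetical helper, not part of the original code:

// Hypothetical startup hook: replay events left in the buffer by a previous
// run before live logging starts appending new ones.
static void replay_previous_session(void)
{
	const char *log_dir = get_current_log_dir(); // hypothetical helper
	if (log_dir != NULL) {
		flush_last_data(log_dir); // drains the buffer into the DBs and closes everything
	}
}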
Example #2
File: kern_mman.c Project: argp/xnu
/*
 * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct
 * XXX usage is PROT_* from an interface perspective.  Thus the values of
 * XXX VM_PROT_* and PROT_* need to correspond.
 */
int
mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval)
{
	/*
	 *	Map in special device (must be SHARED) or file
	 */
	struct fileproc *fp;
	struct			vnode *vp;
	int			flags;
	int			prot;
	int			err = 0;
	vm_map_t		user_map;
	kern_return_t		result;
	vm_map_offset_t		user_addr;
	vm_map_size_t		user_size;
	vm_object_offset_t	pageoff;
	vm_object_offset_t	file_pos;
	int			alloc_flags = 0;
	vm_tag_t		tag = VM_KERN_MEMORY_NONE;
	vm_map_kernel_flags_t	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	boolean_t		docow;
	vm_prot_t		maxprot;
	void 			*handle;
	memory_object_t		pager = MEMORY_OBJECT_NULL;
	memory_object_control_t	 control;
	int			mapanon = 0;
	int			fpref = 0;
	int			error = 0;
	int fd = uap->fd;
	int num_retries = 0;

	/*
	 * Note that for UNIX03 conformance, there is additional parameter
	 * checking for the mmap() system call in libsyscall prior to entering
	 * the kernel.  The sanity checks and argument validation done in this
	 * function are not the only places one can get returned errnos.
	 */

	user_map = current_map();
	user_addr = (vm_map_offset_t)uap->addr;
	user_size = (vm_map_size_t) uap->len;

	AUDIT_ARG(addr, user_addr);
	AUDIT_ARG(len, user_size);
	AUDIT_ARG(fd, uap->fd);

	prot = (uap->prot & VM_PROT_ALL);
#if 3777787
	/*
	 * Since the hardware currently does not support writing without
	 * read-before-write, or execution-without-read, if the request is
	 * for write or execute access, we must imply read access as well;
	 * otherwise programs expecting this to work will fail to operate.
	 */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* radar 3777787 */

	flags = uap->flags;
	vp = NULLVP;

	/*
	 * The vm code does not have prototypes & the compiler doesn't do the
	 * right thing when you cast a 64-bit value and pass it in a function
	 * call. So here it is.
	 */
	file_pos = (vm_object_offset_t)uap->pos;


	/* make sure mapping fits into numeric range etc */
	if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64)
		return (EINVAL);

	/*
	 * Align the file position to a page boundary,
	 * and save its page offset component.
	 */
	pageoff = (file_pos & vm_map_page_mask(user_map));
	file_pos -= (vm_object_offset_t)pageoff;


	/* Adjust size for rounding (on both ends). */
	user_size += pageoff;	/* low end... */
	user_size = vm_map_round_page(user_size,	
				      vm_map_page_mask(user_map)); /* hi end */

	if (flags & MAP_JIT) {
		if ((flags & MAP_FIXED) ||
		    (flags & MAP_SHARED) ||
		    !(flags & MAP_ANON) ||
		    (flags & MAP_RESILIENT_CODESIGN) ||
		    (flags & MAP_RESILIENT_MEDIA)) {
			return EINVAL;
		}
	}

	if ((flags & MAP_RESILIENT_CODESIGN) ||
	    (flags & MAP_RESILIENT_MEDIA)) {
		if ((flags & MAP_ANON) ||
		    (flags & MAP_JIT)) {
			return EINVAL;
		}
		if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
			return EPERM;
		}
	}

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		user_addr -= pageoff;
		if (user_addr & vm_map_page_mask(user_map))
			return (EINVAL);
	}
#ifdef notyet
	/* Do not have APIs to get this info; need to wait till then. */
	/*
	 * XXX for non-fixed mappings where no hint is provided or
	 * the hint would fall in the potential heap space,
	 * place it after the end of the largest possible heap.
	 *
	 * There should really be a pmap call to determine a reasonable
	 * location.
	 */
	else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					  vm_map_page_mask(user_map)))
		addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ,
					 vm_map_page_mask(user_map));

#endif

	alloc_flags = 0;

	if (flags & MAP_ANON) {

		maxprot = VM_PROT_ALL;
#if CONFIG_MACF
		/*
		 * Entitlement check.
		 */
		error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot);
		if (error) {
			return EINVAL;
		}		
#endif /* MAC */

		/*
		 * Mapping blank space is trivial.  Use positive fds as the alias
		 * value for memory tracking. 
		 */
		if (fd != -1) {
			/*
			 * Use "fd" to pass (some) Mach VM allocation flags,
			 * (see the VM_FLAGS_* definitions).
			 */
			alloc_flags = fd & (VM_FLAGS_ALIAS_MASK |
					    VM_FLAGS_SUPERPAGE_MASK |
					    VM_FLAGS_PURGABLE |
					    VM_FLAGS_4GB_CHUNK);
			if (alloc_flags != fd) {
				/* reject if there are any extra flags */
				return EINVAL;
			}
			VM_GET_FLAGS_ALIAS(alloc_flags, tag);
			alloc_flags &= ~VM_FLAGS_ALIAS_MASK;
		}
			
		handle = NULL;
		file_pos = 0;
		mapanon = 1;
	} else {
		struct vnode_attr va;
		vfs_context_t ctx = vfs_context_current();

		if (flags & MAP_JIT)
			return EINVAL;

		/*
		 * Mapping file, get fp for validation. Obtain vnode and make
		 * sure it is of appropriate type.
		 */
		err = fp_lookup(p, fd, &fp, 0);
		if (err)
			return(err);
		fpref = 1;
		switch (FILEGLOB_DTYPE(fp->f_fglob)) {
		case DTYPE_PSXSHM:
			uap->addr = (user_addr_t)user_addr;
			uap->len = (user_size_t)user_size;
			uap->prot = prot;
			uap->flags = flags;
			uap->pos = file_pos;
			error = pshm_mmap(p, uap, retval, fp, (off_t)pageoff);
			goto bad;
		case DTYPE_VNODE:
			break;
		default:
			error = EINVAL;
			goto bad;
		}
		vp = (struct vnode *)fp->f_fglob->fg_data;
		error = vnode_getwithref(vp);
		if(error != 0)
			goto bad;

		if (vp->v_type != VREG && vp->v_type != VCHR) {
			(void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		AUDIT_ARG(vnpath, vp, ARG_VNODE1);
		
		/*
		 * POSIX: mmap needs to update access time for mapped files
		 */
		if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
			VATTR_INIT(&va);
			nanotime(&va.va_access_time);
			VATTR_SET_ACTIVE(&va, va_access_time);
			vnode_setattr(vp, &va, ctx);
		}

		/*
		 * XXX hack to handle use of /dev/zero to map anon memory (ala
		 * SunOS).
		 */
		if (vp->v_type == VCHR || vp->v_type == VSTR) {
			(void)vnode_put(vp);
			error = ENODEV;
			goto bad;
		} else {
			/*
			 * Ensure that file and memory protections are
			 * compatible.  Note that we only worry about
			 * writability if mapping is shared; in this case,
			 * current and max prot are dictated by the open file.
			 * XXX use the vnode instead?  Problem is: what
			 * credentials do we use for determination? What if
			 * proc does a setuid?
			 */
			maxprot = VM_PROT_EXECUTE;	/* ??? */
			if (fp->f_fglob->fg_flag & FREAD)
				maxprot |= VM_PROT_READ;
			else if (prot & PROT_READ) {
				(void)vnode_put(vp);
				error = EACCES;
				goto bad;
			}
			/*
			 * If we are sharing potential changes (either via
			 * MAP_SHARED or via the implicit sharing of character
			 * device mappings), and we are trying to get write
			 * permission although we opened it without asking
			 * for it, bail out. 
			 */

			if ((flags & MAP_SHARED) != 0) {
				if ((fp->f_fglob->fg_flag & FWRITE) != 0 &&
				    /*
				     * Do not allow writable mappings of 
				     * swap files (see vm_swapfile_pager.c).
				     */
				    !vnode_isswap(vp)) {
 					/*
 					 * check for write access
 					 *
 					 * Note that we already made this check when granting FWRITE
 					 * against the file, so it seems redundant here.
 					 */
 					error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx);
 
 					/* if not granted for any reason, but we wanted it, bad */
 					if ((prot & PROT_WRITE) && (error != 0)) {
 						vnode_put(vp);
  						goto bad;
  					}
 
 					/* if writable, remember */
 					if (error == 0)
  						maxprot |= VM_PROT_WRITE;

				} else if ((prot & PROT_WRITE) != 0) {
					(void)vnode_put(vp);
					error = EACCES;
					goto bad;
				}
			} else
				maxprot |= VM_PROT_WRITE;

			handle = (void *)vp;
#if CONFIG_MACF
			error = mac_file_check_mmap(vfs_context_ucred(ctx),
			    fp->f_fglob, prot, flags, file_pos, &maxprot);
			if (error) {
				(void)vnode_put(vp);
				goto bad;
			}
#endif /* MAC */
		}
	}

	if (user_size == 0)  {
		if (!mapanon)
			(void)vnode_put(vp);
		error = 0;
		goto bad;
	}

	/*
	 *	We bend a little - round the start and end addresses
	 *	to the nearest page boundary.
	 */
	user_size = vm_map_round_page(user_size,
				      vm_map_page_mask(user_map));

	if (file_pos & vm_map_page_mask(user_map)) {
		if (!mapanon)
			(void)vnode_put(vp);
		error = EINVAL;
		goto bad;
	}

	if ((flags & MAP_FIXED) == 0) {
		alloc_flags |= VM_FLAGS_ANYWHERE;
		user_addr = vm_map_round_page(user_addr,
					      vm_map_page_mask(user_map));
	} else {
		if (user_addr != vm_map_trunc_page(user_addr,
						   vm_map_page_mask(user_map))) {
		        if (!mapanon)
			        (void)vnode_put(vp);
			error = EINVAL;
			goto bad;
		}
		/*
		 * mmap(MAP_FIXED) will replace any existing mappings in the
		 * specified range, if the new mapping is successful.
		 * If we just deallocate the specified address range here,
		 * another thread might jump in and allocate memory in that
		 * range before we get a chance to establish the new mapping,
		 * and we won't have a chance to restore the old mappings.
		 * So we use VM_FLAGS_OVERWRITE to let Mach VM know that it
		 * has to deallocate the existing mappings and establish the
		 * new ones atomically.
		 */
		alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
	}

	if (flags & MAP_NOCACHE)
		alloc_flags |= VM_FLAGS_NO_CACHE;

	if (flags & MAP_JIT) {
		vmk_flags.vmkf_map_jit = TRUE;
	}

	if (flags & MAP_RESILIENT_CODESIGN) {
		alloc_flags |= VM_FLAGS_RESILIENT_CODESIGN;
	}

	/*
	 * Lookup/allocate object.
	 */
	if (handle == NULL) {
		control = NULL;
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */
map_anon_retry:
		result = vm_map_enter_mem_object(user_map,
						 &user_addr, user_size,
						 0, alloc_flags, vmk_flags,
						 tag,
						 IPC_PORT_NULL, 0, FALSE,
						 prot, maxprot,
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this anonymous
		 * mapping, retry the mapping with a zero base
		 * in the event the mapping operation failed due to
		 * lack of space between the address and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_anon_retry;
		}
	} else {
		if (vnode_isswap(vp)) {
			/*
			 * Map swap files with a special pager
			 * that returns obfuscated contents.
			 */
			control = NULL;
			pager = swapfile_pager_setup(vp);
			if (pager != MEMORY_OBJECT_NULL) {
				control = swapfile_pager_control(pager);
			}
		} else {
			control = ubc_getobject(vp, UBC_FLAGS_NONE);
		}
		
		if (control == NULL) {
			(void)vnode_put(vp);
			error = ENOMEM;
			goto bad;
		}

		/*
		 *  Set credentials:
		 *	FIXME: if we're writing the file we need a way to
		 *      ensure that someone doesn't replace our R/W creds
		 * 	with ones that only work for read.
		 */

		ubc_setthreadcred(vp, p, current_thread());
		docow = FALSE;
		if ((flags & (MAP_ANON|MAP_SHARED)) == 0) {
			docow = TRUE;
		}

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
		if (prot & VM_PROT_READ)
			prot |= VM_PROT_EXECUTE;
		if (maxprot & VM_PROT_READ)
			maxprot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3777787
		if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			prot |= VM_PROT_READ;
		if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
			maxprot |= VM_PROT_READ;
#endif	/* radar 3777787 */

map_file_retry:
		if ((flags & MAP_RESILIENT_CODESIGN) ||
		    (flags & MAP_RESILIENT_MEDIA)) {
			if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
				assert(!mapanon);
				vnode_put(vp);
				error = EPERM;
				goto bad;
			}
			/* strictly limit access to "prot" */
			maxprot &= prot;
		}

		vm_object_offset_t end_pos = 0;
		if (os_add_overflow(user_size, file_pos, &end_pos)) {
			vnode_put(vp);
			error = EINVAL;
			goto bad;
		}

		result = vm_map_enter_mem_object_control(user_map,
						 &user_addr, user_size,
						 0, alloc_flags, vmk_flags,
						 tag,
						 control, file_pos,
						 docow, prot, maxprot, 
						 (flags & MAP_SHARED) ?
						 VM_INHERIT_SHARE : 
						 VM_INHERIT_DEFAULT);

		/* If a non-binding address was specified for this file backed
		 * mapping, retry the mapping with a zero base
		 * in the event the mapping operation failed due to
		 * lack of space between the address and the map's maximum.
		 */
		if ((result == KERN_NO_SPACE) && ((flags & MAP_FIXED) == 0) && user_addr && (num_retries++ == 0)) {
			user_addr = vm_map_page_size(user_map);
			goto map_file_retry;
		}
	}

	if (!mapanon) {
		(void)vnode_put(vp);
	}

	switch (result) {
	case KERN_SUCCESS:
		*retval = user_addr + pageoff;
		error = 0;
		break;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		error =  ENOMEM;
		break;
	case KERN_PROTECTION_FAILURE:
		error =  EACCES;
		break;
	default:
		error =  EINVAL;
		break;
	}
bad:
	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the reference on the pager.
		 * If the mapping was successful, it now holds
		 * an extra reference.
		 */
		memory_object_deallocate(pager);
	}
	if (fpref)
		fp_drop(p, fd, fp, 0);

	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0);
#ifndef	CONFIG_EMBEDDED
	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32),
			      (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0);
#endif
	return(error);
}
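The MAP_ANON branch above lets callers pass Mach VM flags through the fd argument. A user-space sketch of that convention, assuming a Darwin target; VM_MAKE_TAG and VM_MEMORY_APPLICATION_SPECIFIC_1 come from <mach/vm_statistics.h>:

#include <sys/mman.h>
#include <mach/vm_statistics.h>

/*
 * Tagged anonymous mapping: with MAP_ANON, the "fd" argument carries
 * VM_FLAGS_* bits (here an alias tag in the top byte), which the kernel
 * extracts via VM_GET_FLAGS_ALIAS; any bits outside the accepted masks
 * make mmap() fail with EINVAL, per the check in the code above.
 */
void *tagged_anon_map(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE,
		    VM_MAKE_TAG(VM_MEMORY_APPLICATION_SPECIFIC_1), 0);
}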
Example #3
void *__memory_logging_event_writing_thread(void *param)
{
	pthread_setname_np("Memory Logging");
	
	working_thread_id = current_thread_id(); // prevents deadlocking when stack logging on a single thread
	log_internal_without_this_thread(working_thread_id);
	
	struct timeval delay;
	delay.tv_sec = 0;
	delay.tv_usec = 10 * 1000; // 10 ms
	
	while (logging_is_enable) {
		while (has_event_in_buffer(event_buffer) == false) {
			usleep(15000);
			//select(0, NULL, NULL, NULL, &delay);
		}
		
		if (!logging_is_enable) {
			break;
		}
		
		// pick an event from buffer
		int64_t next_index = 0;
		memory_logging_event *curr_event = get_event_from_buffer(event_buffer, &next_index);
		bool is_skip = (curr_event->event_type == EventType_Invalid);
		
		if (is_next_index_valid(event_buffer, next_index) == false) {
			// Impossible...
			continue;
		}
		
		// compaction
		uint32_t object_type = 0;
		if (curr_event->event_type == EventType_Alloc && has_event_in_buffer(event_buffer, next_index)) {
			memory_logging_event *next_event = get_event_from_buffer(event_buffer, NULL, next_index);
			if (curr_event->address == next_event->address) {
				if (curr_event->type_flags & memory_logging_type_alloc) {
					if (next_event->type_flags & memory_logging_type_dealloc) {
						// *waves hand* current allocation never occurred
						is_skip = true;
						next_event->event_type = EventType_Invalid;
					} else if (next_event->event_type == EventType_Update) {
						object_type = next_event->argument;
						next_event->event_type = EventType_Invalid;
					}
				} else if (next_event->type_flags & memory_logging_type_vm_deallocate) {
					// *waves hand* current allocation(VM) never occurred
					is_skip = true;
					next_event->event_type = EventType_Invalid;
				}
			}
		}
		
		if (!is_skip) {
			// Taking a mutex on every event would hurt performance; use a lightweight handshake instead
			//__malloc_lock_lock(&working_thread_lock);
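			// Handshake: should_working_thread_lock == 1 means another thread
			// requested a pause; setting it to 2 acknowledges, then spin until
			// the requester resets it.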
			if (should_working_thread_lock == 1) {
				should_working_thread_lock = 2;
				while (should_working_thread_lock == 2);
			}
			
			if (curr_event->event_type == EventType_Alloc) {
				uint32_t stack_identifier = 0;
				if (curr_event->stack_size > 0) {
					stack_identifier = add_stack_frames_in_table(stack_frames_writer, curr_event->stacks + curr_event->num_hot_to_skip, curr_event->stack_size - curr_event->num_hot_to_skip); // dedup: returns the identifier of this unique stack
				} else {
					__malloc_printf("Data corrupted!");
					
					//__malloc_lock_unlock(&working_thread_lock);
					// Restore abort()?
					//abort();
					report_error(MS_ERRC_DATA_CORRUPTED);
					disable_memory_logging();
					break;
				}
				// Try to get the VM memory type (user tag) from type_flags
				if (object_type == 0) {
					VM_GET_FLAGS_ALIAS(curr_event->type_flags, object_type);
				}
				add_allocation_event(allocation_event_writer, curr_event->address, curr_event->type_flags, object_type, curr_event->argument, stack_identifier, curr_event->t_id);
			} else if (curr_event->event_type == EventType_Free) {
				del_allocation_event(allocation_event_writer, curr_event->address, curr_event->type_flags);
			} else {
				update_allocation_event_object_type(allocation_event_writer, curr_event->address, curr_event->argument);
			}
			
			//__malloc_lock_unlock(&working_thread_lock);
		}
		
		update_read_index(event_buffer, next_index);
	}
	return NULL;
}
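A standalone restatement of the compaction step above, using the same type and enum names; this is an illustrative sketch, not the shipped code. It returns true when the current alloc event should be dropped entirely:

// Fold an adjacent event pair: an alloc immediately followed by a free of
// the same address cancels out; an alloc followed by an update absorbs the
// object type into the alloc record.
static bool compact_pair(memory_logging_event *curr, memory_logging_event *next, uint32_t *object_type)
{
	if (curr->event_type != EventType_Alloc || curr->address != next->address) {
		return false; // not a foldable pair
	}
	if (curr->type_flags & memory_logging_type_alloc) {
		if (next->type_flags & memory_logging_type_dealloc) {
			next->event_type = EventType_Invalid; // consume the free...
			return true;                          // ...and drop the alloc
		}
		if (next->event_type == EventType_Update) {
			*object_type = next->argument;        // absorb the type update
			next->event_type = EventType_Invalid;
			return false;                         // the alloc is still recorded
		}
	} else if (next->type_flags & memory_logging_type_vm_deallocate) {
		next->event_type = EventType_Invalid;         // VM alloc + dealloc cancel out
		return true;
	}
	return false;
}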
Example #4
void __memory_event_callback(uint32_t type_flags, uintptr_t zone_ptr, uintptr_t arg2, uintptr_t arg3, uintptr_t return_val, uint32_t num_hot_to_skip)
{
    uintptr_t size = 0;
    uintptr_t ptr_arg = 0;
	bool is_alloc = false;
	
	if (!logging_is_enable) {
		return;
	}

	uint32_t alias = 0;
	VM_GET_FLAGS_ALIAS(type_flags, alias);
	// skip all VM allocation events from malloc_zone
	if (alias >= VM_MEMORY_MALLOC && alias <= VM_MEMORY_MALLOC_NANO) {
		return;
	}

    // check incoming data
    if (type_flags & memory_logging_type_alloc && type_flags & memory_logging_type_dealloc) {
        size = arg3;
        ptr_arg = arg2; // the original pointer
		if (ptr_arg == return_val) {
			return; // realloc had no effect, skipping
		}
        if (ptr_arg == 0) { // realloc(NULL, size) same as malloc(size)
            type_flags ^= memory_logging_type_dealloc;
        } else {
            // realloc(arg1, arg2) -> result is same as free(arg1); malloc(arg2) -> result
            __memory_event_callback(memory_logging_type_dealloc, zone_ptr, ptr_arg, (uintptr_t)0, (uintptr_t)0, num_hot_to_skip + 1);
            __memory_event_callback(memory_logging_type_alloc, zone_ptr, size, (uintptr_t)0, return_val, num_hot_to_skip + 1);
            return;
        }
    }
    if (type_flags & memory_logging_type_dealloc || type_flags & memory_logging_type_vm_deallocate) {
        size = arg3;
		ptr_arg = arg2;
		if (ptr_arg == 0) {
			return; // free(nil)
		}
    }
    if (type_flags & memory_logging_type_alloc || type_flags & memory_logging_type_vm_allocate) {
		if (return_val == 0 || return_val == (uintptr_t)MAP_FAILED) {
			//return; // allocation failed, but still record the event (with address 0)
			return_val = 0;
		}
        size = arg2;
		is_alloc = true;
    }
    
	if (type_flags & memory_logging_type_vm_allocate || type_flags & memory_logging_type_vm_deallocate) {
		mach_port_t targetTask = (mach_port_t)zone_ptr;
		// For now, ignore "injections" of VM into other tasks.
		if (targetTask != mach_task_self()) {
			return;
		}
	}
    
    type_flags &= memory_logging_valid_type_flags;
	
	thread_id curr_thread = current_thread_id();
	
	if (curr_thread == working_thread_id || curr_thread == g_matrix_block_monitor_dumping_thread_id/* || is_thread_ignoring_logging(curr_thread)*/) {
        // Prevent a thread from deadlocking against itself if vm_allocate() or malloc()
        // is called below here from the working thread or the dumping thread
        return;
    }

	memory_logging_event curr_event;
	
	// gather stack, only alloc type
    if (is_alloc) {
		curr_event.stack_size = backtrace((void **)curr_event.stacks, STACK_LOGGING_MAX_STACK_SIZE);
		num_hot_to_skip += 1; // skip itself and caller
		if (curr_event.stack_size <= num_hot_to_skip) {
			// Oops!  Didn't get a valid backtrace.
			return;
		}

		if (is_stack_frames_should_skip(curr_event.stacks + num_hot_to_skip, curr_event.stack_size - num_hot_to_skip, size, type_flags)) {
			curr_event.stack_size = 0;
			// skip this event?
			return;
		} else {
			curr_event.num_hot_to_skip = num_hot_to_skip;
		}

		curr_event.address = return_val;
        curr_event.argument = (uint32_t)size;
		curr_event.event_type = EventType_Alloc;
		curr_event.type_flags = type_flags;
		curr_event.t_id = curr_thread;
    } else {
        curr_event.address = ptr_arg;
        curr_event.argument = (uint32_t)size;
		curr_event.event_type = EventType_Free;
		curr_event.type_flags = type_flags;
		curr_event.stack_size = 0;
    }
	
	append_event_to_buffer(event_buffer, &curr_event);
}
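For reference, the alias check near the top of the callback relies on the user tag occupying the top byte of the VM flags word; this helper mirrors what VM_GET_FLAGS_ALIAS from <mach/vm_statistics.h> expands to:

#include <stdint.h>
#include <mach/vm_statistics.h>

// Extract the user tag (e.g. VM_MEMORY_MALLOC) from a Mach VM flags word;
// VM_GET_FLAGS_ALIAS is the macro form of the same mask-and-shift.
static uint32_t vm_flags_user_tag(uint32_t flags)
{
	return (flags & VM_FLAGS_ALIAS_MASK) >> 24;
}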