Example #1
static
errno_t ppfilter_connect_out (void *cookie, socket_t so, const struct sockaddr *to)
{
    errno_t block = ppfilter_connect((pp_filter_cookie_t)cookie, so, to, 0);
    // action is used by the raw data filters
    if (cookie) {
        if (!block)
            (void)OSCompareAndSwap(COOKIE_NO_ACTION, 0, (UInt32*)&((pp_filter_cookie_t)cookie)->action);
        else
            (void)OSCompareAndSwap(COOKIE_NO_ACTION, EHOSTUNREACH, (UInt32*)&((pp_filter_cookie_t)cookie)->action);
    }
    return (block);
}
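The CAS above publishes the connect-time verdict at most once: action is only
replaced while it still holds COOKIE_NO_ACTION, so a verdict that is already
recorded survives. A minimal sketch of the consuming side, assuming the
standard socket-filter sf_data_out signature; the function name and the idea
that the data path returns the stored errno are assumptions, not the
project's actual code:

// Hypothetical data-path hook honoring the verdict stored by
// ppfilter_connect_out(). Returns the recorded errno (e.g. EHOSTUNREACH)
// to block traffic, 0 to let it pass.
static errno_t ppfilter_data_out(void *cookie, socket_t so,
                                 const struct sockaddr *to, mbuf_t *data,
                                 mbuf_t *control, sflt_data_flag_t flags)
{
    pp_filter_cookie_t c = (pp_filter_cookie_t)cookie;
    if (c && c->action != COOKIE_NO_ACTION)
        return ((errno_t)c->action);    // 0 = allow, nonzero = block
    return (0);
}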
Example #2
static
void ppfilter_unregister(sflt_handle h)
{
    // Signal exit
    int *event;
    switch (h) {
    case PP_FILTER_UDP_HANDLE:
        event = &udpFiltDone;
        break;
    case PP_FILTER_TCP_HANDLE:
        event = &tcpFiltDone;
        break;
    case PP_FILTER_ICMP_HANDLE:
        event = &icmpFiltDone;
        break;
    case PP_FILTER_UDP6_HANDLE:
        event = &udp6FiltDone;
        break;
    case PP_FILTER_TCP6_HANDLE:
        event = &tcp6FiltDone;
        break;
    case PP_FILTER_ICMP6_HANDLE:
        event = &icmp6FiltDone;
        break;
    case PP_FILTER_RAW_HANDLE:
        event = &rawFiltDone;
        break;
    default:
        return;
    }

    (void)OSCompareAndSwap(0, 1, (UInt32*)event);
    wakeup(event);
}
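The pattern above pairs a one-shot CAS on a done-flag with wakeup() on the
flag's address. A sketch of the waiting side, assuming the counterpart blocks
with the BSD msleep() on the same address (the function name and wmesg string
are hypothetical):

// Hypothetical waiter: sleeps until ppfilter_unregister() sets the flag
// and calls wakeup(). Re-checks the flag on every wakeup, so spurious
// wakeups are harmless.
static void ppfilter_wait_done(volatile int *event)
{
    while (*event == 0)
        (void)msleep((void *)event, NULL, PZERO, "ppfiltdone", NULL);
}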
Example #3
void
panic_double_fault64(x86_saved_state_t *sp)
{
	(void)OSCompareAndSwap((UInt32) -1, (UInt32) cpu_number(), (volatile UInt32 *)&panic_double_fault_cpu);
	panic_64(sp, PANIC_DOUBLE_FAULT, "Double fault", FALSE);

}
Example #4
File: loose_ends.c Project: SbIm/xnu-env
uint32_t
hw_compare_and_store(uint32_t oldval, uint32_t newval, volatile uint32_t *dest)
{
	return OSCompareAndSwap((UInt32)oldval,
				(UInt32)newval,
				(volatile UInt32 *)dest);
}
Example #5
__private_extern__ void
chudxnu_dtrace_callback_enter(chudxnu_dtrace_callback_t fn)
{
	chudxnu_dtrace_callback_t old_fn = dtrace_callback;

	/* Atomically install the new callback (note: the UInt32 casts assume
	 * a 32-bit kernel, where a function pointer fits in 32 bits) */
	while(!OSCompareAndSwap((UInt32)old_fn, (UInt32)fn, 
		(volatile UInt32 *) &dtrace_callback)) {
		old_fn = dtrace_callback;
	}
}
Example #6
SInt32	OSAddAtomic(SInt32 amount, volatile SInt32 * value)
{
	SInt32	oldValue;
	SInt32	newValue;
	
	do {
		oldValue = *value;
		newValue = oldValue + amount;
	} while (! OSCompareAndSwap((UInt32) oldValue, (UInt32) newValue, (UInt32 *) value));
	
	return oldValue;
}
static UInt32	OSBitwiseAtomic(UInt32 and_mask, UInt32 or_mask, UInt32 xor_mask, volatile UInt32 * value)
{
	UInt32	oldValue;
	UInt32	newValue;
	
	do {
		oldValue = *value;
		newValue = ((oldValue & and_mask) | or_mask) ^ xor_mask;
	} while (! OSCompareAndSwap(oldValue, newValue, value));
	
	return oldValue;
}
static Boolean OSCompareAndSwap8(UInt8 oldValue8, UInt8 newValue8, volatile UInt8 * value8)
{
	UInt32				mask		= 0x000000ff;
	UInt32				alignment	= (UInt32)((unsigned long) value8) & (sizeof(UInt32) - 1);
	UInt32				shiftValues = (24 << 24) | (16 << 16) | (8 << 8);
	int					shift		= (UInt32) *(((UInt8 *) &shiftValues) + alignment);
	volatile UInt32 *	value32		= (volatile UInt32 *) ((uintptr_t)value8 - alignment);
	UInt32				oldValue;
	UInt32				newValue;

	mask <<= shift;

	oldValue = *value32;
	oldValue = (oldValue & ~mask) | (oldValue8 << shift);
	newValue = (oldValue & ~mask) | (newValue8 << shift);

	return OSCompareAndSwap(oldValue, newValue, value32);
}
static Boolean OSCompareAndSwap16(UInt16 oldValue16, UInt16 newValue16, volatile UInt16 * value16)
{
	UInt32				mask		= 0x0000ffff;
	UInt32				alignment	= (UInt32)((unsigned long) value16) & (sizeof(UInt32) - 1);
	UInt32				shiftValues = (16 << 24) | (16 << 16);
	UInt32				shift		= (UInt32) *(((UInt8 *) &shiftValues) + alignment);
	volatile UInt32 *	value32		= (volatile UInt32 *) (((unsigned long) value16) - alignment);
	UInt32				oldValue;
	UInt32				newValue;

	mask <<= shift;

	oldValue = *value32;
	oldValue = (oldValue & ~mask) | (oldValue16 << shift);
	newValue = (oldValue & ~mask) | (newValue16 << shift);

	return OSCompareAndSwap(oldValue, newValue, value32);
}
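The 8- and 16-bit variants above emulate a narrow CAS with the native 32-bit
one: they locate the containing aligned word, build a mask for the byte or
halfword, and splice the old and new values in at the right shift. The shift
table is itself packed into a UInt32 and indexed byte-by-byte, which makes it
come out endianness-correct with no #ifdef. A userspace probe of that trick
(an illustration, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    // Same constant as in OSCompareAndSwap8 above.
    uint32_t shiftValues = (24 << 24) | (16 << 16) | (8 << 8);
    // Prints 0, 8, 16, 24 on little-endian; 24, 16, 8, 0 on big-endian.
    for (unsigned alignment = 0; alignment < 4; alignment++)
        printf("byte offset %u -> shift %u\n", alignment,
               (unsigned)*(((uint8_t *)&shiftValues) + alignment));
    return 0;
}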
Example #10
static void
smaplog_add_entry(boolean_t enabling)
{
	uint32_t index = 0;
	thread_t thread = current_thread();

	do {
		index = smaplog_head;
	} while (!OSCompareAndSwap(index, (index + 1) % SMAPLOG_BUFFER_SIZE, &smaplog_head));

	assert(index < SMAPLOG_BUFFER_SIZE);
	assert(smaplog_head < SMAPLOG_BUFFER_SIZE);
	assert(thread);

	smaplog_cbuf[index].timestamp = mach_absolute_time();
	smaplog_cbuf[index].thread = thread;
	smaplog_cbuf[index].cpuid = cpu_number();
	smaplog_cbuf[index].cr4 = get_cr4();
	smaplog_cbuf[index].smap_state = enabling;
	smaplog_cbuf[index].copyio_active = (thread->machine.specFlags & CopyIOActive) ? 1 : 0;
}
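The do/while loop reserves a slot by advancing smaplog_head with CAS, so
concurrent callers each get a distinct index; the entry itself is then filled
in non-atomically, which is acceptable for a diagnostic ring that a lapping
writer may overwrite. The reservation idiom in isolation, using the
GCC/Clang __sync builtin as a stand-in for OSCompareAndSwap (an assumption;
any 32-bit CAS behaves the same):

#include <stdint.h>

// Returns a unique ring index and advances *head modulo size.
static uint32_t reserve_slot(volatile uint32_t *head, uint32_t size)
{
    uint32_t idx;
    do {
        idx = *head;
    } while (!__sync_bool_compare_and_swap(head, idx, (idx + 1) % size));
    return idx;
}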
Example #11
File: debug.c Project: Prajna/xnu
__private_extern__ void panic_display_system_configuration(void) {

	panic_display_process_name();
	if (OSCompareAndSwap(0, 1, &config_displayed)) {
		char buf[256];
		if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
			kdb_printf("Boot args: %s\n", buf);
		kdb_printf("\nMac OS version:\n%s\n",
		    (osversion[0] != 0) ? osversion : "Not yet set");
		kdb_printf("\nKernel version:\n%s\n",version);
		panic_display_kernel_uuid();
		panic_display_pal_info();
		panic_display_model_name();
		panic_display_uptime();
		panic_display_zprint();
#if CONFIG_ZLEAKS
		panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
		kext_dump_panic_lists(&kdb_log);
	}
}
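OSCompareAndSwap(0, 1, &flag) doubles as a one-shot gate here: only the first
caller (even across CPUs racing into a panic) flips config_displayed and
prints the report. The pattern distilled into a sketch, with a hypothetical
flag and body:

// One-shot gate: the body runs exactly once, no matter how many threads
// call this concurrently. do_once_flag must be statically zeroed.
static volatile UInt32 do_once_flag;

static void do_once(void (*body)(void))
{
    if (OSCompareAndSwap(0, 1, &do_once_flag))
        body();
}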
Example #12
IOMemoryDescriptor *
IOGetBootKeyStoreData(void)
{
  IOMemoryDescriptor *memoryDescriptor;
  boot_args *args = (boot_args *)PE_state.bootArgs;
  IOOptionBits options;
  IOAddressRange ranges;

  if (!OSCompareAndSwap(0, 1, &alreadyFetched))
    return (NULL);

  if (newData)
  {
    IOMemoryDescriptor * data = newData;
    newData = NULL;
    return (data);
  }  

  DEBG("%s: data at address %u size %u\n", __func__,
       args->keyStoreDataStart,
       args->keyStoreDataSize);

  if (args->keyStoreDataStart == 0)
    return (NULL);

  ranges.address = args->keyStoreDataStart;
  ranges.length = args->keyStoreDataSize;

  options = kIODirectionInOut | kIOMemoryTypePhysical64;
  
  memoryDescriptor = IOMemoryDescriptor::withOptions(&ranges,
						     1,
						     0,
						     NULL,
						     options);

  DEBG("%s: memory descriptor %p\n", __func__, memoryDescriptor);

  return memoryDescriptor;
}
Example #13
bool DldIPCUserClient::acquireWaitBlock( __out UInt32* waitBlockIndex, __in SInt32 eventID )
{
    UInt32 index = (-1);
    
    //
    // skip the first element with the 0x0 index, this value is used to indicate that no
    // waiting block has been allocated
    //
    for( UInt32 i = 1; i < DLD_STATIC_ARRAY_SIZE(DldIPCUserClient::WaitBlocks); ++i ){
        
        if( ! OSCompareAndSwap( 0x0, 0x1, &DldIPCUserClient::WaitBlocks[ i ].inUse ) )
            continue;
        
        assert( 0x1 == DldIPCUserClient::WaitBlocks[ i ].inUse );
        
        DldIPCUserClient::WaitBlocks[ i ].completed = false;
        DldIPCUserClient::WaitBlocks[ i ].eventID = eventID;
        index = i;
        *waitBlockIndex = i;
        break;
    } // end for
    
    return ( (-1) != index );
}
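acquireWaitBlock() claims a free slot by CAS-ing inUse from 0x0 to 0x1, with
index 0x0 reserved as the "no block allocated" sentinel. The matching release
would just publish 0x0 back; a hypothetical sketch (the real project's
function name and invariants may differ):

//
// Hypothetical release side: only the owner releases, so a CAS from
// 0x1 back to 0x0 is enough to make the block claimable again.
//
void DldIPCUserClient::releaseWaitBlock( __in UInt32 waitBlockIndex )
{
    assert( 0x0 != waitBlockIndex && waitBlockIndex < DLD_STATIC_ARRAY_SIZE( DldIPCUserClient::WaitBlocks ) );
    assert( 0x1 == DldIPCUserClient::WaitBlocks[ waitBlockIndex ].inUse );

    (void)OSCompareAndSwap( 0x1, 0x0, &DldIPCUserClient::WaitBlocks[ waitBlockIndex ].inUse );
}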
Example #14
int
boot(int paniced, int howto, char *command)
{
	struct proc *p = current_proc();	/* XXX */
	int hostboot_option=0;

	if (!OSCompareAndSwap(0, 1, &system_inshutdown)) {
		if ( (howto&RB_QUICK) == RB_QUICK)
			goto force_reboot;
		return (EBUSY);
	}
	/*
	 * Temporary hack to notify the power management root domain
	 * that the system will shut down.
	 */
	IOSystemShutdownNotification();

	md_prepare_for_shutdown(paniced, howto, command);

	if ((howto&RB_QUICK)==RB_QUICK) {
		printf("Quick reboot...\n");
		if ((howto&RB_NOSYNC)==0) {
			sync(p, (void *)NULL, (int *)NULL);
		}
	}
	else if ((howto&RB_NOSYNC)==0) {
		int iter, nbusy;

		printf("syncing disks... ");

		/*
		 * Release vnodes held by texts before sync.
		 */

		/* handle live procs (deallocate their root and current directories). */		
		proc_shutdown();

#if CONFIG_AUDIT
		audit_shutdown();
#endif

		if (unmountroot_pre_hook != NULL)
			unmountroot_pre_hook();

		sync(p, (void *)NULL, (int *)NULL);

		/*
		 * Now that all processes have been terminated and system is
		 * sync'ed up, suspend init
		 */
			
		if (initproc && p != initproc)
			task_suspend(initproc->task);

		if (kdebug_enable)
			kdbg_dump_trace_to_file("/var/log/shutdown/shutdown.trace");

		/*
		 * Unmount filesystems
		 */
		vfs_unmountall();

		/* Wait for the buffer cache to clean remaining dirty buffers */
		for (iter = 0; iter < 100; iter++) {
			nbusy = count_busy_buffers();
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			delay_for_interval( 1 * nbusy, 1000 * 1000);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
	}
#if NETWORKING
	/*
	 * Can't just use an splnet() here to disable the network
	 * because that will lock out softints which the disk
	 * drivers depend on to finish DMAs.
	 */
	if_down_all();
#endif /* NETWORKING */

force_reboot:
	if (howto & RB_POWERDOWN)
		hostboot_option = HOST_REBOOT_HALT;
	if (howto & RB_HALT)
		hostboot_option = HOST_REBOOT_HALT;
	if (paniced == RB_PANIC)
		hostboot_option = HOST_REBOOT_HALT;

	if (howto & RB_UPSDELAY) {
		hostboot_option = HOST_REBOOT_UPSDELAY;
	}

	host_reboot(host_priv_self(), hostboot_option);
	/*
	 * should not be reached
	 */
	return (0);
}
Example #15
Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
{
    const UInt32       head      = dataQueue->head;  // volatile
    const UInt32       tail      = dataQueue->tail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (dataQueue->queueSize - tail)
    if (dataQueue->queueSize < tail) {
        return false;
    }

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= dataQueue->queueSize) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.
            
            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ( ( dataQueue->queueSize - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Send notification (via mach message) that data is available.

    if ( ( head == tail )                                                   /* queue was empty prior to enqueue() */
    ||   ( dataQueue->head == tail ) )   /* queue was emptied during enqueue() */
    {
        sendDataAvailableNotification();
    }

    return true;
}
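For orientation, the entry layout this enqueue logic assumes (as
IODataQueueShared.h of the same era defines it; reproduced here as context,
not as new API): size counts only the payload, so an on-queue entry occupies
DATA_QUEUE_ENTRY_HEADER_SIZE + size bytes, and the wrap case can legitimately
write a header at the old tail even when no payload fits after it.

typedef struct _IODataQueueEntry {
    UInt32  size;      // payload bytes, excluding this header
    UInt8   data[4];   // variable-length payload starts here
} IODataQueueEntry;

#define DATA_QUEUE_ENTRY_HEADER_SIZE    (sizeof(IODataQueueEntry) - 4)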
Example #16
int
reboot_kernel(int howto, char *message)
{
	int hostboot_option=0;

	if (!OSCompareAndSwap(0, 1, &system_inshutdown)) {
		if ( (howto&RB_QUICK) == RB_QUICK)
			goto force_reboot;
		return (EBUSY);
	}
	/*
	 * Temporary hack to notify the power management root domain
	 * that the system will shut down.
	 */
	IOSystemShutdownNotification();

	if ((howto&RB_QUICK)==RB_QUICK) {
		printf("Quick reboot...\n");
		if ((howto&RB_NOSYNC)==0) {
			sync((proc_t)NULL, (void *)NULL, (int *)NULL);
		}
	}
	else if ((howto&RB_NOSYNC)==0) {
		int iter, nbusy;

		printf("syncing disks... ");

		/*
		 * Release vnodes held by texts before sync.
		 */

		/* handle live procs (deallocate their root and current directories), suspend initproc */
		proc_shutdown();

#if CONFIG_AUDIT
		audit_shutdown();
#endif

		if (unmountroot_pre_hook != NULL)
			unmountroot_pre_hook();

		sync((proc_t)NULL, (void *)NULL, (int *)NULL);

		if (kdebug_enable)
			kdbg_dump_trace_to_file("/var/log/shutdown/shutdown.trace");

		/*
		 * Unmount filesystems
		 */

#if DEVELOPMENT || DEBUG
		if (!(howto & RB_PANIC) || !kdp_has_polled_corefile())
#endif /* DEVELOPMENT || DEBUG */
		{
			vfs_unmountall();
		}

		/* Wait for the buffer cache to clean remaining dirty buffers */
		for (iter = 0; iter < 100; iter++) {
			nbusy = count_busy_buffers();
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			delay_for_interval( 1 * nbusy, 1000 * 1000);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
	}
#if NETWORKING
	/*
	 * Can't just use an splnet() here to disable the network
	 * because that will lock out softints which the disk
	 * drivers depend on to finish DMAs.
	 */
	if_down_all();
#endif /* NETWORKING */

force_reboot:

	if (howto & RB_PANIC) {
		if (strncmp(message, "Kernel memory has exceeded limits", 33) == 0) {
			kernel_hwm_panic_info();
		}
		panic ("userspace panic: %s", message);
	}

	if (howto & RB_POWERDOWN)
		hostboot_option = HOST_REBOOT_HALT;
	if (howto & RB_HALT)
		hostboot_option = HOST_REBOOT_HALT;

	if (howto & RB_UPSDELAY) {
		hostboot_option = HOST_REBOOT_UPSDELAY;
	}

	host_reboot(host_priv_self(), hostboot_option);
	/*
	 * should not be reached
	 */
	return (0);
}
Example #17
/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault32(int code)
{
	(void)OSCompareAndSwap((UInt32) -1, (UInt32) cpu_number(), (volatile UInt32 *)&panic_double_fault_cpu);
	panic_32(code, PANIC_DOUBLE_FAULT, "Double fault", FALSE, TRUE);
}
Example #18
File: debug.c Project: UIKit0/xnu
__private_extern__ void panic_display_system_configuration(void) {

	//panic_display_process_name();
#ifdef __arm__
	{
#else
	if (OSCompareAndSwap(0, 1, &config_displayed)) {
#endif
		char buf[256];
		if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
			kdb_printf("Boot args: %s\n", buf);
		kdb_printf("\nMac OS version:\n%s\n",
		    (osversion[0] != 0) ? osversion : "Not yet set");
		kdb_printf("\nKernel version:\n%s\n",version);
#ifdef __arm__
		kdb_printf("\niBoot version: %s\n", firmware_version);
		kdb_printf("Secure boot?: %s\n\n", debug_enabled ? "NO" : "YES");
#endif
		panic_display_kernel_uuid();
		panic_display_kernel_aslr();
		panic_display_pal_info();
		panic_display_model_name();
		panic_display_uptime();
		panic_display_zprint();
#if CONFIG_ZLEAKS
		panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
		kext_dump_panic_lists(&kdb_log);
	}
}

extern zone_t		first_zone;
extern unsigned int	num_zones, stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int	inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif

extern boolean_t	panic_include_zprint;

__private_extern__ void panic_display_zprint()
{
	if(panic_include_zprint == TRUE) {

		unsigned int	i;
		struct zone	zone_copy;

		if(first_zone!=NULL) {
			if(ml_nofault_copy((vm_offset_t)first_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) {
				for (i = 0; i < num_zones; i++) {
					if(zone_copy.cur_size > (1024*1024)) {
						kdb_printf("%.20s:%lu\n",zone_copy.zone_name,(uintptr_t)zone_copy.cur_size);
					}	
					
					if(zone_copy.next_zone == NULL) {
						break;
					}

					if(ml_nofault_copy((vm_offset_t)zone_copy.next_zone, (vm_offset_t)&zone_copy, sizeof(struct zone)) != sizeof(struct zone)) {
						break;
					}
				}
			}
		}

		kdb_printf("Kernel Stacks:%lu\n",(uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
		kdb_printf("PageTables:%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

		kdb_printf("Kalloc.Large:%lu\n",(uintptr_t)kalloc_large_total);
	}
}

#if CONFIG_ZLEAKS
extern boolean_t	panic_include_ztrace;
extern struct ztrace* top_ztrace;
/*
 * Prints the backtrace most suspected of being a leaker, if we paniced in the zone allocator.
 * top_ztrace and panic_include_ztrace comes from osfmk/kern/zalloc.c
 */
__private_extern__ void panic_display_ztrace(void)
{
	if(panic_include_ztrace == TRUE) {
		unsigned int i = 0;
		struct ztrace top_ztrace_copy;
		
		/* Make sure not to trip another panic if there's something wrong with memory */
		if(ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) {
			kdb_printf("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size);
			/* Print the backtrace addresses */
			for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH) ; i++) {
				kdb_printf("%p\n", top_ztrace_copy.zt_stack[i]);
			}
			/* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */
			kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth);
		}
		else {
			kdb_printf("\nCan't access top_ztrace...\n");
		}
		kdb_printf("\n");
	}
}
#endif /* CONFIG_ZLEAKS */

#if !MACH_KDP
static struct ether_addr kdp_current_mac_address = {{0, 0, 0, 0, 0, 0}};

/* XXX ugly forward declares to stop warnings */
void *kdp_get_interface(void);
void kdp_set_ip_and_mac_addresses(struct in_addr *, struct ether_addr *);
void kdp_set_gateway_mac(void *);
void kdp_set_interface(void *);
void kdp_register_send_receive(void *, void *);
void kdp_unregister_send_receive(void *, void *);
void kdp_snapshot_preflight(int, void *, uint32_t, uint32_t);
int kdp_stack_snapshot_geterror(void);
int kdp_stack_snapshot_bytes_traced(void);

void *
kdp_get_interface( void)
{
        return(void *)0;
}

unsigned int
kdp_get_ip_address(void )
{ return 0; }

struct ether_addr
kdp_get_mac_addr(void)
{       
        return kdp_current_mac_address;
}

void
kdp_set_ip_and_mac_addresses(   
        __unused struct in_addr          *ipaddr,
        __unused struct ether_addr       *macaddr)
{}

void
kdp_set_gateway_mac(__unused void *gatewaymac)
{}

void
kdp_set_interface(__unused void *ifp)
{}

void
kdp_register_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_unregister_send_receive(__unused void *send, __unused void *receive)
{}

void
kdp_snapshot_preflight(__unused int pid, __unused void * tracebuf,
		__unused uint32_t tracebuf_size, __unused uint32_t options)
{}

int
kdp_stack_snapshot_geterror(void)
{       
        return -1;
}

int
kdp_stack_snapshot_bytes_traced(void)
{       
        return 0;
}
Example #19
bool IOSharedEventQueue::EnqueueTracker(DataArgs * data)
{
    uint32_t singleTrackerLen = sizeof(DataArgs);
    const UInt32 head = dataQueue->head;
    const UInt32 tail = dataQueue->tail;

    LOG(LOG_DEBUG, "head=%d", dataQueue->head);
    LOG(LOG_DEBUG, "tail=%d", dataQueue->tail);

    const UInt32 entrySize = singleTrackerLen+DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry *entry;

    if(singleTrackerLen>UINT32_MAX-DATA_QUEUE_ENTRY_HEADER_SIZE)
    {
        return false;
    }

    LOG(LOG_DEBUG, "this->getQueueSize()=%d", this->getQueueSize());
    if(this->getQueueSize()<tail)
    {
        return false;
    }

    if(tail>=head)
    {
        if(entrySize<=UINT32_MAX-DATA_QUEUE_ENTRY_HEADER_SIZE &&
        tail+entrySize<=this->getQueueSize())
        {
            entry = (IODataQueueEntry*)((uint8_t*)dataQueue->queue+dataQueue->tail);
            entry->size=singleTrackerLen;
            memcpy(entry->data, data, singleTrackerLen);
            OSAddAtomic(entrySize, (SInt32*)&(dataQueue->tail));
        }
        else if(head>singleTrackerLen)
        {
            dataQueue->queue->size = singleTrackerLen;

            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                // Mirror IODataQueue::enqueue above: the wrap marker at the old
                // tail records the payload size, not the full entry size.
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = singleTrackerLen;
            }

            memcpy(&dataQueue->queue->data, data, singleTrackerLen);
            OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
        }
        else
        {
            return false;
        }
    }
    else
    {
        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = singleTrackerLen;
            memcpy(&entry->data, data, singleTrackerLen);
            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Note: unlike IODataQueue::enqueue above, this early return skips the
    // notification when the queue was empty before this enqueue.
    if(head==tail) return true;

    //send notification to port if any data is added to queue.
    //if ( (this->_status&kSharedEventQueueNotifyWhenAddData) || ( head == tail ) || ( dataQueue->head == tail ))
    {
        sendDataAvailableNotification();
    }

    return true;
}
void IOSharedEventQueue::setStatus(OptionBits st)
{
    OptionBits oldValue = this->_status;
    OSCompareAndSwap(oldValue, st, &(this->_status));
}
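Note that setStatus() as written performs a single CAS against a freshly read
oldValue, so if another thread changes _status between the read and the swap,
the call silently does nothing. If the intent is an unconditional atomic
store, the usual shape is the retry loop already shown in Example #6's
OSAddAtomic; a sketch of that variant (an assumption about intent, not a
claim about the project's requirements):

void IOSharedEventQueue::setStatus(OptionBits st)
{
    OptionBits oldValue;
    do {
        oldValue = this->_status;
    } while (!OSCompareAndSwap(oldValue, st, &(this->_status)));
}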