device_hooks *
find_device(const char *name)
{
	FENTRYA("%s", name);
	FEXIT();
	return gOSSCore->oss_get_driver_hooks();
}
status_t
init_hardware(void)
{
	status_t err;

	FENTRY();
	/* Probe for the OSS core module; we only need to know it loads. */
	err = get_module(OSS_CORE_MODULE_NAME, (module_info **)&gOSSCore);
	if (err < B_OK) {
		FEXIT();
		return err;
	}
	put_module(OSS_CORE_MODULE_NAME);
	FEXIT();
	return B_OK;
}
void
uninit_driver(void)
{
	status_t err;

	FENTRY();
	/* The unload result is intentionally ignored; only the core
	 * shutdown status is reported. */
	(void) gOSSCore->oss_unload_all_drivers();
	err = gOSSCore->uninit_osscore();
	dprintf("oss: uninit_osscore: 0x%08lx\n", err);
	put_module(OSS_CORE_MODULE_NAME);
	FEXIT();
}
void
btk_mem_fini(
    void
    )
{
    FUNCTION("btk_mem_fini");
    LOG_UNKNOWN_UNIT;

    FENTRY;
    btk_mutex_fini(&kmem_mutex);
    FEXIT(0);
    return;
}
int
btk_mem_init(
    void
    )
{
    int retvalue = 0;   /* Assume success */
    FUNCTION("btk_mem_init");
    LOG_UNKNOWN_UNIT;

#if defined(__sgi)
    int old_level = 0;  /* old interrupt level */
#endif /* defined(__sgi) */

    FENTRY;

#if defined(__sun)
    retvalue = btk_mutex_init(&kmem_mutex, "btk_mem mutex", NULL);
#elif defined(_NTDDK_)
    retvalue = btk_mutex_init(&kmem_mutex, BT_SPIN_LOCK);
#elif defined(__sgi)
    retvalue = btk_mutex_init(&kmem_mutex, "btk_mem_mutex");
#elif defined(BT_uCOS)
    retvalue = btk_mutex_init(&kmem_mutex);
#elif defined(__linux__)
    retvalue = btk_mutex_init(&kmem_mutex, 0);
#elif defined(__lynxos)
    retvalue = btk_mutex_init(&kmem_mutex);
#else
    retvalue = btk_mutex_init(&kmem_mutex);
#endif /* __sun, _NTDDK_, __sgi, BT_uCOS, __linux__, __lynxos */

    FEXIT(retvalue);
    return retvalue;
}
loff_t
btp_llseek(
    struct file *file_p,
    loff_t offset,
    int which
    )
{
    FUNCTION("btp_llseek");
    LOG_DEVID(file_p);

    bt_dev_t type = GET_LDEV_TYPE(file_p);
    bt_unit_t *unit_p = GET_UNIT_PTR(file_p);
    loff_t retval = 0;
    loff_t f_pos, end_pos;

    FENTRY;

    /* Determine where the end of the various logical devices are */
    switch (type) {
      case BT_DEV_RDP:
        if (IS_CLR(unit_p->bt_status, BT_PCI2PCI)) {
            end_pos = 8 * SIZE_1MB;
        } else if (IS_SET(unit_p->bt_status, BT_NEXT_GEN)) {
            end_pos = 192 * SIZE_1KB;
        } else {
            end_pos = 0;
        }
        break;
      case BT_DEV_LDP:
        if (IS_CLR(unit_p->bt_status, BT_NEXT_GEN)) {
            end_pos = 0;
        } else {
            end_pos = 192 * SIZE_1KB;
        }
        break;
      case BT_DEV_A24:  /* 24 address bits */
        end_pos = 16 * SIZE_1MB;
        break;
      case BT_DEV_IO:   /* 16 address bits */
        end_pos = 64 * SIZE_1KB;
        break;
      case BT_DEV_A32:  /* 32 address bits */
        end_pos = ((loff_t) 1) << 32;
        break;
      case BT_DEV_RE:   /* 31 address bits, only provided for compatibility */
        end_pos = ((loff_t) 1) << 31;
        break;
      case BT_DEV_LM:   /* User determined when device was loaded */
        end_pos = unit_p->lm_size;
        if (0 == end_pos) {
            INFO_STR("Local Memory device not enabled.\n");
            retval = -ENXIO;
            goto llseek_end;
        }
        break;
      default:
        TRC_MSG(BT_TRC_RD_WR,
                (LOG_FMT "Unrecognized device type: %d.\n", LOG_ARG, type));
        retval = -ENXIO;
        goto llseek_end;
    }

    /* Determine what the new address would be */
    switch (which) {
      case SEEK_SET:
        f_pos = offset;
        break;
      case SEEK_CUR:
        f_pos = file_p->f_pos + offset;
        break;
      case SEEK_END:
        f_pos = end_pos + offset;   /* Better be a negative offset */
        break;
      default:  /* Should never happen */
        INFO_STR("Invalid SEEK type.\n");
        retval = -EINVAL;
        goto llseek_end;
    }

    /* Reject positions before the start or past the end of the device */
    if ((f_pos < 0) || (f_pos >= end_pos)) {
        INFO_STR("Seek outside the range of the logical device.\n");
        retval = -EINVAL;
        goto llseek_end;
    }
    file_p->f_pos = f_pos;
    retval = f_pos;

llseek_end:
    FEXIT(retval);
    return retval;
}
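/*
** Usage sketch (illustrative only, compiled out): how a user-space program
** might seek within a logical device before a read or write.  The node name
** "/dev/btp0" is an assumption; actual names depend on the installation.
*/
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int example_seek(void)
{
    int fd = open("/dev/btp0", O_RDWR);     /* hypothetical node name */

    if (fd < 0) {
        return -1;
    }
    /* Position at remote offset 1 MB; btp_llseek() rejects any position
    ** at or beyond the end of the logical device (16 MB for an A24
    ** window, for example). */
    if (lseek(fd, 0x100000, SEEK_SET) == (off_t) -1) {
        perror("lseek");
    }
    close(fd);
    return 0;
}
#endif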
static int
btp_xfer(
    bt_unit_t *unit_p,
    bt_dev_t type,
    bt_accessflag_t dir,
    void *usr_data_p,
    unsigned long dest_addr,
    size_t length,
    size_t *xferred_bytes_p
    )
{
    FUNCTION("btp_xfer");
    LOG_UNIT(unit_p->unit_number);

    void *data_p;
    int dma_flag;
    int data_width;
    size_t length_remaining = length;
    unsigned int start, need;
    bt_data32_t mreg_value;
    bt_error_t retval = BT_SUCCESS;
    unsigned int inx;
    unsigned long pci_addr;
    bt_data32_t ldma_addr;
    struct page **pages;
    int ret, i, write;
    bt_data32_t usr_curr_offset;
    caddr_t kbuf_p;
    unsigned int nr_pages;

    FENTRY;

    /*
    ** Haven't transferred any data yet
    */
    *xferred_bytes_p = 0;

    /*
    ** Adjust for the extended remote RAM window.
    */
    if (type == BT_AXSRE) {
        dest_addr |= RE_ADJUST;
    }

    /*
    ** Normally the read and write routines would each contain this loop.
    ** Since both require the same loop, it lives in this routine instead.
    */
    while ((length_remaining > 0) && (BT_SUCCESS == retval)) {
        int xferred_length = 0;
        int requested_length = length_remaining;

        /*
        ** Set up direction and current offset
        */
        if (dir == BT_WR) {
            write = 1;
        } else {
            write = 0;
        }
        usr_curr_offset = (bt_data32_t) ((bt_devaddr_t) usr_data_p & (PAGE_SIZE - 1));

        /*
        ** Allocate a scatter/gather list
        */
        nr_pages = (usr_curr_offset + requested_length + ~PAGE_MASK) >> PAGE_SHIFT;
        pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
            WARN_STR("Failed to kmalloc scatter/gather list.\n");
            retval = BT_EIO;
            goto btp_xfer_end;
        }

        /*
        ** Pin the user pages and store them in the scatter/gather list
        */
        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, (unsigned long) usr_data_p,
                             nr_pages, write, 1, pages, NULL);
        up_read(&current->mm->mmap_sem);
        /* Cast so a negative return is not promoted to a huge unsigned value */
        if (ret < (int) nr_pages) {
            WARN_STR("Failed to create scatter/gather list for user buffer.\n");
            for (i = 0; i < ret; i++) {
                page_cache_release(pages[i]);
            }
            kfree(pages);
            retval = BT_EIO;
            goto btp_xfer_end;
        }

        /*
        ** Determine whether we do DMA or PIO
        */
        btk_dma_pio(unit_p, type, (bt_devaddr_t) usr_data_p, dest_addr,
                    &requested_length, &dma_flag, &data_width, &start, &need);

#define BTP_FREE_MREG \
        if (dma_flag) { \
            btk_mutex_enter(&unit_p->mreg_mutex); \
            (void) btk_bit_free(unit_p->sdma_aval_p, start, need); \
            btk_mutex_exit(&unit_p->mreg_mutex); \
            btk_mutex_exit(&unit_p->dma_mutex); \
        } \
        for (i = 0; i < nr_pages; i++) { \
            if (BT_RD == dir) { \
                set_page_dirty_lock(pages[i]); \
            } \
            page_cache_release(pages[i]); \
        } \
        kfree(pages);

        /*
        ** Can't let PIO go past one page
        */
        if (!dma_flag) {
            if ((usr_curr_offset + requested_length) > PAGE_SIZE) {
                requested_length = PAGE_SIZE - usr_curr_offset;
            }
            need = 1;
        }
        TRC_MSG(BT_TRC_RD_WR,
                (LOG_FMT "Transferring %d bytes of data to 0x%lx using %s.\n",
                LOG_ARG, requested_length, dest_addr,
                ((dma_flag) ? "DMA" : "PIO")));

        if (dma_flag) {

            /*
            ** Set up the VME address, address modifier, and function code
            */
            mreg_value = 0;
            btk_setup_mreg(unit_p, type, &mreg_value, BT_OP_DMA);

            /*
            ** Program up the mapping RAM
            */
            for (inx = start, i = 0; inx < (start + need); inx++, i++) {
                pci_addr = (unsigned long) page_to_phys(pages[i]);
                if (0 == pci_addr) {
                    WARN_STR("Kernel to PCI address translation failed.\n");
                    retval = BT_EIO;
                    goto end_xfer_loop;
                }
                mreg_value &= ~BT_MREG_ADDR_MASK;
                mreg_value |= (pci_addr & BT_MREG_ADDR_MASK);
                btk_put_mreg(unit_p, inx, BT_LMREG_DMA_2_PCI, mreg_value);
                if ((btk_get_mreg(unit_p, inx, BT_LMREG_DMA_2_PCI)) != mreg_value) {
                    WARN_MSG((LOG_FMT "Verify write of BT_LMREG_DMA_2_PCI mapping register mr_idx = 0x%.1X failed.\n",
                             LOG_ARG, inx));
                    retval = BT_EIO;
                    goto end_xfer_loop;
                }
            }

            /*
            ** Now we need to get the DMA semaphore.
            ** Note: this routine does nothing in a single-driver situation.
            */
            retval = btk_take_drv_sema(unit_p);
            if (retval != BT_SUCCESS) {
                goto end_xfer_loop;
            }

            /*
            ** If this is an old Nanobus card, we must stop PIO from occurring
            */
            if (IS_CLR(unit_p->bt_status, BT_NEXT_GEN)) {
                btk_rwlock_wr_enter(&unit_p->hw_rwlock);
            }

            /*
            ** Do the DMA
            */
            ldma_addr = (bt_data32_t) ((start * BT_PAGE_SIZE) + usr_curr_offset);
            xferred_length = requested_length;
            retval = btk_dma_xfer(unit_p, type, ldma_addr,
                                  (bt_data32_t) dest_addr, &xferred_length,
                                  (dir == BT_RD) ? BT_READ : BT_WRITE,
                                  data_width);

            if (IS_CLR(unit_p->bt_status, BT_NEXT_GEN)) {
                btk_rwlock_wr_exit(&unit_p->hw_rwlock);
            }
            btk_give_drv_sema(unit_p);

        /*
        ** Do a PIO
        */
        } else {

            /*
            ** Perform the proper direction PIO data transfer
            */
            kbuf_p = kmap(pages[0]);
            if (kbuf_p == NULL) {
                INFO_STR("Failed to get kernel pointer to PIO user buffer");
                retval = BT_EIO;
            } else {
                data_p = (void *) (kbuf_p + usr_curr_offset);
                xferred_length = requested_length;
                retval = btk_pio_xfer(unit_p, type, data_p,
                                      (unsigned long) dest_addr,
                                      &xferred_length,
                                      (dir == BT_RD) ? BT_READ : BT_WRITE);
                kunmap(pages[0]);
            }
        }

end_xfer_loop:
        BTP_FREE_MREG;
        TRC_MSG(BT_TRC_RD_WR,
                (LOG_FMT "%s transfer done 0x%x bytes transferred, retval %d\n",
                LOG_ARG, ((dma_flag) ? "DMA" : "PIO"), xferred_length, retval));
        usr_data_p = (caddr_t) usr_data_p + xferred_length;
        dest_addr += xferred_length;
        length_remaining -= xferred_length;
    }

btp_xfer_end:
    *xferred_bytes_p = length - length_remaining;
    FEXIT(retval);
    return retval;
}
static ssize_t
btp_lm_wr(
    struct file *file_p,
    const char *data_p,
    size_t length,
    loff_t *new_fpos_p
    )
{
    FUNCTION("btp_lm_wr");
    LOG_DEVID(file_p);

    bt_dev_t type = GET_LDEV_TYPE(file_p);
    bt_unit_t *unit_p = GET_UNIT_PTR(file_p);
    char *curr_buf_p;
    size_t len;
    loff_t f_pos = file_p->f_pos;
    long xferred_bytes = 0;
    unsigned long dest_addr;

    FENTRY;

    /*
    ** Check for valid unit
    */
    if (NULL == unit_p) {
        WARN_STR(no_unit_p);
        xferred_bytes = -ENXIO;
        goto btp_lm_wr_end;
    }

    /*
    ** Check if this device is currently on-line
    */
    if ((unit_p->logstat[type] & STAT_ONLINE) == 0) {
        INFO_STR("Logical device not on-line");
        xferred_bytes = -ENXIO;
        goto btp_lm_wr_end;
    }

    /*
    ** Check if this device supports writes
    */
    if ((unit_p->logstat[type] & STAT_WRITE) == 0) {
        INFO_STR("Logical device does not support writing");
        xferred_bytes = -ENXIO;
        goto btp_lm_wr_end;
    }

    /*
    ** Get the device address
    */
    dest_addr = file_p->f_pos;
    TRC_MSG(BT_TRC_RD_WR,
            (LOG_FMT "Write logical device %d, transfer length %ld.\n",
            LOG_ARG, type, (long) length));

    /*
    ** Copy from user space into the local memory buffer, wrapping back
    ** to offset zero at the end of the device
    */
    len = (f_pos + length > unit_p->lm_size) ? unit_p->lm_size - f_pos : length;
    curr_buf_p = (char *) data_p;
    do {
        if (copy_from_user(unit_p->lm_kaddr + f_pos, curr_buf_p, len)) {
            if (0 == xferred_bytes) {
                xferred_bytes = -EFAULT;
            }
            break;
        }

        /* Update counts and such */
        xferred_bytes += len;
        curr_buf_p += len;
        f_pos += len;
        if (f_pos >= unit_p->lm_size) {
            f_pos = 0;
        }
        if (length - xferred_bytes > unit_p->lm_size) {
            len = unit_p->lm_size;
        } else {
            len = length - xferred_bytes;
        }
        *new_fpos_p = f_pos;
    } while ((xferred_bytes > 0) && (xferred_bytes < length));

btp_lm_wr_end:
    FEXIT(xferred_bytes);
    return xferred_bytes;
}
ssize_t
btp_write(
    struct file *file_p,
    const char *data_p,
    size_t length,
    loff_t *new_fpos_p
    )
{
    FUNCTION("btp_write");
    LOG_DEVID(file_p);

    bt_dev_t type = GET_LDEV_TYPE(file_p);
    bt_unit_t *unit_p = GET_UNIT_PTR(file_p);
    int ret_val;
    long xferred_bytes = 0;
    unsigned long dest_addr;
    size_t amount_xferred;

    FENTRY;

    /*
    ** Handle local memory
    */
    if (type == BT_DEV_LM) {
        xferred_bytes = btp_lm_wr(file_p, data_p, length, new_fpos_p);
        goto btp_write_end;
    }

    /*
    ** Check for valid unit
    */
    if (NULL == unit_p) {
        WARN_STR(no_unit_p);
        xferred_bytes = -ENXIO;
        goto btp_write_end;
    }

    /*
    ** Check if this device is currently on-line
    */
    if ((unit_p->logstat[type] & STAT_ONLINE) == 0) {
        INFO_STR("Logical device not on-line");
        xferred_bytes = -ENXIO;
        goto btp_write_end;
    }

    /*
    ** Check if this device supports writes
    */
    if ((unit_p->logstat[type] & STAT_WRITE) == 0) {
        INFO_STR("Logical device does not support writing");
        xferred_bytes = -ENXIO;
        goto btp_write_end;
    }

    /*
    ** Get the device address
    */
    dest_addr = file_p->f_pos;
    TRC_MSG(BT_TRC_RD_WR,
            (LOG_FMT "Write logical device %d, transfer length %ld.\n",
            LOG_ARG, type, (long) length));

    /*
    ** Do the transfer by either PIO or DMA
    */
    ret_val = btp_xfer(unit_p, type, BT_WR, (char *) data_p, dest_addr,
                       length, &amount_xferred);
    if (BT_SUCCESS == ret_val) {
        *new_fpos_p = dest_addr + amount_xferred;
        xferred_bytes = amount_xferred;
    } else {
        xferred_bytes = -ret_val;
    }

btp_write_end:
    FEXIT(xferred_bytes);
    return xferred_bytes;
}
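/*
** Usage sketch (illustrative only, compiled out): a user-space write that
** goes through btp_write() and on to the PIO/DMA path in btp_xfer().  On
** success the file position advances by the number of bytes transferred, so
** sequential writes walk through the remote window.  The node name is an
** assumption, as above.
*/
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int example_write(void)
{
    unsigned char buf[256];
    int fd = open("/dev/btp0", O_WRONLY);   /* hypothetical node name */

    if (fd < 0) {
        return -1;
    }
    memset(buf, 0xA5, sizeof(buf));
    lseek(fd, 0x1000, SEEK_SET);            /* remote offset 0x1000 */
    if (write(fd, buf, sizeof(buf)) != sizeof(buf)) {
        /* Partial or failed transfer; errno reflects the driver error. */
    }
    close(fd);
    return 0;
}
#endif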
void
btk_mem_free(
    void *kmem_p,
    size_t size
    )
{
    FUNCTION("btk_mem_free");
    LOG_UNKNOWN_UNIT;

    FENTRY;

    TRC_MSG((BT_TRC_ALLOC | BT_TRC_DETAIL),
            (LOG_FMT "ptr: " PTR_FMT " size %d.\n",
            LOG_ARG, kmem_p, (int) size));

#if defined(_AIX)
    {
        int retvalue;

        retvalue = (int) xmfree(kmem_p, pinned_heap);
        if (0 != retvalue) {
            INFO_MSG((LOG_FMT "Error %d from xmfree().\n", LOG_ARG, retvalue));
        }
    }
#elif defined(__hpux)
    sys_memfree(kmem_p, size);
#elif defined(__sun)
    kmem_free(kmem_p, size);
#elif defined(__sgi)
    kmem_free(kmem_p, size);
#elif defined(__vxworks)
    free(kmem_p);
#elif defined(_NTDDK_)
    ExFreePool(kmem_p);
#elif defined(BT_uCOS)
    free(kmem_p);
#elif defined(__linux__)
    kfree(kmem_p);
#endif /* _AIX, __hpux, __sun, __sgi, __vxworks, _NTDDK_, BT_uCOS, __linux__ */

    btk_mutex_enter(&kmem_mutex);
    btk_alloc_total_g -= size;  /* running total of kmem */
    if (btk_alloc_total_g < 0) {
        FATAL_STR("Allocated kernel memory went negative.\n");
    }
    TRC_MSG((BT_TRC_ALLOC),
            (LOG_FMT "ptr " PTR_FMT "; size %d; total %d.\n",
            LOG_ARG, kmem_p, (int) size, btk_alloc_total_g));
    btk_mutex_exit(&kmem_mutex);

    FEXIT(0);
    return;
}
void *
btk_mem_alloc(
    size_t size,
    bt_data32_t flags
    )
{
    void *kmem_p = NULL;
    FUNCTION("btk_mem_alloc");
    LOG_UNKNOWN_UNIT;

    FENTRY;

#if defined(_AIX)
    kmem_p = xmalloc(size, ALIGN_LONG,
                     (flags & BTK_ALLOC_SWAPPABLE) ? 0 : pinned_heap);
#elif defined(__hpux)
    /*
    ** sys_memall() allocates a number of virtual memory pages (NBPG=4K).
    ** Since the driver calls btk_mem_alloc() to get each interrupt
    ** registration structure, this is wasteful of lockable system memory.
    ** Eventually it would be a good idea to add another layer of memory
    ** management to clean this up.
    */
    kmem_p = sys_memall(size);
#elif defined(__sun)
    kmem_p = kmem_alloc(size, KM_NOSLEEP);
#elif defined(__sgi)
    /*
    ** Does not request cache-aligned or physically contiguous memory.
    ** If the memory is going to be DMA'd into or out of (i.e. written
    ** or read, respectively), then kmem_alloc() must be called directly
    ** with the appropriate flag for cache alignment.  In addition to
    ** cache-aligning the buffer, cache flushing issues must be taken
    ** into consideration and are unique to the platform in question.
    */
    kmem_p = kmem_alloc(size, 0);
#elif defined(__vxworks)
    kmem_p = malloc(size);
#elif defined(BT_uCOS)
    kmem_p = malloc(size);
#elif defined(_NTDDK_)
    /*
    ** The requested memory is not cache aligned or physically contiguous.
    ** Don't use it for DMA.  Also, don't use it for a buffer to be shared
    ** with user space unless size < physical page size, because the user
    ** virtual address won't be valid across non-contiguous pages.
    */
    kmem_p = ExAllocatePool(
                 (flags & BTK_ALLOC_SWAPPABLE) ? PagedPool : NonPagedPool,
                 size);
#elif defined(__linux__)
    kmem_p = kmalloc(size, GFP_KERNEL);
#elif defined(__lynxos)
    kmem_p = sysbrk(size);
#endif /* _AIX, __hpux, __sun, __sgi, __vxworks, BT_uCOS, _NTDDK_, __linux__, __lynxos */

    /*
    ** Protect btk_alloc_total_g accesses.  This mutex may not be held
    ** during the preceding memory allocation, to avoid doing the
    ** allocations at a raised interrupt level.
    */
    btk_mutex_enter(&kmem_mutex);
    btk_alloc_total_g += size;  /* running total of kmem */
    if (btk_alloc_total_g < 0) {
        FATAL_STR("Allocated kernel memory went negative.\n");
    }
    TRC_MSG((BT_TRC_ALLOC),
            (LOG_FMT "ptr " PTR_FMT "; size %d; total %d.\n",
            LOG_ARG, kmem_p, (int) size, btk_alloc_total_g));
    btk_mutex_exit(&kmem_mutex);

    FEXIT(kmem_p);
    return (kmem_p);
}
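/*
** Usage sketch (hypothetical caller, compiled out): btk_mem_free() takes the
** allocation size, so callers must remember how much they requested; passing
** the original size keeps the btk_alloc_total_g leak counter balanced.
*/
#if 0
static int example_alloc_free(void)
{
    size_t size = 256;
    void *p = btk_mem_alloc(size, 0);   /* 0 = default, non-swappable */

    if (NULL == p) {
        return BT_ENOMEM;
    }
    /* ... use the buffer ... */
    btk_mem_free(p, size);              /* must pass the original size */
    return BT_SUCCESS;
}
#endif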
/******************************************************************************
**
**  Function:   btk_mutex_enter()
**
**  Purpose:    Start section protected by mutex.
**
**  Args:
**      mutex_p     Pointer to bt_mutex_t to be entered.
**
**  Returns:    Void
**
**  Notes:
**      Calls to btk_mutex_enter() cannot be nested.
**
**  NT Notes:
**  If mutex_p->mutex_type == BT_SPIN_LOCK:
**  1.  Callers must be running at IRQL <= DISPATCH_LEVEL.
**  2.  The code guarded by btk_mutex_enter() must neither be pageable nor
**      make any references to pageable data.
**  3.  The code guarded by btk_mutex_enter() can neither call any external
**      function that might access pageable data or raise an exception, nor
**      can it generate any exceptions.
**  4.  The caller should release the spin lock with btk_mutex_exit() as
**      quickly as possible.
**  5.  Busy waits the CPU if the spin lock cannot be acquired immediately.
**
**  If mutex_p->mutex_type == BT_FAST_MUTEX:
**  1.  Callers must be running at IRQL < DISPATCH_LEVEL.
**  2.  Puts the caller into a wait state and switches to another thread
**      of execution if the fast mutex cannot be acquired immediately.
**
******************************************************************************/
void
btk_mutex_enter(
    bt_mutex_t *mutex_p     /* pointer to bt_mutex_t */
    )
{
    FUNCTION("btk_mutex_enter");
    LOG_UNKNOWN_UNIT;

    FENTRY;

#if defined(__sun)
    mutex_enter(mutex_p);
#elif defined(__sgi)
    MUTEX_LOCK(mutex_p, -1);    /* -1 is unused param in IRIX 6.5.4 */
#elif defined(__vxworks)
    {
        STATUS vx_ret;

        vx_ret = semTake(*mutex_p, WAIT_FOREVER);
        if (OK != vx_ret) {
            INFO_STR("Invalid semaphore ID or task timed out.");
        }
    }
#elif defined(_NTDDK_)
    {
        KIRQL local_old_irql;

        switch (mutex_p->mutex_type) {
          case BT_SPIN_LOCK:
            KeAcquireSpinLock(&mutex_p->mutex_obj.spin_lock, &local_old_irql);
            mutex_p->old_irql = local_old_irql;
            break;
          case BT_FAST_MUTEX:
            ExAcquireFastMutex(&mutex_p->mutex_obj.fast_mutex);
            break;
          default:
            BTK_ASSERT(FALSE);
            break;
        }
    }
#elif defined(BT_uCOS)
    {
        UBYTE err;

        /* Pend on (acquire) the semaphore; a timeout of 0 blocks until
        ** it becomes available. */
        OSSemPend(*mutex_p, 0, &err);
        if (OS_NO_ERR != err) {
            TRC_STR(BT_TRC_KLIB, "Failed to acquire semaphore.\n");
        }
    }
#elif defined(__linux__)
    (void) down(mutex_p);
#elif defined(__lynxos)
    /* Acquire the lock */
    swait(mutex_p, SEM_SIGIGNORE);
#else
#error Code not written yet
#endif /* __sun, __sgi, __vxworks, _NTDDK_, BT_uCOS, __linux__, __lynxos */

    FEXIT(0);
    return;
}   /* end btk_mutex_enter() */
/******************************************************************************
**
**  Function:   btk_mutex_fini()
**
**  Purpose:    Releases any resources allocated by btk_mutex_init().
**
**  Args:
**      mutex_p     Pointer to bt_mutex_t to be finalized.
**
**  Returns:    Void
**
**  Notes:
**
******************************************************************************/
void
btk_mutex_fini(
    bt_mutex_t *mutex_p     /* pointer to bt_mutex_t */
    )
{
#if defined(__vxworks)
    SEM_ID dead_mutex;
    STATUS vx_ret;
#endif /* defined(__vxworks) */

    FUNCTION("btk_mutex_fini");
    LOG_UNKNOWN_UNIT;

    FENTRY;

#if defined(__sun)
    mutex_destroy(mutex_p);
#elif defined(__sgi)
    MUTEX_DESTROY(mutex_p);
#elif defined(__vxworks)
    dead_mutex = *mutex_p;
    *mutex_p = NULL;
    vx_ret = semMGiveForce(dead_mutex);
    if (OK != vx_ret) {
        INFO_STR("Semaphore ID is invalid.");
    } else {
        semDelete(dead_mutex);
    }
#elif defined(_NTDDK_)
    switch (mutex_p->mutex_type) {
      case BT_SPIN_LOCK:
        break;
      case BT_FAST_MUTEX:
        break;
      default:
        BTK_ASSERT(FALSE);
        break;
    }
#elif defined(BT_uCOS)
    /* No action required: once a uC/OS semaphore is created it cannot
    ** be deleted. */
    ;
#elif defined(__linux__)
    /* No action required */
    ;
#elif defined(__lynxos)
    /* No action required */
    ;
#else
#error Code not written yet
#endif /* __sun, __sgi, __vxworks, _NTDDK_, BT_uCOS, __linux__, __lynxos */

    FEXIT(0);
    return;
}   /* end btk_mutex_fini() */
/******************************************************************************
**
**  Function:   btk_mutex_init()
**
**  Purpose:    Initializes a kernel mutual exclusion object.
**
**  Args:
**      mutex_p     Pointer to bt_mutex_t to be initialized.
**
**  Solaris specific parameters:
**      name_p      String name for mutex
**      iblock_p    Interrupt cookie
**
**  VxWorks specific parameters:
**      None
**
**  SGI specific parameters:
**      name_p      String name for mutex, may be NULL
**
**  NT specific parameters:
**      mutex_type  Specifies type as BT_SPIN_LOCK or BT_FAST_MUTEX
**
**  Linux specific parameters:
**      irq_cookie  == 0, never used from IRQ level.
**                  Otherwise it may be used at IRQ level, so we use
**                  a spinlock.
**
**  Returns:
**      0           Success
**      Otherwise   Error value.
**
**  Notes:
**
**  NT Notes:
**      Caller must be running at IRQL <= DISPATCH_LEVEL.
**
******************************************************************************/
int
btk_mutex_init(
    bt_mutex_t *mutex_p     /* pointer to bt_mutex_t */
#if defined(__sun)
    ,
    char *name_p,
    ddi_iblock_cookie_t *mutex_cookie_p
#elif defined(_NTDDK_)
    ,
    bt_mutex_type_t mutex_type
#elif defined(__sgi)
    ,
    char *name_p
#elif defined(__linux__)
    ,
    bt_cookie_t irq_cookie
#endif
    )
{
    int retvalue = BT_SUCCESS;  /* Assume success */
    FUNCTION("btk_mutex_init");
    LOG_UNKNOWN_UNIT;

    FENTRY;

#if defined(__sun)
    mutex_init(mutex_p, name_p, MUTEX_DRIVER, mutex_cookie_p);
#elif defined(__sgi)
    MUTEX_INIT(mutex_p, MUTEX_DEFAULT, name_p);
#elif defined(__vxworks)
    *mutex_p = semMCreate(SEM_INVERSION_SAFE | SEM_Q_PRIORITY);
    if (NULL == *mutex_p) {
        INFO_STR("Couldn't create semaphore.");
        retvalue = BT_ENOMEM;
    }
#elif defined(_NTDDK_)
    switch (mutex_type) {
      case BT_SPIN_LOCK:
        KeInitializeSpinLock(&mutex_p->mutex_obj.spin_lock);
        break;
      case BT_FAST_MUTEX:
        ExInitializeFastMutex(&mutex_p->mutex_obj.fast_mutex);
        break;
      default:
        FATAL_STR("Invalid mutex type, defaulting to BT_FAST_MUTEX\n");
        mutex_type = BT_FAST_MUTEX;
        BTK_ASSERT(FALSE);
        retvalue = BT_EINVAL;
        break;
    }
    mutex_p->mutex_type = mutex_type;
#elif defined(BT_uCOS)
    /*
    ** Create the semaphore and initialize the event to full (count of 1)
    ** so the first call to OSSemPend() will decrement the semaphore count
    ** and lock out any other calling task.
    */
    *mutex_p = OSSemCreate(1);
    if (NULL == *mutex_p) {
        INFO_STR("No event control block available.\n");
        retvalue = BT_ENOMEM;
    }
#elif defined(__linux__)
    sema_init(mutex_p, 1);
#elif defined(__lynxos)
    /* Allow anyone to acquire the mutex */
    *mutex_p = 1;
#else
#error Code not written yet
#endif /* __sun, __sgi, __vxworks, _NTDDK_, BT_uCOS, __linux__, __lynxos */

    FEXIT(retvalue);
    return retvalue;
}   /* end btk_mutex_init() */
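/*
** Usage sketch (Linux variant, compiled out): the typical lifecycle of a
** bt_mutex_t.  Calls to btk_mutex_enter() cannot be nested; the second
** argument to btk_mutex_init() here is the Linux irq_cookie (0 = never
** used at IRQ level).  Illustrative only.
*/
#if 0
static bt_mutex_t example_mutex;

static void example_mutex_lifecycle(void)
{
    if (btk_mutex_init(&example_mutex, 0) != BT_SUCCESS) {
        return;
    }
    btk_mutex_enter(&example_mutex);
    /* ... critical section: no nested btk_mutex_enter() calls ... */
    btk_mutex_exit(&example_mutex);
    btk_mutex_fini(&example_mutex);
}
#endif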