/**
 * @brief Initialise the hashtable.
 *
 * Allocate the hash table entries and initialise the hash masks.
 *
 * @param hash_table Hash table to setup.
 * @param size Requested size for the hash table in number of entries.
 */
void hash_init(HashTable *hash_table, const unsigned long long size)
{
	int i, n_way;

	/* round HASH_N_WAY up to the next power of two */
	for (n_way = 1; n_way < HASH_N_WAY; n_way <<= 1);

	assert(hash_table != NULL);
	assert((n_way & -n_way) == n_way);

	info("< init hashtable of %llu entries>\n", size);
	/* NOTE(review): tests ->hash but frees ->memory; assumes both are
	 * NULL/valid together — confirm the struct is zeroed before first use. */
	if (hash_table->hash != NULL) free(hash_table->memory);
	/* over-allocate by n_way + 1 entries so the table can be aligned below */
	hash_table->memory = malloc((size + n_way + 1) * sizeof (Hash));
	if (hash_table->memory == NULL) {
		fatal_error("hash_init: cannot allocate the hash table\n");
	}

	if (HASH_ALIGNED) {
		/* align the first entry on an n_way * sizeof(Hash) boundary */
		const size_t alignment = n_way * sizeof (Hash) - 1;
		hash_table->hash = (Hash*) (((size_t) hash_table->memory + alignment) & ~alignment);
		hash_table->hash_mask = size - n_way;
	} else {
		hash_table->hash = (Hash*) hash_table->memory;
		hash_table->hash_mask = size - 1;
	}

	hash_cleanup(hash_table);

	/* 256 locks per cpu; lock_mask requires n_lock to be a power of two,
	 * the extra n_way + 1 entries added after are padding */
	hash_table->n_lock = 256 * MAX(get_cpu_number(), 1);
	hash_table->lock_mask = hash_table->n_lock - 1;
	hash_table->n_lock += n_way + 1;
	/* NOTE(review): this malloc is not checked for NULL, unlike the
	 * table allocation above — confirm whether fatal_error is wanted here. */
	hash_table->lock = (HashLock*) malloc(hash_table->n_lock * sizeof (HashLock));
	for (i = 0; i < hash_table->n_lock; ++i) spin_init(hash_table->lock + i);
}
/*
 * process_init: one-time init of the process-management module.
 *
 * Performs arch-specific setup, initialises the global process id
 * counter, lock, list and lookup tree, then creates the kernel process.
 *
 * @param kargs kernel boot arguments, passed through to the arch layer.
 * @return NO_ERROR on success, the arch error code or ERR_MT_GENERAL
 *         if the kernel process cannot be created.
 */
status_t process_init(kernel_args_t *kargs)
{
    status_t err;
    proc_id pid;

    /* call arch-specific init */
    err = arch_process_init(kargs);
    if(err != NO_ERROR)
        return err;

    /* valid process ids and processes group ids starts from 1 */
    next_process_id = 1;

    /* data structures spinlock init */
    spin_init(&processes_lock);

    /* list init */
    xlist_init(&processes_list);

    /* tree init: processes indexed by id via their embedded tree node */
    avl_tree_create( &processes_tree, compare_process_id,
                     sizeof(process_t),
                     offsetof(process_t, procs_tree_node) );

    /* create kernel process */
    pid = proc_create_kernel_process("kernel_process");
    if(pid == INVALID_PROCESSID)
        return ERR_MT_GENERAL;

    return NO_ERROR;
}
/*
 * cv_init: prepare a condition variable for first use.
 *
 * Sets up the embedded spinlock, clears the waiter count and records
 * the human-readable description supplied by the caller.
 */
void
cv_init(struct cv *c, const char *desc)
{
	spin_init(&c->cv_lock, "cvinit");
	c->cv_waiters = 0;
	c->cv_desc = desc;
}
/*
 * AcpiOsCreateSemaphore: OS services layer hook that builds a counting
 * semaphore for ACPICA.
 *
 * Returns AE_BAD_PARAMETER for a NULL out-handle or when the initial
 * count exceeds the maximum; otherwise AE_OK.  The M_INTWAIT | M_ZERO
 * allocation blocks rather than fail, so the result is not NULL-checked.
 */
ACPI_STATUS AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits, ACPI_HANDLE *OutHandle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);

    spin_init(&as->as_spin);
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"created semaphore %p max %d, initial %d\n",
	as, InitialUnits, MaxUnits));

    *OutHandle = (ACPI_HANDLE)as;
#else
    /* NOTE(review): with semaphores compiled out, the handle is set to the
     * caller's own pointer — presumably just a non-NULL dummy; confirm. */
    *OutHandle = (ACPI_HANDLE)OutHandle;
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}
/*
 * file_init: populate a freshly allocated file object.
 *
 * The caller receives the single initial reference.  The read/write
 * offset starts at zero and the per-file spinlock is made ready.
 */
static void
file_init(struct file *file, uint64_t inum, int flags)
{
	spin_init(&file->lock);
	file->refcount = 1;
	file->offset = 0;
	file->flags = flags;
	file->inum = inum;
}
/*
 * make_pipe: allocate a VFS node backed by an in-memory ring buffer of
 * `size` bytes and wire up the pipe read/write/select operations.
 *
 * NOTE(review): none of the malloc() results (fnode, pipe, buffer) are
 * checked for NULL — confirm the allocator cannot fail in this kernel,
 * otherwise the memset/assignments below can fault under memory pressure.
 *
 * Returns the new fs node; the pipe device hangs off fnode->device.
 */
fs_node_t * make_pipe(size_t size) {
	fs_node_t * fnode = malloc(sizeof(fs_node_t));
	pipe_device_t * pipe = malloc(sizeof(pipe_device_t));
	memset(fnode, 0, sizeof(fs_node_t));
	memset(pipe, 0, sizeof(pipe_device_t));

	fnode->device = 0;
	fnode->name[0] = '\0';
	sprintf(fnode->name, "[pipe]");
	fnode->uid = 0;
	fnode->gid = 0;
	fnode->mask = 0666;
	fnode->flags = FS_PIPE;
	/* pipe I/O entry points */
	fnode->read = read_pipe;
	fnode->write = write_pipe;
	fnode->open = open_pipe;
	fnode->close = close_pipe;
	fnode->readdir = NULL;
	fnode->finddir = NULL;
	fnode->ioctl = NULL; /* TODO ioctls for pipes? maybe */
	fnode->get_size = pipe_size;
	fnode->selectcheck = pipe_check;
	fnode->selectwait = pipe_wait;
	/* all three timestamps start at creation time */
	fnode->atime = now();
	fnode->mtime = fnode->atime;
	fnode->ctime = fnode->atime;

	fnode->device = pipe;

	/* empty ring buffer: read and write cursors both at 0 */
	pipe->buffer = malloc(size);
	pipe->write_ptr = 0;
	pipe->read_ptr = 0;
	pipe->size = size;
	pipe->refcount = 0;
	pipe->dead = 0;

	spin_init(pipe->lock_read);
	spin_init(pipe->lock_write);

	pipe->wait_queue_writers = list_create();
	pipe->wait_queue_readers = list_create();

	return fnode;
}
/*
 * cam_sim_alloc() may potentially be called from an interrupt (?) but
 * unexpected things happen to the system if malloc() returns NULL so we
 * use M_INTWAIT anyway.
 *
 * Returns a new SIM with one reference, or NULL if no lock was supplied.
 */
struct cam_sim *
cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll,
	      const char *sim_name, void *softc, u_int32_t unit,
	      sim_lock *lock, int max_dev_transactions,
	      int max_tagged_dev_transactions, struct cam_devq *queue)
{
	struct cam_sim *sim;

	/*
	 * A lock is mandatory.  Reject the call up front, before the devq
	 * is allocated or referenced; the original checked this after the
	 * queue was set up and leaked a simq allocation (or an extra devq
	 * reference) on the early NULL return.
	 */
	if (lock == NULL)
		return (NULL);

	/*
	 * XXX ahd was limited to 256 instead of 512 for unknown reasons,
	 * move that to a global limit here.  We may be able to remove this
	 * code, needs testing.
	 */
	if (max_dev_transactions > 256)
		max_dev_transactions = 256;
	if (max_tagged_dev_transactions > 256)
		max_tagged_dev_transactions = 256;

	/*
	 * Allocate a simq or use the supplied (possibly shared) simq.
	 */
	if (queue == NULL)
		queue = cam_simq_alloc(max_tagged_dev_transactions);
	else
		cam_devq_reference(queue);

	sim = kmalloc(sizeof(struct cam_sim), M_CAMSIM, M_INTWAIT | M_ZERO);
	sim->sim_action = sim_action;
	sim->sim_poll = sim_poll;
	sim->sim_name = sim_name;
	sim->softc = softc;
	sim->path_id = CAM_PATH_ANY;
	sim->unit_number = unit;
	sim->bus_id = 0;	/* set in xpt_bus_register */
	sim->max_tagged_dev_openings = max_tagged_dev_transactions;
	sim->max_dev_openings = max_dev_transactions;
	sim->flags = 0;
	sim->refcount = 1;
	sim->devq = queue;
	sim->lock = lock;
	if (lock == &sim_mplock) {
		/* MP-locked SIM: plain callout, CAM_SIM_MPSAFE not set */
		callout_init(&sim->callout);
	} else {
		sim->flags |= CAM_SIM_MPSAFE;
		callout_init_mp(&sim->callout);
	}

	SLIST_INIT(&sim->ccb_freeq);
	TAILQ_INIT(&sim->sim_doneq);
	spin_init(&sim->sim_spin, "cam_sim_alloc");

	return (sim);
}
/*
 * Called from vfsinit()
 *
 * Initialise the global vnode inactive/active lists, seed the active
 * list with the rover marker, set up the vfs spinlock, and lift the
 * kmalloc limit on M_VNODE.
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_inactive_list);
	TAILQ_INIT(&vnode_active_list);
	/* the rover must be on the active list before any scan runs */
	TAILQ_INSERT_TAIL(&vnode_active_list, &vnode_active_rover, v_list);
	spin_init(&vfs_spin, "vfslock");
	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
}
/*
 * Helper routine to copy olimit to nlimit and initialize nlimit for
 * use. nlimit's reference count will be set to 1 and its exclusive bit
 * will be cleared.
 *
 * MPSAFE
 */
static void
plimit_copy(struct plimit *olimit, struct plimit *nlimit)
{
	/* struct copy first; then re-initialise the fields that must be
	 * private to the new copy (spinlock, refcount, exclusive bit) */
	*nlimit = *olimit;

	spin_init(&nlimit->p_spin);
	nlimit->p_refcnt = 1;
	nlimit->p_exclusive = 0;
}
/* Initialise the logging module: clear all log file slots and set up
 * their per-slot spinlocks. */
void log_init()
{
    int i;
    /* NOTE(review): iterates over MAX_LOGS+1 slots — assumes log_files[]
     * and log_locks[] are declared with MAX_LOGS+1 entries; confirm the
     * array sizes to rule out an off-by-one. */
    for (i = 0; i < MAX_LOGS+1; i++) {
        log_files[i] = NULL;
        spin_init(&log_locks[i], NULL);
    }
}
/*
 * ata_attach: bus attach entry point for an ATA channel.
 *
 * Initialises the channel softc and its state/queue spinlocks, resets
 * the controller hardware under the channel lock, then allocates and
 * hooks up the channel interrupt and identifies attached devices.
 *
 * Returns 0 on success, EEXIST if the channel already has an IRQ
 * resource (already attached), ENXIO or a bus error code on failure.
 */
int ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
	return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    spin_init(&ch->state_mtx, "ataattach_state");
    spin_init(&ch->queue_mtx, "ataattach_queue");
    ata_queue_init(ch);

    /* reset the controller HW, the channel and device(s) */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
	tsleep(&error, 0, "ataatch", 1);
    ATA_RESET(dev);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
				       RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
	device_printf(dev, "unable to allocate interrupt\n");
	return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS,
				(driver_intr_t *)ata_interrupt, ch,
				&ch->ih, NULL))) {
	device_printf(dev, "unable to setup interrupt\n");
	return error;
    }

    /* probe and attach devices on this channel unless we are in early boot */
    ata_identify(dev);
    return 0;
}
/*
 * drm_dma_setup: allocate and initialise the DMA bookkeeping for a DRM
 * device.  Returns 0 on success or ENOMEM if the zeroed dma structure
 * cannot be allocated (M_NOWAIT: allocation may fail under pressure).
 */
int drm_dma_setup(struct drm_device *dev)
{
	dev->dma = kmalloc(sizeof(*dev->dma), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (dev->dma == NULL)
		return ENOMEM;

	spin_init(&dev->dma_lock);

	return 0;
}
/*
 * Obtain a new vnode. The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags. An open() of the vnode will
 * decrement the (2-bit) flags. Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * We always allocate the vnode. Attempting to recycle existing vnodes
 * here can lead to numerous deadlocks, particularly with softupdates.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Do not flag for synchronous recyclement unless there are enough
	 * freeable vnodes to recycle and the number of vnodes has
	 * significantly exceeded our target. We want the normal vnlru
	 * process to handle the cleaning (at 9/10's) before we are forced
	 * to flag it here at 11/10's for userexit path processing.
	 */
	if (numvnodes >= maxvnodes * 11 / 10 &&
	    cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
		struct thread *td = curthread;
		if (td->td_lwp)
			atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
	}

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it. The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;

	vp = kmalloc(sizeof(*vp), M_VNODE, M_ZERO | M_WAITOK);

	/* embedded lock/token/tree structures */
	lwkt_token_init(&vp->v_token, "vnode");
	lockinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	spin_init(&vp->v_spin, "allocvnode");

	/* return the vnode VX-locked with one reference held */
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
	atomic_add_int(&numvnodes, 1);
	vp->v_refcnt = 1;
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_state = VS_CACHED;
	_vactivate(vp);

	return (vp);
}
/*
 * pit_init: program the Programmable Interval Timer.
 *
 * Prepares the PIT spinlock, then loads counter 0 with the divisor that
 * makes the timer fire HZ times per second.  Always returns 0.
 */
uint32 pit_init(void)
{
    uint16 divisor = SYS_CLOCK_RATE / HZ;

    /* the lock must be usable before the hardware starts ticking */
    spin_init(&pit_lock);

    /* load counter 0 in mode 3 (square wave) with the computed divisor */
    pit_set_counter(0, PIT_CW_MODE3 >> 1, divisor);

    return 0;
}
/*
 * cpu_init: populate a device_t describing a CPU.
 *
 * Fills in the identity fields, marks the device as never bound to an
 * interrupt line, hooks up the CPU irq handler, and allocates the
 * per-CPU table of registered irq actions.
 */
void cpu_init(device_t *dev, void * base, char *name)
{
	dev->type = DEV_CPU;
	dev->name = name;
	dev->base = base;
	dev->irq = -1;	/* a CPU device is never bound to an irq line */

	spin_init(&dev->lock);

	dev->action.dev = dev;
	dev->action.irq_handler = &cpu_irq_handler;

	/* per-CPU table of irq action pointers */
	dev->data = kmalloc(sizeof(irq_action_t *) * CONFIG_CPU_MAX_IRQ_NR);
}
/*
 * drm_dma_setup: allocate and initialise the DMA bookkeeping for a DRM
 * device.  Returns 0 on success or ENOMEM if the allocation fails
 * (M_NULLOK permits kmalloc to return NULL instead of blocking forever).
 */
int drm_dma_setup(struct drm_device *dev)
{
	dev->dma = kmalloc(sizeof(*dev->dma), M_DRM, M_WAITOK | M_NULLOK | M_ZERO);
	if (dev->dma == NULL)
		return ENOMEM;

	spin_init(&dev->dma_lock, "drmdma_lock");

	return 0;
}
/*
 * lockinit: prepare a lockmgr lock for first use.  Required before any
 * other lock operation.
 *
 * wmesg is the sleep message shown while waiting, timo the sleep
 * timeout, and flags the caller-supplied flags (only the bits in
 * LK_EXTFLG_MASK are retained).
 */
void
lockinit(struct lock *lkp, const char *wmesg, int timo, int flags)
{
	spin_init(&lkp->lk_spinlock);
	lkp->lk_lockholder = LK_NOTHREAD;
	lkp->lk_timo = timo;
	lkp->lk_wmesg = wmesg;
	lkp->lk_exclusivecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_sharecount = 0;
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
}
/* Allocate and initialize walreceiver-related shared memory.
 *
 * Attaches to (or creates) the "Wal Receiver Ctl" shared-memory segment;
 * the contents are only initialised on the first attach. */
void wal_recv_shm_init(void)
{
	bool found;

	WalRcv = (WalRcvData *) shm_init("Wal Receiver Ctl", WalRcvShmemSize(), &found);

	if (!found) {
		/* First time through, so initialize */
		pg_memset(WalRcv, 0, WalRcvShmemSize());
		WalRcv->walRcvState = WALRCV_STOPPED;
		spin_init(&WalRcv->mutex);
	}
}
/*
 * Callbacks from machine-dependant startup code (e.g. init386) to set
 * up low level entities related to cpu #0's globaldata.
 *
 * Called from very low level boot code.
 */
void
mi_proc0init(struct globaldata *gd, struct user *proc0paddr)
{
	/* hand-build thread0 on the statically provided proc0 address */
	lwkt_init_thread(&thread0, proc0paddr, LWKT_THREAD_STACK, 0, gd);
	lwkt_set_comm(&thread0, "thread0");
	RB_INIT(&proc0.p_lwp_tree);
	spin_init(&proc0.p_spin);
	lwkt_token_init(&proc0.p_token, "iproc");
	proc0.p_lasttid = 0;	/* +1 = next TID */
	/* wire lwp0 into proc0 and vice versa */
	lwp_rb_tree_RB_INSERT(&proc0.p_lwp_tree, &lwp0);
	lwp0.lwp_thread = &thread0;
	lwp0.lwp_proc = &proc0;
	proc0.p_usched = usched_init();
	/* lwp0 may run on any cpu */
	lwp0.lwp_cpumask = (cpumask_t)-1;
	lwkt_token_init(&lwp0.lwp_token, "lwp_token");
	spin_init(&lwp0.lwp_spin);
	varsymset_init(&proc0.p_varsymset, NULL);
	/* finally mark thread0 runnable and schedule it on this cpu */
	thread0.td_flags |= TDF_RUNNING;
	thread0.td_proc = &proc0;
	thread0.td_lwp = &lwp0;
	thread0.td_switch = cpu_lwkt_switch;
	lwkt_schedule_self(curthread);
}
/* common routine for creating processes
 *
 * Allocates and zeroes a process_t, copies the optional name and args,
 * initialises its lock, lists and ids, and returns it in PROCESS_STATE_BIRTH.
 * Returns NULL on any allocation failure (all partial state is freed).
 */
static process_t *create_process_common(const char *name, const char *args)
{
    process_t *proc;

    /* allocate process structure */
    proc = (process_t *)kmalloc(sizeof(process_t));
    if(!proc)
        return NULL;

    /* init allocated memory with zeroes */
    memset(proc, 0, sizeof(process_t));

    /* if process has name - copy it into structure field */
    if(name) {
        proc->name = kstrdup(name);
        if(!proc->name)
            goto error;
    }

    /* if arguments passed to process - make a copy */
    if(args) {
        proc->args = kstrdup(args);
        if(!proc->args)
            goto error;
    }

    /* init other fields */
    spin_init(&proc->lock);
    proc->state = PROCESS_STATE_BIRTH;

    /* init lists */
    xlist_init(&proc->threads);
    xlist_init(&proc->children);
    xlist_init(&proc->semaphores);

    /* assign process id */
    proc->id = get_next_process_id();
    proc->gid = proc->id; /* process is a group leader by default */

    /* return result to caller */
    return proc;

error:
    /* release memory on error; struct was zeroed, so unset fields are NULL */
    if(proc->name)
        kfree(proc->name);
    if(proc->args)
        kfree(proc->args);
    kfree(proc);

    return NULL; /* failed to create process structure */
}
/*
 * ata_usbchannel_attach: attach an ATA channel that lives behind a USB
 * mass-storage bridge.  Sets up the softc, the transaction hooks and
 * the channel locks, then probes the (single) ATAPI master device.
 * Always returns 0.
 */
static int
ata_usbchannel_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    /* transactions are routed through the USB-specific begin/end hooks;
     * there is no direct status/command register access over USB */
    ch->hw.begin_transaction = ata_usbchannel_begin_transaction;
    ch->hw.end_transaction = ata_usbchannel_end_transaction;
    ch->hw.status = NULL;
    ch->hw.command = NULL;
    spin_init(&ch->state_mtx);
    spin_init(&ch->queue_mtx);
    ata_queue_init(ch);

    /* XXX SOS reset the controller HW, the channel and device(s) */
    /* ATA_RESET(dev); */

    /* probe and attach device on this channel */
    ch->devices = ATA_ATAPI_MASTER;
    ata_identify(dev);
    return 0;
}
/**
 * @brief Initialize a message event.
 *
 * Sets up a small ring of message pointers (4 slots, initially empty)
 * plus the spinlock, mutex and condition variable protecting it, and
 * marks the event loop as running.
 *
 * @param event Event.
 */
void event_init(Event *event)
{
	int i;

	/* empty ring: first == end */
	event->size = 4;
	event->first = 0;
	event->end = 0;
	event->ring = (char**) malloc(event->size * sizeof (char*));
	if (event->ring == NULL)
		fatal_error("cannot allocate event buffers\n");
	for (i = 0; i < event->size; ++i) event->ring[i] = NULL;

	spin_init(event);
	lock_init(event);
	condition_init(event);
	event->loop = true;
}
/*
 * AcpiOsCreateLock: allocate and initialise a spinlock handle for
 * ACPICA.  Returns AE_BAD_PARAMETER for a NULL out-pointer, AE_OK
 * otherwise (the M_INTWAIT allocation blocks rather than fail).
 */
ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
    ACPI_SPINLOCK spin;

    if (OutHandle == NULL)
	return (AE_BAD_PARAMETER);
    spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
    spin_init(&spin->lock);
#ifdef ACPI_DEBUG_LOCKS
    /* ownership tracking used only by the lock-debugging build */
    spin->owner = NULL;
    spin->func = "";
    spin->line = 0;
#endif
    *OutHandle = spin;
    return (AE_OK);
}
/*
 * ring_buffer_create: allocate a ring buffer of `size` bytes together
 * with its spinlock and reader/writer wait queues.
 *
 * NOTE(review): neither malloc() result is checked for NULL — confirm
 * the allocator cannot fail in this environment.
 */
ring_buffer_t * ring_buffer_create(size_t size) {
	ring_buffer_t * out = malloc(sizeof(ring_buffer_t));
	out->buffer = malloc(size);
	/* empty buffer: both cursors at 0 */
	out->write_ptr = 0;
	out->read_ptr = 0;
	out->size = size;

	spin_init(out->lock);

	out->internal_stop = 0;

	out->wait_queue_readers = list_create();
	out->wait_queue_writers = list_create();

	return out;
}
/*
 * Initialise access to PCI configuration space
 *
 * One-time setup of the pcicfg spinlock, then selection of config
 * mechanism 1.  Probes a few known Intel chipsets that support
 * memory-mapped (PCIe extended) config cycles and, if found, opens the
 * PCIe config window.  Always returns 1.
 */
int
pci_cfgregopen(void)
{
	static int inited = 0;
	uint64_t pciebar;
	uint16_t vid, did;

	if (!inited) {
		inited = 1;
		spin_init(&pcicfg_spin);
	}

	/* already configured by an earlier call? */
	if (cfgmech != CFGMECH_NONE)
		return 1;
	cfgmech = CFGMECH_1;

	/*
	 * Grope around in the PCI config space to see if this is a
	 * chipset that is capable of doing memory-mapped config cycles.
	 * This also implies that it can do PCIe extended config cycles.
	 */

	/* Check for supported chipsets */
	vid = pci_cfgregread(0, 0, 0, PCIR_VENDOR, 2);
	did = pci_cfgregread(0, 0, 0, PCIR_DEVICE, 2);
	switch (vid) {
	case 0x8086:
		switch (did) {
		case 0x3590:
		case 0x3592:
			/* Intel 7520 or 7320 */
			pciebar = pci_cfgregread(0, 0, 0, 0xce, 2) << 16;
			pcie_cfgregopen(pciebar, 0, 255);
			break;
		case 0x2580:
		case 0x2584:
		case 0x2590:
			/* Intel 915, 925, or 915GM */
			pciebar = pci_cfgregread(0, 0, 0, 0x48, 4);
			pcie_cfgregopen(pciebar, 0, 255);
			break;
		}
	}
	return 1;
}
/*
 * lwkt_initport_spin()
 *
 * Initialize a port for use with descriptors that might be accessed
 * via multiple LWPs, processes, or threads.  Has somewhat more
 * overhead then thread ports.  Without an owning thread, dropped
 * messages panic instead of being handled.
 */
void
lwkt_initport_spin(lwkt_port_t port, thread_t td)
{
	int (*dmsgfn)(lwkt_port_t, lwkt_msg_t);

	dmsgfn = (td == NULL) ? lwkt_panic_dropmsg : lwkt_spin_dropmsg;

	_lwkt_initport(port,
		       lwkt_spin_getport,
		       lwkt_spin_putport,
		       lwkt_spin_waitmsg,
		       lwkt_spin_waitport,
		       lwkt_spin_replyport,
		       dmsgfn);
	spin_init(&port->mpu_spin);
	port->mpu_td = td;
}
/*
 * Initialize proc0's plimit structure. All later plimit structures
 * are inherited through fork.
 */
void
plimit_init0(struct plimit *limit)
{
	int i;
	rlim_t lim;

	/* start from "no limit" everywhere ... */
	for (i = 0; i < RLIM_NLIMITS; ++i) {
		limit->pl_rlimit[i].rlim_cur = RLIM_INFINITY;
		limit->pl_rlimit[i].rlim_max = RLIM_INFINITY;
	}
	/* ... then clamp the resources with real system-wide caps */
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = maxfiles;
	limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = maxproc;
	limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	/* RSS / memlock limits derived from currently free physical pages */
	lim = ptoa((rlim_t)vmstats.v_free_count);
	limit->pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	limit->p_cpulimit = RLIM_INFINITY;
	limit->p_refcnt = 1;
	spin_init(&limit->p_spin);
}
/* Note: if compiling with Win32 be sure to use /subsystem:windows and * /entry:mainCRTStartup */ int main(int argc, char *argv[]) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH); glutInitWindowPosition(0,0); glutInitWindowSize(INIT_WIN_W, INIT_WIN_H); disp_win = glutCreateWindow("QuasiPseudoSorta FlightSim"); glutDisplayFunc(display); glutKeyboardFunc(keyfunc); glutMouseFunc(mouse_button); glutMotionFunc(track_motion); glutReshapeFunc(reshape); glutVisibilityFunc(vis); /* don't do idle things if they aren't seen */ set_initial_size(INIT_WIN_W, INIT_WIN_H, INIT_AE_WIN_W, INIT_AE_WIN_H); init_look(); spin_init(); scene_init(); glutSetWindow(disp_win); glutMainLoop(); return 0; /* we'll never get here, of course */ }
/*
 * atausb_attach: attach routine for a USB mass-storage device driven as
 * ATA-over-USB.
 *
 * Initialises the softc, identifies the transport protocol and command
 * subclass (only Bulk-Only ATAPI/SCSI is accepted), locates and opens
 * the bulk-in/bulk-out (and optional interrupt) pipes, allocates the
 * transfer handles, queries GET_MAX_LUN, and adds one "ata" child
 * device per LUN.  Returns 0 on success, ENXIO on any failure (after
 * calling atausb_detach to undo partial setup).
 */
static int
atausb_attach(device_t dev)
{
    struct atausb_softc *sc = device_get_softc(dev);
    struct usb_attach_arg *uaa = device_get_ivars(dev);
    usb_interface_descriptor_t *id;
    usb_endpoint_descriptor_t *ed;
    usbd_device_handle udev;
    usb_device_request_t request;
    char devinfo[1024], *proto, *subclass;
    u_int8_t maxlun;
    int err, i;

    /* basic softc setup; endpoints start at the -1 "not found" sentinel */
    sc->dev = dev;
    usbd_devinfo(uaa->device, 0, devinfo);
    device_set_desc_copy(dev, devinfo);
    sc->bulkin = sc->bulkout = sc->bulkirq = -1;
    sc->bulkin_pipe = sc->bulkout_pipe= sc->bulkirq_pipe = NULL;
    sc->iface = uaa->iface;
    sc->ifaceno = uaa->ifaceno;
    sc->maxlun = 0;
    sc->timeout = 5000;
    sc->locked_ch = NULL;
    sc->restart_ch = NULL;
    spin_init(&sc->locked_mtx);

    /* decode transport protocol and command subclass for the banner */
    id = usbd_get_interface_descriptor(sc->iface);
    switch (id->bInterfaceProtocol) {
    case UIPROTO_MASS_BBB:
    case UIPROTO_MASS_BBB_OLD:
	proto = "Bulk-Only";
	break;
    case UIPROTO_MASS_CBI:
	proto = "CBI";
	break;
    case UIPROTO_MASS_CBI_I:
	proto = "CBI with CCI";
	break;
    default:
	proto = "Unknown";
    }
    switch (id->bInterfaceSubClass) {
    case UISUBCLASS_RBC:
	subclass = "RBC";
	break;
    case UISUBCLASS_QIC157:
    case UISUBCLASS_SFF8020I:
    case UISUBCLASS_SFF8070I:
	subclass = "ATAPI";
	break;
    case UISUBCLASS_SCSI:
	subclass = "SCSI";
	break;
    case UISUBCLASS_UFI:
	subclass = "UFI";
	break;
    default:
	subclass = "Unknown";
    }
    device_printf(dev, "using %s over %s\n", subclass, proto);
    /* only Bulk-Only ATAPI/SCSI devices are supported */
    if (strcmp(proto, "Bulk-Only") ||
	(strcmp(subclass, "ATAPI") && strcmp(subclass, "SCSI")))
	return ENXIO;

    /* walk the interface endpoints looking for bulk-in, bulk-out and,
     * for CBI-with-CCI, the interrupt endpoint */
    for (i = 0 ; i < id->bNumEndpoints ; i++) {
	if (!(ed = usbd_interface2endpoint_descriptor(sc->iface, i))) {
	    device_printf(sc->dev, "could not read endpoint descriptor\n");
	    return ENXIO;
	}
	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
	    (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
	    sc->bulkin = ed->bEndpointAddress;
	}
	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
	    (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
	    sc->bulkout = ed->bEndpointAddress;
	}
	if (id->bInterfaceProtocol == UIPROTO_MASS_CBI_I &&
	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
	    (ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT) {
	    sc->bulkirq = ed->bEndpointAddress;
	}
    }

    /* check whether we found at least the endpoints we need */
    /* NOTE(review): the endpoints are initialised to -1 above, but this
     * test only catches 0 — an endpoint that was never found (still -1)
     * passes the check; confirm whether `== -1` comparisons were meant. */
    if (!sc->bulkin || !sc->bulkout) {
	device_printf(sc->dev, "needed endpoints not found (%d,%d)\n",
		      sc->bulkin, sc->bulkout);
	atausb_detach(dev);
	return ENXIO;
    }

    /* open the pipes */
    if (usbd_open_pipe(sc->iface, sc->bulkout,
		       USBD_EXCLUSIVE_USE, &sc->bulkout_pipe)) {
	device_printf(sc->dev, "cannot open bulkout pipe (%d)\n", sc->bulkout);
	atausb_detach(dev);
	return ENXIO;
    }
    if (usbd_open_pipe(sc->iface, sc->bulkin,
		       USBD_EXCLUSIVE_USE, &sc->bulkin_pipe)) {
	device_printf(sc->dev, "cannot open bulkin pipe (%d)\n", sc->bulkin);
	atausb_detach(dev);
	return ENXIO;
    }
    if (id->bInterfaceProtocol == UIPROTO_MASS_CBI_I) {
	if (usbd_open_pipe(sc->iface, sc->bulkirq,
			   USBD_EXCLUSIVE_USE, &sc->bulkirq_pipe)) {
	    device_printf(sc->dev, "cannot open bulkirq pipe (%d)\n",
			  sc->bulkirq);
	    atausb_detach(dev);
	    return ENXIO;
	}
    }
    sc->state = ATAUSB_S_ATTACH;

    /* alloc needed number of transfer handles */
    for (i = 0; i < ATAUSB_T_MAX; i++) {
	sc->transfer[i] = usbd_alloc_xfer(uaa->device);
	if (!sc->transfer[i]) {
	    device_printf(sc->dev, "out of memory\n");
	    atausb_detach(dev);
	    return ENXIO;
	}
    }

    /* driver is ready to process requests here */
    sc->state = ATAUSB_S_IDLE;

    /* get number of devices so we can add matching channels */
    usbd_interface2device_handle(sc->iface, &udev);
    request.bmRequestType = UT_READ_CLASS_INTERFACE;
    request.bRequest = 0xfe; /* GET_MAX_LUN; */
    USETW(request.wValue, 0);
    USETW(request.wIndex, sc->ifaceno);
    USETW(request.wLength, sizeof(maxlun));
    switch ((err = usbd_do_request(udev, &request, &maxlun))) {
    case USBD_NORMAL_COMPLETION:
	if (bootverbose)
	    device_printf(sc->dev, "maxlun=%d\n", maxlun);
	sc->maxlun = maxlun;
	break;
    default:
	/* GET_MAX_LUN is optional; fall back to the single LUN 0 */
	if (bootverbose)
	    device_printf(sc->dev, "get maxlun not supported %s\n",
			  usbd_errstr(err));
    }

    /* ata channels are children to this USB control device */
    for (i = 0; i <= sc->maxlun; i++) {
	/* XXX TGEN devclass_find_free_unit() implementation */
	int freeunit = 2;

	while (freeunit < devclass_get_maxunit(ata_devclass) &&
	       devclass_get_device(ata_devclass, freeunit) != NULL)
	    freeunit++;
	if (!device_add_child(sc->dev, "ata", freeunit)) {
	    device_printf(sc->dev, "failed to attach ata child device\n");
	    atausb_detach(dev);
	    return ENXIO;
	}
    }
    bus_generic_attach(sc->dev);

    return 0;
}
/*
 * Function name:	twa_attach
 * Description:	Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			(to the Common Layer) to initialize ctlr, and to
 *			attach to CAM.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		irq_flags;
	TW_INT32		error;

	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	tw_osli_dbg_dprintf(3, sc, "entered");
	sc->device_id = pci_get_device(dev);

	/* Initialize the mutexes right here. */
	sc->io_lock = &(sc->io_lock_handle);
	spin_init(sc->io_lock, "twa_iolock");
	sc->q_lock = &(sc->q_lock_handle);
	spin_init(sc->q_lock, "twa_qlock");
	sc->sim_lock = &(sc->sim_lock_handle);
	lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE);

	/* expose the driver version string via sysctl */
	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	pci_enable_busmaster(dev);

	/* Allocate the PCI register window. */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
		&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	sc->irq_type = pci_alloc_1intr(sc->bus_dev, twa_msi_enable,
		&sc->irq_res_id, &irq_flags);
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
		&(sc->irq_res_id), 0, ~0, 1, irq_flags)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device. */
	sc->ctrl_dev = make_dev(&twa_ops, device_get_unit(sc->bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	/* arm the periodic watchdog (two callouts used alternately) */
	sc->watchdog_index = 0;
	callout_init_mp(&(sc->watchdog_callout[0]));
	callout_init_mp(&(sc->watchdog_callout[1]));
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);

	return(0);
}