Example 1
/*
 * Create the OS-specific port helper thread and per-port lock.
 */
void
ahci_os_start_port(struct ahci_port *ap)
{
	char name[16];

	atomic_set_int(&ap->ap_signal, AP_SIGF_INIT | AP_SIGF_THREAD_SYNC);
	lockinit(&ap->ap_lock, "ahcipo", 0, 0);
	lockinit(&ap->ap_sim_lock, "ahcicam", 0, LK_CANRECURSE);
	lockinit(&ap->ap_sig_lock, "ahport", 0, 0);
	sysctl_ctx_init(&ap->sysctl_ctx);
	ksnprintf(name, sizeof(name), "%d", ap->ap_num);
	ap->sysctl_tree = SYSCTL_ADD_NODE(&ap->sysctl_ctx,
				SYSCTL_CHILDREN(ap->ap_sc->sysctl_tree),
				OID_AUTO, name, CTLFLAG_RD, 0, "");

	if ((ap->ap_sc->sc_cap & AHCI_REG_CAP_SALP) &&
	    (ap->ap_sc->sc_cap & (AHCI_REG_CAP_PSC | AHCI_REG_CAP_SSC))) {
		SYSCTL_ADD_PROC(&ap->sysctl_ctx,
			SYSCTL_CHILDREN(ap->sysctl_tree), OID_AUTO,
			"link_pwr_mgmt", CTLTYPE_INT | CTLFLAG_RW, ap, 0,
			ahci_sysctl_link_pwr_mgmt, "I",
			"Link power management policy "
			"(0 = disabled, 1 = medium, 2 = aggressive)");
		SYSCTL_ADD_PROC(&ap->sysctl_ctx,
			SYSCTL_CHILDREN(ap->sysctl_tree), OID_AUTO,
			"link_pwr_state", CTLTYPE_STRING | CTLFLAG_RD, ap, 0,
			ahci_sysctl_link_pwr_state, "A",
			"Link power management state");

	}

	kthread_create(ahci_port_thread, ap, &ap->ap_thread,
		       "%s", PORTNAME(ap));
}
Example 2
/*
 * Create the OS-specific port helper thread and per-port lock.
 */
void
sili_os_start_port(struct sili_port *ap)
{
	atomic_set_int(&ap->ap_signal, AP_SIGF_INIT);
	lockinit(&ap->ap_lock, "silipo", 0, LK_CANRECURSE);
	lockinit(&ap->ap_sim_lock, "silicam", 0, LK_CANRECURSE);
	lockinit(&ap->ap_sig_lock, "siport", 0, 0);
	kthread_create(sili_port_thread, ap, &ap->ap_thread,
		       "%s", PORTNAME(ap));
}
Example 3
int
tas(int *p)
{
	int *hwsem;
	int hash;

retry:
	switch(arch) {
	case 0:
		lockinit();
		goto retry;
	case MAGNUM:
	case MAGNUMII:
		return C_3ktas(p);
	case R4K:
		return C_4ktas(p);
	case POWER:
		/* Use low order lock bits to generate hash */
		hash = ((int)p/sizeof(int)) & (Semperpg-1);
		hwsem = (int*)Lockaddr+hash;

		if((*hwsem & 1) == 0) {
			if(*p)
				*hwsem = 0;
			else {
				*p = 1;
				*hwsem = 0;
				return 0;
			}
		}
		return 1;
	}
	return -1;	/* not reached */
}
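A worked illustration of the POWER-case hash above, with invented values (assume sizeof(int) == 4, Semperpg == 64, and a lock word at p == 0x80001234):

/*
 * hash = ((int)p / sizeof(int)) & (Semperpg - 1)
 *      = (0x80001234 >> 2) & 63
 *      = 0x2000048d & 0x3f
 *      = 13
 *
 * so this lock word is guarded by hardware semaphore Lockaddr[13], and
 * neighbouring lock words spread across the other semaphores on the page.
 */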
Example 4
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	simple_lock_init(&gdt_simplelock);
	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
Example 5
/**
 * radeon_irq_kms_init - init driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

	TASK_INIT(&rdev->hotplug_work, 0, radeon_hotplug_work_func, rdev);
	TASK_INIT(&rdev->audio_work, 0, r600_audio_update_hdmi, rdev);

	lockinit(&rdev->irq.lock, "drm__radeon_device__irq__lock", 0, LK_CANRECURSE);
	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r) {
		return r;
	}
	/* enable msi */
	rdev->msi_enabled = rdev->ddev->msi_enabled;

	rdev->irq.installed = true;
	DRM_UNLOCK(rdev->ddev);
	r = drm_irq_install(rdev->ddev);
	DRM_LOCK(rdev->ddev);
	if (r) {
		rdev->irq.installed = false;
		return r;
	}
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
Example 6
static int
amdsmb_attach(device_t dev)
{
	struct amdsmb_softc *amdsmb_sc = device_get_softc(dev);

	/* Allocate I/O space */
	amdsmb_sc->rid = PCIR_BAR(0);

	amdsmb_sc->res = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
		&amdsmb_sc->rid, RF_ACTIVE);

	if (amdsmb_sc->res == NULL) {
		device_printf(dev, "could not map i/o space\n");
		return (ENXIO);
	}

	lockinit(&amdsmb_sc->lock, "amdsmb", 0, LK_CANRECURSE);

	/* Allocate a new smbus device */
	amdsmb_sc->smbus = device_add_child(dev, "smbus", -1);
	if (!amdsmb_sc->smbus) {
		amdsmb_detach(dev);
		return (EINVAL);
	}

	bus_generic_attach(dev);

	return (0);
}
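The matching teardown is not shown above; a minimal detach-side sketch, assuming DragonFly's lockuninit() and ignoring the rest of the detach work (child removal, resource release):

static void
amdsmb_lock_teardown_sketch(struct amdsmb_softc *amdsmb_sc)
{
	/* Sketch only: undo the lockinit() performed in amdsmb_attach(). */
	lockuninit(&amdsmb_sc->lock);
}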
Example 7
/*
 * Initialize inode hash table.
 */
void
ntfs_nthashinit()
{
	lockinit(&ntfs_hashlock, PINOD, "ntfs_nthashlock", 0, 0);
	ntfs_nthashtbl = hashinit(desiredvnodes, M_NTFSNTHASH, &ntfs_nthash);
	mtx_init(&ntfs_nthash_mtx, "ntfs nthash", NULL, MTX_DEF);
}
Example 8
/*------------------------------------------------------------------------*
 *	usb_bus_mem_alloc_all - factored out code
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_bus_mem_alloc_all(struct usb_bus *bus, bus_dma_tag_t dmat,
    usb_bus_mem_cb_t *cb)
{
	bus->alloc_failed = 0;

	lockinit(&bus->bus_lock, "USB bus mem", 0, LK_CANRECURSE);

	usb_callout_init_mtx(&bus->power_wdog,
	    &bus->bus_lock, 0);

	TAILQ_INIT(&bus->intr_q.head);

#if USB_HAVE_BUSDMA
	usb_dma_tag_setup(bus->dma_parent_tag, bus->dma_tags,
	    dmat, &bus->bus_lock, NULL, 32, USB_BUS_DMA_TAG_MAX);
#endif
	if ((bus->devices_max > USB_MAX_DEVICES) ||
	    (bus->devices_max < USB_MIN_DEVICES) ||
	    (bus->devices == NULL)) {
		DPRINTFN(0, "Devices field has not been "
		    "initialised properly\n");
		bus->alloc_failed = 1;		/* failure */
	}
#if USB_HAVE_BUSDMA
	if (cb) {
		cb(bus, &usb_bus_mem_alloc_all_cb);
	}
#endif
	if (bus->alloc_failed) {
		usb_bus_mem_free_all(bus, cb);
	}
	return (bus->alloc_failed);
}
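A hedged sketch of the calling convention implied by the return value above; the device handle and DMA tag are placeholders rather than code from a real host controller driver:

static int
example_hc_bus_mem_setup(device_t dev, struct usb_bus *bus, bus_dma_tag_t dmat)
{
	/* A non-zero return means one of the allocations above failed. */
	if (usb_bus_mem_alloc_all(bus, dmat, NULL)) {
		device_printf(dev, "could not allocate USB bus memory\n");
		return (ENOMEM);
	}
	return (0);
}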
Example 9
/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
void
nfs_nhinit(void)
{
	nfsnode_objcache = objcache_create_simple(M_NFSNODE, sizeof(struct nfsnode));
	nfsnodehashtbl = hashinit(desiredvnodes, M_NFSHASH, &nfsnodehash);
	lockinit(&nfsnhash_lock, "nfsnht", 0, 0);
}
Example 10
/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * for the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	lockinit(&mgr->vm_lock, "drmvml", 0, LK_CANRECURSE);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
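A minimal usage sketch of the initializer above; the offsets are invented and only illustrate that both arguments are page numbers, not byte counts:

static struct drm_vma_offset_manager example_mgr;

static void
example_vma_mgr_setup(void)
{
	/* Manage a 1 GiB window starting 256 MiB into the offset space. */
	drm_vma_offset_manager_init(&example_mgr,
	    0x10000000UL >> PAGE_SHIFT,		/* page_offset, in pages */
	    0x40000000UL >> PAGE_SHIFT);	/* size, in pages */
}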
Example 11
static void
linker_init(void* arg)
{
    lockinit(&lock, PVM, "klink", 0, 0);
    TAILQ_INIT(&classes);
    TAILQ_INIT(&files);
}
Example 12
static void
ucom_init(void *arg)
{
	DPRINTF("\n");
	kprintf("ucom init\n");
	ucom_unrhdr = new_unrhdr(0, UCOM_UNIT_MAX - 1, NULL);
	lockinit(&ucom_lock, "UCOM LOCK", 0, 0);
}
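A sketch of how the unit-number header initialized above is typically consumed later; illustrative only, not the driver's actual code:

static int
example_ucom_unit_cycle(void)
{
	int unit;

	/* Sketch: reserve a ucom unit number, then give it back. */
	unit = alloc_unr(ucom_unrhdr);
	if (unit == -1)
		return (ENOSPC);
	/* ... create the per-unit device nodes here ... */
	free_unr(ucom_unrhdr, unit);
	return (0);
}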
Example 13
/*
 * Initialize inode hash table.
 */
void
ntfs_nthashinit()
{
	lockinit(&ntfs_hashlock, PINOD, "ntfs_nthashlock", 0, 0);
	ntfs_nthashtbl = HASHINIT(desiredvnodes, M_NTFSNTHASH, M_WAITOK,
	    &ntfs_nthash);
	simple_lock_init(&ntfs_nthash_slock);
}
Example 14
void
osi_AttachVnode(struct vcache *avc, int seq) {
    ReleaseWriteLock(&afs_xvcache);
    AFS_GUNLOCK();
    afs_obsd_getnewvnode(avc);	/* includes one refcount */
    AFS_GLOCK();
    ObtainWriteLock(&afs_xvcache,337);
    lockinit(&avc->rwlock, PINOD, "vcache", 0, 0);
}
Example 15
static int
mount_init(void *mem, int size, int flags)
{
	struct mount *mp;

	mp = (struct mount *)mem;
	mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
	lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
	return (0);
}
Example 16
static boolean_t
makepark(void *obj, void *privdata, int flags)
{
	struct puffs_msgpark *park = obj;

	lockinit(&park->park_mtx, "puffs park_mtx", 0, 0);
	cv_init(&park->park_cv, "puffsrpl");

	return TRUE;
}
Example 17
void
ki2c_attach(struct device *parent, struct device *self, void *aux)
{
	struct ki2c_softc *sc = (struct ki2c_softc *)self;
	struct confargs *ca = aux;
	int node = ca->ca_node;
	int rate, count = 0;
	char name[32];

	ca->ca_reg[0] += ca->ca_baseaddr;

	if (OF_getprop(node, "AAPL,i2c-rate", &rate, 4) != 4) {
		printf(": cannot get i2c-rate\n");
		return;
	}
	if (OF_getprop(node, "AAPL,address", &sc->sc_paddr, 4) != 4) {
		printf(": unable to find i2c address\n");
		return;
	}
	if (OF_getprop(node, "AAPL,address-step", &sc->sc_regstep, 4) != 4) {
		printf(": unable to find i2c address step\n");
		return;
	}
	sc->sc_reg = mapiodev(sc->sc_paddr, (DATA+1)*sc->sc_regstep);

	printf("\n");

	ki2c_writereg(sc, STATUS, 0);
	ki2c_writereg(sc, ISR, 0);
	ki2c_writereg(sc, IER, 0);

	ki2c_setmode(sc, I2C_STDSUBMODE, 0);
	ki2c_setspeed(sc, I2C_100kHz);		/* XXX rate */

	lockinit(&sc->sc_buslock, PZERO, sc->sc_dev.dv_xname, 0, 0);
	ki2c_writereg(sc, IER,I2C_INT_DATA|I2C_INT_ADDR|I2C_INT_STOP);

	for (node = OF_child(ca->ca_node); node; node = OF_peer(node)) {
		if (OF_getprop(node, "name", &name, sizeof name) > 0) {
			if (strcmp(name, "i2c-bus") == 0) {
				ki2c_attach_bus(sc, &sc->sc_bus[count], node);
				if (++count >= KI2C_MAX_BUSSES)
					break;
			}
		}
	}

	/* 
	 * If we didn't find any i2c-bus nodes, there is only a single
	 * i2c bus.
	 */

	if (count == 0)
		ki2c_attach_bus(sc, &sc->sc_bus[0], ca->ca_node);
}
Example 18
int
ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
	return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    lockinit(&ch->state_mtx, "ataattach_state", 0, 0);
    lockinit(&ch->queue_mtx, "ataattach_queue", 0, 0);
    ata_queue_init(ch);

    /* reset the controller HW, the channel and device(s) */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
	tsleep(&error, 0, "ataatch", 1);
    ATA_RESET(dev);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
				       RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
	device_printf(dev, "unable to allocate interrupt\n");
	return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS,
				(driver_intr_t *)ata_interrupt, ch, &ch->ih,
				NULL))) {
	device_printf(dev, "unable to setup interrupt\n");
	return error;
    }

    /* probe and attach devices on this channel unless we are in early boot */
    ata_identify(dev);
    return 0;
}
Example 19
/*
 * Obtain a new vnode.  The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * We always allocate the vnode.  Attempting to recycle existing vnodes
 * here can lead to numerous deadlocks, particularly with softupdates.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Do not flag for synchronous recyclement unless there are enough
	 * freeable vnodes to recycle and the number of vnodes has
	 * significantly exceeded our target.  We want the normal vnlru
	 * process to handle the cleaning (at 9/10's) before we are forced
	 * to flag it here at 11/10's for userexit path processing.
	 */
	if (numvnodes >= maxvnodes * 11 / 10 &&
	    cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
		struct thread *td = curthread;
		if (td->td_lwp)
			atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
	}

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;

	vp = kmalloc(sizeof(*vp), M_VNODE, M_ZERO | M_WAITOK);

	lwkt_token_init(&vp->v_token, "vnode");
	lockinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	spin_init(&vp->v_spin, "allocvnode");

	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
	atomic_add_int(&numvnodes, 1);
	vp->v_refcnt = 1;
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_state = VS_CACHED;
	_vactivate(vp);

	return (vp);
}
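To make the 11/10 and 5/10 thresholds above concrete (numbers invented for illustration):

/*
 * With maxvnodes == 100000, the LWP_MP_VNLRU flag is only set once
 * numvnodes reaches 110000 (11/10) *and* at least 50000 vnodes (5/10)
 * are cached or inactive, i.e. actually freeable; below that, the
 * normal vnlru scan, which starts around 9/10, is left to do the
 * recycling on its own.
 */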
Example 20
/*
 * Initialize per-FS structures supporting extended attributes.  Do not
 * start extended attributes yet.
 */
void
ufs_extattr_uepm_init(struct ufs_extattr_per_mount *uepm)
{

	uepm->uepm_flags = 0;

	LIST_INIT(&uepm->uepm_list);
	/* XXX is PVFS right, here? */
	lockinit(&uepm->uepm_lock, PVFS, "extattr", 0, 0);
	uepm->uepm_flags |= UFS_EXTATTR_UEPM_INITIALIZED;
}
Example 21
void drm_global_init(void)
{
	int i;

	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
		struct drm_global_item *item = &glob[i];
		lockinit(&item->mutex, "drmgi", 0, LK_CANRECURSE);
		item->object = NULL;
		item->refcount = 0;
	}
}
Example 22
static int
sysvipc_register(cdev_t dev)
{
	struct sysvipc_softc *sysv;
	
	if (dev->si_drv1 != NULL)
		return (EEXIST);

	kprintf("aloc sysv\n");
	dev->si_drv1 = sysv = (struct sysvipc_softc *)
		kmalloc(sizeof(*sysv), M_TEMP,
				M_ZERO | M_WAITOK);
	sysv->sysvipc_dev = dev;
	TAILQ_INIT(&sysv->req_list);
	TAILQ_INIT(&sysv->consumed_list);
	lockinit(&sysv->req_mtx, "sysvlkr", 0, LK_CANRECURSE);
	lockinit(&sysv->consumed_mtx, "sysvlkc", 0, LK_CANRECURSE);
	sysv->sysvipc_daemon_thread = curthread;

	return 0;
}
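A self-contained sketch of the lock-plus-queue pattern the registration above sets up; the names are invented, and DragonFly's four-argument lockinit() and two-argument lockmgr(), as used throughout these examples, are assumed:

struct example_req {
	TAILQ_ENTRY(example_req) link;
};

static TAILQ_HEAD(, example_req) example_req_list =
    TAILQ_HEAD_INITIALIZER(example_req_list);
static struct lock example_req_lock;

static void
example_req_queue_init(void)
{
	lockinit(&example_req_lock, "exreq", 0, LK_CANRECURSE);
}

static struct example_req *
example_req_pop(void)
{
	struct example_req *req;

	/* Pop one entry off the list while holding the lock exclusively. */
	lockmgr(&example_req_lock, LK_EXCLUSIVE);
	req = TAILQ_FIRST(&example_req_list);
	if (req != NULL)
		TAILQ_REMOVE(&example_req_list, req, link);
	lockmgr(&example_req_lock, LK_RELEASE);

	return (req);
}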
Example 23
/*
 * Initialize table_head structures, I'm trying to keep this structure as
 * opaque as possible.
 */
void
dm_table_head_init(dm_table_head_t * head)
{
	head->cur_active_table = 0;
	head->io_cnt = 0;

	/* Initialize tables. */
	SLIST_INIT(&head->tables[0]);
	SLIST_INIT(&head->tables[1]);

	lockinit(&head->table_mtx, "dmtbl", 0, LK_CANRECURSE);
}
Example 24
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);
	lockinit(&rman->lock, "ttmrman", 0, LK_CANRECURSE);
	man->priv = rman;
	return 0;
}
Example 25
static int
ipheth_attach(device_t dev)
{
	struct ipheth_softc *sc = device_get_softc(dev);
	struct usb_ether *ue = &sc->sc_ue;
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	int error;

	sc->sc_iface_no = uaa->info.bIfaceIndex;

	device_set_usb_desc(dev);

	lockinit(&sc->sc_lock, device_get_nameunit(dev), 0, LK_CANRECURSE);

	error = usbd_set_alt_interface_index(uaa->device,
	    uaa->info.bIfaceIndex, IPHETH_ALT_INTFNUM);
	if (error) {
		device_printf(dev, "Cannot set alternate setting\n");
		goto detach;
	}
	error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no,
	    sc->sc_xfer, ipheth_config, IPHETH_N_TRANSFER, sc, &sc->sc_lock);
	if (error) {
		device_printf(dev, "Cannot setup USB transfers\n");
		goto detach;
	}
	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_lock = &sc->sc_lock;
	ue->ue_methods = &ipheth_ue_methods;

	error = ipheth_get_mac_addr(sc);
	if (error) {
		device_printf(dev, "Cannot get MAC address\n");
		goto detach;
	}

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	ipheth_detach(dev);
	return (ENXIO);			/* failure */
}
Example 26
int
nnpfs_getnewvnode(struct nnpfs *nnpfsp, struct vnode **vpp, 
		struct nnpfs_handle *handle)
{
    struct nnpfs_node *result, *check;
    int error;

    error = getnewvnode(VT_NNPFS, NNPFS_TO_VFS(nnpfsp), &nnpfs_vops,  vpp);
    if (error)
	return error;
    
    result = nnpfs_alloc(sizeof(*result), M_NNPFS_NODE);
    bzero(result, sizeof(*result));
    
    (*vpp)->v_data = result;
    result->vn = *vpp;
    
    result->handle = *handle;
    result->flags = 0;
    result->tokens = 0;
    result->offset = 0;
#if defined(HAVE_KERNEL_LOCKMGR) || defined(HAVE_KERNEL_DEBUGLOCKMGR)
    lockinit (&result->lock, PVFS, "nnpfs_lock", 0, LK_NOPAUSE);
#else
    result->vnlocks = 0;
#endif
    result->anonrights = 0;
    result->rd_cred = NULL;
    result->wr_cred = NULL;

#if defined(__NetBSD_Version__) && __NetBSD_Version__ >= 105280000
    genfs_node_init(*vpp, &nnpfs_genfsops);
#endif

    check = nnpfs_node_find(&nnpfsp->nodehead, handle);
    if (check) {
	vput(*vpp);
	*vpp = check->vn;	/* reuse the vnode of the existing node */
	return 0;
    }

    nnpfs_insert(&nnpfs->nodehead, result);

    return 0;
}
Example 27
static int
ndisusb_attach(device_t self)
{
	const struct drvdb_ent	*db;
	struct ndisusb_softc *dummy = device_get_softc(self);
	struct usb_attach_arg *uaa = device_get_ivars(self);
	struct ndis_softc	*sc;
	struct ndis_usb_type	*t;
	driver_object		*drv;
	int			devidx = 0;

	device_set_usb_desc(self);
	db = uaa->driver_ivar;
	sc = (struct ndis_softc *)dummy;
	sc->ndis_dev = self;
	lockinit(&sc->ndisusb_lock, "NDIS USB", 0, LK_CANRECURSE);
	sc->ndis_dobj = db->windrv_object;
	sc->ndis_regvals = db->windrv_regvals;
	sc->ndis_iftype = PNPBus;
	sc->ndisusb_dev = uaa->device;

	/* Create PDO for this device instance */

	drv = windrv_lookup(0, "USB Bus");
	windrv_create_pdo(drv, self);

	/* Figure out exactly which device we matched. */

	t = db->windrv_devlist;

	while (t->ndis_name != NULL) {
		if ((uaa->info.idVendor == t->ndis_vid) &&
		    (uaa->info.idProduct == t->ndis_did)) {
			sc->ndis_devidx = devidx;
			break;
		}
		t++;
		devidx++;
	}

	if (ndis_attach(self) != 0)
		return (ENXIO);

	return (0);
}
Example 28
/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	lockinit(&thr->lk, "h2thr", 0, 0);
	thr->pmp = pmp;
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	}
}
Example 29
void
lock(Lock *lk)
{
	int *hwsem;
	int hash;

retry:
	switch(arch) {
	case 0:
		lockinit();
		goto retry;
	case MAGNUM:
	case MAGNUMII:
		while(C_3ktas(&lk->val))
			_SLEEP(0);
		return;
	case R4K:
		for(;;){
			while(lk->val)
				;
			if(C_4ktas(&lk->val) == 0)
				return;
		}
		break;
	case POWER:
		/* Use low order lock bits to generate hash */
		hash = ((int)lk/sizeof(int)) & (Semperpg-1);
		hwsem = (int*)Lockaddr+hash;

		for(;;) {
			if((*hwsem & 1) == 0) {
				if(lk->val)
					*hwsem = 0;
				else {
					lk->val = 1;
					*hwsem = 0;
					return;
				}
			}
			while(lk->val)
				;
		}
	}	
}
Example 30
static int
g_modem_attach(device_t dev)
{
	struct g_modem_softc *sc = device_get_softc(dev);
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	int error;
	uint8_t iface_index[2];

	DPRINTFN(11, "\n");

	device_set_usb_desc(dev);

	lockinit(&sc->sc_lock, "g_modem", 0, 0);

	usb_callout_init_mtx(&sc->sc_callout, &sc->sc_lock, 0);
	usb_callout_init_mtx(&sc->sc_watchdog, &sc->sc_lock, 0);

	sc->sc_mode = G_MODEM_MODE_SILENT;

	iface_index[0] = uaa->info.bIfaceIndex;
	iface_index[1] = uaa->info.bIfaceIndex + 1;

	error = usbd_transfer_setup(uaa->device,
	    iface_index, sc->sc_xfer, g_modem_config,
	    G_MODEM_N_TRANSFER, sc, &sc->sc_lock);

	if (error) {
		DPRINTF("error=%s\n", usbd_errstr(error));
		goto detach;
	}
	usbd_set_parent_iface(uaa->device, iface_index[1], iface_index[0]);

	lockmgr(&sc->sc_lock, LK_EXCLUSIVE);
	g_modem_timeout_reset(sc);
	g_modem_watchdog_reset(sc);
	lockmgr(&sc->sc_lock, LK_RELEASE);

	return (0);			/* success */

detach:
	g_modem_detach(dev);

	return (ENXIO);			/* error */
}