Example #1
static void
icap_ebus_attach(device_t parent, device_t self, void *aux)
{
	struct icap_softc *sc = device_private(self);
	struct ebus_attach_args *ia = aux;

	DEBUG_PRINT(("icap_attach %p\n", sc), DEBUG_PROBE);

	sc->sc_dev = self;
	sc->sc_dp = (struct _Icap*)ia->ia_vaddr;
	bufq_alloc(&sc->sc_buflist, "fcfs", 0);
	sc->sc_bp = NULL;
	sc->sc_data = NULL;
	sc->sc_count = 0;

#if DEBUG
    printf(" virt=%p", (void*)sc->sc_dp);
#endif
	printf(": %s\n", "Internal Configuration Access Port");

	ebus_intr_establish(parent, (void*)ia->ia_cookie, IPL_BIO,
	    icap_ebus_intr, sc);

	icap_reset(sc);
}
Example #2
void
mtattach(device_t parent, device_t self, void *aux)
{
	struct mt_softc *sc = device_private(self);
	struct cs80bus_attach_args *ca = aux;
	int type;

	sc->sc_dev = self;
	sc->sc_ic = ca->ca_ic;
	sc->sc_slave = ca->ca_slave;

	if ((type = mtlookup(ca->ca_id, ca->ca_slave, ca->ca_punit)) < 0)
		return;

	printf(": %s tape\n", mtinfo[type].desc);

	sc->sc_type = type;
	sc->sc_flags = MTF_EXISTS;

	bufq_alloc(&sc->sc_tab, "fcfs", 0);
	callout_init(&sc->sc_start_ch, 0);
	callout_init(&sc->sc_intr_ch, 0);

	if (gpibregister(sc->sc_ic, sc->sc_slave, mtcallback, sc,
	    &sc->sc_hdl)) {
		aprint_error_dev(sc->sc_dev, "can't register callback\n");
		return;
	}
}
Example #3
static void
ctattach(device_t parent, device_t self, void *aux)
{
	struct ct_softc *sc = device_private(self);
	struct hpibbus_attach_args *ha = aux;

	sc->sc_dev = self;
	if (ctident(parent, sc, ha) == 0) {
		aprint_error(": didn't respond to describe command!\n");
		return;
	}

	sc->sc_slave = ha->ha_slave;
	sc->sc_punit = ha->ha_punit;

	bufq_alloc(&sc->sc_tab, "fcfs", 0);

	/* Initialize hpib job queue entry. */
	sc->sc_hq.hq_softc = sc;
	sc->sc_hq.hq_slave = sc->sc_slave;
	sc->sc_hq.hq_start = ctstart;
	sc->sc_hq.hq_go = ctgo;
	sc->sc_hq.hq_intr = ctintr;

	ctreset(sc);
	sc->sc_flags |= CTF_ALIVE;
}
Example #4
static void
mtattach(device_t parent, device_t self, void *aux)
{
	struct mt_softc *sc = device_private(self);
	struct hpibbus_attach_args *ha = aux;
	int unit, hpibno, slave;

	sc->sc_dev = self;
	if (mtident(sc, ha) == 0) {
		aprint_error(": impossible!\n");
		return;
	}

	unit = device_unit(self);
	hpibno = device_unit(parent);
	slave = ha->ha_slave;

	bufq_alloc(&sc->sc_tab, "fcfs", 0);
	callout_init(&sc->sc_start_ch, 0);
	callout_init(&sc->sc_intr_ch, 0);

	sc->sc_hpibno = hpibno;
	sc->sc_slave = slave;
	sc->sc_flags = MTF_EXISTS;

	/* Initialize hpib job queue entry. */
	sc->sc_hq.hq_softc = sc;
	sc->sc_hq.hq_slave = sc->sc_slave;
	sc->sc_hq.hq_start = mtstart;
	sc->sc_hq.hq_go = mtgo;
	sc->sc_hq.hq_intr = mtintr;
}
Example #5
static void
md_attach(device_t parent, device_t self,
          void *aux)
{
    struct md_softc *sc = device_private(self);

    bufq_alloc(&sc->sc_buflist, "fcfs", 0);

    /* XXX - Could accept aux info here to set the config. */
#ifdef	MEMORY_DISK_HOOKS
    /*
     * This external function might setup a pre-loaded disk.
     * All it would need to do is setup the md_conf struct.
     * See sys/dev/md_root.c for an example.
     */
    md_attach_hook(device_unit(self), &sc->sc_md);
#endif

    /*
     * Initialize and attach the disk structure.
     */
    disk_init(&sc->sc_dkdev, device_xname(self), &mddkdriver);
    disk_attach(&sc->sc_dkdev);

    if (!pmf_device_register(self, NULL, NULL))
        aprint_error_dev(self, "couldn't establish power handler\n");
}
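The queue allocated with bufq_alloc() is released with bufq_free() when the device goes away, alongside the disk and power-management registrations. A minimal teardown sketch for the md example above, reusing its softc fields; the function name and detach context are hypothetical, not the driver's actual code:

static int
md_detach_sketch(device_t self, int flags)
{
    struct md_softc *sc = device_private(self);

    /* undo md_attach() in reverse order */
    pmf_device_deregister(self);
    disk_detach(&sc->sc_dkdev);
    disk_destroy(&sc->sc_dkdev);
    bufq_free(sc->sc_buflist);
    return 0;
}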
Example #6
static void
ed_mca_attach(device_t parent, device_t self, void *aux)
{
    struct ed_softc *ed = device_private(self);
    struct edc_mca_softc *sc = device_private(parent);
    struct ed_attach_args *eda = aux;
    char pbuf[8];
    int drv_flags;

    ed->sc_dev = self;
    ed->edc_softc = sc;
    ed->sc_devno  = eda->edc_drive;
    edc_add_disk(sc, ed);

    bufq_alloc(&ed->sc_q, "disksort", BUFQ_SORT_RAWBLOCK);
    mutex_init(&ed->sc_q_lock, MUTEX_DEFAULT, IPL_VM);

    if (ed_get_params(ed, &drv_flags)) {
        printf(": IDENTIFY failed, no disk found\n");
        return;
    }

    format_bytes(pbuf, sizeof(pbuf),
                 (u_int64_t) ed->sc_capacity * DEV_BSIZE);
    printf(": %s, %u cyl, %u head, %u sec, 512 bytes/sect x %u sectors\n",
           pbuf,
           ed->cyl, ed->heads, ed->sectors,
           ed->sc_capacity);

    printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
           device_xname(ed->sc_dev), ed->spares,
           (drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
           (drv_flags & (1 << 1)) ? "Removable" : "Fixed",
           (drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
           (drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
           (drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
          );

    /*
     * Initialize and attach the disk structure.
     */
    disk_init(&ed->sc_dk, device_xname(ed->sc_dev), &eddkdriver);
    disk_attach(&ed->sc_dk);
    rnd_attach_source(&ed->rnd_source, device_xname(ed->sc_dev),
                      RND_TYPE_DISK, RND_FLAG_DEFAULT);

    ed->sc_flags |= EDF_INIT;

    /*
     * XXX We should try to discover wedges here, but
     * XXX that would mean being able to do I/O.  Should
     * XXX use config_defer() here.
     */
}
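The disksort queue and IPL_VM mutex set up above are normally fed from the driver's strategy entry point and drained from its start routine. A hedged sketch of that producer/consumer pattern, reusing the sc_q and sc_q_lock fields from this example; the function names and the softc lookup are illustrative, not the actual ed driver code:

static void
xxstrategy(struct buf *bp)              /* hypothetical */
{
    struct ed_softc *ed = device_lookup_private(&ed_cd, DISKUNIT(bp->b_dev));

    mutex_enter(&ed->sc_q_lock);
    bufq_put(ed->sc_q, bp);             /* queued sorted by raw block number */
    mutex_exit(&ed->sc_q_lock);
    /* then kick the controller if it is idle */
}

static void
xxstart(struct ed_softc *ed)            /* hypothetical */
{
    struct buf *bp;

    while ((bp = bufq_get(ed->sc_q)) != NULL) {
        /* issue the transfer described by bp, biodone() it when finished */
    }
}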
Example #7
/*
 * hdcattach() probes for all possible devices
 */
void 
hdcattach(device_t parent, device_t self, void *aux)
{
	struct vsbus_attach_args * const va = aux;
	struct hdcsoftc * const sc = device_private(self);
	struct hdc_attach_args ha;
	int status, i;

	aprint_normal("\n");

	sc->sc_dev = self;

	/*
	 * Get interrupt vector, enable instrumentation.
	 */
	scb_vecalloc(va->va_cvec, hdcintr, sc, SCB_ISTACK, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	sc->sc_regs = vax_map_physmem(va->va_paddr, 1);
	sc->sc_dmabase = (void *)va->va_dmaaddr;
	sc->sc_dmasize = va->va_dmasize;
	sc->sc_intbit = va->va_maskno;
	rd_dmasize = min(MAXPHYS, sc->sc_dmasize); /* Used in rd_minphys */

	sc->sc_vd.vd_go = hdc_qstart;
	sc->sc_vd.vd_arg = sc;
	/*
	 * Reset controller.
	 */
	HDC_WCMD(DKC_CMD_RESET);
	DELAY(1000);
	status = HDC_RSTAT;
	if (status != (DKC_ST_DONE|DKC_TC_SUCCESS)) {
		aprint_error_dev(self, "RESET failed,  status 0x%x\n", status);
		return;
	}
	bufq_alloc(&sc->sc_q, "disksort", BUFQ_SORT_CYLINDER);

	/*
	 * now probe for all possible hard drives
	 */
	for (i = 0; i < 4; i++) {
		if (i == 2) /* Floppy, needs special handling */
			continue;
		HDC_WCMD(DKC_CMD_DRSELECT | i);
		DELAY(1000);
		status = HDC_RSTAT;
		ha.ha_drive = i;
		if ((status & DKC_ST_TERMCOD) == DKC_TC_SUCCESS)
			config_found(self, (void *)&ha, hdcprint);
	}
}
Example #8
void
mcdattach(struct device *parent, struct device *self, void *aux)
{
	struct mcd_softc *sc = (void *)self;
	struct isa_attach_args *ia = aux;
	bus_space_tag_t iot = ia->ia_iot;
	bus_space_handle_t ioh;
	struct mcd_mbox mbx;

	/* Map i/o space */
	if (bus_space_map(iot, ia->ia_io[0].ir_addr, MCD_NPORT, 0, &ioh)) {
		printf(": can't map i/o space\n");
		return;
	}

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	sc->sc_iot = iot;
	sc->sc_ioh = ioh;

	sc->probe = 0;
	sc->debug = 0;

	if (!mcd_find(iot, ioh, sc)) {
		printf(": mcd_find failed\n");
		return;
	}

	bufq_alloc(&sc->buf_queue, "disksort", BUFQ_SORT_RAWBLOCK);
	callout_init(&sc->sc_pintr_ch, 0);

	/*
	 * Initialize and attach the disk structure.
	 */
	disk_init(&sc->sc_dk, device_xname(&sc->sc_dev), &mcddkdriver);
	disk_attach(&sc->sc_dk);

	printf(": model %s\n", sc->type != 0 ? sc->type : "unknown");

	(void) mcd_setlock(sc, MCD_LK_UNLOCK);

	mbx.cmd.opcode = MCD_CMDCONFIGDRIVE;
	mbx.cmd.length = sizeof(mbx.cmd.data.config) - 1;
	mbx.cmd.data.config.subcommand = MCD_CF_IRQENABLE;
	mbx.cmd.data.config.data1 = 0x01;
	mbx.res.length = 0;
	(void) mcd_send(sc, &mbx, 0);

	mcd_soft_reset(sc);

	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq[0].ir_irq,
	    IST_EDGE, IPL_BIO, mcdintr, sc);
}
Example #9
/*
 * Controller is working, and drive responded.  Attach it.
 */
void
fdattach(device_t parent, device_t self, void *aux)
{
	struct fdc_softc *fdc = device_private(parent);
	struct fd_softc *fd = device_private(self);
	struct fdc_attach_args *fa = aux;
	const struct fd_type *type = fa->fa_deftype;
	int drive = fa->fa_drive;

	fd->sc_dev = self;

	callout_init(&fd->sc_motoron_ch, 0);
	callout_init(&fd->sc_motoroff_ch, 0);

	/* XXX Allow `flags' to override device type? */

	if (type)
		aprint_normal(": %s, %d cyl, %d head, %d sec\n", type->name,
		    type->cyls, type->heads, type->sectrac);
	else
		aprint_normal(": density unknown\n");

	bufq_alloc(&fd->sc_q, "disksort", BUFQ_SORT_CYLINDER);
	fd->sc_cylin = -1;
	fd->sc_drive = drive;
	fd->sc_deftype = type;
	fdc->sc_fd[drive] = fd;

	/*
	 * Initialize and attach the disk structure.
	 */
	disk_init(&fd->sc_dk, device_xname(fd->sc_dev), &fddkdriver);
	disk_attach(&fd->sc_dk);

	/*
	 * Establish a mountroot hook.
	 */
	fd->sc_roothook =
	    mountroothook_establish(fd_mountroot_hook, fd->sc_dev);

#if NRND > 0
	rnd_attach_source(&fd->rnd_source, device_xname(fd->sc_dev),
			  RND_TYPE_DISK, 0);
#endif

	fd_set_properties(fd);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "cannot set power mgmt handler\n");
}
Example #10
static void
vnd_attach(device_t parent, device_t self, void *aux)
{
	struct vnd_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_comp_offsets = NULL;
	sc->sc_comp_buff = NULL;
	sc->sc_comp_decombuf = NULL;
	bufq_alloc(&sc->sc_tab, "disksort", BUFQ_SORT_RAWBLOCK);
	disk_init(&sc->sc_dkdev, device_xname(self), &vnddkdriver);
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
Example #11
static void
rdattach(device_t parent, device_t self, void *aux)
{
	struct rd_softc *sc = device_private(self);
	struct hpibbus_attach_args *ha = aux;

	sc->sc_dev = self;
	bufq_alloc(&sc->sc_tab, "disksort", BUFQ_SORT_RAWBLOCK);

	if (rdident(parent, sc, ha) == 0) {
		aprint_error(": didn't respond to describe command!\n");
		return;
	}

	/*
	 * Initialize and attach the disk structure.
	 */
	memset(&sc->sc_dkdev, 0, sizeof(sc->sc_dkdev));
	disk_init(&sc->sc_dkdev, device_xname(sc->sc_dev), NULL);
	disk_attach(&sc->sc_dkdev);

	sc->sc_slave = ha->ha_slave;
	sc->sc_punit = ha->ha_punit;

	callout_init(&sc->sc_restart_ch, 0);

	/* Initialize the hpib job queue entry */
	sc->sc_hq.hq_softc = sc;
	sc->sc_hq.hq_slave = sc->sc_slave;
	sc->sc_hq.hq_start = rdstart;
	sc->sc_hq.hq_go = rdgo;
	sc->sc_hq.hq_intr = rdintr;

	sc->sc_flags = RDF_ALIVE;
#ifdef DEBUG
	/* always report errors */
	if (rddebug & RDB_ERROR)
		rderrthresh = 0;
#endif
	/*
	 * attach the device into the random source list
	 */
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_DISK, RND_FLAG_DEFAULT);
}
Example #12
int
mfs_mountroot(void)
{
	struct fs *fs;
	struct mount *mp;
	struct lwp *l = curlwp;		/* XXX */
	struct ufsmount *ump;
	struct mfsnode *mfsp;
	int error = 0;

	if ((error = vfs_rootmountalloc(MOUNT_MFS, "mfs_root", &mp))) {
		vrele(rootvp);
		return (error);
	}

	mfsp = kmem_alloc(sizeof(*mfsp), KM_SLEEP);
	rootvp->v_data = mfsp;
	rootvp->v_op = mfs_vnodeop_p;
	rootvp->v_tag = VT_MFS;
	mfsp->mfs_baseoff = mfs_rootbase;
	mfsp->mfs_size = mfs_rootsize;
	mfsp->mfs_vnode = rootvp;
	mfsp->mfs_proc = NULL;		/* indicate kernel space */
	mfsp->mfs_shutdown = 0;
	cv_init(&mfsp->mfs_cv, "mfs");
	mfsp->mfs_refcnt = 1;
	bufq_alloc(&mfsp->mfs_buflist, "fcfs", 0);
	if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
		vfs_unbusy(mp, false, NULL);
		bufq_free(mfsp->mfs_buflist);
		vfs_destroy(mp);
		kmem_free(mfsp, sizeof(*mfsp));
		return (error);
	}
	mutex_enter(&mountlist_lock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mutex_exit(&mountlist_lock);
	mp->mnt_vnodecovered = NULLVP;
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statvfs(mp, &mp->mnt_stat);
	vfs_unbusy(mp, false, NULL);
	return (0);
}
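The fcfs queue allocated for the MFS root is drained by the mount's service loop (mfs_start() in NetBSD), which takes buffers off mfs_buflist and performs the copy against the in-core image. A rough, simplified sketch of that consumer loop, reusing the mfsnode fields above; the real loop also sleeps on mfs_cv under mfs_lock between batches and handles shutdown more carefully:

	struct buf *bp;

	while (mfsp->mfs_shutdown != 1) {
		while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL)
			mfs_doio(bp, mfsp->mfs_baseoff);
		/* sleep on mfsp->mfs_cv until more buffers are queued */
	}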
Example #13
void
rlcattach(device_t parent, device_t self, void *aux)
{
	struct rlc_softc *sc = device_private(self);
	struct uba_attach_args *ua = aux;
	struct rlc_attach_args ra;
	int i, error;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;
	uba_intr_establish(ua->ua_icookie, ua->ua_cvec,
		rlcintr, sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		device_xname(sc->sc_dev), "intr");
	uba_reset_establish(rlcreset, self);

	printf("\n");

	/*
	 * The RL11 can only have one transfer going at a time,
	 * and max transfer size is one track, so only one dmamap
	 * is needed.
	 */
	error = bus_dmamap_create(sc->sc_dmat, MAXRLXFER, 1, MAXRLXFER, 0,
	    BUS_DMA_ALLOCNOW, &sc->sc_dmam);
	if (error) {
		aprint_error(": Failed to allocate DMA map, error %d\n", error);
		return;
	}
	bufq_alloc(&sc->sc_q, "disksort", BUFQ_SORT_CYLINDER);
	for (i = 0; i < RL_MAXDPC; i++) {
		waitcrdy(sc);
		RL_WREG(RL_DA, RLDA_GS|RLDA_RST);
		RL_WREG(RL_CS, RLCS_GS|(i << RLCS_USHFT));
		waitcrdy(sc);
		ra.type = RL_RREG(RL_MP);
		ra.hwid = i;
		if ((RL_RREG(RL_CS) & RLCS_ERR) == 0)
			config_found(sc->sc_dev, &ra, rlcprint);
	}
}
Example #14
/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 * If it is a known special, call the special attach routine to install
 * special handlers into the ss_softc structure.
 */
static void
ssattach(struct device *parent, struct device *self, void *aux)
{
	struct ss_softc *ss = device_private(self);
	struct scsipibus_attach_args *sa = aux;
	struct scsipi_periph *periph = sa->sa_periph;

	SC_DEBUG(periph, SCSIPI_DB2, ("ssattach: "));

	ss->flags |= SSF_AUTOCONF;

	/*
	 * Store information needed to contact our base driver
	 */
	ss->sc_periph = periph;
	periph->periph_dev = &ss->sc_dev;
	periph->periph_switch = &ss_switch;

	printf("\n");

	/*
	 * Set up the buf queue for this device
	 */
	bufq_alloc(&ss->buf_queue, "fcfs", 0);

	callout_init(&ss->sc_callout, 0);

	/*
	 * look for non-standard scanners with help of the quirk table
	 * and install functions for special handling
	 */
	SC_DEBUG(periph, SCSIPI_DB2, ("ssattach:\n"));
	if (memcmp(sa->sa_inqbuf.vendor, "MUSTEK", 6) == 0)
		mustek_attach(ss, sa);
	if (memcmp(sa->sa_inqbuf.vendor, "HP      ", 8) == 0 &&
	    memcmp(sa->sa_inqbuf.product, "ScanJet 5300C", 13) != 0)
		scanjet_attach(ss, sa);
	if (ss->special == NULL) {
		/* XXX add code to restart a SCSI2 scanner, if any */
	}

	ss->flags &= ~SSF_AUTOCONF;
}
Example #15
static void
fss_attach(device_t parent, device_t self, void *aux)
{
	struct fss_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_bdev = NODEV;
	mutex_init(&sc->sc_slock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_work_cv, "fssbs");
	cv_init(&sc->sc_cache_cv, "cowwait");
	bufq_alloc(&sc->sc_bufq, "fcfs", 0);
	sc->sc_dkdev = malloc(sizeof(*sc->sc_dkdev), M_DEVBUF, M_WAITOK);
	sc->sc_dkdev->dk_info = NULL;
	disk_init(sc->sc_dkdev, device_xname(self), NULL);
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	if (fss_num_attached++ == 0)
		vfs_hooks_attach(&fss_vfs_hooks);
}
Example #16
/* ARGSUSED */
int
mfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct mfs_args *args = data;
	struct ufsmount *ump;
	struct fs *fs;
	struct mfsnode *mfsp;
	struct proc *p;
	int flags, error = 0;

	if (*data_len < sizeof *args)
		return EINVAL;

	p = l->l_proc;
	if (mp->mnt_flag & MNT_GETARGS) {
		struct vnode *vp;

		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;

		vp = ump->um_devvp;
		if (vp == NULL)
			return EIO;

		mfsp = VTOMFS(vp);
		if (mfsp == NULL)
			return EIO;

		args->fspec = NULL;
		args->base = mfsp->mfs_baseoff;
		args->size = mfsp->mfs_size;
		*data_len = sizeof *args;
		return 0;
	}
	/*
	 * XXX turn off async to avoid hangs when writing lots of data.
	 * the problem is that MFS needs to allocate pages to clean pages,
	 * so if we wait until the last minute to clean pages then there
	 * may not be any pages available to do the cleaning.
	 * ... and since the default partially-synchronous mode turns out
	 * to not be sufficient under heavy load, make it full synchronous.
	 */
	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SYNCHRONOUS;

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, l);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR))
			fs->fs_ronly = 0;
		if (args->fspec == NULL)
			return EINVAL;
		return (0);
	}
	error = getnewvnode(VT_MFS, NULL, mfs_vnodeop_p, NULL, &devvp);
	if (error)
		return (error);
	devvp->v_vflag |= VV_MPSAFE;
	devvp->v_type = VBLK;
	spec_node_init(devvp, makedev(255, mfs_minor));
	mfs_minor++;
	mfsp = kmem_alloc(sizeof(*mfsp), KM_SLEEP);
	devvp->v_data = mfsp;
	mfsp->mfs_baseoff = args->base;
	mfsp->mfs_size = args->size;
	mfsp->mfs_vnode = devvp;
	mfsp->mfs_proc = p;
	mfsp->mfs_shutdown = 0;
	cv_init(&mfsp->mfs_cv, "mfsidl");
	mfsp->mfs_refcnt = 1;
	bufq_alloc(&mfsp->mfs_buflist, "fcfs", 0);
	if ((error = ffs_mountfs(devvp, mp, l)) != 0) {
		mfsp->mfs_shutdown = 1;
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error)
		return error;
	(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
		sizeof(fs->fs_fsmnt));
	fs->fs_fsmnt[sizeof(fs->fs_fsmnt) - 1] = '\0';
	/* XXX: cleanup on error */
	return 0;
}
Example #17
void
tsattach(device_t parent, device_t self, void *aux)
{
	struct ts_softc *sc = device_private(self);
	struct uba_attach_args *ua = aux;
	int error;
	const char *t;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	sc->sc_uu.uu_dev = self;
	sc->sc_uu.uu_ready = tsready;

	tsinit(sc);	/* reset and map */

	error = bus_dmamap_create(sc->sc_dmat, (64*1024), 16, (64*1024),
	    0, BUS_DMA_NOWAIT, &sc->sc_dmam);
	if (error) {
		aprint_error(": failed create DMA map %d\n", error);
		return;
	}

	bufq_alloc(&sc->sc_bufq, "fcfs", 0);

	/*
	 * write the characteristics (again)
	 */
	sc->sc_state = TS_INIT;		/* tsintr() checks this ... */
	tswchar(sc);
	if (tsleep(sc, PRIBIO, "tsattach", 100)) {
		aprint_error(": failed SET CHARACTERISTICS\n");
		return;
	}

	sc->sc_state = TS_RUNNING;
	if (sc->sc_uh->uh_type == UBA_UBA) {
		if (sc->sc_vts->status.xst2 & TS_SF_TU80) {
			sc->sc_type = TYPE_TU80;
			t = "TU80";
		} else {
			sc->sc_type = TYPE_TS11;
			t = "TS11";
		}
	} else {
		sc->sc_type = TYPE_TS05;
		t = "TS05";
	}

	aprint_normal(": %s\n", t);
	aprint_normal_dev(sc->sc_dev, 
	    "rev %d, extended features %s, transport %s\n",
	    (sc->sc_vts->status.xst2 & TS_SF_MCRL) >> 2,
	    (sc->sc_vts->status.xst2 & TS_SF_EFES ? "enabled" : "disabled"),
	    (TS_RCSR(TSSR) & TS_OFL ? "offline" : "online"));

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, tsintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");
}
Example #18
static void
udf_discstrat_init_seq(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct strat_private *priv = PRIV(ump);
	struct disk_strategy dkstrat;
	uint32_t lb_size;

	KASSERT(ump);
	KASSERT(ump->logical_vol);
	KASSERT(priv == NULL);

	lb_size = udf_rw32(ump->logical_vol->lb_size);
	KASSERT(lb_size > 0);

	/* initialise our memory space */
	ump->strategy_private = malloc(sizeof(struct strat_private),
		M_UDFTEMP, M_WAITOK);
	priv = ump->strategy_private;
	memset(priv, 0, sizeof(struct strat_private));

	/* initialise locks */
	cv_init(&priv->discstrat_cv, "udfstrat");
	mutex_init(&priv->discstrat_mutex, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * Initialise pool for descriptors associated with nodes. This is done
	 * in lb_size units though currently lb_size is dictated to be
	 * sector_size.
	 */
	pool_init(&priv->desc_pool, lb_size, 0, 0, 0, "udf_desc_pool", NULL,
	    IPL_NONE);

	/*
	 * Remember the old device strategy method and explicitly set the
	 * `discsort' method, since we have our own more complex strategy
	 * that is not implementable on the CD device and other strategies
	 * would get in the way.
	 */
	memset(&priv->old_strategy_setting, 0,
		sizeof(struct disk_strategy));
	VOP_IOCTL(ump->devvp, DIOCGSTRATEGY, &priv->old_strategy_setting,
		FREAD | FKIOCTL, NOCRED);
	memset(&dkstrat, 0, sizeof(struct disk_strategy));
	strcpy(dkstrat.dks_name, "discsort");
	VOP_IOCTL(ump->devvp, DIOCSSTRATEGY, &dkstrat, FWRITE | FKIOCTL,
		NOCRED);

	/* initialise our internal scheduler */
	priv->cur_queue = UDF_SHED_READING;
	bufq_alloc(&priv->queues[UDF_SHED_READING], "disksort",
		BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&priv->queues[UDF_SHED_WRITING], "disksort",
		BUFQ_SORT_RAWBLOCK);
	bufq_alloc(&priv->queues[UDF_SHED_SEQWRITING], "fcfs", 0);
	vfs_timestamp(&priv->last_queued[UDF_SHED_READING]);
	vfs_timestamp(&priv->last_queued[UDF_SHED_WRITING]);
	vfs_timestamp(&priv->last_queued[UDF_SHED_SEQWRITING]);

	/* create our disk strategy thread */
	priv->run_thread = 1;
	if (kthread_create(PRI_NONE, 0 /* KTHREAD_MPSAFE*/, NULL /* cpu_info*/,
		udf_discstrat_thread, ump, &priv->queue_lwp,
		"%s", "udf_rw")) {
		panic("fork udf_rw");
	}
}
Example #19
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
	    cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
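The configure path above has a matching unconfigure path that releases the same resources in reverse. A rough sketch under the same field names; the function name is hypothetical and the reference counting and locking the real driver performs are omitted:

static void
cgd_unconfigure_sketch(struct cgd_softc *cs)
{
	struct dk_softc *dksc = &cs->sc_dksc;

	/* delete wedges and detach the disk, then release the queue and data buffer */
	dkwedge_delall(&dksc->sc_dkdev);
	disk_detach(&dksc->sc_dkdev);
	bufq_free(dksc->sc_bufq);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data = NULL;
}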