/*
 * Perform a synchronous indirect write of the given block number
 * on the given device, using the given fbuf.  Upon return the fbp
 * is invalid.
 */
int
fbiwrite(struct fbuf *fbp, vnode_t *devvp, daddr_t bn, int bsize)
{
	struct buf *bp;
	int error, fberror;

	/*
	 * Allocate a temp bp using pageio_setup, but then use it
	 * for physio to the area mapped by fbuf which is currently
	 * all locked down in place.
	 *
	 * XXX - need to have a generalized bp header facility
	 * which we build up pageio_setup on top of.  Other places
	 * (like here and in device drivers for the raw I/O case)
	 * could then use these new facilities in a more straightforward
	 * fashion instead of playing all these games.
	 */
	bp = pageio_setup((struct page *)NULL, fbp->fb_count, devvp, B_WRITE);
	bp->b_flags &= ~B_PAGEIO;		/* XXX */
	bp->b_un.b_addr = fbp->fb_addr;

	bp->b_blkno = bn * btod(bsize);
	bp->b_dev = cmpdev(devvp->v_rdev);	/* store in old dev format */
	bp->b_edev = devvp->v_rdev;
	bp->b_proc = NULL;			/* i.e. the kernel */

	(void) bdev_strategy(bp);
	error = biowait(bp);
	pageio_done(bp);

	/*CSTYLED*/
	FBCOMMON(fbp, S_OTHER, 0, fberror = )

	return (error ? error : fberror);
}
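/*
 * Illustrative sketch (not part of the kernel source): fbiwrite() hands the
 * driver a block number in DEV_BSIZE units, so the filesystem block number is
 * scaled with bn * btod(bsize).  The constants and names below (EX_DEV_BSIZE,
 * ex_btod) are assumptions written out for the example; the kernel takes the
 * real values from its own headers.
 */
#include <stdio.h>

#define	EX_DEV_BSIZE	512			/* assumed disk sector size */
#define	ex_btod(nb)	(((nb) + EX_DEV_BSIZE - 1) / EX_DEV_BSIZE)

int
main(void)
{
	long bn = 37;				/* filesystem block number */
	int bsize = 8192;			/* filesystem block size */

	/* 8192-byte blocks are 16 sectors each, so block 37 starts at sector 592 */
	printf("b_blkno = %ld\n", bn * ex_btod(bsize));
	return (0);
}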
static void
mkprpsinfo32(struct ps_prochandle *P, prpsinfo32_t *psp)
{
	bzero(psp, sizeof (*psp));
	psp->pr_state = P->psinfo.pr_lwp.pr_state;
	psp->pr_sname = P->psinfo.pr_lwp.pr_sname;
	psp->pr_zomb = (psp->pr_state == SZOMB);
	psp->pr_nice = P->psinfo.pr_lwp.pr_nice;
	psp->pr_flag = P->psinfo.pr_lwp.pr_flag;
	psp->pr_uid = P->psinfo.pr_uid;
	psp->pr_gid = P->psinfo.pr_gid;
	psp->pr_pid = P->psinfo.pr_pid;
	psp->pr_ppid = P->psinfo.pr_ppid;
	psp->pr_pgrp = P->psinfo.pr_pgid;
	psp->pr_sid = P->psinfo.pr_sid;
	psp->pr_addr = (caddr32_t)P->psinfo.pr_addr;
	psp->pr_size = P->psinfo.pr_size;
	psp->pr_rssize = P->psinfo.pr_rssize;
	psp->pr_wchan = (caddr32_t)P->psinfo.pr_lwp.pr_wchan;
	timestruc_n_to_32(&P->psinfo.pr_start, &psp->pr_start);
	timestruc_n_to_32(&P->psinfo.pr_time, &psp->pr_time);
	psp->pr_pri = P->psinfo.pr_lwp.pr_pri;
	psp->pr_oldpri = P->psinfo.pr_lwp.pr_oldpri;
	psp->pr_cpu = P->psinfo.pr_lwp.pr_cpu;
	psp->pr_ottydev = cmpdev(P->psinfo.pr_ttydev);
	psp->pr_lttydev = prcmpldev(P->psinfo.pr_ttydev);
	(void) strncpy(psp->pr_clname, P->psinfo.pr_lwp.pr_clname,
	    sizeof (psp->pr_clname));
	(void) strncpy(psp->pr_fname, P->psinfo.pr_fname,
	    sizeof (psp->pr_fname));
	bcopy(&P->psinfo.pr_psargs, &psp->pr_psargs,
	    sizeof (psp->pr_psargs));
	psp->pr_syscall = P->psinfo.pr_lwp.pr_syscall;
	timestruc_n_to_32(&P->psinfo.pr_ctime, &psp->pr_ctime);
	psp->pr_bysize = psp->pr_size * PAGESIZE;
	psp->pr_byrssize = psp->pr_rssize * PAGESIZE;
	psp->pr_argc = P->psinfo.pr_argc;
	psp->pr_argv = (caddr32_t)P->psinfo.pr_argv;
	psp->pr_envp = (caddr32_t)P->psinfo.pr_envp;
	psp->pr_wstat = P->psinfo.pr_wstat;
	psp->pr_pctcpu = P->psinfo.pr_pctcpu;
	psp->pr_pctmem = P->psinfo.pr_pctmem;
	psp->pr_euid = P->psinfo.pr_euid;
	psp->pr_egid = P->psinfo.pr_egid;
	psp->pr_aslwpid = 0;
	psp->pr_dmodel = P->psinfo.pr_dmodel;
}
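/*
 * Illustrative sketch (not libproc code): the timestruc_n_to_32() calls above
 * narrow a native timestruc_t into a 32-bit layout.  The structure and helper
 * below (ex_timestruc32_t, ex_timestruc_n_to_32) are assumptions made up for
 * this example only; they show the truncating assignment such a conversion
 * performs on an LP64 process.
 */
#include <stdio.h>
#include <time.h>
#include <stdint.h>

typedef struct ex_timestruc32 {
	int32_t	tv_sec;
	int32_t	tv_nsec;
} ex_timestruc32_t;

static void
ex_timestruc_n_to_32(const struct timespec *src, ex_timestruc32_t *dst)
{
	dst->tv_sec = (int32_t)src->tv_sec;	/* truncates a 64-bit time_t */
	dst->tv_nsec = (int32_t)src->tv_nsec;
}

int
main(void)
{
	struct timespec now = { 1700000000, 123456789 };
	ex_timestruc32_t n32;

	ex_timestruc_n_to_32(&now, &n32);
	printf("%d.%09d\n", (int)n32.tv_sec, (int)n32.tv_nsec);
	return (0);
}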
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 */
struct buf *
getblk_common(void *arg, dev_t dev, daddr_t blkno, long bsize, int errflg)
{
	ufsvfs_t *ufsvfsp = (struct ufsvfs *)arg;
	struct buf *bp;
	struct buf *dp;
	struct buf *nbp = NULL;
	struct buf *errbp;
	uint_t index;
	kmutex_t *hmp;
	struct hbuf *hp;

	if (getmajor(dev) >= devcnt)
		cmn_err(CE_PANIC, "blkdev");

	biostats.bio_lookup.value.ui32++;

	index = bio_bhash(dev, blkno);
	hp = &hbuf[index];
	dp = (struct buf *)hp;
	hmp = &hp->b_lock;

	mutex_enter(hmp);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		/*
		 * Avoid holding the hash lock in the event that
		 * the buffer is locked by someone.  Since the hash chain
		 * may change when we drop the hash lock
		 * we have to start at the beginning of the chain if the
		 * buffer identity/contents aren't valid.
		 */
		if (!sema_tryp(&bp->b_sem)) {
			biostats.bio_bufbusy.value.ui32++;
			mutex_exit(hmp);
			/*
			 * OK, we are dealing with a busy buffer.
			 * In the case that we are panicking and we
			 * got called from bread(), we have some chance
			 * for error recovery.  So better bail out from
			 * here since sema_p() won't block.  If we got
			 * called directly from ufs routines, there is
			 * no way to report an error yet.
			 */
			if (panicstr && errflg)
				goto errout;
			/*
			 * For the following line of code to work
			 * correctly never kmem_free the buffer "header".
			 */
			sema_p(&bp->b_sem);
			if (bp->b_blkno != blkno || bp->b_edev != dev ||
			    (bp->b_flags & B_STALE)) {
				sema_v(&bp->b_sem);
				mutex_enter(hmp);
				goto loop;	/* start over */
			}
			mutex_enter(hmp);
		}

		/* Found */
		biostats.bio_hit.value.ui32++;
		bp->b_flags &= ~B_AGE;

		/*
		 * Yank it off the free/delayed write lists
		 */
		hp->b_length--;
		notavail(bp);
		mutex_exit(hmp);

		ASSERT((bp->b_flags & B_NOCACHE) == NULL);

		if (nbp == NULL) {
			/*
			 * Make the common path short.
			 */
			ASSERT(SEMA_HELD(&bp->b_sem));
			return (bp);
		}

		biostats.bio_bufdup.value.ui32++;

		/*
		 * The buffer must have entered during the lock upgrade
		 * so free the new buffer we allocated and return the
		 * found buffer.
		 */
		kmem_free(nbp->b_un.b_addr, nbp->b_bufsize);
		nbp->b_un.b_addr = NULL;

		/*
		 * Account for the memory
		 */
		mutex_enter(&bfree_lock);
		bfreelist.b_bufsize += nbp->b_bufsize;
		mutex_exit(&bfree_lock);

		/*
		 * Destroy buf identity, and place on avail list
		 */
		nbp->b_dev = (o_dev_t)NODEV;
		nbp->b_edev = NODEV;
		nbp->b_flags = 0;
		nbp->b_file = NULL;
		nbp->b_offset = -1;

		sema_v(&nbp->b_sem);
		bio_bhdr_free(nbp);

		ASSERT(SEMA_HELD(&bp->b_sem));
		return (bp);
	}

	/*
	 * bio_getfreeblk may block so check the hash chain again.
	 */
	if (nbp == NULL) {
		mutex_exit(hmp);
		nbp = bio_getfreeblk(bsize);
		mutex_enter(hmp);
		goto loop;
	}

	/*
	 * New buffer.  Assign nbp and stick it on the hash.
	 */
	nbp->b_flags = B_BUSY;
	nbp->b_edev = dev;
	nbp->b_dev = (o_dev_t)cmpdev(dev);
	nbp->b_blkno = blkno;
	nbp->b_iodone = NULL;
	nbp->b_bcount = bsize;
	/*
	 * If we are given a ufsvfsp and the vfs_root field is NULL
	 * then this must be I/O for a superblock.  A superblock's
	 * buffer is set up in mountfs() and there is no root vnode
	 * at that point.
	 */
	if (ufsvfsp && ufsvfsp->vfs_root) {
		nbp->b_vp = ufsvfsp->vfs_root;
	} else {
		nbp->b_vp = NULL;
	}

	ASSERT((nbp->b_flags & B_NOCACHE) == NULL);

	binshash(nbp, dp);
	mutex_exit(hmp);

	ASSERT(SEMA_HELD(&nbp->b_sem));

	return (nbp);

	/*
	 * Come here in case of an internal error.  At this point we couldn't
	 * get a buffer, but we have to return one.  Hence we allocate some
	 * kind of error reply buffer on the fly.  This buffer is marked as
	 * B_NOCACHE | B_AGE | B_ERROR | B_DONE to assure the following:
	 *	- B_ERROR will indicate error to the caller.
	 *	- B_DONE will prevent us from reading the buffer from
	 *	  the device.
	 *	- B_NOCACHE will cause that this buffer gets free'd in
	 *	  brelse().
	 */
errout:
	errbp = geteblk();
	sema_p(&errbp->b_sem);
	errbp->b_flags &= ~B_BUSY;
	errbp->b_flags |= (B_ERROR | B_DONE);
	return (errbp);
}
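/*
 * Illustrative sketch (not the kernel's bio_bhash()): getblk_common() picks a
 * hash bucket from (dev, blkno) and walks that chain under the per-bucket
 * lock.  The bucket count and hash below (EX_NBUCKETS, ex_bio_bhash) are
 * assumed stand-ins showing the idea of mapping a device/block pair to a
 * bucket index with a power-of-two mask.
 */
#include <stdio.h>

#define	EX_NBUCKETS	64		/* assumed bucket count, power of two */

static unsigned int
ex_bio_bhash(unsigned long dev, long blkno)
{
	return ((unsigned int)(dev + blkno) & (EX_NBUCKETS - 1));
}

int
main(void)
{
	/* two blocks on the same device usually land in different buckets */
	printf("%u %u\n",
	    ex_bio_bhash(0x1d80000UL, 16), ex_bio_bhash(0x1d80000UL, 4096));
	return (0);
}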
/*ARGSUSED*/
int
ufs_rdwr_data(
	vnode_t		*vnodep,
	u_offset_t	offset,
	size_t		len,
	fdbuffer_t	*fdbp,
	int		flags,
	cred_t		*credp)
{
	struct inode	*ip = VTOI(vnodep);
	struct fs	*fs;
	struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
	struct buf	*bp;
	krw_t		rwtype = RW_READER;
	u_offset_t	offset1 = offset;	/* Initial offset */
	size_t		iolen;
	int		curlen = 0;
	int		pplen;
	daddr_t		bn;
	int		contig = 0;
	int		error = 0;
	int		nbytes;			/* Number bytes this IO */
	int		offsetn;		/* Start point this IO */
	int		iswrite = flags & B_WRITE;
	int		io_started = 0;		/* No IO started */
	struct ulockfs	*ulp;
	uint_t		protp = PROT_ALL;

	error = ufs_lockfs_begin_getpage(ufsvfsp, &ulp, segkmap, !iswrite,
	    &protp);
	if (error) {
		if (flags & B_ASYNC) {
			fdb_ioerrdone(fdbp, error);
		}
		return (error);
	}
	fs = ufsvfsp->vfs_fs;
	iolen = len;

	DEBUGF((CE_CONT, "?ufs_rdwr: %s vp: %p pages:%p off %llx len %lx"
	    " isize: %llx fdb: %p\n",
	    flags & B_READ ? "READ" : "WRITE", (void *)vnodep,
	    (void *)vnodep->v_pages, offset1, iolen, ip->i_size, (void *)fdbp));

	rw_enter(&ip->i_ufsvfs->vfs_dqrwlock, RW_READER);
	rw_enter(&ip->i_contents, rwtype);

	ASSERT(offset1 < ip->i_size);

	if ((offset1 + iolen) > ip->i_size) {
		iolen = ip->i_size - offset1;
	}
	while (!error && curlen < iolen) {

		contig = 0;

		if ((error = bmap_read(ip, offset1, &bn, &contig)) != 0) {
			break;
		}
		ASSERT(!(bn == UFS_HOLE && iswrite));
		if (bn == UFS_HOLE) {
			/*
			 * If the above assertion is true,
			 * then the following if statement can never be true.
			 */
			if (iswrite && (rwtype == RW_READER)) {
				rwtype = RW_WRITER;
				if (!rw_tryupgrade(&ip->i_contents)) {
					rw_exit(&ip->i_contents);
					rw_enter(&ip->i_contents, rwtype);
					continue;
				}
			}
			offsetn = blkoff(fs, offset1);
			pplen = P2ROUNDUP(len, PAGESIZE);
			nbytes = MIN((pplen - curlen),
			    (fs->fs_bsize - offsetn));
			ASSERT(nbytes > 0);

			/*
			 * We may be reading or writing.
			 */
			DEBUGF((CE_CONT, "?ufs_rdwr_data: hole %llx - %lx\n",
			    offset1, (iolen - curlen)));

			if (iswrite) {
				printf("**WARNING: ignoring hole in write\n");
				error = ENOSPC;
			} else {
				fdb_add_hole(fdbp, offset1 - offset, nbytes);
			}
			offset1 += nbytes;
			curlen += nbytes;
			continue;
		}
		ASSERT(contig > 0);
		pplen = P2ROUNDUP(len, PAGESIZE);

		contig = MIN(contig, len - curlen);
		contig = P2ROUNDUP(contig, DEV_BSIZE);

		bp = fdb_iosetup(fdbp, offset1 - offset, contig, vnodep, flags);

		bp->b_edev = ip->i_dev;
		bp->b_dev = cmpdev(ip->i_dev);
		bp->b_blkno = bn;
		bp->b_file = ip->i_vnode;
		bp->b_offset = (offset_t)offset1;

		if (ufsvfsp->vfs_snapshot) {
			fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
		} else {
			(void) bdev_strategy(bp);
		}
		io_started = 1;

		offset1 += contig;
		curlen += contig;
		if (iswrite)
			lwp_stat_update(LWP_STAT_OUBLK, 1);
		else
			lwp_stat_update(LWP_STAT_INBLK, 1);

		if ((flags & B_ASYNC) == 0) {
			error = biowait(bp);
			fdb_iodone(bp);
		}

		DEBUGF((CE_CONT, "?loop ufs_rdwr_data.. off %llx len %lx\n",
		    offset1, (iolen - curlen)));
	}

	DEBUGF((CE_CONT, "?ufs_rdwr_data: off %llx len %lx pages: %p ------\n",
	    offset1, (iolen - curlen), (void *)vnodep->v_pages));

	rw_exit(&ip->i_contents);
	rw_exit(&ip->i_ufsvfs->vfs_dqrwlock);

	if (flags & B_ASYNC) {
		/*
		 * Show that no more asynchronous IO will be added
		 */
		fdb_ioerrdone(fdbp, error);
	}
	if (ulp) {
		ufs_lockfs_end(ulp);
	}
	if (io_started && flags & B_ASYNC) {
		return (0);
	} else {
		return (error);
	}
}
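/*
 * Illustrative sketch (not UFS code): each pass of the loop above sizes its
 * transfer from the offset within the current filesystem block (blkoff) and
 * rounds lengths up to power-of-two boundaries (P2ROUNDUP).  The macros and
 * constants below (EX_P2ROUNDUP, EX_BSIZE, EX_BLKOFF) are assumed re-creations
 * for the example only.
 */
#include <stdio.h>

#define	EX_P2ROUNDUP(x, a)	(-(-(x) & -(a)))	/* a: power of two */
#define	EX_BSIZE		8192			/* assumed fs_bsize */
#define	EX_BLKOFF(off)		((off) & (EX_BSIZE - 1))

int
main(void)
{
	long off = 12345;
	long len = 3000;

	/* bytes already consumed in the current block vs. rounded length */
	printf("offset in block: %ld\n", EX_BLKOFF(off));
	printf("rounded length:  %ld\n", EX_P2ROUNDUP(len, 512L));
	return (0);
}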
/*
 * ufs_alloc_data - supports allocating space and reads or writes
 * that involve changes to file length or space allocation.
 *
 * This function is more expensive, because of the UFS log transaction,
 * so ufs_rdwr_data() should be used when space or file length changes
 * will not occur.
 *
 * Inputs:
 *	fdb - A null pointer instructs this function to only allocate
 *	      space for the specified offset and length.
 *	      An actual fdbuffer instructs this function to perform
 *	      the read or write operation.
 *	flags - defaults (zero value) to synchronous write
 *	      B_READ - indicates read operation
 *	      B_ASYNC - indicates perform operation asynchronously
 */
int
ufs_alloc_data(
	vnode_t		*vnodep,
	u_offset_t	offset,
	size_t		*len,
	fdbuffer_t	*fdbp,
	int		flags,
	cred_t		*credp)
{
	struct inode	*ip = VTOI(vnodep);
	size_t		done_len, io_len;
	int		contig;
	u_offset_t	uoff, io_off;
	int		error = 0;		/* No error occurred */
	int		offsetn;		/* Start point this IO */
	int		nbytes;			/* Number bytes in this IO */
	daddr_t		bn;
	struct fs	*fs;
	struct ufsvfs	*ufsvfsp = ip->i_ufsvfs;
	int		i_size_changed = 0;
	u_offset_t	old_i_size;
	struct ulockfs	*ulp;
	int		trans_size;
	int		issync;			/* UFS Log transaction */
						/* synchronous when non-zero */
	int		io_started = 0;		/* No IO started */
	uint_t		protp = PROT_ALL;

	ASSERT((flags & B_WRITE) == 0);

	/*
	 * Obey the lockfs protocol
	 */
	error = ufs_lockfs_begin_getpage(ufsvfsp, &ulp, segkmap, 0, &protp);
	if (error) {
		if ((fdbp != NULL) && (flags & B_ASYNC)) {
			fdb_ioerrdone(fdbp, error);
		}
		return (error);
	}
	if (ulp) {
		/*
		 * Try to begin a UFS log transaction
		 */
		trans_size = TOP_GETPAGE_SIZE(ip);
		TRANS_TRY_BEGIN_CSYNC(ufsvfsp, issync, TOP_GETPAGE,
		    trans_size, error);
		if (error == EWOULDBLOCK) {
			ufs_lockfs_end(ulp);
			if ((fdbp != NULL) && (flags & B_ASYNC)) {
				fdb_ioerrdone(fdbp, EDEADLK);
			}
			return (EDEADLK);
		}
	}

	uoff = offset;
	io_off = offset;
	io_len = *len;
	done_len = 0;

	DEBUGF((CE_CONT, "?ufs_alloc: off %llx len %lx size %llx fdb: %p\n",
	    uoff, (io_len - done_len), ip->i_size, (void *)fdbp));

	rw_enter(&ip->i_ufsvfs->vfs_dqrwlock, RW_READER);
	rw_enter(&ip->i_contents, RW_WRITER);

	ASSERT((ip->i_mode & IFMT) == IFREG);

	fs = ip->i_fs;

	while (error == 0 && done_len < io_len) {
		uoff = (u_offset_t)(io_off + done_len);
		offsetn = (int)blkoff(fs, uoff);
		nbytes = (int)MIN(fs->fs_bsize - offsetn, io_len - done_len);

		DEBUGF((CE_CONT, "?ufs_alloc_data: offset: %llx len %x\n",
		    uoff, nbytes));

		if (uoff + nbytes > ip->i_size) {
			/*
			 * We are extending the length of the file.
			 * bmap is used so that we are sure that
			 * if we need to allocate new blocks, that it
			 * is done here before we up the file size.
			 */
			DEBUGF((CE_CONT, "?ufs_alloc_data: grow %llx -> %llx\n",
			    ip->i_size, uoff + nbytes));

			error = bmap_write(ip, uoff, (offsetn + nbytes),
			    BI_ALLOC_ONLY, NULL, credp);
			if (ip->i_flag & (ICHG|IUPD))
				ip->i_seq++;
			if (error) {
				DEBUGF((CE_CONT, "?ufs_alloc_data: grow "
				    "failed err: %d\n", error));
				break;
			}
			if (fdbp != NULL) {
				if (uoff >= ip->i_size) {
					/*
					 * Desired offset is past end of bytes
					 * in file, so we have a hole.
					 */
					fdb_add_hole(fdbp, uoff - offset,
					    nbytes);
				} else {
					int contig;
					buf_t *bp;

					error = bmap_read(ip, uoff, &bn,
					    &contig);
					if (error) {
						break;
					}

					contig = ip->i_size - uoff;
					contig = P2ROUNDUP(contig, DEV_BSIZE);

					bp = fdb_iosetup(fdbp, uoff - offset,
					    contig, vnodep, flags);

					bp->b_edev = ip->i_dev;
					bp->b_dev = cmpdev(ip->i_dev);
					bp->b_blkno = bn;
					bp->b_file = ip->i_vnode;
					bp->b_offset = (offset_t)uoff;

					if (ufsvfsp->vfs_snapshot) {
						fssnap_strategy(
						    &ufsvfsp->vfs_snapshot, bp);
					} else {
						(void) bdev_strategy(bp);
					}
					io_started = 1;

					lwp_stat_update(LWP_STAT_OUBLK, 1);

					if ((flags & B_ASYNC) == 0) {
						error = biowait(bp);
						fdb_iodone(bp);
						if (error) {
							break;
						}
					}
					if (contig > (ip->i_size - uoff)) {
						contig -= ip->i_size - uoff;

						fdb_add_hole(fdbp,
						    ip->i_size - offset,
						    contig);
					}
				}
			}

			i_size_changed = 1;
			old_i_size = ip->i_size;
			UFS_SET_ISIZE(uoff + nbytes, ip);
			TRANS_INODE(ip->i_ufsvfs, ip);
			/*
			 * file has grown larger than 2GB. Set flag
			 * in superblock to indicate this, if it
			 * is not already set.
			 */
			if ((ip->i_size > MAXOFF32_T) &&
			    !(fs->fs_flags & FSLARGEFILES)) {
				ASSERT(ufsvfsp->vfs_lfflags & UFS_LARGEFILES);
				mutex_enter(&ufsvfsp->vfs_lock);
				fs->fs_flags |= FSLARGEFILES;
				ufs_sbwrite(ufsvfsp);
				mutex_exit(&ufsvfsp->vfs_lock);
			}
		} else {
			/*
			 * The file length is not being extended.
			 */
			error = bmap_read(ip, uoff, &bn, &contig);
			if (error) {
				DEBUGF((CE_CONT, "?ufs_alloc_data: "
				    "bmap_read err: %d\n", error));
				break;
			}

			if (bn != UFS_HOLE) {
				/*
				 * Did not map a hole in the file
				 */
				int	contig = P2ROUNDUP(nbytes, DEV_BSIZE);
				buf_t	*bp;

				if (fdbp != NULL) {
					bp = fdb_iosetup(fdbp, uoff - offset,
					    contig, vnodep, flags);

					bp->b_edev = ip->i_dev;
					bp->b_dev = cmpdev(ip->i_dev);
					bp->b_blkno = bn;
					bp->b_file = ip->i_vnode;
					bp->b_offset = (offset_t)uoff;

					if (ufsvfsp->vfs_snapshot) {
						fssnap_strategy(
						    &ufsvfsp->vfs_snapshot, bp);
					} else {
						(void) bdev_strategy(bp);
					}
					io_started = 1;

					lwp_stat_update(LWP_STAT_OUBLK, 1);

					if ((flags & B_ASYNC) == 0) {
						error = biowait(bp);
						fdb_iodone(bp);
						if (error) {
							break;
						}
					}
				}
			} else {
				/*
				 * We read a hole in the file.
				 * We have to allocate blocks for the hole.
				 */
				error = bmap_write(ip, uoff, (offsetn + nbytes),
				    BI_ALLOC_ONLY, NULL, credp);
				if (ip->i_flag & (ICHG|IUPD))
					ip->i_seq++;
				if (error) {
					DEBUGF((CE_CONT, "?ufs_alloc_data: fill"
					    " hole failed error: %d\n", error));
					break;
				}
				if (fdbp != NULL) {
					fdb_add_hole(fdbp, uoff - offset,
					    nbytes);
				}
			}
		}
		done_len += nbytes;
	}

	if (error) {
		if (i_size_changed) {
			/*
			 * Allocation of the blocks for the file failed.
			 * So truncate the file size back to its original size.
			 */
			(void) ufs_itrunc(ip, old_i_size, 0, credp);
		}
	}

	DEBUGF((CE_CONT, "?ufs_alloc: uoff %llx len %lx\n",
	    uoff, (io_len - done_len)));

	if ((offset + *len) < (NDADDR * fs->fs_bsize)) {
		*len = (size_t)(roundup(offset + *len, fs->fs_fsize) - offset);
	} else {
		*len = (size_t)(roundup(offset + *len, fs->fs_bsize) - offset);
	}

	/*
	 * Flush cached pages.
	 *
	 * XXX - There should be no pages involved, since the I/O was performed
	 * through the device strategy routine and the page cache was bypassed.
	 * However, testing has demonstrated that this VOP_PUTPAGE is
	 * necessary.  Without this, data might not always be read back as it
	 * was written.
	 */
	(void) VOP_PUTPAGE(vnodep, 0, 0, B_INVAL, credp);

	rw_exit(&ip->i_contents);
	rw_exit(&ip->i_ufsvfs->vfs_dqrwlock);

	if ((fdbp != NULL) && (flags & B_ASYNC)) {
		/*
		 * Show that no more asynchronous IO will be added
		 */
		fdb_ioerrdone(fdbp, error);
	}
	if (ulp) {
		/*
		 * End the UFS Log transaction
		 */
		TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_GETPAGE,
		    trans_size);
		ufs_lockfs_end(ulp);
	}
	if (io_started && (flags & B_ASYNC)) {
		return (0);
	} else {
		return (error);
	}
}
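/*
 * Illustrative sketch (not UFS code): the tail of ufs_alloc_data() rounds the
 * caller's length to a fragment boundary while the request still fits in the
 * direct blocks (offset + len < NDADDR * fs_bsize), and to a full block
 * boundary beyond that.  The names and constants below (EX_NDADDR, EX_BSIZE,
 * EX_FSIZE, ex_alloc_len) are assumptions chosen for the example only.
 */
#include <stdio.h>

#define	EX_NDADDR	12		/* assumed direct blocks per inode */
#define	EX_BSIZE	8192		/* assumed fs_bsize */
#define	EX_FSIZE	1024		/* assumed fs_fsize (fragment size) */
#define	ex_roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

static long
ex_alloc_len(long offset, long len)
{
	if (offset + len < (long)EX_NDADDR * EX_BSIZE)
		return (ex_roundup(offset + len, EX_FSIZE) - offset);
	return (ex_roundup(offset + len, EX_BSIZE) - offset);
}

int
main(void)
{
	printf("%ld\n", ex_alloc_len(0, 5000));		/* 5120: fragment rounding */
	printf("%ld\n", ex_alloc_len(200000, 5000));	/* 12992: block rounding */
	return (0);
}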
/* ARGSUSED */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
    cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		bp->b_blkno = head;	/* I had to put it somewhere! */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * handle the complex cases here; we pass these
		 * through to the driver, which will queue them and
		 * handle the requests asynchronously.  The simpler
		 * cases, which can return immediately, fail here, and
		 * the request reverts to the dadk_ioctl routine, which
		 * will reroute them directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
		case DADKIO_RWCMD_READ:
			/*FALLTHROUGH*/
		case DADKIO_RWCMD_WRITE:
			rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
			    B_WRITE : B_READ);
			status = dadk_dk_buf_setup(dadkp,
			    (opaque_t)rwcmdp, dev, ((flag & FKIOCTL) ?
			    UIO_SYSSPACE : UIO_USERSPACE), rw);
			return (status);

		default:
			return (EINVAL);
		}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
		struct buf *bp;
		int err = 0;
		struct dk_callback *dkc = (struct dk_callback *)arg;
		struct cmpkt *pktp;
		int is_sync = 1;

		mutex_enter(&dadkp->dad_mutex);
		if (dadkp->dad_noflush || !dadkp->dad_wce) {
			err = dadkp->dad_noflush ? ENOTSUP : 0;
			mutex_exit(&dadkp->dad_mutex);
			/*
			 * If a callback was requested: a
			 * callback will always be done if the
			 * caller saw the DKIOCFLUSHWRITECACHE
			 * ioctl return 0, and never done if the
			 * caller saw the ioctl return an error.
			 */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				(*dkc->dkc_callback)(dkc->dkc_cookie, err);
				/*
				 * Did callback and reported error.
				 * Since we did a callback, ioctl
				 * should return 0.
				 */
				err = 0;
			}
			return (err);
		}
		mutex_exit(&dadkp->dad_mutex);

		bp = getrbuf(KM_SLEEP);

		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		SET_BP_SEC(bp, 0);

		if ((flag & FKIOCTL) && dkc != NULL &&
		    dkc->dkc_callback != NULL) {
			struct dk_callback *dkc2 =
			    (struct dk_callback *)kmem_zalloc(
			    sizeof (struct dk_callback), KM_SLEEP);

			bcopy(dkc, dkc2, sizeof (*dkc2));
			bp->b_private = dkc2;
			bp->b_iodone = dadk_flushdone;
			is_sync = 0;
		}

		/*
		 * Setup command pkt
		 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
		 */
		pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone,
		    DDI_DMA_SLEEP, NULL);

		pktp->cp_time = DADK_FLUSH_CACHE_TIME;

		*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
		pktp->cp_byteleft = 0;
		pktp->cp_private = NULL;
		pktp->cp_secleft = 0;
		pktp->cp_srtsec = -1;
		pktp->cp_bytexfer = 0;

		CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);

		if (is_sync) {
			err = biowait(bp);
			freerbuf(bp);
		}
		return (err);
		}
	default:
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
		    DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0,
		    DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
		int ret;

		if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
		    DADK_SILENT)) {
			return (ret);
		}
		if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
		    DADK_SILENT)) {
			return (ret);
		}
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_EJECTED;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);

		return (0);
		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
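/*
 * Illustrative sketch (not driver code): from user space a write-cache flush
 * is simply an ioctl on the raw device node; the dk_callback path above only
 * applies to in-kernel callers (FKIOCTL), so a NULL argument requests a plain
 * synchronous flush.  The device path below is an example only.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/dkio.h>

int
main(void)
{
	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);	/* example device */

	if (fd < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, DKIOCFLUSHWRITECACHE, NULL) < 0)
		perror("DKIOCFLUSHWRITECACHE");
	(void) close(fd);
	return (0);
}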