/* Loadable module configuration entry points */
int
_init(void)
{
	cmn_err(CE_NOTE, "Inside _init");
	return (mod_install(&ml));
}
int
t_kopen(file_t *fp, dev_t rdev, int flags, TIUSER **tiptr, cred_t *cr)
{
	int madefp = 0;
	struct T_info_ack inforeq;
	int retval;
	vnode_t *vp;
	struct strioctl strioc;
	int error;
	TIUSER *ntiptr;
	int rtries = 0;

	/*
	 * Special case for install: miniroot needs to be able to access files
	 * via NFS as though it were always in the global zone.
	 */
	if (nfs_global_client_only != 0)
		cr = kcred;

	KTLILOG(2, "t_kopen: fp %x, ", fp);
	KTLILOG(2, "rdev %x, ", rdev);
	KTLILOG(2, "flags %x\n", flags);

	*tiptr = NULL;
	error = 0;
	retval = 0;
	if (fp == NULL) {
		if (rdev == 0 || rdev == NODEV) {
			KTLILOG(1, "t_kopen: null device\n", 0);
			return (EINVAL);
		}

		/*
		 * Allocate a file pointer, but no file descriptor.
		 */
		if ((error = falloc(NULL, flags, &fp, NULL)) != 0) {
			KTLILOG(1, "t_kopen: falloc: %d\n", error);
			return (error);
		}

		/* Install proper cred in file */
		if (cr != fp->f_cred) {
			crhold(cr);
			crfree(fp->f_cred);
			fp->f_cred = cr;
		}

		vp = makespecvp(rdev, VCHR);

		/*
		 * This will call the streams open for us.  Retry if the
		 * error is EAGAIN; the streams open routine may fail when
		 * the system is temporarily out of memory.
		 */
		do {
			if ((error = VOP_OPEN(&vp, flags, cr)) == EAGAIN) {
				(void) delay(hz);
			}
		} while (error == EAGAIN && ++rtries < 5);

		if (error) {
			KTLILOG(1, "t_kopen: VOP_OPEN: %d\n", error);
			unfalloc(fp);
			VN_RELE(vp);
			return (error);
		}

		/*
		 * fp is completely initialized so drop the write lock.
		 * I actually don't need any locking on fp in here since
		 * there is no fd pointing at it.  However, since I could
		 * call closef if there is an error and closef requires
		 * the fp read locked, I will acquire the read lock here
		 * and make sure I release it before I leave this routine.
		 */
		fp->f_vnode = vp;
		mutex_exit(&fp->f_tlock);
		madefp = 1;
	} else {
		vp = fp->f_vnode;
	}

	if (vp->v_stream == NULL) {
		if (madefp)
			(void) closef(fp);
		KTLILOG(1, "t_kopen: not a streams device\n", 0);
		return (ENOSTR);
	}

	/*
	 * Allocate a new transport structure.
	 */
	ntiptr = kmem_alloc(TIUSERSZ, KM_SLEEP);
	ntiptr->fp = fp;
	ntiptr->flags = madefp ? MADE_FP : 0;

	KTLILOG(2, "t_kopen: vp %x, ", vp);
	KTLILOG(2, "stp %x\n", vp->v_stream);

	/*
	 * See if TIMOD is already pushed.
	 */
	error = strioctl(vp, I_FIND, (intptr_t)"timod", 0, K_TO_K, cr,
	    &retval);
	if (error) {
		kmem_free(ntiptr, TIUSERSZ);
		if (madefp)
			(void) closef(fp);
		KTLILOG(1, "t_kopen: strioctl(I_FIND, timod): %d\n", error);
		return (error);
	}

	if (retval == 0) {
tryagain:
		error = strioctl(vp, I_PUSH, (intptr_t)"timod", 0, K_TO_K,
		    cr, &retval);
		if (error) {
			switch (error) {
			case ENOSPC:
			case EAGAIN:
			case ENOSR:
				/*
				 * This probably means the master file
				 * should be tuned.
				 */
				cmn_err(CE_WARN,
				    "t_kopen: I_PUSH of timod failed, "
				    "error %d\n", error);
				(void) delay(hz);
				error = 0;
				goto tryagain;

			default:
				kmem_free(ntiptr, TIUSERSZ);
				if (madefp)
					(void) closef(fp);
				KTLILOG(1, "t_kopen: I_PUSH (timod): %d",
				    error);
				return (error);
			}
		}
	}

	inforeq.PRIM_type = T_INFO_REQ;
	strioc.ic_cmd = TI_GETINFO;
	strioc.ic_timout = 0;
	strioc.ic_dp = (char *)&inforeq;
	strioc.ic_len = (int)sizeof (struct T_info_req);

	error = strdoioctl(vp->v_stream, &strioc, FNATIVE, K_TO_K, cr,
	    &retval);
	if (error) {
		kmem_free(ntiptr, TIUSERSZ);
		if (madefp)
			(void) closef(fp);
		KTLILOG(1, "t_kopen: strdoioctl(T_INFO_REQ): %d\n", error);
		return (error);
	}

	if (retval) {
		if ((retval & 0xff) == TSYSERR)
			error = (retval >> 8) & 0xff;
		else
			error = t_tlitosyserr(retval & 0xff);
		kmem_free(ntiptr, TIUSERSZ);
		if (madefp)
			(void) closef(fp);
		KTLILOG(1, "t_kopen: strdoioctl(T_INFO_REQ): retval: 0x%x\n",
		    retval);
		return (error);
	}
/*
 * dm2s_mbox_init - Mailbox specific initialization.
 */
static int
dm2s_mbox_init(dm2s_t *dm2sp)
{
	int ret;
	clock_t tout;

	ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
	dm2sp->ms_target = DM2S_TARGET_ID;
	dm2sp->ms_key = DSCP_KEY;
	dm2sp->ms_state &= ~DM2S_MB_INITED;

	/* Iterate until the mailbox gets connected. */
	while (!(dm2sp->ms_state & DM2S_MB_CONN)) {
		DPRINTF(DBG_MBOX, ("dm2s_mbox_init: calling mb_init\n"));
		ret = scf_mb_init(dm2sp->ms_target, dm2sp->ms_key,
		    dm2s_event_handler, (void *)dm2sp);
		DPRINTF(DBG_MBOX, ("dm2s_mbox_init: mb_init ret=%d\n", ret));
		if (ret != 0) {
			DPRINTF(DBG_MBOX,
			    ("dm2s_mbox_init: failed ret =%d\n", ret));
			DTRACE_PROBE1(dm2s_mbox_fail, int, ret);
		} else {
			dm2sp->ms_state |= DM2S_MB_INITED;

			/* Block until the mailbox is ready to communicate. */
			while (!(dm2sp->ms_state &
			    (DM2S_MB_CONN | DM2S_MB_DISC))) {
				if (cv_wait_sig(&dm2sp->ms_wait,
				    &dm2sp->ms_lock) <= 0) {
					/* Interrupted by a signal. */
					ret = EINTR;
					break;
				}
			}
		}

		if ((ret != 0) || (dm2sp->ms_state & DM2S_MB_DISC)) {
			if (dm2sp->ms_state & DM2S_MB_INITED) {
				(void) scf_mb_fini(dm2sp->ms_target,
				    dm2sp->ms_key);
			}
			if (dm2sp->ms_state & DM2S_MB_DISC) {
				DPRINTF(DBG_WARN,
				    ("dm2s_mbox_init: mbox DISC_ERROR\n"));
				DTRACE_PROBE1(dm2s_mbox_fail, int,
				    DM2S_MB_DISC);
			}
			dm2sp->ms_state &= ~(DM2S_MB_INITED | DM2S_MB_DISC |
			    DM2S_MB_CONN);
			if (ret == EINTR) {
				return (ret);
			}

			/*
			 * If there was a failure, wait for a DM2S_MB_TOUT
			 * interval and then retry.
			 */
			DPRINTF(DBG_MBOX, ("dm2s_mbox_init: waiting...\n"));
			tout = ddi_get_lbolt() + drv_usectohz(DM2S_MB_TOUT);
			ret = cv_timedwait_sig(&dm2sp->ms_wait,
			    &dm2sp->ms_lock, tout);
			if (ret == 0) {
				/* If interrupted, return immediately. */
				DPRINTF(DBG_MBOX,
				    ("dm2s_mbox_init: interrupted\n"));
				return (EINTR);
			}
		}
	}

	/*
	 * Obtain the maximum size of a single message.
	 * NOTE: There is no mechanism to update the upper layers
	 * dynamically, so we expect this size to be at least the
	 * default MTU size.
	 */
	ret = scf_mb_ctrl(dm2sp->ms_target, dm2sp->ms_key,
	    SCF_MBOP_MAXMSGSIZE, &dm2sp->ms_mtu);
	if ((ret == 0) && (dm2sp->ms_mtu < DM2S_DEF_MTU)) {
		cmn_err(CE_WARN, "Max message size expected >= %d "
		    "but found %d\n", DM2S_DEF_MTU, dm2sp->ms_mtu);
		ret = EIO;
	}
	if (ret != 0) {
		dm2sp->ms_state &= ~DM2S_MB_INITED;
		(void) scf_mb_fini(dm2sp->ms_target, dm2sp->ms_key);
	}
	DPRINTF(DBG_MBOX, ("dm2s_mbox_init: mb_init ret=%d\n", ret));
	return (ret);
}
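/*
 * A minimal sketch (not from the driver) of the cv_timedwait_sig()
 * return-value convention that dm2s_mbox_init relies on: a positive
 * value means the thread was signalled on the condition variable, 0
 * means the wait was interrupted by a signal, and -1 means the
 * timeout expired.  Names here are illustrative.
 */
static int
wait_for_flag(kcondvar_t *cvp, kmutex_t *mp, volatile uint32_t *state,
    uint32_t flag, clock_t wait_usec)
{
	clock_t tout;

	ASSERT(MUTEX_HELD(mp));
	while (!(*state & flag)) {
		tout = ddi_get_lbolt() + drv_usectohz(wait_usec);
		switch (cv_timedwait_sig(cvp, mp, tout)) {
		case 0:
			return (EINTR);	/* interrupted by a signal */
		case -1:
			return (ETIME);	/* timed out, flag still clear */
		default:
			break;		/* woken up; re-check the flag */
		}
	}
	return (0);
}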
/**
 * Interrupt Service Routine for Virtio Net.
 *
 * @param   Arg     Private data (unused, will be NULL).
 * @returns DDI_INTR_CLAIMED if it's our interrupt, DDI_INTR_UNCLAIMED if it isn't.
 */
static uint_t VirtioNetISR(caddr_t Arg)
{
    cmn_err(CE_NOTE, "VirtioNetISR Arg=%p\n", Arg);
    NOREF(Arg);
    return DDI_INTR_UNCLAIMED;
}
static sbd_status_t
sbd_ats_do_handling_before_io(scsi_task_t *task, struct sbd_lu *sl,
    uint64_t lba, uint64_t count, uint32_t flags)
{
	sbd_status_t ret = SBD_SUCCESS;
	ats_state_t *ats_state, *ats_state_ret;
	sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
	uint8_t cdb0 = task->task_cdb[0];

	if (HardwareAcceleratedLocking == 0)
		return (SBD_SUCCESS);

	mutex_enter(&sl->sl_lock);

	/*
	 * If the list is empty then just add the element to the list and
	 * return success.  There is no overlap.  This is done for every
	 * read, write or compare and write.
	 */
	if (list_is_empty(&sl->sl_ats_io_list)) {
		goto done;
	}

	/*
	 * There are inflight operations.  As a result the list must be
	 * scanned and if there are any overlaps then SBD_BUSY should be
	 * returned.
	 *
	 * Duplicate reads and writes are allowed and kept on the list
	 * since there is no reason that overlapping IO operations should
	 * be delayed.
	 *
	 * A command that conflicts with a running compare and write will
	 * be rescheduled and rerun.  This is handled by stmf_task_poll_lu.
	 * There is a possibility that a command can be starved and still
	 * return busy, which is valid in the SCSI protocol.
	 */
	for (ats_state = list_head(&sl->sl_ats_io_list); ats_state != NULL;
	    ats_state = list_next(&sl->sl_ats_io_list, ats_state)) {

		if (is_overlapping(ats_state->as_cur_ats_lba,
		    ats_state->as_cur_ats_len, lba, count) == 0)
			continue;

		/* If the task is already listed just return. */
		if (task == ats_state->as_cur_ats_task) {
			cmn_err(CE_WARN, "sbd_ats_do_handling_before_io: "
			    "task %p already on list", (void *) task);
			ret = SBD_SUCCESS;
			goto exit;
		}

		/*
		 * If either the current command or the listed command is
		 * a compare and write, any overlap is an error.
		 */
		if ((cdb0 == SCMD_COMPARE_AND_WRITE) ||
		    (ats_state->as_cmd == SCMD_COMPARE_AND_WRITE)) {
			ret = SBD_BUSY;
			goto exit;
		}
	}

done:
	ats_state_ret = (ats_state_t *)kmem_zalloc(sizeof (ats_state_t),
	    KM_SLEEP);
	ats_state_ret->as_cur_ats_lba = lba;
	ats_state_ret->as_cur_ats_len = count;
	ats_state_ret->as_cmd = cdb0;
	ats_state_ret->as_cur_ats_task = task;
	if (list_is_empty(&sl->sl_ats_io_list)) {
		list_insert_head(&sl->sl_ats_io_list, ats_state_ret);
	} else {
		list_insert_tail(&sl->sl_ats_io_list, ats_state_ret);
	}
	scmd->flags |= SBD_SCSI_CMD_ATS_RELATED;
	scmd->ats_state = ats_state_ret;
	sbd_list_length++;
	mutex_exit(&sl->sl_lock);

	return (SBD_SUCCESS);

exit:
	mutex_exit(&sl->sl_lock);
	if (ret == SBD_SUCCESS)
		return (SBD_SUCCESS);

	/*
	 * If the command cannot be allowed to be restarted then just
	 * return an error.  At the moment only unmap has this property.
	 * Please refer to sbd_handle_unmap_xfer in sbd_scsi.c for full
	 * details.
	 */
	if ((flags & SBD_ATS_NO_BUSY) != 0)
		return (ret);

	/*
	 * At this point the command overlaps a running compare and write:
	 * it is either a compare and write overlapping an op or an op
	 * overlapping a compare and write.  It needs to be delayed, which
	 * means it is not active, so if stmf_task_poll_lu succeeds turn
	 * off the active flag.
	 */
	if (stmf_task_poll_lu(task, 10) != STMF_SUCCESS)
		stmf_scsilib_send_status(task, STATUS_BUSY, 0);
	else
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;

	return (ret);
}
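/*
 * is_overlapping() is not shown in this extract; from the way it is
 * called above, it returns 0 when the two LBA ranges are disjoint and
 * nonzero when they intersect.  A minimal sketch consistent with that
 * contract:
 */
static int
is_overlapping(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2)
{
	/* Disjoint if one range ends at or before the other begins. */
	if (lba1 + len1 <= lba2 || lba2 + len2 <= lba1)
		return (0);
	return (1);
}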
/* ARGSUSED */ static int dr_stop_user_threads(dr_sr_handle_t *srh) { int count; int bailout; dr_handle_t *handle = srh->sr_dr_handlep; static fn_t f = "dr_stop_user_threads"; kthread_id_t tp; extern void add_one_utstop(); extern void utstop_timedwait(clock_t); extern void utstop_init(void); #define DR_UTSTOP_RETRY 4 #define DR_UTSTOP_WAIT hz if (dr_skip_user_threads) return (DDI_SUCCESS); utstop_init(); /* we need to try a few times to get past fork, etc. */ srh->sr_err_idx = 0; for (count = 0; count < DR_UTSTOP_RETRY; count++) { /* walk the entire threadlist */ mutex_enter(&pidlock); for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) { proc_t *p = ttoproc(tp); /* handle kernel threads separately */ if (p->p_as == &kas || p->p_stat == SZOMB) continue; mutex_enter(&p->p_lock); thread_lock(tp); if (tp->t_state == TS_STOPPED) { /* add another reason to stop this thread */ tp->t_schedflag &= ~TS_RESUME; } else { tp->t_proc_flag |= TP_CHKPT; thread_unlock(tp); mutex_exit(&p->p_lock); add_one_utstop(); mutex_enter(&p->p_lock); thread_lock(tp); aston(tp); if (tp->t_state == TS_SLEEP && (tp->t_flag & T_WAKEABLE)) { setrun_locked(tp); } } /* grab thread if needed */ if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU) poke_cpu(tp->t_cpu->cpu_id); thread_unlock(tp); mutex_exit(&p->p_lock); } mutex_exit(&pidlock); /* let everything catch up */ utstop_timedwait(count * count * DR_UTSTOP_WAIT); /* now, walk the threadlist again to see if we are done */ mutex_enter(&pidlock); for (tp = curthread->t_next, bailout = 0; tp != curthread; tp = tp->t_next) { proc_t *p = ttoproc(tp); /* handle kernel threads separately */ if (p->p_as == &kas || p->p_stat == SZOMB) continue; /* * If this thread didn't stop, and we don't allow * unstopped blocked threads, bail. */ thread_lock(tp); if (!CPR_ISTOPPED(tp) && !(dr_allow_blocked_threads && DR_VSTOPPED(tp))) { bailout = 1; if (count == DR_UTSTOP_RETRY - 1) { /* * save the pid for later reporting */ srh->sr_err_idx = dr_add_int(srh->sr_err_ints, srh->sr_err_idx, DR_MAX_ERR_INT, (uint64_t)p->p_pid); cmn_err(CE_WARN, "%s: " "failed to stop thread: " "process=%s, pid=%d", f, p->p_user.u_psargs, p->p_pid); PR_QR("%s: failed to stop thread: " "process=%s, pid=%d, t_id=0x%p, " "t_state=0x%x, t_proc_flag=0x%x, " "t_schedflag=0x%x\n", f, p->p_user.u_psargs, p->p_pid, tp, tp->t_state, tp->t_proc_flag, tp->t_schedflag); } } thread_unlock(tp); } mutex_exit(&pidlock); /* were all the threads stopped? */ if (!bailout) break; } /* were we unable to stop all threads after a few tries? */ if (bailout) { handle->h_err = drerr_int(ESBD_UTHREAD, srh->sr_err_ints, srh->sr_err_idx, 0); return (ESRCH); } return (DDI_SUCCESS); }
void
load_platform_drivers(void)
{
	extern int watchdog_available;
	extern int watchdog_enable;
	dev_info_t *dip;		/* dip of the isa driver */
	int simba_present = 0;
	dev_info_t *root_child_node;
	major_t major;

	if (ddi_install_driver("power") != DDI_SUCCESS)
		cmn_err(CE_WARN, "Failed to install \"power\" driver.");

	/*
	 * Install the isa driver.  This is required for the southbridge
	 * IDE workaround: resetting the IDE channel during an IDE bus
	 * reset.  Panic the system if the isa driver cannot be loaded or
	 * if there is any problem accessing its PCI config space, since
	 * the register that resets the IDE channel lives in ISA config
	 * space.
	 */
	root_child_node = ddi_get_child(ddi_root_node());
	while (root_child_node != NULL) {
		if (strcmp(ddi_node_name(root_child_node), "pci") == 0) {
			root_child_node = ddi_get_child(root_child_node);
			if (strcmp(ddi_node_name(root_child_node), "pci") == 0)
				simba_present = 1;
			break;
		}
		root_child_node = ddi_get_next_sibling(root_child_node);
	}

	if (simba_present)
		dip = e_ddi_hold_devi_by_path(PLATFORM_ISA_PATHNAME_WITH_SIMBA,
		    0);
	else
		dip = e_ddi_hold_devi_by_path(PLATFORM_ISA_PATHNAME, 0);

	if (dip == NULL) {
		cmn_err(CE_PANIC, "Could not install the isa driver\n");
		return;
	}

	if (pci_config_setup(dip, &platform_isa_handle) != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "Could not get the config space of isa\n");
		return;
	}

	/*
	 * Load the blade support chip driver.
	 */
	if (((major = ddi_name_to_major(BSC_DRV)) == (major_t)-1) ||
	    (ddi_hold_installed_driver(major) == NULL)) {
		cmn_err(CE_WARN, "%s: failed to load", BSC_DRV);
	} else {
		bsc_drv_func_ptr = (void (*)(struct bscv_idi_info *))
		    modgetsymvalue(BSC_DRV_FUNC, 0);
		if (bsc_drv_func_ptr == NULL) {
			cmn_err(CE_WARN, "load_platform_defaults: %s()"
			    " not found; signatures will not be updated\n",
			    BSC_DRV_FUNC);
			watchdog_available = 0;
			if (watchdog_enable) {
				cmn_err(CE_WARN,
				    "load_platform_defaults: %s()"
				    " not found; BSC OS watchdog service not"
				    " available\n", BSC_DRV_FUNC);
			}
		}
	}
}
/*
 * Configure root file system.
 */
int
rootconf(void)
{
	int error;
	struct vfssw *vsw;
	extern void pm_init(void);

	BMDPRINTF(("rootconf: fstype %s\n", rootfs.bo_fstype));
	BMDPRINTF(("rootconf: name %s\n", rootfs.bo_name));
	BMDPRINTF(("rootconf: flags 0x%x\n", rootfs.bo_flags));
	BMDPRINTF(("rootconf: obp_bootpath %s\n", obp_bootpath));

	/*
	 * Install cluster modules that were only loaded during
	 * loadrootmodules().
	 */
	if (error = clboot_rootconf())
		return (error);

	if (root_is_svm) {
		(void) strncpy(rootfs.bo_name, obp_bootpath, BO_MAXOBJNAME);

		BMDPRINTF(("rootconf: svm: rootfs name %s\n", rootfs.bo_name));
		BMDPRINTF(("rootconf: svm: svm name %s\n", svm_bootpath));
	}

	/*
	 * Run _init on the root filesystem (we already loaded it
	 * but we've been waiting until now to _init it) which will
	 * have the side-effect of running vsw_init() on this vfs.
	 * Because all the nfs filesystems are lumped into one
	 * module we need to special case it.
	 */
	if (strncmp(rootfs.bo_fstype, "nfs", 3) == 0) {
		if (modload("fs", "nfs") == -1) {
			cmn_err(CE_CONT, "Cannot initialize %s filesystem\n",
			    rootfs.bo_fstype);
			return (ENXIO);
		}
	} else {
		if (modload("fs", rootfs.bo_fstype) == -1) {
			cmn_err(CE_CONT, "Cannot initialize %s filesystem\n",
			    rootfs.bo_fstype);
			return (ENXIO);
		}
	}
	RLOCK_VFSSW();
	vsw = vfs_getvfsswbyname(rootfs.bo_fstype);
	RUNLOCK_VFSSW();
	VFS_INIT(rootvfs, &vsw->vsw_vfsops, (caddr_t)0);
	VFS_HOLD(rootvfs);

	if (root_is_svm) {
		rootvfs->vfs_flag |= VFS_RDONLY;
	}

	/*
	 * This pm-related call has to occur before root is mounted since we
	 * need to power up all devices.  It is placed after VFS_INIT() such
	 * that opening a device via the ddi_lyr_ interface just before root
	 * has been mounted would work.
	 */
	pm_init();

	if (netboot) {
		if ((error = strplumb()) != 0) {
			cmn_err(CE_CONT, "Cannot plumb network device\n");
			return (error);
		}
	}

	/*
	 * ufs_mountroot() ends up calling getrootdev()
	 * (below) which actually triggers the _init, identify,
	 * probe and attach of the drivers that make up root device
	 * bush; these are also quietly waiting in memory.
	 */
	BMDPRINTF(("rootconf: calling VFS_MOUNTROOT %s\n", rootfs.bo_fstype));

	error = VFS_MOUNTROOT(rootvfs, ROOT_INIT);
	vfs_unrefvfssw(vsw);
	rootdev = rootvfs->vfs_dev;

	if (error)
		cmn_err(CE_CONT, "Cannot mount root on %s fstype %s\n",
		    rootfs.bo_name, rootfs.bo_fstype);
	else
		cmn_err(CE_CONT, "?root on %s fstype %s\n",
		    rootfs.bo_name, rootfs.bo_fstype);
	return (error);
}
int
loadrootmodules(void)
{
	struct vfssw *vsw;
	char *this;
	char *name;
	int err;
/* ONC_PLUS EXTRACT END */
	int i, proplen;
	extern char *impl_module_list[];
	extern char *platform_module_list[];

	/* Make sure that the PROM's devinfo tree has been created */
	ASSERT(ddi_root_node());

	BMDPRINTF(("loadrootmodules: fstype %s\n", rootfs.bo_fstype));
	BMDPRINTF(("loadrootmodules: name %s\n", rootfs.bo_name));
	BMDPRINTF(("loadrootmodules: flags 0x%x\n", rootfs.bo_flags));

	/*
	 * zzz We need to honor what's in rootfs if it's not null.
	 * non-null means use what's there.  This way we can
	 * change rootfs with /etc/system AND with tunetool.
	 */
	if (root_is_svm) {
		/* user replaced rootdev, record obp_bootpath */
		obp_bootpath[0] = '\0';
		(void) getphysdev("root", obp_bootpath, BO_MAXOBJNAME);
		BMDPRINTF(("loadrootmodules: obp_bootpath %s\n",
		    obp_bootpath));
	} else {
		/*
		 * Get the root fstype and root device path from boot.
		 */
		rootfs.bo_fstype[0] = '\0';
		rootfs.bo_name[0] = '\0';
	}

	/*
	 * This lookup will result in modloadonly-ing the root
	 * filesystem module - it gets _init-ed in rootconf().
	 */
	if ((vsw = getfstype("root", rootfs.bo_fstype, BO_MAXFSNAME)) == NULL)
		return (ENXIO);	/* in case we have no file system types */

	(void) strcpy(rootfs.bo_fstype, vsw->vsw_name);

	vfs_unrefvfssw(vsw);

	/*
	 * Load the favored drivers of the implementation.
	 * e.g. 'sbus' and possibly 'zs' (even).
	 *
	 * Called whilst boot is still loaded (because boot does
	 * the i/o for us), and DDI services are unavailable.
	 */
	BMDPRINTF(("loadrootmodules: impl_module_list\n"));
	for (i = 0; (this = impl_module_list[i]) != NULL; i++) {
		if ((err = load_boot_driver(this)) != 0) {
			cmn_err(CE_WARN, "Cannot load drv/%s", this);
			return (err);
			/* NOTREACHED */
		}
	}

	/*
	 * Now load the platform modules (if any).
	 */
	BMDPRINTF(("loadrootmodules: platform_module_list\n"));
	for (i = 0; (this = platform_module_list[i]) != NULL; i++) {
		if ((err = load_boot_platform_modules(this)) != 0) {
			cmn_err(CE_WARN, "Cannot load drv/%s", this);
			return (err);
			/* NOTREACHED */
		}
	}

loop:
	(void) getphysdev("root", rootfs.bo_name, BO_MAXOBJNAME);

	/*
	 * Given a physical pathname, load the correct set of driver
	 * modules into memory, including all possible parents.
	 *
	 * NB: The code sets the variable 'name' for error reporting.
	 */
	err = 0;
	BMDPRINTF(("loadrootmodules: rootfs %s\n", rootfs.bo_name));
	if (root_is_svm == 0) {
		BMDPRINTF(("loadrootmodules: rootfs %s\n", rootfs.bo_name));
		name = rootfs.bo_name;
		err = load_bootpath_drivers(rootfs.bo_name);
	}

	/*
	 * Load driver modules in obp_bootpath; this is always
	 * required for mountroot to succeed.  obp_bootpath is
	 * set if rootdev is set via /etc/system, which is the
	 * case when booting off a SVM/VxVM mirror.
	 */
	if ((err == 0) && obp_bootpath[0] != '\0') {
		BMDPRINTF(("loadrootmodules: obp_bootpath %s\n",
		    obp_bootpath));
		name = obp_bootpath;
		err = load_bootpath_drivers(obp_bootpath);
	}

	if (err != 0) {
		cmn_err(CE_CONT, "Cannot load drivers for %s\n", name);
		goto out;
		/* NOTREACHED */
	}

	/*
	 * Check to see if the booter performed DHCP configuration
	 * ("bootp-response" boot property exists).  If so, then before
	 * bootops disappears we need to save the value of this property
	 * such that the userland dhcpagent can adopt the DHCP management
	 * of our primary network interface.
	 */
	proplen = BOP_GETPROPLEN(bootops, "bootp-response");
	if (proplen > 0) {
		dhcack = kmem_zalloc(proplen, KM_SLEEP);
		if (BOP_GETPROP(bootops, "bootp-response", dhcack) == -1) {
			cmn_err(CE_WARN, "BOP_GETPROP of "
			    "\"bootp-response\" failed\n");
			kmem_free(dhcack, proplen);
			dhcack = NULL;
			goto out;
		}
		dhcacklen = proplen;

		/*
		 * Fetch the "netdev-path" boot property (if it exists), and
		 * stash it for later use by sysinfo(SI_DHCP_CACHE, ...).
		 */
		proplen = BOP_GETPROPLEN(bootops, "netdev-path");
		if (proplen > 0) {
			netdev_path = kmem_zalloc(proplen, KM_SLEEP);
			if (BOP_GETPROP(bootops, "netdev-path",
			    (uchar_t *)netdev_path) == -1) {
				cmn_err(CE_WARN, "BOP_GETPROP of "
				    "\"netdev-path\" failed\n");
				kmem_free(netdev_path, proplen);
				goto out;
			}
		}
	}

	/*
	 * Preload (load-only, no init) all modules which
	 * were added to the /etc/system file with the
	 * FORCELOAD keyword.
	 */
	BMDPRINTF(("loadrootmodules: preload_module\n"));
	(void) mod_sysctl_type(MOD_FORCELOAD, preload_module, NULL);

/* ONC_PLUS EXTRACT START */
	/*
	 * If we booted otw then load in the plumbing
	 * routine now while we still can.  If we didn't
	 * boot otw then we will load strplumb in main().
	 *
	 * NFS is actually a set of modules: the core routines,
	 * a diskless helper module, rpcmod, and the tli interface.  Load
	 * them now while we still can.
	 *
	 * Because we glomb all versions of nfs into a single module
	 * we check based on the initial string "nfs".
	 *
	 * XXX: A better test for this is to see if device_type
	 * XXX: from the PROM is "network".
	 */
	if (strncmp(rootfs.bo_fstype, "nfs", 3) == 0) {
		++netboot;

		if ((err = modload("misc", "tlimod")) < 0) {
			cmn_err(CE_CONT, "Cannot load misc/tlimod\n");
			goto out;
			/* NOTREACHED */
		}
		if ((err = modload("strmod", "rpcmod")) < 0) {
			cmn_err(CE_CONT, "Cannot load strmod/rpcmod\n");
			goto out;
			/* NOTREACHED */
		}
		if ((err = modload("misc", "nfs_dlboot")) < 0) {
			cmn_err(CE_CONT, "Cannot load misc/nfs_dlboot\n");
			goto out;
			/* NOTREACHED */
		}
		if ((err = modload("mac", "mac_ether")) < 0) {
			cmn_err(CE_CONT, "Cannot load mac/mac_ether\n");
			goto out;
			/* NOTREACHED */
		}
		if ((err = modload("misc", "strplumb")) < 0) {
			cmn_err(CE_CONT, "Cannot load misc/strplumb\n");
			goto out;
			/* NOTREACHED */
		}
		if ((err = strplumb_load()) < 0) {
			goto out;
			/* NOTREACHED */
		}
	}

	/*
	 * Preload modules needed for booting as a cluster.
	 */
	err = clboot_loadrootmodules();

out:
	if (err != 0 && (boothowto & RB_ASKNAME))
		goto loop;

	return (err);
}
static int
dummy_close(dev_t dev, int flag, int otyp, cred_t *cred)
{
	cmn_err(CE_NOTE, "Inside dummy_close");
	return (DDI_SUCCESS);
}
static int
dummy_write(dev_t dev, struct uio *uiop, cred_t *credp)
{
	cmn_err(CE_NOTE, "Inside dummy_write");
	return (DDI_SUCCESS);
}
static int
dummy_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
	cmn_err(CE_NOTE, "Inside dummy_open");
	return (DDI_SUCCESS);
}
int
_fini(void)
{
	cmn_err(CE_NOTE, "Inside _fini");
	return (mod_remove(&ml));
}
int
_info(struct modinfo *modinfop)
{
	cmn_err(CE_NOTE, "Inside _info");
	return (mod_info(&ml, modinfop));
}
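/*
 * The _init/_fini/_info entry points above all reference a modlinkage
 * "ml" that is not shown in this extract.  A minimal sketch of what
 * such a declaration typically looks like for a character
 * pseudo-driver; the description string and the dev_ops structure
 * name are assumptions, not taken from this source:
 */
#include <sys/modctl.h>

static struct modldrv md = {
	&mod_driverops,		/* this module is a device driver */
	"dummy pseudo driver",	/* assumed description string */
	&dummy_dev_ops		/* assumed dev_ops wired to the dummy_* entry points */
};

static struct modlinkage ml = {
	MODREV_1,		/* modlinkage revision */
	&md,
	NULL
};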
/* ARGSUSED */
fct_status_t
fcoet_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
{
	cmn_err(CE_WARN, "FLOGI requested (not supported)");
	return (FCT_FAILURE);
}
/* * Load a driver needed to boot. */ static int load_boot_driver(char *drv) { char *drvname; major_t major; #ifdef sparc struct devnames *dnp; ddi_prop_t *propp; char *module; char *dir, *mf; int plen; int mlen; #endif /* sparc */ if ((major = ddi_name_to_major(drv)) == (major_t)-1) { cmn_err(CE_CONT, "%s: no major number\n", drv); return (-1); } /* * resolve aliases */ drvname = ddi_major_to_name(major); #ifdef DEBUG if (strcmp(drv, drvname) == 0) { BMDPRINTF(("load_boot_driver: %s\n", drv)); } else { BMDPRINTF(("load_boot_driver: %s -> %s\n", drv, drvname)); } #endif /* DEBUG */ if (modloadonly("drv", drvname) == -1) { cmn_err(CE_CONT, "%s: cannot load driver\n", drvname); return (-1); } #ifdef sparc /* * NOTE: this can be removed when newboot-sparc is delivered. * * Check to see if the driver had a 'ddi-forceload' global driver.conf * property to identify additional modules that need to be loaded. * The driver still needs to use ddi_modopen() to open these modules, * but the 'ddi-forceload' property allows the modules to be loaded * into memory prior to lights-out, so that driver ddi_modopen() * calls during lights-out (when mounting root) will work correctly. * Use of 'ddi-forceload' is only required for drivers involved in * getting root mounted. */ dnp = &devnamesp[major]; if (dnp->dn_global_prop_ptr && dnp->dn_global_prop_ptr->prop_list && ((propp = i_ddi_prop_search(DDI_DEV_T_ANY, "ddi-forceload", DDI_PROP_TYPE_STRING, &dnp->dn_global_prop_ptr->prop_list)) != NULL)) { module = (char *)propp->prop_val; plen = propp->prop_len; while (plen > 0) { mlen = strlen(module); mf = strrchr(module, '/'); if (mf) { dir = module; *mf++ = '\0'; /* '/' -> '\0' */ } else { dir = "misc"; mf = module; } if (modloadonly(dir, mf) == -1) cmn_err(CE_CONT, "misc/%s: can't load module\n", mf); if (mf != module) *(mf - 1) = '/'; /* '\0' -> '/' */ module += mlen + 1; plen -= mlen + 1; } } #endif /* sparc */ return (0); }
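/*
 * The 'ddi-forceload' property parsed above is a packed string list:
 * NUL-terminated module names laid out back to back, with prop_len
 * covering the whole buffer.  A minimal standalone sketch of walking
 * such a list (function and callback names are illustrative):
 */
static void
walk_string_list(char *list, int len, void (*cb)(const char *))
{
	char *s = list;

	while (len > 0) {
		int slen = strlen(s);

		cb(s);			/* one entry, e.g. "misc/foo" */
		s += slen + 1;		/* skip the string and its NUL */
		len -= slen + 1;
	}
}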
void fcoet_ctl(struct fct_local_port *port, int cmd, void *arg) { stmf_change_status_t st; stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg; fcoet_soft_state_t *this_ss = PORT2SS(port); st.st_completion_status = FCT_SUCCESS; st.st_additional_info = NULL; switch (cmd) { case FCT_CMD_PORT_ONLINE: if (this_ss->ss_state == FCT_STATE_ONLINE) st.st_completion_status = STMF_ALREADY; else if (this_ss->ss_state != FCT_STATE_OFFLINE) st.st_completion_status = FCT_FAILURE; if (st.st_completion_status == FCT_SUCCESS) { this_ss->ss_state = FCT_STATE_ONLINING; this_ss->ss_state_not_acked = 1; st.st_completion_status = fcoet_enable_port(this_ss); if (st.st_completion_status != STMF_SUCCESS) { this_ss->ss_state = FCT_STATE_OFFLINE; this_ss->ss_state_not_acked = 0; } else { this_ss->ss_state = FCT_STATE_ONLINE; } } fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st); this_ss->ss_change_state_flags = 0; break; case FCT_CMD_PORT_OFFLINE: if (this_ss->ss_state == FCT_STATE_OFFLINE) { st.st_completion_status = STMF_ALREADY; } else if (this_ss->ss_state != FCT_STATE_ONLINE) { st.st_completion_status = FCT_FAILURE; } if (st.st_completion_status == FCT_SUCCESS) { this_ss->ss_state = FCT_STATE_OFFLINING; this_ss->ss_state_not_acked = 1; this_ss->ss_change_state_flags = ssci->st_rflags; st.st_completion_status = fcoet_disable_port(this_ss); if (st.st_completion_status != STMF_SUCCESS) { this_ss->ss_state = FCT_STATE_ONLINE; this_ss->ss_state_not_acked = 0; } else { this_ss->ss_state = FCT_STATE_OFFLINE; } } /* * Notify the watchdog to do clear work */ mutex_enter(&this_ss->ss_watch_mutex); cv_signal(&this_ss->ss_watch_cv); mutex_exit(&this_ss->ss_watch_mutex); fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st); break; case FCT_ACK_PORT_ONLINE_COMPLETE: this_ss->ss_state_not_acked = 0; break; case FCT_ACK_PORT_OFFLINE_COMPLETE: this_ss->ss_state_not_acked = 0; if (this_ss->ss_change_state_flags & STMF_RFLAG_RESET) { if (fct_port_initialize(port, this_ss->ss_change_state_flags, "fcoet_ctl FCT_ACK_PORT_OFFLINE_COMPLETE " "with RLFLAG_RESET") != FCT_SUCCESS) { cmn_err(CE_WARN, "fcoet_ctl: " "fct_port_initialize %s failed", this_ss->ss_alias); FCOET_LOG("fcoet_ctl: fct_port_initialize " "%s failed", this_ss->ss_alias); } } break; default: FCOET_LOG("fcoet_ctl", "Unsupported cmd %x", cmd); break; } }
/*
 * For a given path to a boot device,
 * load that driver and all its parents.
 */
static int
load_bootpath_drivers(char *bootpath)
{
	dev_info_t *dip;
	char *pathcopy;
	int pathcopy_len;
	int rval;
	char *p;

	if (bootpath == NULL || *bootpath == 0)
		return (-1);

	BMDPRINTF(("load_bootpath_drivers: %s\n", bootpath));

	pathcopy = i_ddi_strdup(bootpath, KM_SLEEP);
	pathcopy_len = strlen(pathcopy) + 1;

	dip = path_to_devinfo(pathcopy);

#if defined(__i386) || defined(__amd64)
	/*
	 * i386 does not provide stub nodes for all boot devices,
	 * but we should be able to find the node for the parent,
	 * and the leaf of the boot path should be the driver name,
	 * which we go ahead and load here.
	 */
	if (dip == NULL) {
		char *leaf;

		/*
		 * Find the last slash to build the full path to the
		 * parent of the leaf boot device.
		 */
		p = strrchr(pathcopy, '/');
		*p++ = 0;

		/*
		 * Now isolate the driver name of the leaf device.
		 */
		leaf = p;
		p = strchr(leaf, '@');
		*p = 0;

		BMDPRINTF(("load_bootpath_drivers: parent=%s leaf=%s\n",
		    bootpath, leaf));

		dip = path_to_devinfo(pathcopy);
		if (leaf) {
			rval = load_boot_driver(leaf);
			if (rval == -1) {
				kmem_free(pathcopy, pathcopy_len);
				return (-1);
			}
		}
	}
#endif

	if (dip == NULL) {
		cmn_err(CE_WARN, "can't bind driver for boot path <%s>",
		    bootpath);
		kmem_free(pathcopy, pathcopy_len);
		return (-1);
	}

	/*
	 * Load the IP over IB driver when netbooting over IB.
	 * As per the IB 1275 binding, IP over IB is represented as
	 * a service on top of the HCA node.  So, there is no
	 * PROM node, and the generic framework cannot pre-load the
	 * IP over IB driver based on the bootpath.  The following
	 * code preloads the IP over IB driver when doing a netboot
	 * over InfiniBand.
	 */
	if (netboot_over_ib(bootpath) &&
	    modloadonly("drv", "ibd") == -1) {
		cmn_err(CE_CONT, "ibd: cannot load platform driver\n");
		kmem_free(pathcopy, pathcopy_len);
		return (-1);
	}

	/*
	 * Get rid of the minor node at the end of the copy (if not
	 * already done above).
	 */
	p = strrchr(pathcopy, '/');
	if (p) {
		p = strchr(p, ':');
		if (p)
			*p = 0;
	}

	rval = load_parent_drivers(dip, pathcopy);
	kmem_free(pathcopy, pathcopy_len);
	return (rval);
}
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 */
struct buf *
getblk_common(void *arg, dev_t dev, daddr_t blkno, long bsize, int errflg)
{
	ufsvfs_t *ufsvfsp = (struct ufsvfs *)arg;
	struct buf *bp;
	struct buf *dp;
	struct buf *nbp = NULL;
	struct buf *errbp;
	uint_t index;
	kmutex_t *hmp;
	struct hbuf *hp;

	if (getmajor(dev) >= devcnt)
		cmn_err(CE_PANIC, "blkdev");

	biostats.bio_lookup.value.ui32++;

	index = bio_bhash(dev, blkno);
	hp = &hbuf[index];
	dp = (struct buf *)hp;
	hmp = &hp->b_lock;

	mutex_enter(hmp);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_blkno != blkno || bp->b_edev != dev ||
		    (bp->b_flags & B_STALE))
			continue;
		/*
		 * Avoid holding the hash lock in the event that
		 * the buffer is locked by someone.  Since the hash chain
		 * may change when we drop the hash lock
		 * we have to start at the beginning of the chain if the
		 * buffer identity/contents aren't valid.
		 */
		if (!sema_tryp(&bp->b_sem)) {
			biostats.bio_bufbusy.value.ui32++;
			mutex_exit(hmp);
			/*
			 * OK, we are dealing with a busy buffer.
			 * In the case that we are panicking and we
			 * got called from bread(), we have some chance
			 * for error recovery.  So better bail out from
			 * here since sema_p() won't block.  If we got
			 * called directly from ufs routines, there is
			 * no way to report an error yet.
			 */
			if (panicstr && errflg)
				goto errout;
			/*
			 * For the following line of code to work
			 * correctly never kmem_free the buffer "header".
			 */
			sema_p(&bp->b_sem);
			if (bp->b_blkno != blkno || bp->b_edev != dev ||
			    (bp->b_flags & B_STALE)) {
				sema_v(&bp->b_sem);
				mutex_enter(hmp);
				goto loop;	/* start over */
			}
			mutex_enter(hmp);
		}

		/* Found */
		biostats.bio_hit.value.ui32++;
		bp->b_flags &= ~B_AGE;

		/*
		 * Yank it off the free/delayed write lists.
		 */
		hp->b_length--;
		notavail(bp);
		mutex_exit(hmp);

		ASSERT((bp->b_flags & B_NOCACHE) == 0);

		if (nbp == NULL) {
			/*
			 * Make the common path short.
			 */
			ASSERT(SEMA_HELD(&bp->b_sem));
			return (bp);
		}

		biostats.bio_bufdup.value.ui32++;

		/*
		 * The buffer must have entered during the lock upgrade
		 * so free the new buffer we allocated and return the
		 * found buffer.
		 */
		kmem_free(nbp->b_un.b_addr, nbp->b_bufsize);
		nbp->b_un.b_addr = NULL;

		/*
		 * Account for the memory.
		 */
		mutex_enter(&bfree_lock);
		bfreelist.b_bufsize += nbp->b_bufsize;
		mutex_exit(&bfree_lock);

		/*
		 * Destroy buf identity, and place on avail list.
		 */
		nbp->b_dev = (o_dev_t)NODEV;
		nbp->b_edev = NODEV;
		nbp->b_flags = 0;
		nbp->b_file = NULL;
		nbp->b_offset = -1;

		sema_v(&nbp->b_sem);
		bio_bhdr_free(nbp);

		ASSERT(SEMA_HELD(&bp->b_sem));
		return (bp);
	}

	/*
	 * bio_getfreeblk may block so check the hash chain again.
	 */
	if (nbp == NULL) {
		mutex_exit(hmp);
		nbp = bio_getfreeblk(bsize);
		mutex_enter(hmp);
		goto loop;
	}

	/*
	 * New buffer.  Assign nbp and stick it on the hash.
	 */
	nbp->b_flags = B_BUSY;
	nbp->b_edev = dev;
	nbp->b_dev = (o_dev_t)cmpdev(dev);
	nbp->b_blkno = blkno;
	nbp->b_iodone = NULL;
	nbp->b_bcount = bsize;

	/*
	 * If we are given a ufsvfsp and the vfs_root field is NULL
	 * then this must be I/O for a superblock.  A superblock's
	 * buffer is set up in mountfs() and there is no root vnode
	 * at that point.
	 */
	if (ufsvfsp && ufsvfsp->vfs_root) {
		nbp->b_vp = ufsvfsp->vfs_root;
	} else {
		nbp->b_vp = NULL;
	}

	ASSERT((nbp->b_flags & B_NOCACHE) == 0);

	binshash(nbp, dp);
	mutex_exit(hmp);

	ASSERT(SEMA_HELD(&nbp->b_sem));

	return (nbp);

	/*
	 * Come here in case of an internal error.  At this point we
	 * couldn't get a buffer, but we have to return one.  Hence we
	 * allocate some kind of error reply buffer on the fly.  This
	 * buffer is marked as B_NOCACHE | B_AGE | B_ERROR | B_DONE to
	 * assure the following:
	 *	- B_ERROR will indicate error to the caller.
	 *	- B_DONE will prevent us from reading the buffer from
	 *	  the device.
	 *	- B_NOCACHE will cause that this buffer gets free'd in
	 *	  brelse().
	 */
errout:
	errbp = geteblk();
	sema_p(&errbp->b_sem);
	errbp->b_flags &= ~B_BUSY;
	errbp->b_flags |= (B_ERROR | B_DONE);
	return (errbp);
}
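/*
 * A minimal sketch (caller name and context assumed, not from this
 * source) of how a bread()-style caller might consume the error-reply
 * buffer produced in the errout path above: B_ERROR tells the caller
 * to fail the I/O, B_DONE prevents a device read, and B_NOCACHE lets
 * brelse() free the buffer.
 */
static int
read_block_checked(void *ufsvfsp, dev_t dev, daddr_t blkno, long bsize)
{
	struct buf *bp;

	bp = getblk_common(ufsvfsp, dev, blkno, bsize, 1 /* errflg */);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);	/* B_NOCACHE: brelse() frees the buffer */
		return (EIO);
	}
	/* ... issue or satisfy the read here, then release the buffer ... */
	brelse(bp);
	return (0);
}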
/*ARGSUSED*/ static void sdt_provide_module(void *arg, struct modctl *ctl) { struct module *mp = ctl->mod_mp; char *modname = ctl->mod_modname; int primary, nprobes = 0; sdt_probedesc_t *sdpd; sdt_probe_t *sdp, *old; uint32_t *tab; sdt_provider_t *prov; int len; /* * One for all, and all for one: if we haven't yet registered all of * our providers, we'll refuse to provide anything. */ for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) { if (prov->sdtp_id == DTRACE_PROVNONE) return; } if (mp->sdt_nprobes != 0 || (sdpd = mp->sdt_probes) == NULL) return; kobj_textwin_alloc(mp); /* * Hack to identify unix/genunix/krtld. */ primary = vmem_contains(heap_arena, (void *)ctl, sizeof (struct modctl)) == 0; /* * If there hasn't been an sdt table allocated, we'll do so now. */ if (mp->sdt_tab == NULL) { for (; sdpd != NULL; sdpd = sdpd->sdpd_next) { nprobes++; } /* * We could (should?) determine precisely the size of the * table -- but a reasonable maximum will suffice. */ mp->sdt_size = nprobes * SDT_ENTRY_SIZE; mp->sdt_tab = kobj_texthole_alloc(mp->text, mp->sdt_size); if (mp->sdt_tab == NULL) { cmn_err(CE_WARN, "couldn't allocate SDT table " "for module %s", modname); return; } } tab = (uint32_t *)mp->sdt_tab; for (sdpd = mp->sdt_probes; sdpd != NULL; sdpd = sdpd->sdpd_next) { char *name = sdpd->sdpd_name, *func, *nname; int i, j; sdt_provider_t *prov; ulong_t offs; dtrace_id_t id; for (prov = sdt_providers; prov->sdtp_prefix != NULL; prov++) { char *prefix = prov->sdtp_prefix; if (strncmp(name, prefix, strlen(prefix)) == 0) { name += strlen(prefix); break; } } nname = kmem_alloc(len = strlen(name) + 1, KM_SLEEP); for (i = 0, j = 0; name[j] != '\0'; i++) { if (name[j] == '_' && name[j + 1] == '_') { nname[i] = '-'; j += 2; } else { nname[i] = name[j++]; } } nname[i] = '\0'; sdp = kmem_zalloc(sizeof (sdt_probe_t), KM_SLEEP); sdp->sdp_loadcnt = ctl->mod_loadcnt; sdp->sdp_primary = primary; sdp->sdp_ctl = ctl; sdp->sdp_name = nname; sdp->sdp_namelen = len; sdp->sdp_provider = prov; func = kobj_searchsym(mp, sdpd->sdpd_offset + (uintptr_t)mp->text, &offs); if (func == NULL) func = "<unknown>"; /* * We have our provider. Now create the probe. */ if ((id = dtrace_probe_lookup(prov->sdtp_id, modname, func, nname)) != DTRACE_IDNONE) { old = dtrace_probe_arg(prov->sdtp_id, id); ASSERT(old != NULL); sdp->sdp_next = old->sdp_next; sdp->sdp_id = id; old->sdp_next = sdp; } else { sdp->sdp_id = dtrace_probe_create(prov->sdtp_id, modname, func, nname, 1, sdp); mp->sdt_nprobes++; } sdp->sdp_patchval = SDT_CALL((uintptr_t)mp->text + sdpd->sdpd_offset, tab); sdp->sdp_patchpoint = (uint32_t *)((uintptr_t)mp->textwin + sdpd->sdpd_offset); sdp->sdp_savedval = *sdp->sdp_patchpoint; sdt_initialize(sdp, &tab); } }
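/*
 * The probe-name munging loop above turns each "__" in an SDT probe
 * name into a single '-' (e.g. "probe__name" becomes "probe-name").
 * The same translation as a standalone helper, for illustration (the
 * output buffer is assumed large enough):
 */
static void
sdt_demangle(const char *name, char *out)
{
	int i, j;

	for (i = 0, j = 0; name[j] != '\0'; i++) {
		if (name[j] == '_' && name[j + 1] == '_') {
			out[i] = '-';	/* "__" collapses to '-' */
			j += 2;
		} else {
			out[i] = name[j++];
		}
	}
	out[i] = '\0';
}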
/** * Virtio Net Xmit hook. * * @param pvArg Pointer to private data. * @param pMsg Pointer to the message. * * @return Pointer to message not Xmited. */ static mblk_t *VirtioNetXmit(void *pvArg, mblk_t *pMsg) { LogFlowFunc((VIRTIOLOGNAME ":VirtioNetXmit pMsg=%p\n", pMsg)); cmn_err(CE_NOTE, "Xmit pMsg=%p\n", pMsg); PVIRTIODEVICE pDevice = pvArg; virtio_net_t *pNet = pDevice->pvDevice; bool fNotify = false; while (pMsg) { mblk_t *pNextMsg = pMsg->b_next; #if 0 mblk_t *pHdr = allocb(sizeof(virtio_net_header_t), BPRI_HI); if (RT_UNLIKELY(!pHdr)) break; virtio_net_header_t *pNetHdr = pHdr->b_rptr; memset(pNetHdr, 0, sizeof(virtio_net_header_t)); pNetHdr->u8Flags = VIRTIO_NET_GUEST_CSUM; pNetHdr->u16HdrLen = sizeof(virtio_net_header_t); pHdr->b_wptr += sizeof(virtio_net_header_t); pHdr->b_cont = pMsg; #endif virtio_net_txbuf_t *pTxBuf = kmem_cache_alloc(pNet->pTxCache, KM_SLEEP); if (!pTxBuf) break; ddi_dma_cookie_t DmaCookie; uint_t cCookies; int rc = ddi_dma_addr_bind_handle(pTxBuf->hDMA, NULL /* addrspace */, (char *)pMsg->b_rptr, MBLKL(pMsg), DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, 0 /* addr */, &DmaCookie, &cCookies); cmn_err(CE_NOTE, "VirtioNetXmit: MBLKL pMsg=%u\n", MBLKL(pMsg)); if (rc != DDI_DMA_MAPPED) { LogRel((VIRTIOLOGNAME ":VirtioNetXmit failed to map address to DMA handle. rc=%d\n", rc)); kmem_cache_free(pNet->pTxCache, pTxBuf); break; } /** @todo get 'cCookies' slots from the ring. */ for (uint_t i = 0; i < cCookies; i++) { uint16_t fFlags = 0; if (i < cCookies - 1) fFlags |= VIRTIO_FLAGS_RING_DESC_NEXT; rc = VirtioRingPush(pNet->pTxQueue, DmaCookie.dmac_laddress, DmaCookie.dmac_size, fFlags); if (RT_FAILURE(rc)) { LogRel((VIRTIOLOGNAME ":VirtioNetXmit failed. rc=%Rrc\n", rc)); break; } ddi_dma_nextcookie(pTxBuf->hDMA, &DmaCookie); } pMsg = pNextMsg; fNotify = true; if (RT_FAILURE(rc)) { ddi_dma_unbind_handle(pTxBuf->hDMA); break; } } if (fNotify) pDevice->pHyperOps->pfnNotifyQueue(pDevice, pNet->pTxQueue); return pMsg; }
/* PRIVATE, debugging */ int xfs_qm_internalqcheck( xfs_mount_t *mp) { xfs_ino_t lastino; int done, count; int i; xfs_dqtest_t *d, *e; xfs_dqhash_t *h1; int error; lastino = 0; qmtest_hashmask = 32; count = 5; done = 0; qmtest_nfails = 0; if (! XFS_IS_QUOTA_ON(mp)) return XFS_ERROR(ESRCH); xfs_log_force(mp, XFS_LOG_SYNC); XFS_bflush(mp->m_ddev_targp); xfs_log_force(mp, XFS_LOG_SYNC); XFS_bflush(mp->m_ddev_targp); mutex_lock(&qcheck_lock); /* There should be absolutely no quota activity while this is going on. */ qmtest_udqtab = kmem_zalloc(qmtest_hashmask * sizeof(xfs_dqhash_t), KM_SLEEP); qmtest_gdqtab = kmem_zalloc(qmtest_hashmask * sizeof(xfs_dqhash_t), KM_SLEEP); do { /* * Iterate thru all the inodes in the file system, * adjusting the corresponding dquot counters */ if ((error = xfs_bulkstat(mp, &lastino, &count, xfs_qm_internalqcheck_adjust, NULL, 0, NULL, BULKSTAT_FG_IGET, &done))) { break; } } while (! done); if (error) { cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); } cmn_err(CE_DEBUG, "Checking results against system dquots"); for (i = 0; i < qmtest_hashmask; i++) { h1 = &qmtest_udqtab[i]; for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { xfs_dqtest_cmp(d); e = (xfs_dqtest_t *) d->HL_NEXT; kmem_free(d); d = e; } h1 = &qmtest_gdqtab[i]; for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) { xfs_dqtest_cmp(d); e = (xfs_dqtest_t *) d->HL_NEXT; kmem_free(d); d = e; } } if (qmtest_nfails) { cmn_err(CE_DEBUG, "******** quotacheck failed ********"); cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails); } else { cmn_err(CE_DEBUG, "******** quotacheck successful! ********"); } kmem_free(qmtest_udqtab); kmem_free(qmtest_gdqtab); mutex_unlock(&qcheck_lock); return (qmtest_nfails); }
void
sbd_handle_ats_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	uint64_t laddr;
	uint32_t buflen, iolen, miscompare_off;
	int ndx;
	sbd_status_t ret;

	if (ATOMIC8_GET(scmd->nbufs) > 0) {
		atomic_dec_8(&scmd->nbufs);
	}

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		scmd->flags |= SBD_SCSI_CMD_ABORT_REQUESTED;
		sbd_ats_release_resources(task, scmd);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* If the command is no longer active return. */
	if (((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0) ||
	    (scmd->trans_data == NULL) ||
	    ((scmd->flags & SBD_SCSI_CMD_TRANS_DATA) == 0) ||
	    (scmd->nbufs == 0xff)) {
		cmn_err(CE_NOTE, "sbd_handle_ats_xfer_completion: handled "
		    "unexpected completion");
		return;
	}

	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto ATS_XFER_DONE;
	}

	if (ATOMIC32_GET(scmd->len) != 0) {
		/*
		 * Initiate the next port xfer to occur in parallel
		 * with writing this buf.  A side effect of sbd_do_ats_xfer
		 * is that it may set scmd->len to 0.  This means all the
		 * data transfers have been started, not that they are done.
		 */
		sbd_do_ats_xfer(task, scmd, NULL, 0);
	}

	laddr = dbuf->db_relative_offset;

	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		bcopy(dbuf->db_sglist[ndx].seg_addr, &scmd->trans_data[laddr],
		    iolen);
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;

ATS_XFER_DONE:
	if (ATOMIC32_GET(scmd->len) == 0 ||
	    scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		/*
		 * If this is not the last buffer to be transferred then
		 * exit and wait for the next buffer.  Once nbufs reaches 0
		 * all the data has arrived and the compare can be done.
		 */
		if (ATOMIC8_GET(scmd->nbufs) > 0) {
			return;
		}
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
			sbd_ats_release_resources(task, scmd);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		} else {
			ret = sbd_compare_and_write(task, scmd,
			    &miscompare_off);
			/*
			 * Since stmf_scsilib_send_status may result in
			 * the task being released, clean up resources
			 * before calling it.
			 */
			sbd_ats_release_resources(task, scmd);
			if (ret != SBD_SUCCESS) {
				if (ret != SBD_COMPARE_FAILED) {
					stmf_scsilib_send_status(task,
					    STATUS_CHECK,
					    STMF_SAA_WRITE_ERROR);
				} else {
					sbd_send_miscompare_status(task,
					    miscompare_off);
				}
			} else {
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			}
		}
		return;
	}
	sbd_do_ats_xfer(task, scmd, dbuf, dbuf_reusable);
}
/* * Allocate a state structure of size 'size' to be associated * with item 'item'. * * In this implementation, the array is extended to * allow the requested offset, if needed. */ int ddi_soft_state_zalloc(void *state, int item) { struct i_ddi_soft_state *ss; void **array; void *new_element; if ((ss = state) == NULL || item < 0) return (DDI_FAILURE); mutex_enter(&ss->lock); if (ss->size == 0) { mutex_exit(&ss->lock); cmn_err(CE_WARN, "ddi_soft_state_zalloc: bad handle"); return (DDI_FAILURE); } array = ss->array; /* NULL if ss->n_items == 0 */ ASSERT(ss->n_items != 0 && array != NULL); /* * refuse to tread on an existing element */ if (item < ss->n_items && array[item] != NULL) { mutex_exit(&ss->lock); return (DDI_FAILURE); } /* * Allocate a new element to plug in */ new_element = kmem_zalloc(ss->size, KM_SLEEP); /* * Check if the array is big enough, if not, grow it. */ if (item >= ss->n_items) { void **new_array; size_t new_n_items; struct i_ddi_soft_state *dirty; /* * Allocate a new array of the right length, copy * all the old pointers to the new array, then * if it exists at all, put the old array on the * dirty list. * * Note that we can't kmem_free() the old array. * * Why -- well the 'get' operation is 'mutex-free', so we * can't easily catch a suspended thread that is just about * to dereference the array we just grew out of. So we * cons up a header and put it on a list of 'dirty' * pointer arrays. (Dirty in the sense that there may * be suspended threads somewhere that are in the middle * of referencing them). Fortunately, we -can- garbage * collect it all at ddi_soft_state_fini time. */ new_n_items = ss->n_items; while (new_n_items < (1 + item)) new_n_items <<= 1; /* double array size .. */ ASSERT(new_n_items >= (1 + item)); /* sanity check! */ new_array = kmem_zalloc(new_n_items * sizeof (void *), KM_SLEEP); /* * Copy the pointers into the new array */ bcopy(array, new_array, ss->n_items * sizeof (void *)); /* * Save the old array on the dirty list */ dirty = kmem_zalloc(sizeof (*dirty), KM_SLEEP); dirty->array = ss->array; dirty->n_items = ss->n_items; dirty->next = ss->next; ss->next = dirty; ss->array = (array = new_array); ss->n_items = new_n_items; } ASSERT(array != NULL && item < ss->n_items && array[item] == NULL); array[item] = new_element; mutex_exit(&ss->lock); return (DDI_SUCCESS); }
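/*
 * A minimal sketch (names assumed, not from this source) of the usual
 * call sequence around ddi_soft_state_zalloc(): the handle is created
 * once with ddi_soft_state_init(), and each attach allocates and then
 * looks up its per-instance element.
 */
struct my_state {
	dev_info_t *dip;
	/* ... other per-instance data ... */
};

/* created elsewhere: ddi_soft_state_init(&my_statep, sizeof (struct my_state), 0); */
static void *my_statep;

static int
my_attach_state(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	struct my_state *sp;

	if (ddi_soft_state_zalloc(my_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	sp = ddi_get_soft_state(my_statep, instance);
	sp->dip = dip;		/* element comes back zeroed */
	return (DDI_SUCCESS);
}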
enum clnt_stat rpcbind_getaddr(struct knetconfig *config, rpcprog_t prog, rpcvers_t vers, struct netbuf *addr) { char *ua = NULL; enum clnt_stat status; RPCB parms; struct timeval tmo; CLIENT *client = NULL; k_sigset_t oldmask; k_sigset_t newmask; ushort_t port; int iptype; /* * Call rpcbind (local or remote) to get an address we can use * in an RPC client handle. */ tmo.tv_sec = RPC_PMAP_TIMEOUT; tmo.tv_usec = 0; parms.r_prog = prog; parms.r_vers = vers; parms.r_addr = parms.r_owner = ""; if (strcmp(config->knc_protofmly, NC_INET) == 0) { if (strcmp(config->knc_proto, NC_TCP) == 0) parms.r_netid = "tcp"; else parms.r_netid = "udp"; put_inet_port(addr, htons(PMAPPORT)); } else if (strcmp(config->knc_protofmly, NC_INET6) == 0) { if (strcmp(config->knc_proto, NC_TCP) == 0) parms.r_netid = "tcp6"; else parms.r_netid = "udp6"; put_inet6_port(addr, htons(PMAPPORT)); } else if (strcmp(config->knc_protofmly, NC_LOOPBACK) == 0) { ASSERT(strnrchr(addr->buf, '.', addr->len) != NULL); if (config->knc_semantics == NC_TPI_COTS_ORD) parms.r_netid = "ticotsord"; else if (config->knc_semantics == NC_TPI_COTS) parms.r_netid = "ticots"; else parms.r_netid = "ticlts"; put_loopback_port(addr, "rpc"); } else { status = RPC_UNKNOWNPROTO; goto out; } /* * Mask signals for the duration of the handle creation and * RPC calls. This allows relatively normal operation with a * signal already posted to our thread (e.g., when we are * sending an NLM_CANCEL in response to catching a signal). * * Any further exit paths from this routine must restore * the original signal mask. */ sigfillset(&newmask); sigreplace(&newmask, &oldmask); if (clnt_tli_kcreate(config, addr, RPCBPROG, RPCBVERS, 0, 0, CRED(), &client)) { status = RPC_TLIERROR; sigreplace(&oldmask, (k_sigset_t *)NULL); goto out; } client->cl_nosignal = 1; if ((status = CLNT_CALL(client, RPCBPROC_GETADDR, xdr_rpcb, (char *)&parms, xdr_wrapstring, (char *)&ua, tmo)) != RPC_SUCCESS) { sigreplace(&oldmask, (k_sigset_t *)NULL); goto out; } sigreplace(&oldmask, (k_sigset_t *)NULL); if (ua == NULL || *ua == NULL) { status = RPC_PROGNOTREGISTERED; goto out; } /* * Convert the universal address to the transport address. * Theoretically, we should call the local rpcbind to translate * from the universal address to the transport address, but it gets * complicated (e.g., there's no direct way to tell rpcbind that we * want an IP address instead of a loopback address). Note that * the transport address is potentially host-specific, so we can't * just ask the remote rpcbind, because it might give us the wrong * answer. */ if (strcmp(config->knc_protofmly, NC_INET) == 0) { /* make sure that the ip address is the correct type */ if (rpc_iptype(ua, &iptype) != 0) { status = RPC_UNKNOWNADDR; goto out; } port = rpc_uaddr2port(iptype, ua); put_inet_port(addr, ntohs(port)); } else if (strcmp(config->knc_protofmly, NC_INET6) == 0) { /* make sure that the ip address is the correct type */ if (rpc_iptype(ua, &iptype) != 0) { status = RPC_UNKNOWNADDR; goto out; } port = rpc_uaddr2port(iptype, ua); put_inet6_port(addr, ntohs(port)); } else if (strcmp(config->knc_protofmly, NC_LOOPBACK) == 0) { loopb_u2t(ua, addr); } else { /* "can't happen" - should have been checked for above */ cmn_err(CE_PANIC, "rpcbind_getaddr: bad protocol family"); } out: if (client != NULL) { auth_destroy(client->cl_auth); clnt_destroy(client); } if (ua != NULL) xdr_free(xdr_wrapstring, (char *)&ua); return (status); }
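/*
 * rpc_uaddr2port() is not shown in this extract.  For the inet
 * families, a universal address ends in two decimal octets that
 * encode the port ("h1.h2.h3.h4.p1.p2" for IPv4), so the port is
 * conceptually p1 * 256 + p2.  A minimal sketch of that decoding
 * (illustrative names; the real helper evidently returns the port in
 * network byte order, since the caller above applies ntohs()):
 */
static int
uaddr_dec_octet(const char *s)
{
	int v = 0;

	while (*s >= '0' && *s <= '9')
		v = v * 10 + (*s++ - '0');
	return (v);
}

static ushort_t
uaddr_port(const char *ua)
{
	const char *last = strrchr(ua, '.');	/* dot before p2 */
	const char *prev;

	for (prev = last - 1; prev > ua && *prev != '.'; prev--)
		;				/* find the dot before p1 */
	return ((ushort_t)((uaddr_dec_octet(prev + 1) << 8) |
	    uaddr_dec_octet(last + 1)));
}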
static int /* ERRNO if error, 0 if successful. */ sam_cancel_call( void *arg, /* Pointer to arguments. */ int size, cred_t *credp) { sam_handle_t *fhandle; sam_fserror_arg_t args; sam_mount_t *mp; sam_node_t *ip; /* pointer to rm inode */ int error; /* * Validate and copyin the arguments. */ if (size != sizeof (args) || copyin(arg, (caddr_t)&args, sizeof (args))) { return (EFAULT); } /* * If the mount point is mounted, process cancel request. */ if ((mp = find_mount_point(args.handle.fseq)) == NULL) { return (ECANCELED); } if (secpolicy_fs_config(credp, mp->mi.m_vfsp)) { error = EINVAL; goto cancelerror; } fhandle = (sam_handle_t *)&args.handle; if ((ip = syscall_valid_ino(mp, fhandle)) == NULL) { error = ECANCELED; goto cancelerror; } TRACE(T_SAM_DAEMON_CAN, SAM_ITOV(ip), ip->di.id.ino, ip->rdev, args.ret_err); RW_LOCK_OS(&ip->data_rwl, RW_WRITER); /* Wait until I/O done */ RW_LOCK_OS(&ip->inode_rwl, RW_WRITER); if (ip->rdev && (fhandle->pid == ip->rm_pid)) { ip->rm_err = args.ret_err; RW_UNLOCK_OS(&ip->data_rwl, RW_WRITER); error = sam_unload_rm(ip, FWRITE, 0, 0, credp); if (ip->rm_err == 0) { ip->rm_err = error; } } else { cmn_err(CE_NOTE, "SAM-QFS: %s: sam_cancel_call:" " SC_fscancel error: rdev: %d rm_pid: %d fh_pid: %d", mp->mt.fi_name, (int)ip->rdev, ip->rm_pid, fhandle->pid); error = ECANCELED; RW_UNLOCK_OS(&ip->data_rwl, RW_WRITER); } RW_UNLOCK_OS(&ip->inode_rwl, RW_WRITER); VN_RELE(SAM_ITOV(ip)); /* * Decrement syscall count, the vnode count should be incremented by * now. */ cancelerror: SAM_SYSCALL_DEC(mp, 0); return (error); }
/* * dm2s_attach - Module's attach routine. */ int dm2s_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) { int instance; dm2s_t *dm2sp; char name[20]; instance = ddi_get_instance(dip); /* Only one instance is supported. */ if (instance != 0) { cmn_err(CE_WARN, "only one instance is supported"); return (DDI_FAILURE); } if (cmd != DDI_ATTACH) { return (DDI_FAILURE); } if (ddi_soft_state_zalloc(dm2s_softstate, instance) != DDI_SUCCESS) { cmn_err(CE_WARN, "softstate allocation failure"); return (DDI_FAILURE); } dm2sp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate, instance); if (dm2sp == NULL) { ddi_soft_state_free(dm2s_softstate, instance); cmn_err(CE_WARN, "softstate allocation failure."); return (DDI_FAILURE); } dm2sp->ms_dip = dip; dm2sp->ms_major = ddi_name_to_major(ddi_get_name(dip)); dm2sp->ms_ppa = instance; /* * Get an interrupt block cookie corresponding to the * interrupt priority of the event handler. * Assert that the event priority is not re-defined to * some higher priority. */ /* LINTED */ ASSERT(SCF_EVENT_PRI == DDI_SOFTINT_LOW); if (ddi_get_soft_iblock_cookie(dip, SCF_EVENT_PRI, &dm2sp->ms_ibcookie) != DDI_SUCCESS) { cmn_err(CE_WARN, "ddi_get_soft_iblock_cookie failed."); goto error; } mutex_init(&dm2sp->ms_lock, NULL, MUTEX_DRIVER, (void *)dm2sp->ms_ibcookie); dm2sp->ms_clean |= DM2S_CLEAN_LOCK; cv_init(&dm2sp->ms_wait, NULL, CV_DRIVER, NULL); dm2sp->ms_clean |= DM2S_CLEAN_CV; (void) sprintf(name, "%s%d", DM2S_MODNAME, instance); if (ddi_create_minor_node(dip, name, S_IFCHR, instance, DDI_PSEUDO, NULL) == DDI_FAILURE) { ddi_remove_minor_node(dip, NULL); cmn_err(CE_WARN, "Device node creation failed."); goto error; } dm2sp->ms_clean |= DM2S_CLEAN_NODE; ddi_set_driver_private(dip, (caddr_t)dm2sp); ddi_report_dev(dip); return (DDI_SUCCESS); error: dm2s_cleanup(dm2sp); return (DDI_FAILURE); }
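/*
 * dm2s_cleanup() is not shown in this extract.  The ms_clean bits set
 * above suggest a teardown routine that undoes only the steps that
 * completed; a sketch of that shape (assumed, not the driver's actual
 * code):
 */
static void
dm2s_cleanup_sketch(dm2s_t *dm2sp)
{
	if (dm2sp->ms_clean & DM2S_CLEAN_NODE)
		ddi_remove_minor_node(dm2sp->ms_dip, NULL);
	if (dm2sp->ms_clean & DM2S_CLEAN_CV)
		cv_destroy(&dm2sp->ms_wait);
	if (dm2sp->ms_clean & DM2S_CLEAN_LOCK)
		mutex_destroy(&dm2sp->ms_lock);
	dm2sp->ms_clean = 0;
}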
bool process_connection(struct fsconn *conn)
{
	struct rpc_header_req hdr;
	union cmd cmd;
	bool ok = false;
	size_t i;
	int ret;
	XDR xdr;

	memset(&hdr, 0, sizeof(struct rpc_header_req));
	memset(&cmd, 0, sizeof(union cmd));

	xdrfd_create(&xdr, conn->fd, XDR_DECODE);

	if (!xdr_rpc_header_req(&xdr, &hdr))
		goto out;

	ret = -ENOTSUP;

	for (i = 0; i < ARRAY_LEN(cmdtbl); i++) {
		const struct cmdtbl *def = &cmdtbl[i];

		if (def->opcode != hdr.opcode)
			continue;

		/*
		 * we found the command handler
		 */
		cmn_err(CE_DEBUG, "opcode decoded as: %s", def->name);

		/* fetch arguments */
		ok = process_args(&xdr, def, &cmd);
		if (!ok) {
			cmn_err(CE_ERROR, "failed to fetch args");
			goto out;
		}

		/* if login is required, make sure it happened */
		if (def->requires_login && !conn->vg) {
			ret = -EPROTO;
			cmn_err(CE_ERROR, "must do LOGIN before this operation");
			break;
		}

		/* invoke the handler */
		ret = def->handler(conn, &cmd);

		/* free the arguments */
		xdr_destroy(&xdr);
		xdrfd_create(&xdr, conn->fd, XDR_FREE);
		process_args(&xdr, def, &cmd);
		xdr_destroy(&xdr);

		/* send back the response header */
		xdrfd_create(&xdr, conn->fd, XDR_ENCODE);
		ok = send_response(&xdr, conn->fd, ret);

		/* send back the response payload */
		if (ok && !ret) {
			ok = process_returns(&xdr, def, &cmd);

			/* free the responses */
			xdr_destroy(&xdr);
			xdrfd_create(&xdr, conn->fd, XDR_FREE);
			process_returns(&xdr, def, &cmd);
		}

		goto out;
	}

	if (i == ARRAY_LEN(cmdtbl))
		cmn_err(CE_DEBUG, "unknown opcode: %u", hdr.opcode);

	/* report the failure (unknown opcode, or login required) */
	send_response(&xdr, conn->fd, ret);

out:
	xdr_destroy(&xdr);

	return ok;
}
/* * dm2s_event_handler - Mailbox event handler. */ void dm2s_event_handler(scf_event_t event, void *arg) { dm2s_t *dm2sp = (dm2s_t *)arg; queue_t *rq; ASSERT(dm2sp != NULL); mutex_enter(&dm2sp->ms_lock); if (!(dm2sp->ms_state & DM2S_MB_INITED)) { /* * Ignore all events if the state flag indicates that the * mailbox not initialized, this may happen during the close. */ mutex_exit(&dm2sp->ms_lock); DPRINTF(DBG_MBOX, ("Event(0x%X) received - Mailbox not inited\n", event)); return; } switch (event) { case SCF_MB_CONN_OK: /* * Now the mailbox is ready to use, lets wake up * any one waiting for this event. */ dm2sp->ms_state |= DM2S_MB_CONN; cv_broadcast(&dm2sp->ms_wait); DPRINTF(DBG_MBOX, ("Event received = CONN_OK\n")); break; case SCF_MB_MSG_DATA: if (!DM2S_MBOX_READY(dm2sp)) { DPRINTF(DBG_MBOX, ("Event(MSG_DATA) received - Mailbox not READY\n")); break; } /* * A message is available in the mailbox. * Lets enable the read service procedure * to receive this message. */ if (dm2sp->ms_rq != NULL) { qenable(dm2sp->ms_rq); } DPRINTF(DBG_MBOX, ("Event received = MSG_DATA\n")); break; case SCF_MB_SPACE: if (!DM2S_MBOX_READY(dm2sp)) { DPRINTF(DBG_MBOX, ("Event(MB_SPACE) received - Mailbox not READY\n")); break; } /* * Now the mailbox is ready to transmit, lets * schedule the write service procedure. */ if (dm2sp->ms_wq != NULL) { qenable(dm2sp->ms_wq); } DPRINTF(DBG_MBOX, ("Event received = MB_SPACE\n")); break; case SCF_MB_DISC_ERROR: dm2sp->ms_state |= DM2S_MB_DISC; if (dm2sp->ms_state & DM2S_MB_CONN) { /* * If it was previously connected, * then send a hangup message. */ rq = dm2sp->ms_rq; if (rq != NULL) { mutex_exit(&dm2sp->ms_lock); /* * Send a hangup message to indicate * disconnect event. */ (void) putctl(rq, M_HANGUP); DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp); mutex_enter(&dm2sp->ms_lock); } } else { /* * Signal if the open is waiting for a * connection. */ cv_broadcast(&dm2sp->ms_wait); } DPRINTF(DBG_MBOX, ("Event received = DISC_ERROR\n")); break; default: cmn_err(CE_WARN, "Unexpected event received\n"); break; } mutex_exit(&dm2sp->ms_lock); }
/**
 * Sets IRQ for VMMDev.
 *
 * @returns Solaris error code.
 * @param   pDip     Pointer to the device info structure.
 */
static int vgdrvSolarisAddIRQ(dev_info_t *pDip)
{
    LogFlow(("vgdrvSolarisAddIRQ: pDip=%p\n", pDip));

    /* Get the types of interrupt supported for this hardware. */
    int fIntrType = 0;
    int rc = ddi_intr_get_supported_types(pDip, &fIntrType);
    if (rc == DDI_SUCCESS)
    {
        /* We only support fixed interrupts at this point, not MSIs. */
        if (fIntrType & DDI_INTR_TYPE_FIXED)
        {
            /* Verify the number of interrupts supported by this device. There can only be one fixed interrupt. */
            int cIntrCount = 0;
            rc = ddi_intr_get_nintrs(pDip, fIntrType, &cIntrCount);
            if (   rc == DDI_SUCCESS
                && cIntrCount == 1)
            {
                /* Allocate kernel memory for the interrupt handle. The allocation size is stored internally. */
                g_pahIntrs = RTMemAllocZ(cIntrCount * sizeof(ddi_intr_handle_t));
                if (g_pahIntrs)
                {
                    /* Allocate the interrupt for this device and verify the allocation. */
                    int cIntrAllocated;
                    rc = ddi_intr_alloc(pDip, g_pahIntrs, fIntrType, 0 /* interrupt number */, cIntrCount,
                                        &cIntrAllocated, DDI_INTR_ALLOC_NORMAL);
                    if (   rc == DDI_SUCCESS
                        && cIntrAllocated == 1)
                    {
                        /* Get the interrupt priority assigned by the system. */
                        uint_t uIntrPriority;
                        rc = ddi_intr_get_pri(g_pahIntrs[0], &uIntrPriority);
                        if (rc == DDI_SUCCESS)
                        {
                            /* Check if the interrupt priority is scheduler level or above; if so we need to use
                               high-level and low-level interrupt handlers with corresponding mutexes. */
                            cmn_err(CE_CONT, "!vboxguest: uIntrPriority=%d hilevel_pri=%d\n", uIntrPriority,
                                    ddi_intr_get_hilevel_pri());
                            if (uIntrPriority >= ddi_intr_get_hilevel_pri())
                            {
                                /* Initialize the high-level mutex. */
                                mutex_init(&g_HighLevelIrqMtx, NULL /* pszDesc */, MUTEX_DRIVER,
                                           DDI_INTR_PRI(uIntrPriority));

                                /* Assign the interrupt handler function to the interrupt handle. */
                                rc = ddi_intr_add_handler(g_pahIntrs[0],
                                                          (ddi_intr_handler_t *)&vgdrvSolarisHighLevelISR,
                                                          NULL /* pvArg1 */, NULL /* pvArg2 */);
                                if (rc == DDI_SUCCESS)
                                {
                                    /* Add the low-level interrupt handler. */
                                    rc = ddi_intr_add_softint(pDip, &g_hSoftIntr, DDI_INTR_SOFTPRI_MAX,
                                                              (ddi_intr_handler_t *)&vgdrvSolarisISR,
                                                              NULL /* pvArg1 */);
                                    if (rc == DDI_SUCCESS)
                                    {
                                        /* Initialize the low-level mutex at the corresponding level. */
                                        mutex_init(&g_IrqMtx, NULL /* pszDesc */, MUTEX_DRIVER,
                                                   DDI_INTR_PRI(DDI_INTR_SOFTPRI_MAX));
                                        g_fSoftIntRegistered = true;

                                        /* Enable the high-level interrupt. */
                                        rc = ddi_intr_enable(g_pahIntrs[0]);
                                        if (rc == DDI_SUCCESS)
                                            return rc;

                                        LogRel((DEVICE_NAME "::AddIRQ: failed to enable interrupt. rc=%d\n", rc));
                                        mutex_destroy(&g_IrqMtx);
                                    }
                                    else
                                        LogRel((DEVICE_NAME "::AddIRQ: failed to add soft interrupt handler. rc=%d\n", rc));

                                    ddi_intr_remove_handler(g_pahIntrs[0]);
                                }
                                else
                                    LogRel((DEVICE_NAME "::AddIRQ: failed to add high-level interrupt handler. rc=%d\n", rc));

                                mutex_destroy(&g_HighLevelIrqMtx);
                            }
                            else
                            {
                                /* The interrupt handler runs at a reschedulable level; initialize the mutex at the
                                   given priority. */
                                mutex_init(&g_IrqMtx, NULL /* pszDesc */, MUTEX_DRIVER, DDI_INTR_PRI(uIntrPriority));

                                /* Assign the interrupt handler function to the interrupt handle. */
                                rc = ddi_intr_add_handler(g_pahIntrs[0], (ddi_intr_handler_t *)vgdrvSolarisISR,
                                                          NULL /* pvArg1 */, NULL /* pvArg2 */);
                                if (rc == DDI_SUCCESS)
                                {
                                    /* Enable the interrupt. */
                                    rc = ddi_intr_enable(g_pahIntrs[0]);
                                    if (rc == DDI_SUCCESS)
                                        return rc;

                                    LogRel((DEVICE_NAME "::AddIRQ: failed to enable interrupt. rc=%d\n", rc));
                                    mutex_destroy(&g_IrqMtx);
                                }
                            }
                        }
                        else
                            LogRel((DEVICE_NAME "::AddIRQ: failed to get priority of interrupt. rc=%d\n", rc));

                        Assert(cIntrAllocated == 1);
                        ddi_intr_free(g_pahIntrs[0]);
                    }
                    else
                        LogRel((DEVICE_NAME "::AddIRQ: failed to allocate IRQs. rc=%d cIntrAllocated=%d\n",
                                rc, cIntrAllocated));
                    RTMemFree(g_pahIntrs);
                }
                else
                    LogRel((DEVICE_NAME "::AddIRQ: failed to allocate memory for interrupt handles. count=%d\n",
                            cIntrCount));
            }
            else
                LogRel((DEVICE_NAME "::AddIRQ: failed to get or insufficient number of IRQs. rc=%d cIntrCount=%d\n",
                        rc, cIntrCount));
        }
        else
            LogRel((DEVICE_NAME "::AddIRQ: fixed-type interrupts not supported. IntrType=%#x\n", fIntrType));
    }
    else
        LogRel((DEVICE_NAME "::AddIRQ: failed to get supported interrupt types. rc=%d\n", rc));
    return rc;
}
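/*
 * A minimal sketch (assumed, not the driver's actual code) of how the
 * high-level/soft-interrupt split registered above typically works:
 * the high-level handler does as little as possible under the
 * high-level mutex and defers the real work to the soft interrupt via
 * ddi_intr_trigger_softint().
 */
static uint_t vgdrvSolarisHighLevelISRSketch(caddr_t pvArg1, caddr_t pvArg2)
{
    /* Runs above scheduler level: only touch state guarded by the high-level mutex. */
    mutex_enter(&g_HighLevelIrqMtx);
    /* ... read/acknowledge the device interrupt status here ... */
    mutex_exit(&g_HighLevelIrqMtx);

    /* Hand the real work to the low-level handler registered with ddi_intr_add_softint(). */
    (void)ddi_intr_trigger_softint(g_hSoftIntr, NULL /* pvArg2 */);
    return DDI_INTR_CLAIMED;
}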