/*
 * Prepare an SMB TRANSACTION2 request: zero the structure, initialize
 * its lock and condition variable, copy the caller's setup words, and
 * resolve the owning VC (and the share, if the source is a share).
 */
int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
	int setupcnt, struct smb_cred *scred)
{
	int i;
	int error;

	bzero(t2p, sizeof (*t2p));
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);

	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	for (i = 0; i < setupcnt; i++)
		t2p->t2_setup[i] = setup[i];
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return (error);

	return (0);
}
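/*
 * Hypothetical caller sketch (not part of the original source): build a
 * single-setup-word TRANSACTION2 request against a share and run it.
 * The companion routines smb_t2_request()/smb_t2_done(), the SSTOCP()
 * share-to-connobj macro, and the SMB_TRANS2_QUERY_FS_INFORMATION and
 * SMB_QFS_SIZE_INFO constants are assumed to be available as in typical
 * smbfs code; names and info levels here are illustrative only.
 */
static int
example_t2_qfsinfo(struct smb_share *ssp, struct smb_cred *scred)
{
	struct smb_t2rq t2, *t2p = &t2;
	ushort_t setup = SMB_TRANS2_QUERY_FS_INFORMATION;
	int error;

	error = smb_t2_init(t2p, SSTOCP(ssp), &setup, 1, scred);
	if (error)
		return (error);

	/* Marshal the request parameter bytes (info level). */
	mb_init(&t2p->t2_tparam);
	mb_put_uint16le(&t2p->t2_tparam, SMB_QFS_SIZE_INFO);
	t2p->t2_maxpcount = 4;
	t2p->t2_maxdcount = 1024;

	error = smb_t2_request(t2p);
	/* On success, the reply is parsed from t2p->t2_rparam/t2_rdata. */
	smb_t2_done(t2p);
	return (error);
}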
/*
 * Initialize an SMB request: zero the structure, set up its lock and
 * condition variable, resolve the owning VC and share, and set up the
 * request for the given command via smb_rq_new().
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here.  Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed.  For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
	rqp->sr_pid = (uint16_t)ddi_get_pid();
	error = smb_rq_new(rqp, cmd);

	return (error);
}
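/*
 * Hypothetical caller sketch (not part of the original source): use
 * smb_rq_init() to issue a simple request such as SMB_COM_CLOSE.
 * The marshalling helpers (smb_rq_getrequest(), smb_rq_wstart()/wend(),
 * smb_rq_bstart()/bend(), mb_put_*()) and smb_rq_simple()/smb_rq_done()
 * are assumed to exist as in typical smbfs implementations; the SSTOCP()
 * macro is likewise an assumption.
 */
static int
example_smb_close(struct smb_share *ssp, uint16_t fid, struct smb_cred *scred)
{
	struct smb_rq rq, *rqp = &rq;
	struct mbchain *mbp;
	int error;

	error = smb_rq_init(rqp, SSTOCP(ssp), SMB_COM_CLOSE, scred);
	if (error)
		return (error);

	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, fid);
	mb_put_uint32le(mbp, 0);	/* last-write time: let server set it */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);

	error = smb_rq_simple(rqp);
	smb_rq_done(rqp);
	return (error);
}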
int
smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
	struct smb_cred *scred)
{
	int error;

	bzero(ntp, sizeof (*ntp));
	ntp->nt_source = source;
	ntp->nt_function = fn;
	ntp->nt_cred = scred;
	ntp->nt_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
	if (error)
		return (error);
	return (0);
}
int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
	struct smb_cred *scred)
{
	int error;

	bzero(t2p, sizeof(*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = 1;
	t2p->t2_setupdata = t2p->t2_setup;
	t2p->t2_setup[0] = setup;
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return error;
	return 0;
}
/*
 * Resolve the VC, and the share if applicable, for the given
 * connection-object layer, recursing from a share up to its parent VC.
 * "Zombie" objects whose parent has already gone away are rejected.
 */
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}
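/*
 * Illustration (not part of the original source): for a share-level
 * object, smb_rq_getenv() returns the share itself and recurses to its
 * co_parent to find the owning VC.  The SSTOCP() macro is assumed, and
 * this sketch would only compile in this file since smb_rq_getenv()
 * is static.
 */
static int
example_getenv_usage(struct smb_share *ssp, struct smb_vc **vcpp)
{
	struct smb_share *shp = NULL;
	int error;

	/* On success, *vcpp is the parent VC and shp == ssp. */
	error = smb_rq_getenv(SSTOCP(ssp), vcpp, &shp);
	return error;
}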
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}