/*
 * ip2xinet_uwput - upper write-side put routine for the ip2xinet driver.
 *
 * Handles messages arriving from the stream head on the write queue @q:
 *   M_FLUSH  - standard STREAMS flush handling for both directions.
 *   M_IOCTL  - processes I_LINK/I_UNLINK (multiplexor plumbing); all other
 *              ioctls are NAKed with EINVAL.
 *   others   - logged and freed.
 *
 * The ACK/NAK mblk is queued on the read side (RD(q)) rather than sent with
 * qreply(), using the "putq, and on failure clear b_band and retry" idiom
 * seen throughout this routine.
 *
 * All processing happens under ip2xinet_lock; note the early-return path in
 * the I_LINK allocb() failure case, which must (and does) drop the lock
 * before returning.
 *
 * Always returns 0.
 */
STATIC streamscall int
ip2xinet_uwput(queue_t *q, mblk_t *mp)
{
	int i;

	spin_lock(&ip2xinet_lock);
	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		/* Canonical STREAMS flush: handle the write side first, then
		   reflect the message to the read side if FLUSHR is set,
		   otherwise consume it. */
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			qenable(q);
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(RD(q), mp->b_rptr[1], FLUSHDATA);
			else
				flushq(RD(q), FLUSHDATA);
			/* Pass the flush upstream via the read queue; if putq
			   fails (e.g. bad band), retry on band 0. */
			if (!putq(RD(q), mp)) {
				mp->b_band = 0;
				putq(RD(q), mp);
			}
		} else
			freemsg(mp);
		break;
	case M_IOCTL:
		/* Process at least the I_LINK, I_UNLINK */
		/* THINKME: Failure to correctly process I_LINK/I_UNLINK while
		   returning correctly a nack to stream head will leave us in a
		   possibly totally screwed up DLPI state from which we have to
		   somehow recover.  The possible problematic states are
		   DL_UNBOUND, any DL_PENDING states.  Note: if we stay in
		   UNATTACHED on I_LINK failure or in IDLE on I_UNLINK failure
		   we're ok as long as the private data structure stuff is
		   consistent with the state */
		{
			struct iocblk *iocp;
			mblk_t *nmp;
			dl_attach_req_t *attach;
			struct linkblk *lp;

			iocp = (struct iocblk *) mp->b_rptr;
#if 0
#ifdef DEBUG
			pkt_debug(X25DBIOCTL) KPRINTF("%s size %d\n", x25dbiocmsg(iocp->ioc_cmd), x25dbmsgsize(mp));
#endif
#endif
			switch ((unsigned) iocp->ioc_cmd) {
			case I_LINK:
				iocp->ioc_error = 0;
				iocp->ioc_rval = 0;
				iocp->ioc_count = 0;
				lp = (struct linkblk *) mp->b_cont->b_rptr;
				/* Use only one xinet queue for all devices */
				ip2xinet_status.lowerq = lp->l_qbot;
				ip2xinet_status.index = lp->l_index;
				/* Only one read q to get data from xinet */
				ip2xinet_status.readq = RD(q);
				/* These are dummy ones to indicate the queues are
				   being used */
				ip2xinet_status.lowerq->q_ptr = (char *) &ip2xinet_numopen;
				RD(ip2xinet_status.lowerq)->q_ptr = (char *) &ip2xinet_numopen;
				if ((nmp = allocb(sizeof(union DL_primitives), BPRI_LO)) == NULL) {
					/* Cannot build the DL_ATTACH_REQ: NAK
					   the I_LINK and bail out.  Note the
					   explicit unlock before the early
					   return. */
					iocp->ioc_error = ENOSR;
					mp->b_datap->db_type = M_IOCNAK;
					if (!putq(RD(q), mp)) {
						mp->b_band = 0;
						putq(RD(q), mp);
					}
					spin_unlock(&ip2xinet_lock);
					printk("pktioctl: I_LINK failed: allocb failed");
					return (0);
				}
				/* Setup and send an ATTACH */
				nmp->b_datap->db_type = M_PROTO;
				nmp->b_wptr += DL_ATTACH_REQ_SIZE;
				attach = (dl_attach_req_t *) nmp->b_rptr;
				attach->dl_primitive = DL_ATTACH_REQ;
				attach->dl_ppa = ip2xinet_status.myminor;
				ip2xinet_status.ip2x_dlstate = DL_ATTACH_PENDING;
				/* experience shows that an I_LINKed queue needs
				   to be enabled so that the service routine
				   will be run. */
				qenable(ip2xinet_status.lowerq);
				if (!putq(ip2xinet_status.lowerq, nmp)) {
					nmp->b_band = 0;
					putq(ip2xinet_status.lowerq, nmp);
				}
				/* all went well */
				mp->b_datap->db_type = M_IOCACK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				break;
			case I_UNLINK:
			{
				/* NOTE(review): this inner lp shadows the outer
				   declaration above — harmless but confusing. */
				struct linkblk *lp;

				iocp->ioc_error = 0;
				iocp->ioc_rval = 0;
				iocp->ioc_count = 0;
				lp = (struct linkblk *) mp->b_cont->b_rptr;
				/* Ignore the DLPI state, the stack is being
				   torn down regardless. */
				ip2xinet_status.ip2x_dlstate = UNLINKED;
				/* can't transmit any more */
				for (i = 0; i < NUMIP2XINET; i++) {
					struct ip2xinet_priv *privptr = &ip2xinet_devs[i].priv;

					if (privptr->state == 1)
						netif_stop_queue(&(ip2xinet_devs[i].dev));
				}
				flushq(q, FLUSHALL);
				flushq(RD(lp->l_qbot), FLUSHALL);
				ip2xinet_status.readq = NULL;
				ip2xinet_status.lowerq = NULL;
				mp->b_datap->db_type = M_IOCACK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				break;
			}
			default:
				/* Unsupported ioctl: NAK it. */
				iocp->ioc_error = EINVAL;
				mp->b_datap->db_type = M_IOCNAK;
				if (!putq(RD(q), mp)) {
					mp->b_band = 0;
					putq(RD(q), mp);
				}
				break;
			}
		}
		break;
	case M_DATA:
	case M_PCPROTO:
	case M_PROTO:
	default:
		/* Data/proto messages are not expected on this write queue;
		   log and drop. */
		printk("ip2xinet_uwput: unexpected type=0x%x", mp->b_datap->db_type);
		freemsg(mp);
		break;
	}
	spin_unlock(&ip2xinet_lock);
	return (0);
}
int rdt_connect(struct in_addr dst, int scid, int dcid) { int n, fd; pid_t pid; struct sockaddr_un un; struct in_addr src; struct conn_info conn_info; if (signal(SIGINT, sig_hand) == SIG_ERR || signal(SIGHUP, sig_hand) == SIG_ERR || signal(SIGQUIT, sig_hand) == SIG_ERR) { err_sys("signal() error"); } src = get_addr(dst); pid = getpid(); /* allocate share area for send * and recv process. */ conn_alloc(); conn_info.cact = ACTIVE; conn_info.pid = pid; conn_info.src = conn_user->src = src; conn_info.dst = conn_user->dst = dst; conn_info.scid = conn_user->scid = scid; conn_info.dcid = conn_user->dcid = dcid; conn_user->sndfd = make_fifo(pid, "snd"); conn_user->rcvfd = make_fifo(pid, "rcv"); conn_user->sfd = make_sock(); conn_user->seq = conn_user->ack = 0; if (!mtu) { if (dev[0] == 0 && !get_dev(src, dev)) err_quit("can't get dev name"); mtu = get_mtu(dev); } n = min(mtu, 1500); conn_user->mss = n; if ((conn_user->sndpkt = malloc(n)) == NULL) err_sys("malloc() sndpkt error"); if ((conn_user->rcvpkt = malloc(n)) == NULL) err_sys("malloc() rcvpkt error"); if ((fd = ux_cli(RDT_UX_SOCK, &un)) < 0) err_sys("ux_cli() error"); n = sizeof(struct conn_info); if (sendto(fd, &conn_info, n, 0, (struct sockaddr *)&un, sizeof(struct sockaddr_un)) != n) { err_sys("sendto() error"); } get_pkt(conn_user->sndfd, &conn_info, NULL, 0); conn_user->scid = conn_info.scid; if (rexmt_pkt(conn_user, RDT_REQ, NULL, 0) < 0) err_sys("rexmt_pkt() error"); fprintf(stderr, "rdt_connect() succeed\n"); pkt_debug((struct rdthdr *)conn_user->sndpkt); return(0); }