/*
 * Handle the first completed incoming connection, assumed to be already
 * on the socket's so_comp queue.
 *
 * Dequeues the connection, wraps it in a freshly cloned ksocket node,
 * and sends an NGM_KSOCKET_ACCEPT response (carrying the new node's ID
 * and the peer address) back to the node that requested the accept.
 * On any allocation/clone failure the new socket is closed and no
 * response is sent.
 */
static void
ng_ksocket_finish_accept(priv_p priv)
{
	struct socket *const head = priv->so;	/* listening socket */
	struct socket *so;			/* accepted connection */
	struct sockaddr *sa = NULL;		/* peer address (allocated by soaccept) */
	struct ng_mesg *resp;
	struct ng_ksocket_accept *resp_data;
	node_p node;
	priv_p priv2;
	int len;
	int error;

	/*
	 * Pull the first completed connection off the queue.  The
	 * SQ_COMP/so_head manipulation mirrors what kern_accept() does;
	 * the soref() keeps the socket alive after ACCEPT_UNLOCK().
	 */
	ACCEPT_LOCK();
	so = TAILQ_FIRST(&head->so_comp);
	if (so == NULL) {	/* Should never happen */
		ACCEPT_UNLOCK();
		return;
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	SOCK_LOCK(so);
	soref(so);
	/* Netgraph drives the socket via upcalls, so keep it non-blocking. */
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	/* XXX KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0); */

	soaccept(so, &sa);

	/* Response is variable-length: header + peer sockaddr (if any). */
	len = OFFSETOF(struct ng_ksocket_accept, addr);
	if (sa != NULL)
		len += sa->sa_len;

	NG_MKMESSAGE(resp, NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, len,
	    M_NOWAIT);
	if (resp == NULL) {
		soclose(so);
		goto out;
	}
	/* Tie the async response to the token saved at accept time. */
	resp->header.flags |= NGF_RESP;
	resp->header.token = priv->response_token;

	/* Clone a ksocket node to wrap the new socket */
	error = ng_make_node_common(&ng_ksocket_typestruct, &node);
	if (error) {
		free(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	if (ng_ksocket_constructor(node) != 0) {
		NG_NODE_UNREF(node);
		free(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	priv2 = NG_NODE_PRIVATE(node);
	priv2->so = so;
	priv2->flags |= KSF_CLONED | KSF_EMBRYONIC;

	/*
	 * Insert the cloned node into a list of embryonic children
	 * on the parent node.  When a hook is created on the cloned
	 * node it will be removed from this list.  When the parent
	 * is destroyed it will destroy any embryonic children it has.
	 */
	LIST_INSERT_HEAD(&priv->embryos, priv2, siblings);

	/* Route future socket activity into the cloned node's upcall. */
	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_set(so, SO_RCV, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_LOCK(&so->so_snd);
	soupcall_set(so, SO_SND, ng_ksocket_incoming, node);
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Fill in the response data and send it or return it to the caller */
	resp_data = (struct ng_ksocket_accept *)resp->data;
	resp_data->nodeid = NG_NODE_ID(node);
	if (sa != NULL)
		bcopy(sa, &resp_data->addr, sa->sa_len);

	NG_SEND_MSG_ID(error, node, resp, priv->response_addr, 0);

out:
	if (sa != NULL)
		free(sa, M_SONAME);
}
/*
 * tavor_agent_handle_req()
 *    Context: Called with priority of taskQ thread
 *
 * Services one inbound management datagram (MAD) delivered by the IBMF.
 * Special "Tavor Trap" MADs are patched with the Master SM LID and
 * forwarded; all other MADs are posted to the firmware (MAD_IFC) and
 * the firmware's response is transported back through the IBMF.  On
 * any failure (or for TrapRepress MADs, which need no response) the
 * IBMF message is freed here; on success it is freed later by the
 * response callback.
 */
static void
tavor_agent_handle_req(void *cb_args)
{
	tavor_agent_handler_arg_t	*agent_args;
	tavor_agent_list_t		*curr;
	tavor_state_t			*state;
	ibmf_handle_t			ibmf_handle;
	ibmf_msg_t			*msgp;
	ibmf_msg_bufs_t			*recv_msgbufp;
	ibmf_msg_bufs_t			*send_msgbufp;
	ibmf_retrans_t			retrans;
	uint_t				port;
	int				status;

	TAVOR_TNF_ENTER(tavor_agent_handle_req);

	/* Extract the necessary info from the callback args parameter */
	agent_args = (tavor_agent_handler_arg_t *)cb_args;
	ibmf_handle = agent_args->ahd_ibmfhdl;
	msgp = agent_args->ahd_ibmfmsg;
	curr = agent_args->ahd_agentlist;
	state = curr->agl_state;
	port = curr->agl_port;

	/*
	 * Set the message send buffer pointers to the message receive buffer
	 * pointers to reuse the IBMF provided buffers for the sender
	 * information.
	 */
	recv_msgbufp = &msgp->im_msgbufs_recv;
	send_msgbufp = &msgp->im_msgbufs_send;
	bcopy(recv_msgbufp, send_msgbufp, sizeof (ibmf_msg_bufs_t));

	/*
	 * Check if the incoming packet is a special "Tavor Trap" MAD.  If it
	 * is, then do the special handling.  If it isn't, then simply pass it
	 * on to the firmware and forward the response back to the IBMF.
	 *
	 * Note: Tavor has a unique method for handling internally generated
	 * Traps.  All internally detected/generated Trap messages are
	 * automatically received by the IBMF (as receive completions on QP0),
	 * which (because all Tavor Trap MADs have SLID == 0) detects it as a
	 * special "Tavor Trap" and forwards it here to the driver's SMA.
	 * It is then our responsibility here to fill in the Trap MAD's DLID
	 * for forwarding to the real Master SM (as programmed in the port's
	 * PortInfo.MasterSMLID field.)
	 */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(msgp->im_local_addr))
	if (TAVOR_IS_SPECIAL_TRAP_MAD(msgp)) {
		/* ports are 1-based in the MAD, 0-based in driver state */
		msgp->im_local_addr.ia_remote_lid =
		    TAVOR_PORT_MASTERSMLID_GET(state, port - 1);
	} else {
		/*
		 * Post the command to the firmware (using the MAD_IFC
		 * command).  Note: We also reuse the command that was passed
		 * in.  We pass the pointer to the original MAD payload as if
		 * it were both the source of the incoming MAD as well as the
		 * destination for the response.  This is acceptable and saves
		 * us the step of one additional copy.  Note:  If this command
		 * fails for any reason other than TAVOR_CMD_BAD_PKT, it
		 * probably indicates a serious problem.
		 */
		status = tavor_mad_ifc_cmd_post(state, port,
		    TAVOR_CMD_SLEEP_NOSPIN,
		    (uint32_t *)recv_msgbufp->im_bufs_mad_hdr,
		    (uint32_t *)send_msgbufp->im_bufs_mad_hdr);
		if (status != TAVOR_CMD_SUCCESS) {
			/* BAD_PKT and INSUFF_RSRC are expected, stay quiet */
			if ((status != TAVOR_CMD_BAD_PKT) &&
			    (status != TAVOR_CMD_INSUFF_RSRC)) {
				cmn_err(CE_CONT, "Tavor: MAD_IFC (port %02d) "
				    "command failed: %08x\n", port, status);
				TNF_PROBE_1(tavor_agent_handle_req_madifc_fail,
				    TAVOR_TNF_ERROR, "", tnf_uint, cmd_status,
				    status);
			}

			/* finish cleanup */
			goto tavor_agent_handle_req_skip_response;
		}
	}

	/*
	 * If incoming MAD was "TrapRepress", then no response is necessary.
	 * Free the IBMF message and return.
	 */
	if (TAVOR_IS_TRAP_REPRESS_MAD(msgp)) {
		goto tavor_agent_handle_req_skip_response;
	}

	/*
	 * Modify the response MAD as necessary (for any special cases).
	 * Specifically, if this MAD was a directed route MAD, then some
	 * additional packet manipulation may be necessary because the Tavor
	 * firmware does not do all the required steps to respond to the
	 * MAD.
	 */
	tavor_agent_mad_resp_handling(state, msgp, port);

	/*
	 * Send response (or forwarded "Trap" MAD) back to IBMF.  We use the
	 * "response callback" to indicate when it is appropriate (later) to
	 * free the IBMF msg.
	 */
	status = ibmf_msg_transport(ibmf_handle, IBMF_QP_HANDLE_DEFAULT,
	    msgp, &retrans, tavor_agent_response_cb, state, 0);
	if (status != IBMF_SUCCESS) {
		TNF_PROBE_1(tavor_ibmf_send_msg_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, ibmf_status, status);
		goto tavor_agent_handle_req_skip_response;
	}

	/* Free up the callback args parameter */
	kmem_free(agent_args, sizeof (tavor_agent_handler_arg_t));
	TAVOR_TNF_EXIT(tavor_agent_handle_req);
	return;

tavor_agent_handle_req_skip_response:
	/* Free up the ibmf message */
	status = ibmf_free_msg(ibmf_handle, &msgp);
	if (status != IBMF_SUCCESS) {
		TNF_PROBE_1(tavor_agent_handle_req_ibmf_free_msg_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
		    status);
	}

	/* Free up the callback args parameter */
	kmem_free(agent_args, sizeof (tavor_agent_handler_arg_t));
	TAVOR_TNF_EXIT(tavor_agent_handle_req);
}
/*
 * Receive a control message
 *
 * Dispatches NGM_KSOCKET_* commands (bind, listen, accept, connect,
 * get{,peer}name, get/setsockopt) onto the wrapped socket.  ERROUT()
 * sets 'error' and jumps to 'done', where any response is sent and the
 * incoming message freed.  Accept and in-progress connect complete
 * asynchronously: the sender's token/address are saved in priv so the
 * upcall path can respond later.
 */
static int
ng_ksocket_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	struct thread *td = curthread;	/* XXX broken */
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct socket *const so = priv->so;
	struct ng_mesg *resp = NULL;
	int error = 0;
	struct ng_mesg *msg;
	ng_ID_t raddr;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_KSOCKET_COOKIE:
		switch (msg->header.cmd) {
		case NGM_KSOCKET_BIND:
		    {
			struct sockaddr *const sa
			    = (struct sockaddr *)msg->data;

			/* Sanity check */
			if (msg->header.arglen < SADATA_OFFSET
			    || msg->header.arglen < sa->sa_len)
				ERROUT(EINVAL);
			if (so == NULL)
				ERROUT(ENXIO);

			/* Bind */
			error = sobind(so, sa, td);
			break;
		    }
		case NGM_KSOCKET_LISTEN:
		    {
			/* Sanity check */
			if (msg->header.arglen != sizeof(int32_t))
				ERROUT(EINVAL);
			if (so == NULL)
				ERROUT(ENXIO);

			/* Listen */
			error = solisten(so, *((int32_t *)msg->data), td);
			break;
		    }

		case NGM_KSOCKET_ACCEPT:
		    {
			/* Sanity check */
			if (msg->header.arglen != 0)
				ERROUT(EINVAL);
			if (so == NULL)
				ERROUT(ENXIO);

			/* Make sure the socket is capable of accepting */
			if (!(so->so_options & SO_ACCEPTCONN))
				ERROUT(EINVAL);
			/* Only one accept may be pending at a time. */
			if (priv->flags & KSF_ACCEPTING)
				ERROUT(EALREADY);

			/* EWOULDBLOCK means "no connection yet" -- not fatal. */
			error = ng_ksocket_check_accept(priv);
			if (error != 0 && error != EWOULDBLOCK)
				ERROUT(error);

			/*
			 * If a connection is already complete, take it.
			 * Otherwise let the upcall function deal with
			 * the connection when it comes in.
			 */
			priv->response_token = msg->header.token;
			raddr = priv->response_addr = NGI_RETADDR(item);
			if (error == 0) {
				ng_ksocket_finish_accept(priv);
			} else
				priv->flags |= KSF_ACCEPTING;
			break;
		    }

		case NGM_KSOCKET_CONNECT:
		    {
			struct sockaddr *const sa
			    = (struct sockaddr *)msg->data;

			/* Sanity check */
			if (msg->header.arglen < SADATA_OFFSET
			    || msg->header.arglen < sa->sa_len)
				ERROUT(EINVAL);
			if (so == NULL)
				ERROUT(ENXIO);

			/* Do connect */
			if ((so->so_state & SS_ISCONNECTING) != 0)
				ERROUT(EALREADY);
			if ((error = soconnect(so, sa, td)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				ERROUT(error);
			}
			if ((so->so_state & SS_ISCONNECTING) != 0) {
				/* We will notify the sender when we connect */
				priv->response_token = msg->header.token;
				raddr = priv->response_addr = NGI_RETADDR(item);
				priv->flags |= KSF_CONNECTING;
				ERROUT(EINPROGRESS);
			}
			break;
		    }

		case NGM_KSOCKET_GETNAME:
		case NGM_KSOCKET_GETPEERNAME:
		    {
			int (*func)(struct socket *so, struct sockaddr **nam);
			struct sockaddr *sa = NULL;
			int len;

			/* Sanity check */
			if (msg->header.arglen != 0)
				ERROUT(EINVAL);
			if (so == NULL)
				ERROUT(ENXIO);

			/* Get function */
			if (msg->header.cmd == NGM_KSOCKET_GETPEERNAME) {
				if ((so->so_state
				    & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0)
					ERROUT(ENOTCONN);
				func = so->so_proto->pr_usrreqs->pru_peeraddr;
			} else
				func = so->so_proto->pr_usrreqs->pru_sockaddr;

			/* Get local or peer address */
			if ((error = (*func)(so, &sa)) != 0)
				goto bail;
			len = (sa == NULL) ? 0 : sa->sa_len;

			/* Send it back in a response */
			NG_MKRESPONSE(resp, msg, len, M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				goto bail;
			}
			bcopy(sa, resp->data, len);

		bail:
			/* Cleanup */
			if (sa != NULL)
				free(sa, M_SONAME);
			break;
		    }

		case NGM_KSOCKET_GETOPT:
		    {
			struct ng_ksocket_sockopt *ksopt =
			    (struct ng_ksocket_sockopt *)msg->data;
			struct sockopt sopt;

			/* Sanity check */
			if (msg->header.arglen != sizeof(*ksopt))
				ERROUT(EINVAL);
			if (so == NULL)
				ERROUT(ENXIO);

			/* Get response with room for option value */
			NG_MKRESPONSE(resp, msg, sizeof(*ksopt)
			    + NG_KSOCKET_MAX_OPTLEN, M_NOWAIT);
			if (resp == NULL)
				ERROUT(ENOMEM);

			/* Get socket option, and put value in the response */
			sopt.sopt_dir = SOPT_GET;
			sopt.sopt_level = ksopt->level;
			sopt.sopt_name = ksopt->name;
			sopt.sopt_td = NULL;
			sopt.sopt_valsize = NG_KSOCKET_MAX_OPTLEN;
			/* repoint ksopt at the response payload */
			ksopt = (struct ng_ksocket_sockopt *)resp->data;
			sopt.sopt_val = ksopt->value;
			if ((error = sogetopt(so, &sopt)) != 0) {
				NG_FREE_MSG(resp);
				break;
			}

			/* Set actual value length */
			resp->header.arglen = sizeof(*ksopt)
			    + sopt.sopt_valsize;
			break;
		    }

		case NGM_KSOCKET_SETOPT:
		    {
			struct ng_ksocket_sockopt *const ksopt =
			    (struct ng_ksocket_sockopt *)msg->data;
			const int valsize = msg->header.arglen - sizeof(*ksopt);
			struct sockopt sopt;

			/* Sanity check */
			if (valsize < 0)
				ERROUT(EINVAL);
			if (so == NULL)
				ERROUT(ENXIO);

			/* Set socket option */
			sopt.sopt_dir = SOPT_SET;
			sopt.sopt_level = ksopt->level;
			sopt.sopt_name = ksopt->name;
			sopt.sopt_val = ksopt->value;
			sopt.sopt_valsize = valsize;
			sopt.sopt_td = NULL;
			error = sosetopt(so, &sopt);
			break;
		    }

		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
done:
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);
	return (error);
}
/*
 * Look up (or create) the vnode for inode 'ino' on a UDF mount.
 *
 * First consults the vfs_hash cache; on a miss a new vnode/udf_node
 * pair is created, raced into the hash (losers of the race return the
 * winner's vnode), and initialized from the on-disk file entry.
 * Returns 0 with *vpp locked on success, or an errno with *vpp NULL.
 */
int
udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	struct buf *bp;
	struct vnode *devvp;
	struct udf_mnt *udfmp;
	struct thread *td;
	struct vnode *vp;
	struct udf_node *unode;
	struct file_entry *fe;
	int error, sector, size;

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such rare case as simultaneous creation of vnode
	 * for same ino by different processes.  We just allow them to race
	 * and check later to decide who wins.  Let the race begin!
	 */
	td = curthread;
	udfmp = VFSTOUDFFS(mp);

	unode = uma_zalloc(udf_zone_node, M_WAITOK | M_ZERO);

	if ((error = udf_allocv(mp, &vp, td))) {
		printf("Error from udf_allocv\n");
		uma_zfree(udf_zone_node, unode);
		return (error);
	}

	unode->i_vnode = vp;
	unode->hash_id = ino;
	unode->udfmp = udfmp;
	vp->v_data = unode;

	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	/* NOTE(review): insmntque reclaims vp on failure, so only the
	 * udf_node is freed here -- confirm against insmntque(9). */
	if (error != 0) {
		uma_zfree(udf_zone_node, unode);
		return (error);
	}
	error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * Copy in the file entry.  Per the spec, the size can only be 1 block.
	 */
	sector = ino + udfmp->part_start;
	devvp = udfmp->im_devvp;
	if ((error = RDSECTOR(devvp, sector, udfmp->bsize, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

	fe = (struct file_entry *)bp->b_data;
	if (udf_checktag(&fe->tag, TAGID_FENTRY)) {
		printf("Invalid file entry!\n");
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (ENOMEM);
	}

	/* file entry = fixed header + extended attrs + allocation descs */
	size = UDF_FENTRY_SIZE + le32toh(fe->l_ea) + le32toh(fe->l_ad);
	unode->fentry = malloc(size, M_UDFFENTRY, M_NOWAIT | M_ZERO);
	if (unode->fentry == NULL) {
		printf("Cannot allocate file entry block\n");
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (ENOMEM);
	}

	bcopy(bp->b_data, unode->fentry, size);

	brelse(bp);
	bp = NULL;

	/* Map UDF ICB file types onto vnode types. */
	switch (unode->fentry->icbtag.file_type) {
	default:
		vp->v_type = VBAD;
		break;
	case 4:
		vp->v_type = VDIR;
		break;
	case 5:
		vp->v_type = VREG;
		break;
	case 6:
		vp->v_type = VBLK;
		break;
	case 7:
		vp->v_type = VCHR;
		break;
	case 9:
		vp->v_type = VFIFO;
		vp->v_op = &udf_fifoops;
		break;
	case 10:
		vp->v_type = VSOCK;
		break;
	case 12:
		vp->v_type = VLNK;
		break;
	}

	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);

	if (ino == udf_getid(&udfmp->root_icb))
		vp->v_vflag |= VV_ROOT;

	*vpp = vp;

	return (0);
}
/*
 * ARCnet output routine.
 * Encapsulate a packet of type family for the local net.
 * Assumes that ifp is actually pointer to arccom structure.
 *
 * Resolves the 1-byte ARCnet destination address and packet type for
 * the given address family, prepends the ARCnet header, optionally
 * loops a copy back for broadcast/self-addressed frames on simplex
 * interfaces, taps BPF, and hands the mbuf to the driver.
 */
int
arc_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct route *ro)
{
	struct arc_header	*ah;
	int			error;
	u_int8_t		atype, adst;
	int			loop_copy = 0;	/* -1 = never loop back */
	int			isphds;
#if defined(INET) || defined(INET6)
	struct llentry		*lle;
#endif

	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
		return(ENETDOWN); /* m, m1 aren't initialized yet */

	error = 0;

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:

		/*
		 * For now, use the simple IP addr -> ARCnet addr mapping
		 */
		if (m->m_flags & (M_BCAST|M_MCAST))
			adst = arcbroadcastaddr; /* ARCnet broadcast address */
		else if (ifp->if_flags & IFF_NOARP)
			/* no ARP: last octet of the IP address is the node */
			adst = ntohl(SIN(dst)->sin_addr.s_addr) & 0xFF;
		else {
			error = arpresolve(ifp, ro ? ro->ro_rt : NULL,
			                   m, dst, &adst, &lle);
			if (error)
				/* EWOULDBLOCK: mbuf held for later, not an error */
				return (error == EWOULDBLOCK ? 0 : error);
		}

		atype = (ifp->if_flags & IFF_LINK0) ?
			ARCTYPE_IP_OLD : ARCTYPE_IP;
		break;
	case AF_ARP:
	{
		struct arphdr *ah;
		ah = mtod(m, struct arphdr *);
		ah->ar_hrd = htons(ARPHRD_ARCNET);

		loop_copy = -1; /* if this is for us, don't do it */

		switch(ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			atype = ARCTYPE_REVARP;
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			atype = ARCTYPE_ARP;
			break;
		}

		if (m->m_flags & M_BCAST)
			bcopy(ifp->if_broadcastaddr, &adst, ARC_ADDR_LEN);
		else
			bcopy(ar_tha(ah), &adst, ARC_ADDR_LEN);

	}
	break;
#endif
#ifdef INET6
	case AF_INET6:
		error = nd6_storelladdr(ifp, m, dst, (u_char *)&adst, &lle);
		if (error)
			return (error);
		atype = ARCTYPE_INET6;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		adst = SIPX(dst)->sipx_addr.x_host.c_host[5];
		atype = ARCTYPE_IPX;
		if (adst == 0xff)
			adst = arcbroadcastaddr;
		break;
#endif

	case AF_UNSPEC:
		/* raw frame: caller supplies a prebuilt ARCnet header */
		loop_copy = -1;
		ah = (struct arc_header *)dst->sa_data;
		adst = ah->arc_dhost;
		atype = ah->arc_type;

		if (atype == ARCTYPE_ARP) {
			atype = (ifp->if_flags & IFF_LINK0) ?
			    ARCTYPE_ARP_OLD: ARCTYPE_ARP;

#ifdef ARCNET_ALLOW_BROKEN_ARP
			/*
			 * XXX It's not clear per RFC826 if this is needed, but
			 * "assigned numbers" say this is wrong.
			 * However, e.g., AmiTCP 3.0Beta used it... we make this
			 * switchable for emergency cases. Not perfect, but...
			 */
			if (ifp->if_flags & IFF_LINK2)
				mtod(m, struct arphdr *)->ar_pro = atype - 1;
#endif
		}
		break;

	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		senderr(EAFNOSUPPORT);
	}

	isphds = arc_isphds(atype);
	M_PREPEND(m, isphds ? ARC_HDRNEWLEN : ARC_HDRLEN, M_DONTWAIT);
	if (m == 0)
		senderr(ENOBUFS);
	ah = mtod(m, struct arc_header *);
	ah->arc_type = atype;
	ah->arc_dhost = adst;
	ah->arc_shost = ARC_LLADDR(ifp);
	/* PHDS (new-style) headers carry extra flag/sequence fields */
	if (isphds) {
		ah->arc_flag = 0;
		ah->arc_seqid = 0;
	}

	if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) {
		if ((m->m_flags & M_BCAST) || (loop_copy > 0)) {
			struct mbuf *n = m_copy(m, 0, (int)M_COPYALL);

			(void) if_simloop(ifp, n, dst->sa_family, ARC_HDRLEN);
		} else if (ah->arc_dhost == ah->arc_shost) {
			/* addressed to ourselves: loop it, don't transmit */
			(void) if_simloop(ifp, m, dst->sa_family, ARC_HDRLEN);
			return (0);     /* XXX */
		}
	}

	BPF_MTAP(ifp, m);

	error = ifp->if_transmit(ifp, m);

	return (error);

bad:
	if (m)
		m_freem(m);
	return (error);
}
/* * function: format4 * * export flows in wire format */ int format4(struct ftio *ftio, struct options *opt) { struct ftver ftv; struct ftencode fte; char *rec; int ret; /* initialize encode struct */ ftencode_init(&fte, 0); /* copy version from io stream */ ftio_get_ver(ftio, &ftv); bcopy(&ftv, &fte.ver, sizeof ftv); /* foreach flow */ while ((rec = ftio_read(ftio))) { retry: ret = fts3rec_pdu_encode(&fte, rec); /* ret == 0 then send and clear out buffer * ret > 0 then encode another * ret < 0 then this encoding failed, send and clear out buffer */ if (ret <= 0) { /* convert pdu to network byte order */ #if BYTE_ORDER == LITTLE_ENDIAN ftpdu_swap(fte.buf_enc, BYTE_ORDER); #endif /* BYTE_ORDER == LITTLE_ENDIAN */ if (fwrite(&fte.buf, fte.buf_size, 1, stdout) != 1) fterr_err(1, "fwrite()"); /* reset encode buffer */ ftencode_reset(&fte); /* if ret < 0 then the current record was not encoded */ if (ret < 0) goto retry; } ++opt->records; } /* any left over? */ if (fte.buf_size) { /* convert pdu to network byte order */ ftpdu_swap(fte.buf_enc, BYTE_ORDER); if (fwrite(&fte.buf, fte.buf_size, 1, stdout) != 1) fterr_err(1, "fwrite()"); } /* fte.buf_size */ return 0; } /* format4 */
/*
 * given an at_ifaddr,a sockaddr_at and an ifp,
 * bang them all together at high speed and see what happens
 *
 * Configures an AppleTalk address on an interface: selects (possibly at
 * random within the supplied netrange) a net/node pair, AARP-probes it
 * for uniqueness on non-loopback interfaces, notifies the driver via
 * SIOCSIFADDR, and installs netmask/broadcast and routes.  On any
 * failure the previous address and netrange are restored and an errno
 * is returned.  Runs inside crit_enter()/crit_exit().
 */
static int
at_ifinit(struct ifnet *ifp, struct at_ifaddr *aa, struct sockaddr_at *sat)
{
	struct netrange	nr, onr;
	struct sockaddr_at oldaddr;
	int error = 0, i, j;
	int netinc, nodeinc, nnets;
	u_short net;

	crit_enter();

	/*
	 * save the old addresses in the at_ifaddr just in case we need them.
	 */
	oldaddr = aa->aa_addr;
	onr.nr_firstnet = aa->aa_firstnet;
	onr.nr_lastnet = aa->aa_lastnet;

	/*
	 * take the address supplied as an argument, and add it to the
	 * at_ifnet (also given). Remember ing to update
	 * those parts of the at_ifaddr that need special processing
	 */
	bzero( AA_SAT( aa ), sizeof( struct sockaddr_at ));
	/* the requested netrange travels in sat_zero */
	bcopy( sat->sat_zero, &nr, sizeof( struct netrange ));
	bcopy( sat->sat_zero, AA_SAT( aa )->sat_zero, sizeof( struct netrange ));
	nnets = ntohs( nr.nr_lastnet ) - ntohs( nr.nr_firstnet ) + 1;
	aa->aa_firstnet = nr.nr_firstnet;
	aa->aa_lastnet = nr.nr_lastnet;

/* XXX ALC */
#if 0
	kprintf("at_ifinit: %s: %u.%u range %u-%u phase %d\n",
	    ifp->if_name, ntohs(sat->sat_addr.s_net), sat->sat_addr.s_node,
	    ntohs(aa->aa_firstnet), ntohs(aa->aa_lastnet),
	    (aa->aa_flags & AFA_PHASE2) ? 2 : 1);
#endif

	/*
	 * We could eliminate the need for a second phase 1 probe (post
	 * autoconf) if we check whether we're resetting the node. Note
	 * that phase 1 probes use only nodes, not net.node pairs. Under
	 * phase 2, both the net and node must be the same.
	 */
	if ( ifp->if_flags & IFF_LOOPBACK ) {
		/* loopback: take the address verbatim, no probing needed */
		AA_SAT( aa )->sat_len = sat->sat_len;
		AA_SAT( aa )->sat_family = AF_APPLETALK;
		AA_SAT( aa )->sat_addr.s_net = sat->sat_addr.s_net;
		AA_SAT( aa )->sat_addr.s_node = sat->sat_addr.s_node;
#if 0
	} else if ( fp->if_flags & IFF_POINTOPOINT) {
		/* unimplemented */
		/*
		 * we'd have to copy the dstaddr field over from the sat
		 * but it's not clear that it would contain the right info..
		 */
#endif
	} else {
		/*
		 * We are a normal (probably ethernet) interface.
		 * apply the new address to the interface structures etc.
		 * We will probe this address on the net first, before
		 * applying it to ensure that it is free.. If it is not, then
		 * we will try a number of other randomly generated addresses
		 * in this net and then increment the net.  etc.etc. until
		 * we find an unused address.
		 */
		aa->aa_flags |= AFA_PROBING; /* if not loopback we Must probe? */
		AA_SAT( aa )->sat_len = sizeof(struct sockaddr_at);
		AA_SAT( aa )->sat_family = AF_APPLETALK;
		if ( aa->aa_flags & AFA_PHASE2 ) {
			if ( sat->sat_addr.s_net == ATADDR_ANYNET ) {
				/*
				 * If we are phase 2, and the net was not specified
				 * then we select a random net within the supplied netrange.
				 * XXX use /dev/random?
				 */
				if ( nnets != 1 ) {
					net = ntohs( nr.nr_firstnet ) +
						time_second % ( nnets - 1 );
				} else {
					net = ntohs( nr.nr_firstnet );
				}
			} else {
				/*
				 * if a net was supplied, then check that it is within
				 * the netrange. If it is not then replace the old values
				 * and return an error
				 */
				if ( ntohs( sat->sat_addr.s_net ) < ntohs( nr.nr_firstnet ) ||
				     ntohs( sat->sat_addr.s_net ) > ntohs( nr.nr_lastnet )) {
					aa->aa_addr = oldaddr;
					aa->aa_firstnet = onr.nr_firstnet;
					aa->aa_lastnet = onr.nr_lastnet;
					crit_exit();
					return( EINVAL );
				}
				/*
				 * otherwise just use the new net number..
				 */
				net = ntohs( sat->sat_addr.s_net );
			}
		} else {
			/*
			 * we must be phase one, so just use whatever we were given.
			 * I guess it really isn't going to be used... RIGHT?
			 */
			net = ntohs( sat->sat_addr.s_net );
		}

		/*
		 * set the node part of the address into the ifaddr.
		 * If it's not specified, be random about it...
		 * XXX use /dev/random?
		 */
		if ( sat->sat_addr.s_node == ATADDR_ANYNODE ) {
			AA_SAT( aa )->sat_addr.s_node = time_second;
		} else {
			AA_SAT( aa )->sat_addr.s_node = sat->sat_addr.s_node;
		}

		/*
		 * Copy the phase.
		 */
		AA_SAT( aa )->sat_range.r_netrange.nr_phase =
			((aa->aa_flags & AFA_PHASE2) ? 2:1);

		/*
		 * step through the nets in the range
		 * starting at the (possibly random) start point.
		 */
		for ( i = nnets, netinc = 1; i > 0; net = ntohs( nr.nr_firstnet ) +
			(( net - ntohs( nr.nr_firstnet ) + netinc ) % nnets ), i-- ) {
			AA_SAT( aa )->sat_addr.s_net = htons( net );

			/*
			 * using a rather strange stepping method,
			 * stagger through the possible node addresses
			 * Once again, starting at the (possibly random)
			 * initial node address.
			 */
			for ( j = 0, nodeinc = time_second | 1; j < 256;
					j++, AA_SAT( aa )->sat_addr.s_node += nodeinc ) {
				/* nodes 0, 254, 255 are reserved; skip them */
				if ( AA_SAT( aa )->sat_addr.s_node > 253 ||
						AA_SAT( aa )->sat_addr.s_node < 1 ) {
					continue;
				}
				aa->aa_probcnt = 10;

				/*
				 * start off the probes as an asynchronous activity.
				 * though why wait 200mSec?
				 */
				callout_reset(&aa->aa_ch, hz / 5, aarpprobe, ifp);
				if ( tsleep( aa, PCATCH, "at_ifinit", 0 )) {
					/*
					 * theoretically we shouldn't time out here
					 * so if we returned with an error..
					 */
					kprintf( "at_ifinit: why did this happen?!\n" );
					aa->aa_addr = oldaddr;
					aa->aa_firstnet = onr.nr_firstnet;
					aa->aa_lastnet = onr.nr_lastnet;
					crit_exit();
					return( EINTR );
				}

				/*
				 * The async activity should have woken us up.
				 * We need to see if it was successful in finding
				 * a free spot, or if we need to iterate to the next
				 * address to try.
				 */
				if (( aa->aa_flags & AFA_PROBING ) == 0 ) {
					break;
				}
			}

			/*
			 * of course we need to break out through two loops...
			 */
			if (( aa->aa_flags & AFA_PROBING ) == 0 ) {
				break;
			}
			/* reset node for next network */
			AA_SAT( aa )->sat_addr.s_node = time_second;
		}

		/*
		 * if we are still trying to probe, then we have finished all
		 * the possible addresses, so we need to give up
		 */
		if ( aa->aa_flags & AFA_PROBING ) {
			aa->aa_addr = oldaddr;
			aa->aa_firstnet = onr.nr_firstnet;
			aa->aa_lastnet = onr.nr_lastnet;
			crit_exit();
			return( EADDRINUSE );
		}
	}

	/*
	 * Now that we have selected an address, we need to tell the interface
	 * about it, just in case it needs to adjust something.
	 */
	ifnet_serialize_all(ifp);
	if (ifp->if_ioctl &&
	    (error = ifp->if_ioctl(ifp, SIOCSIFADDR, (caddr_t)aa, NULL)) ) {
		/*
		 * of course this could mean that it objects violently
		 * so if it does, we back out again..
		 */
		aa->aa_addr = oldaddr;
		aa->aa_firstnet = onr.nr_firstnet;
		aa->aa_lastnet = onr.nr_lastnet;
		ifnet_deserialize_all(ifp);
		crit_exit();
		return( error );
	}
	ifnet_deserialize_all(ifp);

	/*
	 * set up the netmask part of the at_ifaddr
	 * and point the appropriate pointer in the ifaddr to it.
	 * probably pointless, but what the heck.. XXX
	 */
	bzero(&aa->aa_netmask, sizeof(aa->aa_netmask));
	aa->aa_netmask.sat_len = sizeof(struct sockaddr_at);
	aa->aa_netmask.sat_family = AF_APPLETALK;
	aa->aa_netmask.sat_addr.s_net = 0xffff;
	aa->aa_netmask.sat_addr.s_node = 0;
	aa->aa_ifa.ifa_netmask =(struct sockaddr *) &(aa->aa_netmask); /* XXX */

	/*
	 * Initialize broadcast (or remote p2p) address
	 */
	bzero(&aa->aa_broadaddr, sizeof(aa->aa_broadaddr));
	aa->aa_broadaddr.sat_len = sizeof(struct sockaddr_at);
	aa->aa_broadaddr.sat_family = AF_APPLETALK;

	aa->aa_ifa.ifa_metric = ifp->if_metric;
	if (ifp->if_flags & IFF_BROADCAST) {
		aa->aa_broadaddr.sat_addr.s_net = htons(0);
		aa->aa_broadaddr.sat_addr.s_node = 0xff;
		aa->aa_ifa.ifa_broadaddr = (struct sockaddr *) &aa->aa_broadaddr;
		/* add the range of routes needed */
		error = aa_dorangeroute(&aa->aa_ifa,
			ntohs(aa->aa_firstnet), ntohs(aa->aa_lastnet), RTM_ADD );
	}
	else if (ifp->if_flags & IFF_POINTOPOINT) {
		struct at_addr	rtaddr, rtmask;

		bzero(&rtaddr, sizeof(rtaddr));
		bzero(&rtmask, sizeof(rtmask));
		/* fill in the far end if we know it here XXX */
		aa->aa_ifa.ifa_dstaddr = (struct sockaddr *) &aa->aa_dstaddr;
		error = aa_addsingleroute(&aa->aa_ifa, &rtaddr, &rtmask);
	}
	else if ( ifp->if_flags & IFF_LOOPBACK ) {
		struct at_addr	rtaddr, rtmask;

		bzero(&rtaddr, sizeof(rtaddr));
		bzero(&rtmask, sizeof(rtmask));
		rtaddr.s_net = AA_SAT( aa )->sat_addr.s_net;
		rtaddr.s_node = AA_SAT( aa )->sat_addr.s_node;
		rtmask.s_net = 0xffff;
		rtmask.s_node = 0x0; /* XXX should not be so.. should be HOST route */
		error = aa_addsingleroute(&aa->aa_ifa, &rtaddr, &rtmask);
	}

	/*
	 * set the address of our "check if this addr is ours" routine.
	 */
	aa->aa_ifa.ifa_claim_addr = aa_claim_addr;

	/*
	 * of course if we can't add these routes we back out, but it's getting
	 * risky by now XXX
	 */
	if ( error ) {
		at_scrub( ifp, aa );
		aa->aa_addr = oldaddr;
		aa->aa_firstnet = onr.nr_firstnet;
		aa->aa_lastnet = onr.nr_lastnet;
		crit_exit();
		return( error );
	}

	/*
	 * note that the address has a route associated with it....
	 */
	aa->aa_ifa.ifa_flags |= IFA_ROUTE;
	aa->aa_flags |= AFA_ROUTE;
	crit_exit();
	return( 0 );
}
/**
 * mps_config_get_bios_pg3 - obtain BIOS page 3
 * @sc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * Context: sleep.
 *
 * Two-step config transaction: first a PAGE_HEADER action to learn the
 * page length, then a PAGE_READ_CURRENT action into a bounce buffer
 * which is copied out to @config_page.  The firmware reply header is
 * copied to @mpi_reply after each step.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mps_config_get_bios_pg3(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply,
    Mpi2BiosPage3_t *config_page)
{
	MPI2_CONFIG_REQUEST *request;
	MPI2_CONFIG_REPLY *reply;
	struct mps_command *cm;
	Mpi2BiosPage3_t *page = NULL;
	int error = 0;
	u16 ioc_status;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	/* Step 1: PAGE_HEADER action to discover the page length. */
	if ((cm = mps_alloc_command(sc)) == NULL) {
		printf("%s: command alloc failed @ line %d\n", __func__,
		    __LINE__);
		error = EBUSY;
		goto out;
	}
	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
	request->Function = MPI2_FUNCTION_CONFIG;
	request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
	request->Header.PageNumber = 3;
	request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_data = NULL;
	error = mps_wait_command(sc, cm, 60, CAN_SLEEP);
	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
	if (error || (reply == NULL)) {
		/* FIXME */
		/*
		 * If the request returns an error then we need to do a diag
		 * reset
		 */
		/* newline added: message previously ran into the next log line */
		printf("%s: request for header completed with error %d\n",
		    __func__, error);
		error = ENXIO;
		goto out;
	}
	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		/* FIXME */
		/*
		 * If the request returns an error then we need to do a diag
		 * reset
		 */
		printf("%s: header read with error; iocstatus = 0x%x\n",
		    __func__, ioc_status);
		error = ENXIO;
		goto out;
	}
	/* We have to do free and alloc for the reply-free and reply-post
	 * counters to match - Need to review the reply FIFO handling.
	 */
	mps_free_command(sc, cm);

	/* Step 2: read the page body using the length from step 1. */
	if ((cm = mps_alloc_command(sc)) == NULL) {
		printf("%s: command alloc failed @ line %d\n", __func__,
		    __LINE__);
		error = EBUSY;
		goto out;
	}
	request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
	bzero(request, sizeof(MPI2_CONFIG_REQUEST));
	request->Function = MPI2_FUNCTION_CONFIG;
	request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	request->Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
	request->Header.PageNumber = 3;
	request->Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
	request->Header.PageLength = mpi_reply->Header.PageLength;
	cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
	cm->cm_sge = &request->PageBufferSGE;
	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
	cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
	if (!page) {
		printf("%s: page alloc failed\n", __func__);
		error = ENOMEM;
		goto out;
	}
	cm->cm_data = page;

	error = mps_wait_command(sc, cm, 60, CAN_SLEEP);
	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
	if (error || (reply == NULL)) {
		/* FIXME */
		/*
		 * If the request returns an error then we need to do a diag
		 * reset
		 */
		/* newline added: message previously ran into the next log line */
		printf("%s: request for page completed with error %d\n",
		    __func__, error);
		error = ENXIO;
		goto out;
	}
	ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		/* FIXME */
		/*
		 * If the request returns an error then we need to do a diag
		 * reset
		 */
		printf("%s: page read with error; iocstatus = 0x%x\n",
		    __func__, ioc_status);
		error = ENXIO;
		goto out;
	}
	/* Copy no more than the caller's structure can hold. */
	bcopy(page, config_page, MIN(cm->cm_length, sizeof(Mpi2BiosPage3_t)));
out:
	free(page, M_MPT2);
	if (cm)
		mps_free_command(sc, cm);
	return (error);
}
/*
 * rcmd - open a connection to the rshd/rlogind on *ahost at privileged
 * port rport, as used by rsh(1)/rlogin(1).
 *
 * On success returns a socket connected to the remote command, writes the
 * canonical host name back through *ahost, and — if fd2p is non-NULL —
 * returns a second (stderr) channel through *fd2p.  Returns -1 on failure.
 * SIGURG is blocked for the duration on non-RTEMS builds so that OOB data
 * cannot interrupt the setup.
 */
int
rcmd(char **ahost, int rport, const char *locuser, const char *remuser,
    const char *cmd, int *fd2p)
{
    struct hostent *hp;
    struct sockaddr_in sin, from;
    fd_set reads;
#ifndef __rtems__
    long oldmask;
#endif
    pid_t pid;
    int s, lport, timo;
    char c;

    pid = getpid();
    hp = gethostbyname(*ahost);
    if (hp == NULL) {
        herror(*ahost);
        return (-1);
    }
    /* Give the caller the canonical name for error reporting. */
    *ahost = hp->h_name;
#ifndef __rtems__
    oldmask = sigblock(sigmask(SIGURG));
#endif
    /*
     * Scan downward through the reserved ports until a bind+connect
     * succeeds.  EADDRINUSE just moves to the next port; ECONNREFUSED
     * is retried with exponential backoff (1,2,...,16s); any other
     * failure tries the host's next address, if one exists.
     */
    for (timo = 1, lport = IPPORT_RESERVED - 1;;) {
        s = rresvport(&lport);
        if (s < 0) {
            if (errno == EAGAIN)
                (void)fprintf(stderr,
                    "rcmd: socket: All ports in use\n");
            else
                (void)fprintf(stderr, "rcmd: socket: %s\n",
                    strerror(errno));
#ifndef __rtems__
            sigsetmask(oldmask);
#endif
            return (-1);
        }
        /* Deliver SIGURG (OOB data) to this process. */
        fcntl(s, F_SETOWN, pid);
        bzero(&sin, sizeof sin);
        sin.sin_len = sizeof(struct sockaddr_in);
        sin.sin_family = hp->h_addrtype;
        sin.sin_port = rport;
        bcopy(hp->h_addr_list[0], &sin.sin_addr,
            MIN(hp->h_length, sizeof sin.sin_addr));
        if (connect(s, (struct sockaddr *)&sin, sizeof(sin)) >= 0)
            break;
        (void)close(s);
        if (errno == EADDRINUSE) {
            lport--;
            continue;
        }
        if (errno == ECONNREFUSED && timo <= 16) {
            (void)sleep(timo);
            timo *= 2;
            continue;
        }
        if (hp->h_addr_list[1] != NULL) {
            int oerrno = errno;

            (void)fprintf(stderr, "connect to address %s: ",
                inet_ntoa(sin.sin_addr));
            errno = oerrno;    /* fprintf may clobber errno */
            perror(0);
            hp->h_addr_list++;
            bcopy(hp->h_addr_list[0], &sin.sin_addr,
                MIN(hp->h_length, sizeof sin.sin_addr));
            (void)fprintf(stderr, "Trying %s...\n",
                inet_ntoa(sin.sin_addr));
            continue;
        }
        (void)fprintf(stderr, "%s: %s\n", hp->h_name, strerror(errno));
#ifndef __rtems__
        sigsetmask(oldmask);
#endif
        return (-1);
    }
    lport--;
    if (fd2p == 0) {
        /* No stderr channel wanted: send an empty port number. */
        write(s, "", 1);
        lport = 0;
    } else {
        /*
         * Set up the secondary (stderr) channel: listen on another
         * reserved port, tell the server its number, and accept the
         * server's connect-back.
         */
        char num[8];
        int s2 = rresvport(&lport), s3;
        socklen_t len = sizeof(from);
        int nfds;

        if (s2 < 0)
            goto bad;
        listen(s2, 1);
        (void)snprintf(num, sizeof(num), "%d", lport);
        if (write(s, num, strlen(num)+1) != strlen(num)+1) {
            (void)fprintf(stderr,
                "rcmd: write (setting up stderr): %s\n",
                strerror(errno));
            (void)close(s2);
            goto bad;
        }
        nfds = max(s, s2)+1;
        if(nfds > FD_SETSIZE) {
            fprintf(stderr, "rcmd: too many files\n");
            (void)close(s2);
            goto bad;
        }
again:
        FD_ZERO(&reads);
        FD_SET(s, &reads);
        FD_SET(s2, &reads);
        errno = 0;
        /*
         * Wait for the connect-back.  Readability on s (the primary
         * channel) before s2 means the server spoke early — a
         * protocol failure.
         */
        if (select(nfds, &reads, 0, 0, 0) < 1 ||
            !FD_ISSET(s2, &reads)){
            if (errno != 0)
                (void)fprintf(stderr,
                    "rcmd: select (setting up stderr): %s\n",
                    strerror(errno));
            else
                (void)fprintf(stderr,
                    "select: protocol failure in circuit setup\n");
            (void)close(s2);
            goto bad;
        }
        s3 = accept(s2, (struct sockaddr *)&from, &len);
        /*
         * XXX careful for ftp bounce attacks. If discovered, shut
         * them down and check for the real auxiliary channel to
         * connect.
         */
        if (from.sin_family == AF_INET && from.sin_port == htons(20)) {
            close(s3);
            goto again;
        }
        (void)close(s2);
        if (s3 < 0) {
            (void)fprintf(stderr, "rcmd: accept: %s\n",
                strerror(errno));
            lport = 0;
            goto bad;
        }
        *fd2p = s3;
        from.sin_port = ntohs((u_short)from.sin_port);
        /* The connect-back must itself come from a reserved port. */
        if (from.sin_family != AF_INET ||
            from.sin_port >= IPPORT_RESERVED ||
            from.sin_port < IPPORT_RESERVED / 2) {
            (void)fprintf(stderr,
                "socket: protocol failure in circuit setup.\n");
            goto bad2;
        }
    }
    /* Protocol: three NUL-terminated strings, then a status byte. */
    (void)write(s, locuser, strlen(locuser)+1);
    (void)write(s, remuser, strlen(remuser)+1);
    (void)write(s, cmd, strlen(cmd)+1);
    if (read(s, &c, 1) != 1) {
        (void)fprintf(stderr, "rcmd: %s: %s\n", *ahost,
            strerror(errno));
        goto bad2;
    }
    if (c != 0) {
        /* Non-zero status: relay the server's error line to stderr. */
        while (read(s, &c, 1) == 1) {
            (void)write(STDERR_FILENO, &c, 1);
            if (c == '\n')
                break;
        }
        goto bad2;
    }
#ifndef __rtems__
    sigsetmask(oldmask);
#endif
    return (s);
bad2:
    if (lport)
        (void)close(*fd2p);
bad:
    (void)close(s);
#ifndef __rtems__
    sigsetmask(oldmask);
#endif
    return (-1);
}
/**
 * mps_config_get_man_pg10 - obtain Manufacturing Page 10 data and set flags
 * accordingly. Currently, this page does not need to return to caller.
 * @sc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * Context: sleep.
 *
 * Reads the WarpDrive OEM identification out of Manufacturing Page 10 and
 * derives sc->WD_hide_expose from it.  Both config requests are polled
 * (mps_wait_command sleep flag 0) because the IOC is not yet ready for
 * interrupt-driven operation when this page is needed.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mps_config_get_man_pg10(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply)
{
    MPI2_CONFIG_REQUEST *request;
    MPI2_CONFIG_REPLY *reply;
    struct mps_command *cm;
    pMpi2ManufacturingPagePS_t page = NULL;
    uint32_t *pPS_info;
    uint8_t OEM_Value = 0;
    int error = 0;
    u16 ioc_status;

    mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

    /* Step 1: fetch the page header to learn the page length. */
    if ((cm = mps_alloc_command(sc)) == NULL) {
        printf("%s: command alloc failed @ line %d\n", __func__,
            __LINE__);
        error = EBUSY;
        goto out;
    }
    request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
    bzero(request, sizeof(MPI2_CONFIG_REQUEST));
    request->Function = MPI2_FUNCTION_CONFIG;
    request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
    request->Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
    request->Header.PageNumber = 10;
    request->Header.PageVersion = MPI2_MANUFACTURING10_PAGEVERSION;
    cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    cm->cm_data = NULL;
    /*
     * This page must be polled because the IOC isn't ready yet when this
     * page is needed.
     */
    error = mps_wait_command(sc, cm, 60, 0);
    reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
    if (error || (reply == NULL)) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: poll for header completed with error %d",
            __func__, error);
        error = ENXIO;
        goto out;
    }
    ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    /* Keep the header reply: step 2 uses its PageLength. */
    bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: header read with error; iocstatus = 0x%x\n",
            __func__, ioc_status);
        error = ENXIO;
        goto out;
    }
    /* We have to do free and alloc for the reply-free and reply-post
     * counters to match - Need to review the reply FIFO handling.
     */
    mps_free_command(sc, cm);

    /* Step 2: read the current page contents. */
    if ((cm = mps_alloc_command(sc)) == NULL) {
        printf("%s: command alloc failed @ line %d\n", __func__,
            __LINE__);
        error = EBUSY;
        goto out;
    }
    request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
    bzero(request, sizeof(MPI2_CONFIG_REQUEST));
    request->Function = MPI2_FUNCTION_CONFIG;
    request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
    request->Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
    request->Header.PageNumber = 10;
    request->Header.PageVersion = MPI2_MANUFACTURING10_PAGEVERSION;
    request->Header.PageLength = mpi_reply->Header.PageLength;
    /* PageLength is in 4-byte words. */
    cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
    cm->cm_sge = &request->PageBufferSGE;
    cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
    cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
    cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    /*
     * NOTE(review): the buffer is sized MPS_MAN_PAGE10_SIZE while the DMA
     * length is cm_length from the firmware — presumably
     * MPS_MAN_PAGE10_SIZE >= cm_length always holds; confirm.
     */
    page = malloc(MPS_MAN_PAGE10_SIZE, M_MPT2, M_ZERO | M_NOWAIT);
    if (!page) {
        printf("%s: page alloc failed\n", __func__);
        error = ENOMEM;
        goto out;
    }
    cm->cm_data = page;
    /*
     * This page must be polled because the IOC isn't ready yet when this
     * page is needed.
     */
    error = mps_wait_command(sc, cm, 60, 0);
    reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
    if (error || (reply == NULL)) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: poll for page completed with error %d",
            __func__, error);
        error = ENXIO;
        goto out;
    }
    ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: page read with error; iocstatus = 0x%x\n",
            __func__, ioc_status);
        error = ENXIO;
        goto out;
    }

    /*
     * If OEM ID is unknown, fail the request.
     */
    sc->WD_hide_expose = MPS_WD_HIDE_ALWAYS;
    OEM_Value = (uint8_t)(page->ProductSpecificInfo & 0x000000FF);
    if (OEM_Value != MPS_WD_LSI_OEM) {
        mps_dprint(sc, MPS_FAULT, "Unknown OEM value for WarpDrive "
            "(0x%x)\n", OEM_Value);
        error = ENXIO;
        goto out;
    }

    /*
     * Set the phys disks hide/expose value.
     */
    pPS_info = &page->ProductSpecificInfo;
    sc->WD_hide_expose = (uint8_t)(pPS_info[5]);
    sc->WD_hide_expose &= MPS_WD_HIDE_EXPOSE_MASK;
    if ((sc->WD_hide_expose != MPS_WD_HIDE_ALWAYS) &&
        (sc->WD_hide_expose != MPS_WD_EXPOSE_ALWAYS) &&
        (sc->WD_hide_expose != MPS_WD_HIDE_IF_VOLUME)) {
        mps_dprint(sc, MPS_FAULT, "Unknown value for WarpDrive "
            "hide/expose: 0x%x\n", sc->WD_hide_expose);
        error = ENXIO;
        goto out;
    }

out:
    free(page, M_MPT2);
    if (cm)
        mps_free_command(sc, cm);
    return (error);
}
/*
 * mps_config_set_dpm_pg0 - write one Driver Mapping page 0 entry to NVRAM.
 * @sc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: entry contents to write
 * @entry_idx: index of the DPM entry to update
 * Context: sleep.
 *
 * Two-step MPI2 config transaction: PAGE_HEADER to learn the extended page
 * length, then PAGE_WRITE_NVRAM with the caller's data DMA'd out to the IOC.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mps_config_set_dpm_pg0(struct mps_softc *sc, Mpi2ConfigReply_t *mpi_reply,
    Mpi2DriverMappingPage0_t *config_page, u16 entry_idx)
{
    MPI2_CONFIG_REQUEST *request;
    MPI2_CONFIG_REPLY *reply;
    struct mps_command *cm;
    MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 *page = NULL;
    int error = 0;
    u16 ioc_status;

    mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

    /* Step 1: fetch the extended page header. */
    if ((cm = mps_alloc_command(sc)) == NULL) {
        printf("%s: command alloc failed @ line %d\n", __func__,
            __LINE__);
        error = EBUSY;
        goto out;
    }
    request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
    bzero(request, sizeof(MPI2_CONFIG_REQUEST));
    request->Function = MPI2_FUNCTION_CONFIG;
    request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
    request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
    request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
    request->Header.PageNumber = 0;
    request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
    /* We can remove below two lines ????*/
    /* PageAddress selects one entry (count=1) at entry_idx. */
    request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
    request->PageAddress |= htole16(entry_idx);
    cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    cm->cm_data = NULL;
    error = mps_wait_command(sc, cm, 60, CAN_SLEEP);
    reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
    if (error || (reply == NULL)) {
        /* FIXME */
        /*
         * If the request returns an error then we need to do a diag
         * reset
         */
        printf("%s: request for header completed with error %d",
            __func__, error);
        error = ENXIO;
        goto out;
    }
    ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    /* Preserve the header reply; step 2 needs ExtPageLength. */
    bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
        /* FIXME */
        /*
         * If the request returns an error then we need to do a diag
         * reset
         */
        printf("%s: header read with error; iocstatus = 0x%x\n",
            __func__, ioc_status);
        error = ENXIO;
        goto out;
    }
    /* We have to do free and alloc for the reply-free and reply-post
     * counters to match - Need to review the reply FIFO handling.
     */
    mps_free_command(sc, cm);

    /* Step 2: DMA the caller's entry out and commit it to NVRAM. */
    if ((cm = mps_alloc_command(sc)) == NULL) {
        printf("%s: command alloc failed @ line %d\n", __func__,
            __LINE__);
        error = EBUSY;
        goto out;
    }
    request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
    bzero(request, sizeof(MPI2_CONFIG_REQUEST));
    request->Function = MPI2_FUNCTION_CONFIG;
    request->Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
    request->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
    request->ExtPageType = MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING;
    request->Header.PageNumber = 0;
    request->Header.PageVersion = MPI2_DRIVERMAPPING0_PAGEVERSION;
    request->ExtPageLength = mpi_reply->ExtPageLength;
    request->PageAddress = 1 << MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT;
    request->PageAddress |= htole16(entry_idx);
    /* ExtPageLength is in 4-byte words. */
    cm->cm_length = le16toh(mpi_reply->ExtPageLength) * 4;
    cm->cm_sge = &request->PageBufferSGE;
    cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
    cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAOUT;
    cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
    if (!page) {
        printf("%s: page alloc failed\n", __func__);
        error = ENOMEM;
        goto out;
    }
    /* Stage the caller's data, clamped to the firmware-reported size. */
    bcopy(config_page, page, MIN(cm->cm_length,
        (sizeof(Mpi2DriverMappingPage0_t))));
    cm->cm_data = page;
    error = mps_wait_command(sc, cm, 60, CAN_SLEEP);
    reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
    if (error || (reply == NULL)) {
        /* FIXME */
        /*
         * If the request returns an error then we need to do a diag
         * reset
         */
        printf("%s: request to write page completed with error %d",
            __func__, error);
        error = ENXIO;
        goto out;
    }
    ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
        /* FIXME */
        /*
         * If the request returns an error then we need to do a diag
         * reset
         */
        printf("%s: page written with error; iocstatus = 0x%x\n",
            __func__, ioc_status);
        error = ENXIO;
        goto out;
    }

out:
    free(page, M_MPT2);
    if (cm)
        mps_free_command(sc, cm);
    return (error);
}
/**
 * mps_config_get_raid_volume_pg0 - obtain raid volume page 0
 * @sc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @config_page: contents of the config page
 * @page_address: form and handle value used to get page
 * Context: sleep.
 *
 * Two-step polled MPI2 config transaction: PAGE_HEADER to learn the page
 * length, then PAGE_READ_CURRENT into a buffer of that length.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mps_config_get_raid_volume_pg0(struct mps_softc *sc,
    Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page,
    u32 page_address)
{
    MPI2_CONFIG_REQUEST *request;
    MPI2_CONFIG_REPLY *reply;
    struct mps_command *cm;
    Mpi2RaidVolPage0_t *page = NULL;
    int error = 0;
    u16 ioc_status;

    mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

    /* Step 1: fetch the page header to learn the page length. */
    if ((cm = mps_alloc_command(sc)) == NULL) {
        printf("%s: command alloc failed @ line %d\n", __func__,
            __LINE__);
        error = EBUSY;
        goto out;
    }
    request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
    bzero(request, sizeof(MPI2_CONFIG_REQUEST));
    request->Function = MPI2_FUNCTION_CONFIG;
    request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
    request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
    request->Header.PageNumber = 0;
    request->Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
    cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    cm->cm_data = NULL;
    /*
     * This page must be polled because the IOC isn't ready yet when this
     * page is needed.
     */
    error = mps_wait_command(sc, cm, 60, 0);
    reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
    if (error || (reply == NULL)) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: poll for header completed with error %d",
            __func__, error);
        error = ENXIO;
        goto out;
    }
    ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    /* Keep the header reply: step 2 uses its PageLength/PageVersion. */
    bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: header read with error; iocstatus = 0x%x\n",
            __func__, ioc_status);
        error = ENXIO;
        goto out;
    }
    /* We have to do free and alloc for the reply-free and reply-post
     * counters to match - Need to review the reply FIFO handling.
     */
    mps_free_command(sc, cm);

    /* Step 2: read the current page for the volume named by page_address. */
    if ((cm = mps_alloc_command(sc)) == NULL) {
        printf("%s: command alloc failed @ line %d\n", __func__,
            __LINE__);
        error = EBUSY;
        goto out;
    }
    request = (MPI2_CONFIG_REQUEST *)cm->cm_req;
    bzero(request, sizeof(MPI2_CONFIG_REQUEST));
    request->Function = MPI2_FUNCTION_CONFIG;
    request->Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
    request->Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
    request->Header.PageNumber = 0;
    request->Header.PageLength = mpi_reply->Header.PageLength;
    request->Header.PageVersion = mpi_reply->Header.PageVersion;
    request->PageAddress = page_address;
    /* PageLength is in 4-byte words. */
    cm->cm_length = le16toh(mpi_reply->Header.PageLength) * 4;
    cm->cm_sge = &request->PageBufferSGE;
    cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
    cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
    cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
    page = malloc(cm->cm_length, M_MPT2, M_ZERO | M_NOWAIT);
    if (!page) {
        printf("%s: page alloc failed\n", __func__);
        error = ENOMEM;
        goto out;
    }
    cm->cm_data = page;
    /*
     * This page must be polled because the IOC isn't ready yet when this
     * page is needed.
     */
    error = mps_wait_command(sc, cm, 60, 0);
    reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
    if (error || (reply == NULL)) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: poll for page completed with error %d",
            __func__, error);
        error = ENXIO;
        goto out;
    }
    ioc_status = le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
    bcopy(reply, mpi_reply, sizeof(MPI2_CONFIG_REPLY));
    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
        /* FIXME */
        /* If the poll returns error then we need to do diag reset */
        printf("%s: page read with error; iocstatus = 0x%x\n",
            __func__, ioc_status);
        error = ENXIO;
        goto out;
    }
    /*
     * NOTE(review): unlike mps_config_get_bios_pg3, this copies the full
     * firmware-reported length without clamping to the caller's buffer.
     * RAID volume page 0 is variable-length (trailing PhysDisk array), so
     * presumably callers size config_page from a prior header read —
     * verify, since a short caller buffer would be overrun here.
     */
    bcopy(page, config_page, cm->cm_length);

out:
    free(page, M_MPT2);
    if (cm)
        mps_free_command(sc, cm);
    return (error);
}
/*
 * scsi_ioc_cmd - execute a user-supplied SCSI command (SCIOCCOMMAND ioctl).
 *
 * Validates the request, stages any data phase through a DMA-safe bounce
 * buffer (copyin for writes, copyout for reads), runs the command
 * synchronously, and translates the transfer status into the scsireq
 * retsts/status/sense fields.  The user is responsible for error recovery,
 * so the transfer is marked SCSI_SILENT and never retried.
 *
 * Returns 0 on success or an errno; SCSI-level failures are reported via
 * screq->retsts, not the return value.
 */
int
scsi_ioc_cmd(struct scsi_link *link, scsireq_t *screq)
{
    struct scsi_xfer *xs;
    int rv = 0;

    /* Reject requests the midlayer cannot represent. */
    if (screq->cmdlen > sizeof(struct scsi_generic))
        return (EFAULT);
    if (screq->datalen > MAXPHYS)
        return (EINVAL);

    xs = scsi_xs_get(link, 0);
    if (xs == NULL)
        return (ENOMEM);

    /* Install the CDB supplied by the user. */
    memcpy(xs->cmd, screq->cmd, screq->cmdlen);
    xs->cmdlen = screq->cmdlen;

    /* Stage the data phase through a zeroed DMA bounce buffer. */
    if (screq->datalen > 0) {
        xs->data = dma_alloc(screq->datalen, PR_WAITOK | PR_ZERO);
        if (xs->data == NULL) {
            rv = ENOMEM;
            goto done;
        }
        xs->datalen = screq->datalen;
    }

    if (screq->flags & SCCMD_READ)
        xs->flags |= SCSI_DATA_IN;
    if (screq->flags & SCCMD_WRITE) {
        if (screq->datalen > 0) {
            rv = copyin(screq->databuf, xs->data, screq->datalen);
            if (rv != 0)
                goto done;
        }
        xs->flags |= SCSI_DATA_OUT;
    }

    xs->flags |= SCSI_SILENT;   /* User is responsible for errors. */
    xs->timeout = screq->timeout;
    xs->retries = 0;            /* user must do the retries */ /* ignored */

    scsi_xs_sync(xs);

    /* Map the transfer outcome onto the ioctl result fields. */
    screq->retsts = 0;
    screq->status = xs->status;
    switch (xs->error) {
    case XS_NOERROR:
        /* probably rubbish */
        screq->datalen_used = xs->datalen - xs->resid;
        screq->retsts = SCCMD_OK;
        break;
    case XS_SENSE:
#ifdef SCSIDEBUG
        scsi_sense_print_debug(xs);
#endif
        screq->senselen_used = min(sizeof(xs->sense),
            sizeof(screq->sense));
        bcopy(&xs->sense, screq->sense, screq->senselen_used);
        screq->retsts = SCCMD_SENSE;
        break;
    case XS_SHORTSENSE:
#ifdef SCSIDEBUG
        scsi_sense_print_debug(xs);
#endif
        printf("XS_SHORTSENSE\n");
        screq->senselen_used = min(sizeof(xs->sense),
            sizeof(screq->sense));
        bcopy(&xs->sense, screq->sense, screq->senselen_used);
        screq->retsts = SCCMD_UNKNOWN;
        break;
    case XS_TIMEOUT:
        screq->retsts = SCCMD_TIMEOUT;
        break;
    case XS_BUSY:
        screq->retsts = SCCMD_BUSY;
        break;
    case XS_DRIVER_STUFFUP:
    default:
        screq->retsts = SCCMD_UNKNOWN;
        break;
    }

    /* Copy read data back out to the user's buffer. */
    if (screq->datalen > 0 && screq->flags & SCCMD_READ) {
        rv = copyout(xs->data, screq->databuf, screq->datalen);
        if (rv != 0)
            goto done;
    }
done:
    if (xs->data)
        dma_free(xs->data, screq->datalen);
    scsi_xs_put(xs);
    return (rv);
}
/*
 * inetport - create, bind, and listen on the socket for a Listener.
 *
 * Opens a TCP socket, sets SO_REUSEADDR, binds to the listener's
 * address/port, resolves the canonical host name for a bound vhost, then
 * listens and marks the fd non-blocking.  On success stores the fd in
 * listener->fd and returns 1; on any failure reports the error, closes the
 * fd, and returns 0.
 *
 * Fixes vs. previous revision:
 *  - port[] was 5 bytes; sprintf("%d") of a port >= 10000 writes 6 bytes
 *    (5 digits + NUL) and overflowed the stack buffer.  Now 6 bytes and
 *    written with snprintf.
 *  - the getaddrinfo() result was never released; now freed with
 *    freeaddrinfo() on success.
 */
static int
inetport(struct Listener* listener)
{
    struct SOCKADDR_IN port_sin;
    int fd;
    int opt = 1;

    /*
     * At first, open a new socket
     */
    fd = socket(AFINET, SOCK_STREAM, 0);
    if (-1 == fd) {
        report_error("opening listener socket %s:%s",
            get_listener_name(listener), errno);
        return 0;
    }
    else if ((HARD_FDLIMIT - 10) < fd) {
        /* Keep a reserve of descriptors for clients/servers. */
        report_error("no more connections left for listener %s:%s",
            get_listener_name(listener), errno);
        CLOSE(fd);
        return 0;
    }
    /*
     * XXX - we don't want to do all this crap for a listener
     * set_sock_opts(listener);
     */
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char*) &opt,
        sizeof(opt))) {
        report_error("setting SO_REUSEADDR for listener %s:%s",
            get_listener_name(listener), errno);
        CLOSE(fd);
        return 0;
    }
    /*
     * Bind a port to listen for new connections if port is non-null,
     * else assume it is already open and try get something from it.
     */
    memset(&port_sin, 0, sizeof(port_sin));
    port_sin.SIN_FAMILY = AFINET;
    port_sin.SIN_PORT = htons(listener->port);
#ifdef __CYGWIN__
    port_sin.sin_addr = listener->addr;
    if (INADDR_ANY != listener->addr.S_ADDR) {
        strncpy_irc(listener->vhost, inetntoa((char *)&listener->addr),
            HOSTLEN);
        listener->name = listener->vhost;
    }
#else
#ifdef IPV6
    bcopy((const char*)listener->addr.S_ADDR,
        (char*)port_sin.SIN_ADDR.S_ADDR, sizeof(struct IN_ADDR));
    if ( bcmp((char*)listener->addr.S_ADDR, &INADDRANY,
        sizeof(struct IN_ADDR)) == 0 )
#else
    port_sin.sin_addr = listener->addr;
    if (INADDRANY != listener->addr.s_addr)
#endif
    {
        struct addrinfo *ans;
        int ret;
        char port[6];   /* "65535" + NUL; [5] overflowed for ports >= 10000 */
        char tmp[HOSTLEN];

        /*
         * XXX - blocking call to getaddrinfo
         */
        snprintf(port, sizeof(port), "%d", listener->port);
#ifdef IPV6
        inetntop(AFINET, &listener->addr, tmp, HOSTLEN);
#else
        inet_ntop(AF_INET, &listener->addr, tmp, HOSTLEN);
#endif
        ret = getaddrinfo(tmp, port, NULL, &ans );
        if( ret == 0 ) {
            if (ans->ai_canonname)
                strncpy_irc(listener->vhost, ans->ai_canonname,
                    HOSTLEN);
            freeaddrinfo(ans);   /* was leaked */
        }
    }
#endif
    if (bind(fd, (struct sockaddr*) &port_sin, sizeof(port_sin))) {
        report_error("binding listener socket %s:%s",
            get_listener_name(listener), errno);
        CLOSE(fd);
        return 0;
    }
    if (listen(fd, HYBRID_SOMAXCONN)) {
        report_error("listen failed for %s:%s",
            get_listener_name(listener), errno);
        CLOSE(fd);
        return 0;
    }
    /*
     * XXX - this should always work, performance will suck if it doesn't
     */
    if (!set_non_blocking(fd))
        report_error(NONB_ERROR_MSG, get_listener_name(listener), errno);
    listener->fd = fd;
    return 1;
}
static int hkdf_sha512_expand(uint8_t *extract_key, uint8_t *info, uint_t info_len, uint8_t *out_buf, uint_t out_len) { int ret; crypto_mechanism_t mech; crypto_context_t ctx; crypto_key_t key; crypto_data_t T_cd, info_cd, c_cd; uint_t i, T_len = 0, pos = 0; uint8_t c; uint_t N = (out_len + SHA512_DIGEST_LENGTH) / SHA512_DIGEST_LENGTH; uint8_t T[SHA512_DIGEST_LENGTH]; if (N > 255) return (SET_ERROR(EINVAL)); /* initialize HMAC mechanism */ mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); mech.cm_param = NULL; mech.cm_param_len = 0; /* initialize the salt as a crypto key */ key.ck_format = CRYPTO_KEY_RAW; key.ck_length = CRYPTO_BYTES2BITS(SHA512_DIGEST_LENGTH); key.ck_data = extract_key; /* initialize crypto data for the input and output data */ T_cd.cd_format = CRYPTO_DATA_RAW; T_cd.cd_offset = 0; T_cd.cd_raw.iov_base = (char *)T; c_cd.cd_format = CRYPTO_DATA_RAW; c_cd.cd_offset = 0; c_cd.cd_length = 1; c_cd.cd_raw.iov_base = (char *)&c; c_cd.cd_raw.iov_len = c_cd.cd_length; info_cd.cd_format = CRYPTO_DATA_RAW; info_cd.cd_offset = 0; info_cd.cd_length = info_len; info_cd.cd_raw.iov_base = (char *)info; info_cd.cd_raw.iov_len = info_cd.cd_length; for (i = 1; i <= N; i++) { c = i; T_cd.cd_length = T_len; T_cd.cd_raw.iov_len = T_cd.cd_length; ret = crypto_mac_init(&mech, &key, NULL, &ctx, NULL); if (ret != CRYPTO_SUCCESS) return (SET_ERROR(EIO)); ret = crypto_mac_update(ctx, &T_cd, NULL); if (ret != CRYPTO_SUCCESS) return (SET_ERROR(EIO)); ret = crypto_mac_update(ctx, &info_cd, NULL); if (ret != CRYPTO_SUCCESS) return (SET_ERROR(EIO)); ret = crypto_mac_update(ctx, &c_cd, NULL); if (ret != CRYPTO_SUCCESS) return (SET_ERROR(EIO)); T_len = SHA512_DIGEST_LENGTH; T_cd.cd_length = T_len; T_cd.cd_raw.iov_len = T_cd.cd_length; ret = crypto_mac_final(ctx, &T_cd, NULL); if (ret != CRYPTO_SUCCESS) return (SET_ERROR(EIO)); bcopy(T, out_buf + pos, (i != N) ? SHA512_DIGEST_LENGTH : (out_len - pos)); pos += SHA512_DIGEST_LENGTH; } return (0); }
int le_poll(struct iodesc *desc, void *pkt, int len) { #if 0 struct netif *nif = desc->io_netif; int unit = nif->nif_unit; #else int unit = 0; #endif struct le_softc *sc = &le_softc[unit]; int length; volatile struct mds *cdm; int stat; #ifdef LE_DEBUG if (/*le_debug*/0) printf("le%d: le_poll called. next_rd=%d\n", unit, sc->sc_next_rd); #endif stat = lerdcsr(sc, 0); lewrcsr(sc, 0, stat & (LE_BABL | LE_MISS | LE_MERR | LE_RINT)); cdm = &sc->sc_rd[sc->sc_next_rd]; if (cdm->flags & LE_OWN) return 0; #ifdef LE_DEBUG if (le_debug) { printf("next_rd %d\n", sc->sc_next_rd); printf("cdm->flags %x\n", cdm->flags); printf("cdm->bcnt %x, cdm->mcnt %x\n", cdm->bcnt, cdm->mcnt); printf("cdm->rbuf msg %d buf %d\n", cdm->mcnt, -cdm->bcnt ); } #endif if (stat & (LE_BABL | LE_CERR | LE_MISS | LE_MERR)) le_error(unit, "le_poll", stat); if (cdm->flags & (LE_FRAM | LE_OFLO | LE_CRC | LE_RBUFF)) { printf("le%d_poll: rmd status 0x%x\n", unit, cdm->flags); length = 0; goto cleanup; } if ((cdm->flags & (LE_STP|LE_ENP)) != (LE_STP|LE_ENP)) panic("le_poll: chained packet"); length = cdm->mcnt; #ifdef LE_DEBUG if (le_debug) printf("le_poll: length %d\n", length); #endif if (length >= BUFSIZE) { length = 0; panic("csr0 when bad things happen: %x", stat); goto cleanup; } if (!length) goto cleanup; length -= 4; if (length > 0) { /* * If the length of the packet is greater than the size of the * buffer, we have to truncate it, to avoid Bad Things. * XXX Is this the right thing to do? */ if (length > len) length = len; bcopy(sc->sc_rbuf + (BUFSIZE * sc->sc_next_rd), pkt, length); } cleanup: cdm->mcnt = 0; cdm->flags |= LE_OWN; if (++sc->sc_next_rd >= NRBUF) sc->sc_next_rd = 0; #ifdef LE_DEBUG if (le_debug) printf("new next_rd %d\n", sc->sc_next_rd); #endif return length; }
/*
 * function: format1
 *
 * export flows in pcap format. Hack to use tcpdump's packet matcher
 *
 * Writes a pcap file header to stdout, then one synthesized packet per
 * flow record: a fake Ethernet header (pd1), an IPv4 header (pd2) built
 * from the flow's addresses/tos/protocol, and for TCP/UDP a minimal
 * transport header (pd3/pd4) carrying the ports.  The packet is assembled
 * in buf[] — the pcap per-packet header is copied in last because its
 * lengths depend on which transport header was chosen.
 *
 * Returns 0 on success, -1 on missing fields or write failure.
 */
int
format1(struct ftio *ftio, struct options *opt)
{
    struct timeval now;
    struct timezone tz;
    struct fts3rec_all cur;
    struct fts3rec_offsets fo;
    struct ftver ftv;
    struct pcap_file_header pfh;
    struct pcap_packet_header pph;
    struct pcap_data1 pd1;
    struct pcap_data2 pd2;
    struct pcap_data3 pd3;
    struct pcap_data4 pd4;
    int bsize, bsize2;
    long thiszone;    /* NOTE(review): never used in this function */
    char buf[1024];
    char *rec;

    /* All fields used below must be present in the flow stream. */
    if (ftio_check_xfield(ftio, FT_XFIELD_TOS | FT_XFIELD_PROT |
        FT_XFIELD_SRCADDR | FT_XFIELD_DSTADDR | FT_XFIELD_SRCPORT |
        FT_XFIELD_DSTPORT | FT_XFIELD_UNIX_SECS | FT_XFIELD_UNIX_NSECS |
        FT_XFIELD_DPKTS | FT_XFIELD_DOCTETS)) {
        fterr_warnx("Flow record missing required field for format.");
        return -1;
    }

    ftio_get_ver(ftio, &ftv);
    fts3rec_compute_offsets(&fo, &ftv);

    if (gettimeofday(&now, &tz) < 0) {
        fterr_warnx("gettimeofday() failed");
        return -1;
    }

    bzero(&pfh, sizeof pfh);
    bzero(&pph, sizeof pph);
    bzero(&pd1, sizeof pd1); /* ethernet */
    bzero(&pd2, sizeof pd2); /* IP */
    bzero(&pd3, sizeof pd3); /* TCP */
    bzero(&pd4, sizeof pd4); /* UDP */
    bsize = 0;

    /* Global pcap file header: snaplen 65535, linktype 1 (Ethernet). */
    pfh.magic = TCPDUMP_MAGIC;
    pfh.version_major = TCPDUMP_VERSION_MAJOR;
    pfh.version_minor = TCPDUMP_VERSION_MINOR;
    pfh.sigfigs = 6;
    pfh.snaplen = 65535;
    pfh.linktype = 1;

    if (fwrite(&pfh, sizeof pfh, 1, stdout) != 1) {
        fterr_warnx("pcap header write failed");
        return -1;
    }

    pd1.eth_prot = 0x0008;    /* ETHERTYPE_IP, already big-endian */
    pd2.version = 0x45;

    /* note: bcopy of pph to buf is deferred (so lengths can be adjusted
     * below) */
    bsize += sizeof pph;
    bcopy(&pd1, buf+bsize, sizeof pd1);
    bsize += sizeof pd1;

    while ((rec = ftio_read(ftio))) {

        /* Overlay typed pointers onto the raw record's field offsets. */
        cur.unix_secs = (uint32_t *)(rec+fo.unix_secs);
        cur.unix_nsecs = (uint32_t *)(rec+fo.unix_nsecs);
        cur.srcport = ((uint16_t*)(rec+fo.srcport));
        cur.dstport = ((uint16_t*)(rec+fo.dstport));
        cur.prot = ((uint8_t*)(rec+fo.prot));
        cur.tos = ((uint8_t*)(rec+fo.tos));
        cur.srcaddr = ((uint32_t*)(rec+fo.srcaddr));
        cur.dstaddr = ((uint32_t*)(rec+fo.dstaddr));

        /* IPv4 header: version 4, IHL = header size in 32-bit words. */
        pd2.version = 4 << 4 | ((sizeof pd2) / 4);
        pd2.tos = *cur.tos;
        pd2.prot = *cur.prot;
        pd2.srcaddr = *cur.srcaddr;
        pd2.dstaddr = *cur.dstaddr;
        pd2.len = sizeof pd2;
        /* this might be adjusted below for TCP, UDP, etc. */
        pph.caplen = sizeof pd1 + sizeof pd2; /* ether and IP header */

#if BYTE_ORDER == LITTLE_ENDIAN
        SWAPINT32(pd2.srcaddr);
        SWAPINT32(pd2.dstaddr);
#endif /* LITTLE_ENDIAN */

#if 1 /* { */
        switch (pd2.prot) {

        case 6: /* TCP */
            pd3.srcport = *cur.srcport;
            pd3.dstport = *cur.dstport;
#if BYTE_ORDER == LITTLE_ENDIAN
            SWAPINT16(pd3.srcport);
            SWAPINT16(pd3.dstport);
#endif /* LITTLE_ENDIAN */
            /* set data offset to 6 32-bit words: */
            pd3.data_reserved_flags_window = 0x60000000;
#if BYTE_ORDER == LITTLE_ENDIAN
            SWAPINT32(pd3.data_reserved_flags_window);
#endif /* LITTLE_ENDIAN */
            pph.caplen += sizeof pd3; /* + TCP header */
            pd2.len += sizeof pd3;
            bcopy(&pd3, buf+bsize+sizeof pd2, sizeof pd3);
            bsize2 = bsize + sizeof pd2 + sizeof pd3;
            break;

        case 17: /* UDP */
            pd4.srcport = *cur.srcport;
            pd4.dstport = *cur.dstport;
#if BYTE_ORDER == LITTLE_ENDIAN
            SWAPINT16(pd4.srcport);
            SWAPINT16(pd4.dstport);
#endif /* LITTLE_ENDIAN */
            pph.caplen += sizeof pd4; /* + UDP header */
            pd2.len += sizeof pd4;
            pd4.len = sizeof pd4; /* FIXME */
#if BYTE_ORDER == LITTLE_ENDIAN
            SWAPINT16(pd4.len);
#endif /* LITTLE_ENDIAN */
            bcopy(&pd4, buf+bsize+sizeof pd2, sizeof pd4);
            bsize2 = bsize + sizeof pd2 + sizeof pd4;
            break;

        case 1: /* FIXME - handle ICMP specially */

        default: /* handle others IP protocols */
            bsize2 = bsize + sizeof pd2;
            break;

        } /* switch */
#else /* }{ */
        bsize2 = bsize + sizeof pd2;
#endif /* } */

        /* Now that caplen is final, fill in the per-packet header. */
        pph.ts.tv_sec = *(uint32_t *)(rec+fo.unix_secs);
        pph.ts.tv_usec = *(uint32_t *)(rec+fo.unix_nsecs) / 1000;
        pph.len = pph.caplen; /* FIXME */
        bcopy(&pph, buf, sizeof pph);

#if BYTE_ORDER == LITTLE_ENDIAN
        SWAPINT16(pd2.len);
#endif /* LITTLE_ENDIAN */
        bcopy(&pd2, buf+bsize, sizeof pd2);

        {
            /* FIXME - add a loop here to write one record per packet
             * (rather than one per flow).  change the packet size to be
             * floor(average_pkt_size), then add one byte to the first or
             * last flow, if necessary, to make the byte count correct.
             */
            /* uint16_t dPkts; */
            /* uint16_t dOctets; */
            if (fwrite(&buf, bsize2, 1, stdout) != 1) {
                fterr_warnx("pcap pkt write failed");
                return -1;
            }
        }

        ++opt->records;

    } /* while */

    return 0;
} /* format1 */
/*
 * le_put - transmit one packet through the Lance (Am7990) transmit ring.
 *
 * Busy-waits for the next transmit descriptor, copies the packet into the
 * transmit buffer (padded to the minimum Ethernet frame length), hands the
 * descriptor to the chip, and spins until the transmit interrupt bit is
 * seen.  On a timeout where the chip reports LE_INIT, the interface is
 * re-initialized and the whole transmit is retried from the top.
 *
 * Returns the number of bytes sent, or -1 on a descriptor-level transmit
 * error.
 */
int
le_put(struct iodesc *desc, void *pkt, size_t len)
{
#if 0
    struct netif *nif = desc->io_netif;
    int unit = nif->nif_unit;
#else
    int unit = 0;
#endif
    struct le_softc *sc = &le_softc[unit];
    volatile struct mds *cdm;
    int timo, i, stat;

le_put_loop:
    timo = 100000;    /* spin budget for the transmit-complete wait */

#ifdef LE_DEBUG
    if (le_debug)
        printf("le%d: le_put called. next_td=%d\n", unit,
            sc->sc_next_td);
#endif
    /* Read CSR0 and acknowledge pending error/transmit bits. */
    stat = lerdcsr(sc, 0);
    lewrcsr(sc, 0, stat & (LE_BABL | LE_MISS | LE_MERR | LE_TINT));
    if (stat & (LE_BABL | LE_CERR | LE_MISS | LE_MERR))
        le_error(unit, "le_put(way before xmit)", stat);
    cdm = &sc->sc_td[sc->sc_next_td];
    i = 0;
#if 0
    while (cdm->flags & LE_OWN) {
        if ((i % 100) == 0)
            printf("le%d: output buffer busy - flags=%x\n",
                unit, cdm->flags);
        if (i++ > 500) break;
    }
    if (cdm->flags & LE_OWN)
        getchar();
#else
    /* Wait (unbounded) until the chip releases this descriptor. */
    while (cdm->flags & LE_OWN);
#endif
    bcopy(pkt, sc->sc_tbuf + (BUFSIZE * sc->sc_next_td), len);
    /* Byte count is stored negated; pad short frames to the minimum. */
    if (len < ETHER_MIN_LEN)
        cdm->bcnt = -ETHER_MIN_LEN;
    else
        cdm->bcnt = -len;
    cdm->mcnt = 0;
    /* Whole frame in one buffer; give ownership to the chip. */
    cdm->flags |= LE_OWN | LE_STP | LE_ENP;
    stat = lerdcsr(sc, 0);
    if (stat & (LE_BABL | LE_CERR | LE_MISS | LE_MERR))
        le_error(unit, "le_put(before xmit)", stat);
    /* Demand immediate transmission. */
    lewrcsr(sc, 0, LE_TDMD);
    stat = lerdcsr(sc, 0);
    if (stat & (LE_BABL | LE_CERR | LE_MISS | LE_MERR))
        le_error(unit, "le_put(after xmit)", stat);
    /* Spin for transmit-complete (LE_TINT), with timeout recovery. */
    do {
        if (--timo == 0) {
            printf("le%d: transmit timeout, stat = 0x%x\n",
                unit, stat);
            if (stat & LE_SERR)
                le_error(unit, "le_put(timeout)", stat);
            if (stat & LE_INIT) {
                printf("le%d: reset and retry packet\n", unit);
                lewrcsr(sc, 0, LE_TINT);    /* sanity */
                le_init(desc, NULL);
                goto le_put_loop;
            }
            break;
        }
        stat = lerdcsr(sc, 0);
    } while ((stat & LE_TINT) == 0);
    lewrcsr(sc, 0, LE_TINT);    /* acknowledge the completion */
    if (stat & (LE_BABL |/* LE_CERR |*/ LE_MISS | LE_MERR)) {
        printf("le_put: xmit error, buf %d\n", sc->sc_next_td);
        le_error(unit, "le_put(xmit error)", stat);
    }
    if (++sc->sc_next_td >= NTBUF)
        sc->sc_next_td = 0;
    /* Collision/deferral accounting from the descriptor status bits. */
    if (cdm->flags & LE_DEF)
        le_stats[unit].deferred++;
    if (cdm->flags & LE_ONE)
        le_stats[unit].collisions++;
    if (cdm->flags & LE_MORE)
        le_stats[unit].collisions+=2;
    if (cdm->flags & LE_ERR) {
        printf("le%d: transmit error, error = 0x%x\n", unit,
            cdm->mcnt);
        return -1;
    }
#ifdef LE_DEBUG
    if (le_debug) {
        printf("le%d: le_put() successful: sent %d\n", unit, len);
        printf("le%d: le_put(): flags: %x mcnt: %x\n", unit,
            (unsigned int) cdm->flags, (unsigned int) cdm->mcnt);
    }
#endif
    return len;
}
/*
 * Open a file.
 *
 * Reads and validates the ext2 superblock, computes the in-core layout
 * values, loads the block-group descriptors, then walks the path one
 * component at a time from the root inode, following symbolic links
 * (bounded by MAXSYMLINKS).  On success f->f_fsdata holds the per-file
 * state and the final inode is loaded; on failure all allocations are
 * released and an errno is returned.
 */
static int
ext2fs_open(const char *upath, struct open_file *f)
{
    struct file *fp;
    struct ext2fs *fs;
    size_t buf_size;
    ino_t inumber, parent_inumber;
    int i, len, groups, bg_per_blk, blkgrps, mult;
    int nlinks = 0;
    int error = 0;
    char *cp, *ncp, *path = NULL, *buf = NULL;
    char namebuf[MAXPATHLEN+1];
    char c;

    /* allocate file system specific data structure */
    fp = malloc(sizeof(struct file));
    if (fp == NULL)
        return (ENOMEM);
    bzero(fp, sizeof(struct file));
    f->f_fsdata = (void *)fp;

    /* allocate space and read super block */
    /* NOTE(review): this malloc (and f_bg/buf below) is not checked for
     * NULL — presumably acceptable in this boot environment; confirm. */
    fs = (struct ext2fs *)malloc(sizeof(*fs));
    fp->f_fs = fs;
    twiddle();
    error = (f->f_dev->dv_strategy)(f->f_devdata, F_READ,
        EXT2_SBLOCK, EXT2_SBSIZE, (char *)fs, &buf_size);
    if (error)
        goto out;

    if (buf_size != EXT2_SBSIZE || fs->fs_magic != EXT2_MAGIC) {
        error = EINVAL;
        goto out;
    }

    /*
     * compute in-core values for the superblock
     */
    fs->fs_bshift = EXT2_MINBSHIFT + fs->fs_fd.fd_bsize;
    fs->fs_bsize = 1 << fs->fs_bshift;
    fs->fs_bmask = fs->fs_bsize - 1;

    fs->fs_fshift = EXT2_MINFSHIFT + fs->fs_fd.fd_fsize;
    fs->fs_fsize = 1 << fs->fs_fshift;
    fs->fs_fmask = fs->fs_fsize - 1;

    /* Revision 0 has fixed inode size/first-inode; later revs store them. */
    if (fs->fs_revision == EXT2_REV0) {
        fs->fs_isize = EXT2_R0_ISIZE;
        fs->fs_firstino = EXT2_R0_FIRSTINO;
    } else {
        fs->fs_isize = fs->fs_fd.fd_isize;
        fs->fs_firstino = fs->fs_fd.fd_firstino;
    }
    fs->fs_imask = fs->fs_isize - 1;
    fs->fs_ipb = fs->fs_bsize / fs->fs_isize;
    fs->fs_fsbtodb = (fs->fs_bsize / DEV_BSIZE) - 1;

    /*
     * we have to load in the "group descriptors" here
     */
    groups = howmany(fs->fs_blocks - fs->fs_firstblk, fs->fs_bpg);
    bg_per_blk = fs->fs_bsize / sizeof(struct ext2blkgrp);
    blkgrps = howmany(groups, bg_per_blk);
    len = blkgrps * fs->fs_bsize;
    fp->f_bg = malloc(len);
    twiddle();
    error = (f->f_dev->dv_strategy)(f->f_devdata, F_READ,
        EXT2_SBLOCK + EXT2_SBSIZE / DEV_BSIZE, len,
        (char *)fp->f_bg, &buf_size);
    if (error)
        goto out;

    /*
     * XXX
     * validation of values?  (blocksize, descriptors, etc?)
     */

    /*
     * Calculate indirect block levels.
     */
    mult = 1;
    for (i = 0; i < NIADDR; i++) {
        mult *= nindir(fs);
        fp->f_nindir[i] = mult;
    }

    inumber = EXT2_ROOTINO;
    if ((error = read_inode(inumber, f)) != 0)
        goto out;

    path = strdup(upath);
    if (path == NULL) {
        error = ENOMEM;
        goto out;
    }
    cp = path;
    while (*cp) {
        /*
         * Remove extra separators
         */
        while (*cp == '/')
            cp++;
        if (*cp == '\0')
            break;

        /*
         * Check that current node is a directory.
         */
        if (! S_ISDIR(fp->f_di.di_mode)) {
            error = ENOTDIR;
            goto out;
        }

        /*
         * Get next component of path name.
         */
        len = 0;
        ncp = cp;
        while ((c = *cp) != '\0' && c != '/') {
            if (++len > EXT2_MAXNAMLEN) {
                error = ENOENT;
                goto out;
            }
            cp++;
        }
        *cp = '\0';    /* temporarily terminate the component */

        /*
         * Look up component in current directory.
         * Save directory inumber in case we find a
         * symbolic link.
         */
        parent_inumber = inumber;
        error = search_directory(ncp, f, &inumber);
        *cp = c;       /* restore the separator/terminator */
        if (error)
            goto out;

        /*
         * Open next component.
         */
        if ((error = read_inode(inumber, f)) != 0)
            goto out;

        /*
         * Check for symbolic link.
         */
        if (S_ISLNK(fp->f_di.di_mode)) {
            int link_len = fp->f_di.di_size;
            int len;

            len = strlen(cp);
            if (link_len + len > MAXPATHLEN ||
                ++nlinks > MAXSYMLINKS) {
                error = ENOENT;
                goto out;
            }

            /*
             * Splice: remaining path goes after where the link
             * target text will land; target is filled in below.
             */
            bcopy(cp, &namebuf[link_len], len + 1);
            if (fp->f_di.di_nblk == 0) {
                /* "fast" symlink stored inside the inode */
                bcopy(fp->f_di.di_shortlink,
                    namebuf, link_len);
            } else {
                /*
                 * Read file for symbolic link
                 */
                struct ext2fs *fs = fp->f_fs;
                daddr_t disk_block;
                size_t buf_size;

                if (! buf)
                    buf = malloc(fs->fs_bsize);
                error = block_map(f, (daddr_t)0,
                    &disk_block);
                if (error)
                    goto out;

                twiddle();
                error = (f->f_dev->dv_strategy)(f->f_devdata,
                    F_READ, fsb_to_db(fs, disk_block),
                    fs->fs_bsize, buf, &buf_size);
                if (error)
                    goto out;

                bcopy((char *)buf, namebuf, link_len);
            }

            /*
             * If relative pathname, restart at parent directory.
             * If absolute pathname, restart at root.
             */
            cp = namebuf;
            if (*cp != '/')
                inumber = parent_inumber;
            else
                inumber = (ino_t)EXT2_ROOTINO;

            if ((error = read_inode(inumber, f)) != 0)
                goto out;
        }
    }

    /*
     * Found terminal component.
     */
    error = 0;
out:
    if (buf)
        free(buf);
    if (path)
        free(path);
    if (error) {
        if (fp->f_buf)
            free(fp->f_buf);
        free(fp->f_fs);
        free(fp);
    }
    return (error);
}
/*
 * Pick the next I/O to issue from this vdev queue, aggregating physically
 * adjacent queued I/Os into one larger child I/O when possible.
 *
 * 'pending_limit' caps how many I/Os may be in flight; returns NULL when
 * the limit is reached or the deadline tree is empty, otherwise returns
 * the (possibly aggregated) zio that was moved onto vq_pending_tree.
 * Caller must hold vq->vq_lock.
 */
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
	zio_t *fio, *lio, *aio, *dio;
	avl_tree_t *tree;
	uint64_t size;

	ASSERT(MUTEX_HELD(&vq->vq_lock));

	/* Respect the pending limit; nothing to issue if the queue is empty. */
	if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
	    avl_numnodes(&vq->vq_deadline_tree) == 0)
		return (NULL);

	/* Start from the I/O with the earliest deadline. */
	fio = lio = avl_first(&vq->vq_deadline_tree);

	tree = fio->io_vdev_tree;
	size = fio->io_size;

	/*
	 * Grow the candidate run backwards (toward lower offsets) while the
	 * previous I/O is contiguous and the aggregate stays under the limit.
	 * io_delegate_next links the run in offset order.
	 */
	while ((dio = AVL_PREV(tree, fio)) != NULL && IS_ADJACENT(dio, fio) &&
	    size + dio->io_size <= zfs_vdev_aggregation_limit) {
		dio->io_delegate_next = fio;
		fio = dio;
		size += dio->io_size;
	}

	/* Grow the run forwards (toward higher offsets) the same way. */
	while ((dio = AVL_NEXT(tree, lio)) != NULL && IS_ADJACENT(lio, dio) &&
	    size + dio->io_size <= zfs_vdev_aggregation_limit) {
		lio->io_delegate_next = dio;
		lio = dio;
		size += dio->io_size;
	}

	if (fio != lio) {
		/* More than one I/O in the run: build the aggregate child. */
		char *buf = zio_buf_alloc(size);
		uint64_t offset = 0;
		int nagg = 0;

		ASSERT(size <= zfs_vdev_aggregation_limit);

		aio = zio_vdev_child_io(fio, NULL, fio->io_vd,
		    fio->io_offset, buf, size, fio->io_type,
		    ZIO_PRIORITY_NOW, ZIO_FLAG_DONT_QUEUE |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
		    ZIO_FLAG_NOBOOKMARK,
		    vdev_queue_agg_io_done, NULL);

		aio->io_delegate_list = fio;

		/*
		 * For writes, gather each delegate's data into the shared
		 * buffer; in all cases remove the delegate from the queue
		 * and mark it bypassed (the aggregate does the real I/O).
		 */
		for (dio = fio; dio != NULL; dio = dio->io_delegate_next) {
			ASSERT(dio->io_type == aio->io_type);
			ASSERT(dio->io_vdev_tree == tree);
			if (dio->io_type == ZIO_TYPE_WRITE)
				bcopy(dio->io_data, buf + offset, dio->io_size);
			offset += dio->io_size;
			vdev_queue_io_remove(vq, dio);
			zio_vdev_io_bypass(dio);
			nagg++;
		}

		ASSERT(offset == size);

		dprintf("%5s T=%llu off=%8llx agg=%3d "
		    "old=%5llx new=%5llx\n",
		    zio_type_name[fio->io_type], fio->io_deadline,
		    fio->io_offset, nagg, fio->io_size, size);

		avl_add(&vq->vq_pending_tree, aio);

		return (aio);
	}

	/* Single I/O: issue it as-is. */
	ASSERT(fio->io_vdev_tree == tree);
	vdev_queue_io_remove(vq, fio);
	avl_add(&vq->vq_pending_tree, fio);

	return (fio);
}
static int udf_mountfs(struct vnode *devvp, struct mount *mp) { struct buf *bp = NULL; struct cdev *dev; struct anchor_vdp avdp; struct udf_mnt *udfmp = NULL; struct part_desc *pd; struct logvol_desc *lvd; struct fileset_desc *fsd; struct file_entry *root_fentry; uint32_t sector, size, mvds_start, mvds_end; uint32_t logical_secsize; uint32_t fsd_offset = 0; uint16_t part_num = 0, fsd_part = 0; int error = EINVAL; int logvol_found = 0, part_found = 0, fsd_found = 0; int bsize; struct g_consumer *cp; struct bufobj *bo; dev = devvp->v_rdev; dev_ref(dev); DROP_GIANT(); g_topology_lock(); error = g_vfs_open(devvp, &cp, "udf", 0); g_topology_unlock(); PICKUP_GIANT(); VOP_UNLOCK(devvp, 0); if (error) goto bail; bo = &devvp->v_bufobj; if (devvp->v_rdev->si_iosize_max != 0) mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; if (mp->mnt_iosize_max > MAXPHYS) mp->mnt_iosize_max = MAXPHYS; /* XXX: should be M_WAITOK */ udfmp = malloc(sizeof(struct udf_mnt), M_UDFMOUNT, M_NOWAIT | M_ZERO); if (udfmp == NULL) { printf("Cannot allocate UDF mount struct\n"); error = ENOMEM; goto bail; } mp->mnt_data = udfmp; mp->mnt_stat.f_fsid.val[0] = dev2udev(devvp->v_rdev); mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED; MNT_IUNLOCK(mp); udfmp->im_mountp = mp; udfmp->im_dev = dev; udfmp->im_devvp = devvp; udfmp->im_d2l = NULL; udfmp->im_cp = cp; udfmp->im_bo = bo; #if 0 udfmp->im_l2d = NULL; #endif /* * The UDF specification defines a logical sectorsize of 2048 * for DVD media. */ logical_secsize = 2048; if (((logical_secsize % cp->provider->sectorsize) != 0) || (logical_secsize < cp->provider->sectorsize)) { error = EINVAL; goto bail; } bsize = cp->provider->sectorsize; /* * Get the Anchor Volume Descriptor Pointer from sector 256. * XXX Should also check sector n - 256, n, and 512. 
*/ sector = 256; if ((error = bread(devvp, sector * btodb(logical_secsize), bsize, NOCRED, &bp)) != 0) goto bail; if ((error = udf_checktag((struct desc_tag *)bp->b_data, TAGID_ANCHOR))) goto bail; bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp)); brelse(bp); bp = NULL; /* * Extract the Partition Descriptor and Logical Volume Descriptor * from the Volume Descriptor Sequence. * XXX Should we care about the partition type right now? * XXX What about multiple partitions? */ mvds_start = le32toh(avdp.main_vds_ex.loc); mvds_end = mvds_start + (le32toh(avdp.main_vds_ex.len) - 1) / bsize; for (sector = mvds_start; sector < mvds_end; sector++) { if ((error = bread(devvp, sector * btodb(logical_secsize), bsize, NOCRED, &bp)) != 0) { printf("Can't read sector %d of VDS\n", sector); goto bail; } lvd = (struct logvol_desc *)bp->b_data; if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) { udfmp->bsize = le32toh(lvd->lb_size); udfmp->bmask = udfmp->bsize - 1; udfmp->bshift = ffs(udfmp->bsize) - 1; fsd_part = le16toh(lvd->_lvd_use.fsd_loc.loc.part_num); fsd_offset = le32toh(lvd->_lvd_use.fsd_loc.loc.lb_num); if (udf_find_partmaps(udfmp, lvd)) break; logvol_found = 1; } pd = (struct part_desc *)bp->b_data; if (!udf_checktag(&pd->tag, TAGID_PARTITION)) { part_found = 1; part_num = le16toh(pd->part_num); udfmp->part_len = le32toh(pd->part_len); udfmp->part_start = le32toh(pd->start_loc); } brelse(bp); bp = NULL; if ((part_found) && (logvol_found)) break; } if (!part_found || !logvol_found) { error = EINVAL; goto bail; } if (fsd_part != part_num) { printf("FSD does not lie within the partition!\n"); error = EINVAL; goto bail; } /* * Grab the Fileset Descriptor * Thanks to Chuck McCrobie <*****@*****.**> for pointing * me in the right direction here. 
*/ sector = udfmp->part_start + fsd_offset; if ((error = RDSECTOR(devvp, sector, udfmp->bsize, &bp)) != 0) { printf("Cannot read sector %d of FSD\n", sector); goto bail; } fsd = (struct fileset_desc *)bp->b_data; if (!udf_checktag(&fsd->tag, TAGID_FSD)) { fsd_found = 1; bcopy(&fsd->rootdir_icb, &udfmp->root_icb, sizeof(struct long_ad)); } brelse(bp); bp = NULL; if (!fsd_found) { printf("Couldn't find the fsd\n"); error = EINVAL; goto bail; } /* * Find the file entry for the root directory. */ sector = le32toh(udfmp->root_icb.loc.lb_num) + udfmp->part_start; size = le32toh(udfmp->root_icb.len); if ((error = udf_readdevblks(udfmp, sector, size, &bp)) != 0) { printf("Cannot read sector %d\n", sector); goto bail; } root_fentry = (struct file_entry *)bp->b_data; if ((error = udf_checktag(&root_fentry->tag, TAGID_FENTRY))) { printf("Invalid root file entry!\n"); goto bail; } brelse(bp); bp = NULL; return 0; bail: if (udfmp != NULL) free(udfmp, M_UDFMOUNT); if (bp != NULL) brelse(bp); if (cp != NULL) { DROP_GIANT(); g_topology_lock(); g_vfs_close(cp); g_topology_unlock(); PICKUP_GIANT(); } dev_rel(dev); return error; };
/*
 * Autoconf attach routine for the Impact graphics device.  Refuses to
 * attach if the firmware has not initialized the hardware; otherwise
 * reuses the console screen set up by impact_cnattach() or allocates
 * and initializes a fresh screen, then attaches the wsdisplay child.
 */
void
impact_attach(struct device *parent, struct device *self, void *aux)
{
	struct xbow_attach_args *xaa = aux;
	struct wsemuldisplaydev_attach_args waa;
	struct impact_softc *sc = (void *)self;
	struct impact_screen *scr;

	/* The firmware must have brought the device up first. */
	if (strncmp(bios_graphics, "alive", 5) != 0) {
		printf(" device has not been setup by firmware!\n");
		return;
	}

	printf(" revision %d\n", xaa->xaa_revision);

	if (impact_is_console(xaa)) {
		/*
		 * Setup has already been done via impact_cnattach();
		 * adopt the statically-allocated console screen.
		 */
		scr = &impact_cons;
		scr->sc = sc;
		sc->curscr = scr;
		sc->console = 1;
	} else {
		/*
		 * Setup screen data.
		 */
		scr = malloc(sizeof(struct impact_screen), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (scr == NULL) {
			printf("failed to allocate screen memory!\n");
			return;
		}

		/*
		 * Create a copy of the bus space tag so the screen owns
		 * its own tag storage.
		 */
		bcopy(xaa->xaa_iot, &scr->iot_store,
		    sizeof(struct mips_bus_space));
		scr->iot = &scr->iot_store;

		/* Setup bus space mappings. */
		if (bus_space_map(scr->iot, IMPACTSR_REG_OFFSET,
		    IMPACTSR_REG_SIZE, 0, &scr->ioh)) {
			printf("failed to map registers\n");
			free(scr, M_DEVBUF);
			return;
		}

		scr->sc = sc;
		sc->curscr = scr;

		/* Setup hardware and clear screen. */
		impact_setup(scr);
		if (impact_init_screen(scr) != 0) {
			printf("failed to allocate memory\n");
			/* Unwind the mapping and allocation on failure. */
			bus_space_unmap(scr->iot, scr->ioh, IMPACTSR_REG_SIZE);
			free(scr, M_DEVBUF);
			return;
		}
	}

	/* Publish the single screen descriptor to wscons. */
	scr->scrlist[0] = &scr->wsd;
	scr->wsl.nscreens = 1;
	scr->wsl.screens = (const struct wsscreen_descr **)scr->scrlist;

	waa.console = sc->console;
	waa.scrdata = &scr->wsl;
	waa.accessops = &impact_accessops;
	waa.accesscookie = scr;
	waa.defaultscreens = 0;

	config_found(self, &waa, wsemuldisplaydevprint);
}
static int udf_find_partmaps(struct udf_mnt *udfmp, struct logvol_desc *lvd) { struct part_map_spare *pms; struct regid *pmap_id; struct buf *bp; unsigned char regid_id[UDF_REGID_ID_SIZE + 1]; int i, k, ptype, psize, error; uint8_t *pmap = (uint8_t *) &lvd->maps[0]; for (i = 0; i < le32toh(lvd->n_pm); i++) { ptype = pmap[0]; psize = pmap[1]; if (((ptype != 1) && (ptype != 2)) || ((psize != UDF_PMAP_TYPE1_SIZE) && (psize != UDF_PMAP_TYPE2_SIZE))) { printf("Invalid partition map found\n"); return (1); } if (ptype == 1) { /* Type 1 map. We don't care */ pmap += UDF_PMAP_TYPE1_SIZE; continue; } /* Type 2 map. Gotta find out the details */ pmap_id = (struct regid *)&pmap[4]; bzero(®id_id[0], UDF_REGID_ID_SIZE); bcopy(&pmap_id->id[0], ®id_id[0], UDF_REGID_ID_SIZE); if (bcmp(®id_id[0], "*UDF Sparable Partition", UDF_REGID_ID_SIZE)) { printf("Unsupported partition map: %s\n", ®id_id[0]); return (1); } pms = (struct part_map_spare *)pmap; pmap += UDF_PMAP_TYPE2_SIZE; udfmp->s_table = malloc(le32toh(pms->st_size), M_UDFMOUNT, M_NOWAIT | M_ZERO); if (udfmp->s_table == NULL) return (ENOMEM); /* Calculate the number of sectors per packet. */ /* XXX Logical or physical? */ udfmp->p_sectors = le16toh(pms->packet_len) / udfmp->bsize; /* * XXX If reading the first Sparing Table fails, should look * for another table. */ if ((error = udf_readdevblks(udfmp, le32toh(pms->st_loc[0]), le32toh(pms->st_size), &bp)) != 0) { if (bp != NULL) brelse(bp); printf("Failed to read Sparing Table at sector %d\n", le32toh(pms->st_loc[0])); free(udfmp->s_table, M_UDFMOUNT); return (error); } bcopy(bp->b_data, udfmp->s_table, le32toh(pms->st_size)); brelse(bp); if (udf_checktag(&udfmp->s_table->tag, 0)) { printf("Invalid sparing table found\n"); free(udfmp->s_table, M_UDFMOUNT); return (EINVAL); } /* See how many valid entries there are here. The list is * supposed to be sorted. 
0xfffffff0 and higher are not valid */ for (k = 0; k < le16toh(udfmp->s_table->rt_l); k++) { udfmp->s_table_entries = k; if (le32toh(udfmp->s_table->entries[k].org) >= 0xfffffff0) break; } } return (0); }
/*
 * Attach an EPIC interface to the system.
 *
 * Allocates and maps the DMA control data and per-descriptor DMA maps,
 * resets the chip, reads the MAC address and device name from the
 * EEPROM, probes the MII, and attaches the network interface.  On any
 * DMA setup failure, resources are released in reverse order via the
 * fail_* label chain.
 */
void
epic_attach(struct epic_softc *sc, const char *intrstr)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rseg, error, miiflags;
	u_int i;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	timeout_set(&sc->sc_mii_timeout, epic_tick, sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The extra ETHER_PAD_LEN bytes hold a zeroed
	 * pad buffer used to pad short transmit frames.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	/* The pad buffer lives just past the control data. */
	nullbuf = (char *)sc->sc_control_data +
	    sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * Create and map the pad buffer.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
		printf(": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_5;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	/* Pad buffer contents never change; sync it once now. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM (stored as 16-bit
	 * little-endian words).
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name, also as 16-bit words.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/*
	 * Force the last byte to a space, then strip trailing spaces;
	 * the loop always writes at least one '\0' (over that forced
	 * space), which NUL-terminates the name.
	 */
	devname[sizeof(devname) - 1] = ' ';
	for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
		devname[i] = '\0';
		if (i == 0)
			break;
	}

	printf(", %s : %s, address %s\n", devname, intrstr,
	    ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: fall back to a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->sc_serinst),
		    0, NULL);
	} else
		sc->sc_serinst = -1;

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, EPIC_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this even some time ago and
 * we cannot trust that he even still exists. The node however is being
 * held with a reference by the queueing code and guarantied to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *response;
	struct uio auio;
	int flags, error;

	KASSERT(so == priv->so, ("%s: wrong socket", __func__));

	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			so->so_state &= ~SS_ISCONNECTING;
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t), M_NOWAIT);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/*
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node, response,
				    priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later.  When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL)
		return;

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	auio.uio_resid = MJUMPAGESIZE;	/* XXXGL: sane limit? */
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *m;

		/* Try to get next packet from socket */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				free(sa, M_SONAME);
			break;
		}

		KASSERT(m->m_nextpkt == NULL, ("%s: nextpkt", __func__));

		/*
		 * Stream sockets do not have packet boundaries, so
		 * we have to allocate a header mbuf and attach the
		 * stream of data to it.
		 */
		if (so->so_type == SOCK_STREAM) {
			struct mbuf *mh;

			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL) {
				m_freem(m);
				if (sa != NULL)
					free(sa, M_SONAME);
				break;
			}

			mh->m_next = m;
			/* Account the whole chain in the packet header. */
			for (; m; m = m->m_next)
				mh->m_pkthdr.len += m->m_len;
			m = mh;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag *stag;

			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR,
			    sizeof(ng_ID_t) + sa->sa_len, M_NOWAIT);
			if (stag == NULL) {
				/* Tag allocation failed: send untagged. */
				free(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			free(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE &&
	    !(priv->flags & KSF_EOFSEEN)) {
		struct mbuf *m;

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m != NULL)
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		priv->flags |= KSF_EOFSEEN;
	}
}
int
NBJTtemp(GENmodel *inModel, CKTcircuit *ckt)
/*
 * Perform the temperature update for every numerical BJT model and
 * instance: recompute material parameters, reassign doping/physical
 * parameters to the mesh, determine the device type (NPN/PNP) and the
 * base-contact location, and derive the reciprocal base length.
 * Returns OK.
 */
{
  register NBJTmodel *model = (NBJTmodel *) inModel;
  register NBJTinstance *inst;
  METHcard *methods;
  MODLcard *models;
  OPTNcard *options;
  OUTPcard *outputs;
  ONEmaterial *pM, *pMaterial, *pNextMaterial;
  ONEdevice *pDevice;
  double startTime;
  int baseIndex, indexBE, indexBC;

  /* loop through all the bjt models */
  for (; model != NULL; model = model->NBJTnextModel) {
    methods = model->NBJTmethods;
    models = model->NBJTmodels;
    options = model->NBJToptions;
    outputs = model->NBJToutputs;

    /* Default the nominal temperature from the circuit. */
    if (!options->OPTNtnomGiven) {
      options->OPTNtnom = ckt->CKTnomTemp;
    }
    for (pM = model->NBJTmatlInfo; pM != NULL; pM = pM->next) {
      pM->tnom = options->OPTNtnom;
    }

    /* Latch the model's physical-effect switches into the globals. */
    BandGapNarrowing = models->MODLbandGapNarrowing;
    ConcDepLifetime = models->MODLconcDepLifetime;
    TempDepMobility = models->MODLtempDepMobility;
    ConcDepMobility = models->MODLconcDepMobility;

    for (inst = model->NBJTinstances; inst != NULL;
         inst = inst->NBJTnextInstance) {

      startTime = SPfrontEnd->IFseconds();

      /* Default instance temperature and area. */
      if (!inst->NBJTtempGiven) {
        inst->NBJTtemp = ckt->CKTtemp;
      }
      if (!inst->NBJTareaGiven || inst->NBJTarea <= 0.0) {
        inst->NBJTarea = 1.0;
      }
      inst->NBJTpDevice->area = inst->NBJTarea * options->OPTNdefa;

      /* Compute and save globals for this instance. */
      GLOBcomputeGlobals(&(inst->NBJTglobals), inst->NBJTtemp);

      /* Calculate new sets of material parameters. */
      pM = model->NBJTmatlInfo;
      pMaterial = inst->NBJTpDevice->pMaterials;
      for (; pM != NULL; pM = pM->next, pMaterial = pMaterial->next) {
        /* Copy the original values, then fix the incorrect pointer. */
        pNextMaterial = pMaterial->next;
        bcopy(pM, pMaterial, sizeof(ONEmaterial));
        pMaterial->next = pNextMaterial;

        /* Now do the temperature dependence. */
        MATLtempDep(pMaterial, pMaterial->tnom);
        if (outputs->OUTPmaterial) {
          printMaterialInfo(pMaterial);
        }
      }

      /* Assign doping to the mesh. */
      ONEsetDoping(inst->NBJTpDevice, model->NBJTprofiles,
          model->NBJTdopTables);

      /* Assign other physical parameters to the mesh. */
      ONEsetup(inst->NBJTpDevice);

      /* Assign boundary condition parameters. */
      ONEsetBCparams(inst->NBJTpDevice, model->NBJTboundaries,
          model->NBJTcontacts);

      /* Normalize everything. */
      ONEnormalize(inst->NBJTpDevice);

      /* Find the device's type from the net doping near the contact. */
      if (inst->NBJTpDevice->elemArray[1]->pNodes[0]->netConc < 0.0) {
        inst->NBJTtype = PNP;
      } else {
        inst->NBJTtype = NPN;
      }

      /*
       * Find the location of the base index.  If it is not on a node,
       * place it midway between the two junctions for now.
       */
      pDevice = inst->NBJTpDevice;
      baseIndex = pDevice->baseIndex;
      if (baseIndex <= 0) {
        if (options->OPTNbaseDepthGiven) {
          printf("Warning: base contact not on node -- adjusting contact\n");
        }
        NBJTjunctions(pDevice, &indexBE, &indexBC);
        pDevice->baseIndex = (indexBE + indexBC) / 2;
      }

      /* Tag the base node with the opposite carrier type. */
      if (inst->NBJTtype == PNP) {
        pDevice->elemArray[pDevice->baseIndex]->pNodes[0]->baseType = N_TYPE;
      } else if (inst->NBJTtype == NPN) {
        pDevice->elemArray[pDevice->baseIndex]->pNodes[0]->baseType = P_TYPE;
      } else {
        printf("NBJTtemp: unknown BJT type \n");
      }

      /*
       * Without an explicit base depth, solve equilibrium and refine the
       * base contact position (indexBE/indexBC were set above, since
       * this branch implies baseIndex <= 0).
       */
      if (baseIndex <= 0 && !options->OPTNbaseDepthGiven) {
        ONEdcDebug = FALSE;
        ONEequilSolve(pDevice);
        adjustBaseContact(pDevice, indexBE, indexBC);
      }
      printf("BJT: base contact depth is %g um at node %d\n",
          pDevice->elemArray[pDevice->baseIndex]->pNodes[0]->x * 1e4,
          pDevice->baseIndex);

      /* Find, normalize and convert to reciprocal-form the base length. */
      pDevice->baseLength = options->OPTNbaseLength;
      if (pDevice->baseLength > 0.0) {
        pDevice->baseLength /= LNorm;
        pDevice->baseLength = 1.0 / pDevice->baseLength;
      } else if (pDevice->elemArray[pDevice->baseIndex]->evalNodes[0]) {
        pDevice->baseLength = pDevice->elemArray[pDevice->baseIndex]->rDx;
      } else {
        pDevice->baseLength = pDevice->elemArray[pDevice->baseIndex - 1]->rDx;
      }

      /* Adjust reciprocal base length to account for base area factor */
      pDevice->baseLength *= options->OPTNbaseArea;

      /* Charge this instance with the elapsed setup time. */
      inst->NBJTpDevice->pStats->totalTime[STAT_SETUP] +=
          SPfrontEnd->IFseconds() - startTime;
    }
  }
  return (OK);
}
/*
 * Convert a struct sockaddr from ASCII to binary.  If its a protocol
 * family that we specially handle, do that, otherwise defer to the
 * generic parse type ng_ksocket_generic_sockaddr_type.
 *
 * Accepted forms: a generic "{ ... }" structure, or "<family>/<addr>"
 * where PF_LOCAL takes a quoted pathname and PF_INET takes
 * <a>.<b>.<c>.<d>[:port].  Writes the binary sockaddr into 'buf' and
 * the resulting length into '*buflen'.  Returns 0 or an errno.
 */
static int
ng_ksocket_sockaddr_parse(const struct ng_parse_type *type,
	const char *s, int *off, const u_char *const start,
	u_char *const buf, int *buflen)
{
	struct sockaddr *const sa = (struct sockaddr *)buf;
	enum ng_parse_token tok;
	char fambuf[32];
	int family, len;
	char *t;

	/* If next token is a left curly brace, use generic parse type */
	if ((tok = ng_parse_get_token(s, off, &len)) == T_LBRACE) {
		return (*ng_ksocket_generic_sockaddr_type.supertype->parse)
		    (&ng_ksocket_generic_sockaddr_type,
		    s, off, start, buf, buflen);
	}

	/* Get socket address family followed by a slash */
	while (isspace(s[*off]))
		(*off)++;
	if ((t = strchr(s + *off, '/')) == NULL)
		return (EINVAL);
	if ((len = t - (s + *off)) > sizeof(fambuf) - 1)
		return (EINVAL);
	strncpy(fambuf, s + *off, len);
	fambuf[len] = '\0';	/* strncpy doesn't terminate; do it here */
	*off += len + 1;
	if ((family = ng_ksocket_parse(ng_ksocket_families, fambuf, 0)) == -1)
		return (EINVAL);

	/* Set family */
	if (*buflen < SADATA_OFFSET)
		return (ERANGE);
	sa->sa_family = family;

	/* Set family-specific data and length */
	switch (sa->sa_family) {
	case PF_LOCAL:		/* Get pathname */
	    {
		const int pathoff = OFFSETOF(struct sockaddr_un, sun_path);
		struct sockaddr_un *const sun = (struct sockaddr_un *)sa;
		int toklen, pathlen;
		char *path;

		if ((path = ng_get_string_token(s, off, &toklen, NULL)) == NULL)
			return (EINVAL);
		pathlen = strlen(path);
		if (pathlen > SOCK_MAXADDRLEN) {
			free(path, M_NETGRAPH_KSOCKET);
			return (E2BIG);
		}
		if (*buflen < pathoff + pathlen) {
			free(path, M_NETGRAPH_KSOCKET);
			return (ERANGE);
		}
		*off += toklen;
		bcopy(path, sun->sun_path, pathlen);
		sun->sun_len = pathoff + pathlen;
		free(path, M_NETGRAPH_KSOCKET);
		break;
	    }

	case PF_INET:		/* Get an IP address with optional port */
	    {
		struct sockaddr_in *const sin = (struct sockaddr_in *)sa;
		int i;

		/* Parse this: <ipaddress>[:port] */
		for (i = 0; i < 4; i++) {
			u_long val;
			char *eptr;

			/* One dotted-quad octet per iteration. */
			val = strtoul(s + *off, &eptr, 10);
			if (val > 0xff || eptr == s + *off)
				return (EINVAL);
			*off += (eptr - (s + *off));
			((u_char *)&sin->sin_addr)[i] = (u_char)val;
			if (i < 3) {
				if (s[*off] != '.')
					return (EINVAL);
				(*off)++;
			} else if (s[*off] == ':') {
				/* After the last octet: optional port. */
				(*off)++;
				val = strtoul(s + *off, &eptr, 10);
				if (val > 0xffff || eptr == s + *off)
					return (EINVAL);
				*off += (eptr - (s + *off));
				sin->sin_port = htons(val);
			} else
				sin->sin_port = 0;
		}
		bzero(&sin->sin_zero, sizeof(sin->sin_zero));
		sin->sin_len = sizeof(*sin);
		break;
	    }

#if 0
	case PF_APPLETALK:	/* XXX implement these someday */
	case PF_INET6:
	case PF_IPX:
#endif

	default:
		return (EINVAL);
	}

	/* Done */
	*buflen = sa->sa_len;
	return (0);
}
/*
 * vmm guest system call:
 * - init the calling thread structure
 * - prepare for running in non-root mode
 */
int
sys_vmm_guest_ctl(struct vmm_guest_ctl_args *uap)
{
	int error = 0;
	struct vmm_guest_options options;
	struct trapframe *tf = uap->sysmsg_frame;
	unsigned long stack_limit = USRSTACK;
	unsigned char stack_page[PAGE_SIZE];	/* bounce buffer, one page */

	clear_quickret();

	switch (uap->op) {
	case VMM_GUEST_RUN:
		error = copyin(uap->options, &options,
		    sizeof(struct vmm_guest_options));
		if (error) {
			kprintf("%s: error copyin vmm_guest_options\n",
			    __func__);
			goto out;
		}

		/*
		 * Copy the caller's stack, page by page from the top down
		 * to the current stack pointer, onto the guest's new stack.
		 */
		while(stack_limit > tf->tf_sp) {
			stack_limit -= PAGE_SIZE;
			options.new_stack -= PAGE_SIZE;

			error = copyin((const void *)stack_limit,
			    (void *)stack_page, PAGE_SIZE);
			if (error) {
				kprintf("%s: error copyin stack\n",
				    __func__);
				goto out;
			}

			error = copyout((const void *)stack_page,
			    (void *)options.new_stack, PAGE_SIZE);
			if (error) {
				kprintf("%s: error copyout stack\n",
				    __func__);
				goto out;
			}
		}

		/* Hand the caller's register state to the VMM. */
		bcopy(tf, &options.tf, sizeof(struct trapframe));

		error = vmm_vminit(&options);
		if (error) {
			if (error == ENODEV) {
				kprintf("%s: vmm_vminit failed - "
				    "no VMM available \n", __func__);
				goto out;
			}
			kprintf("%s: vmm_vminit failed\n", __func__);
			goto out_exit;
		}

		generic_lwp_return(curthread->td_lwp, tf);

		error = vmm_vmrun();

		/*
		 * NOTE: falling out of the switch after vmm_vmrun()
		 * returns deliberately reaches out_exit below, ending
		 * the guest via exit1().
		 */
		break;
	default:
		kprintf("%s: INVALID op\n", __func__);
		error = EINVAL;
		goto out;
	}
out_exit:
	exit1(W_EXITCODE(error, 0));
out:
	return (error);
}
/*
 * Load the information expected by a kernel.
 *
 * - The 'boothowto' argument is constructed
 * - The 'bootdev' argument is constructed
 * - The kernel environment is copied into kernel space.
 * - Module metadata are formatted and placed in kernel space.
 *
 * On success, *modulep receives the (relocated) address of the module
 * metadata and 0 is returned.
 *
 * Cleanup: removed unused locals 'bfp' and 'bip'; 'dtbp' is now only
 * declared when LOADER_FDT_SUPPORT is compiled in (its sole use).
 */
int
md_load(char *args, vm_offset_t *modulep)
{
	struct preloaded_file *kfp;
	struct preloaded_file *xp;
	struct file_metadata *md;
	vm_offset_t kernend;
	vm_offset_t addr;
	vm_offset_t envp;
	vm_offset_t size;
	vm_offset_t vaddr;
#if defined(LOADER_FDT_SUPPORT)
	vm_offset_t dtbp;
#endif
	char *rootdevname;
	int howto;
	int i;

	/*
	 * These metadata addreses must be converted for kernel after
	 * relocation.
	 */
	uint32_t mdt[] = {
		MODINFOMD_SSYM, MODINFOMD_ESYM, MODINFOMD_KERNEND,
		MODINFOMD_ENVP,
#if defined(LOADER_FDT_SUPPORT)
		MODINFOMD_DTBP
#endif
	};

	howto = md_getboothowto(args);

	/*
	 * Allow the environment variable 'rootdev' to override the supplied
	 * device. This should perhaps go to MI code and/or have $rootdev
	 * tested/set by MI code before launching the kernel.
	 */
	rootdevname = getenv("rootdev");
	if (rootdevname == NULL)
		rootdevname = getenv("currdev");
	/* Try reading the /etc/fstab file to select the root device */
	getrootmount(rootdevname);

	/* Find the last module in the chain */
	addr = 0;
	for (xp = file_findfile(NULL, NULL); xp != NULL; xp = xp->f_next) {
		if (addr < (xp->f_addr + xp->f_size))
			addr = xp->f_addr + xp->f_size;
	}

	/* Pad to a page boundary */
	addr = roundup(addr, PAGE_SIZE);

	/* Copy our environment */
	envp = addr;
	addr = md_copyenv(addr);

	/* Pad to a page boundary */
	addr = roundup(addr, PAGE_SIZE);

	kernend = 0;
	kfp = file_findfile(NULL, "elf32 kernel");
	if (kfp == NULL)
		kfp = file_findfile(NULL, "elf kernel");
	if (kfp == NULL)
		panic("can't find kernel file");
	file_addmetadata(kfp, MODINFOMD_HOWTO, sizeof howto, &howto);
	file_addmetadata(kfp, MODINFOMD_ENVP, sizeof envp, &envp);

#if defined(LOADER_FDT_SUPPORT)
	/* Handle device tree blob */
	dtbp = fdt_fixup();
	if (dtbp != (vm_offset_t)NULL)
		file_addmetadata(kfp, MODINFOMD_DTBP, sizeof dtbp, &dtbp);
	else
		pager_output("WARNING! Trying to fire up the kernel, but no "
		    "device tree blob found!\n");
#endif

	/* Placeholder; patched below once the real end is known. */
	file_addmetadata(kfp, MODINFOMD_KERNEND, sizeof kernend, &kernend);

	/* Figure out the size and location of the metadata */
	*modulep = addr;
	size = md_copymodules(0);	/* dry run: size only */
	kernend = roundup(addr + size, PAGE_SIZE);

	/* Provide MODINFOMD_KERNEND */
	md = file_findmetadata(kfp, MODINFOMD_KERNEND);
	bcopy(&kernend, md->md_data, sizeof kernend);

	/* Convert addresses to the final VA */
	*modulep -= __elfN(relocation_offset);

	for (i = 0; i < sizeof mdt / sizeof mdt[0]; i++) {
		md = file_findmetadata(kfp, mdt[i]);
		if (md) {
			bcopy(md->md_data, &vaddr, sizeof vaddr);
			vaddr -= __elfN(relocation_offset);
			bcopy(&vaddr, md->md_data, sizeof vaddr);
		}
	}

	/* Only now copy actual modules and metadata */
	(void)md_copymodules(addr);

	return (0);
}
/*************************************************************************************************************
** @brief  : Load all watches from the ini file into the watch array
** #conf   : ini file dictionary
** #watch  : destination array of watch descriptors, at least 'size' entries
** #size   : number of sections to read
** @return : result of conf_arrange_watch() (how many items in conf file)
**
** Fixes: bcopy() does not copy the trailing '\0', so both the section-name
** copy and GET_STR_ENTRY could leave unterminated strings; destinations are
** now explicitly NUL-terminated, and a NULL iniparser_getsecname() result
** is skipped instead of dereferenced.
*************************************************************************************************************/
int conf_init(dictionary *conf, P_WATCH_INFO watch, const unsigned int size)
{
	char entry[64];
	char *cp = NULL;
	unsigned int i;

#define GET_INT_ENTRY(v, d)	do { v = iniparser_getint(conf, entry, (d)); } while(0)
#define GET_BOOL_ENTRY(v, d)	do { v = iniparser_getboolean(conf, entry, (d)); } while(0)
/* Copy the string value and always NUL-terminate the destination. */
#define GET_STR_ENTRY(s, d)	do { cp = iniparser_getstring(conf, entry, (d)); if (cp != NULL) { bcopy(cp, s, strlen(cp)); (s)[strlen(cp)] = '\0'; } } while (0)
#define FORMAT_ENTRY(s, k)	do { bzero(entry, sizeof(entry)); snprintf(entry, sizeof(entry), "%s:%s", (s), (k)); } while(0)

	/* Debug print all conf */
	if (debug){
		iniparser_dump(conf, stderr);
	}

	/* Get section name's (skip any missing section) */
	for (i = 0; i < size; i++){
		cp = iniparser_getsecname(conf, i);
		if (cp == NULL)
			continue;
		bcopy(cp, watch[i].name, strlen(cp));
		watch[i].name[strlen(cp)] = '\0';	/* NUL-terminate */
	}

	/* Get each of them data */
	for (i = 0; i < size; i++){
		/* Ignore ? */
		FORMAT_ENTRY(watch[i].name, IGNORE_KEY);
		GET_BOOL_ENTRY(watch[i].ignore, 0);

		/* If ignore, do not read jump to next */
		if (watch[i].ignore){
			continue;
		}

		/* Path */
		FORMAT_ENTRY(watch[i].name, PATH_KEY);
		GET_STR_ENTRY(watch[i].path, "");

		/* Check if path is dir set is_dir mark */
		watch[i].is_dir = conf_is_path_is_dir(watch[i].path);

		/* Comment */
		FORMAT_ENTRY(watch[i].name, COMMENT_KEY);
		GET_STR_ENTRY(watch[i].comment, "");

		/* Find if setting special file */
		FORMAT_ENTRY(watch[i].name, SPECIAL_KEY);

		/* Only dir have this option */
		if (watch[i].is_dir && iniparser_find_entry(conf, entry)){
			/* Read form ini file */
			if ((cp = iniparser_getstring(conf, entry, ""))){
				/* Get special file name */
				watch[i].spc_file_cnt = conf_get_words(cp,
				    watch[i].spc_file, MAX_SPC_FILE);
			}
		}

		/* events */
		FORMAT_ENTRY(watch[i].name, EVENTS_KEY);
		GET_STR_ENTRY(watch[i].events_str, "");

		/* Parser events */
		watch[i].events = conf_parser_events(watch[i].events_str,
		    watch[i].is_dir);

		/* Debug */
		if (debug){
			conf_print_watch(&watch[i]);
		}
	} /* end for */

	/* The loop above always completes, so i == size here */
	return conf_arrange_watch(watch, size);
}