/* *DESC: More built in symbols, to support turtle graphics */

/*
 * Each turtle primitive is described by a rout_node (dispatch entry:
 * handler function plus argument counts / parameter list) and an ss
 * symbol-table entry that links it into the builtin symbol chain
 * (ndt0 -> &nd4, ndt1 -> &ndt0, ...; each entry points at the previous
 * one).  Field meanings follow the project's RD/NLF/NNC/Basetype macros;
 * the trailing negative number appears to be a unique builtin id —
 * NOTE(review): confirm against the symbol-table definition.
 */

/* Back <n>: move the turtle backward; one integer parameter. */
static rout_node r_back = { 0, Fbackward, 1, 1, l_int_param };
ss ndt0 = {N_DECL_ID, RD(r_back), NLF, NNC("Back"), T_BPROC, HashByte, &nd4, Basetype(SP_proc), 0, 0, 0, Lmf, -260 };

/* Forwd <n>: move the turtle forward; one integer parameter. */
static rout_node r_forwd = { 0, Fforward, 1, 1, l_int_param };
ss ndt1 = {N_DECL_ID, RD(r_forwd), NLF, NNC("Forwd"), T_BPROC, HashByte, &ndt0, Basetype(SP_proc), 0, 0, 0, Lmf, -261 };

/* Heading: builtin FUNCTION (T_BFUNC) returning the heading as an
 * integer (result size sizeof(rint)); takes no parameters. */
static rout_node r_heading = { 0, Fgetheading, 0, 0, empty_list };
ss ndt2 = {N_DECL_ID, RD(r_heading), NLF, NNC("Heading"), T_BFUNC, HashByte, &ndt1, Basetype(BT_integer), 0, sizeof(rint), 0, Lmf, -262 };

/* Home: return the turtle to the origin; no parameters. */
static rout_node r_home = { 0, Fhome, 0, 0, empty_list };
ss ndt3 = {N_DECL_ID, RD(r_home), NLF, NNC("Home"), T_BPROC, HashByte, &ndt2, Basetype(SP_proc), 0, 0, 0, Lmf, -263 };

/* PenDown / PenUp both dispatch to the same handler Fpen via r_pen;
 * presumably Fpen distinguishes them by the invoked symbol — TODO confirm. */
static rout_node r_pen = { 0, Fpen, 0, 0, empty_list };
ss ndt4 = {N_DECL_ID, RD(r_pen), NLF, NNC("PenDown"), T_BPROC, HashByte, &ndt3, Basetype(SP_proc), 0, 0, 0, Lmf, -264 };
ss ndt5 = {N_DECL_ID, RD(r_pen), NLF, NNC("PenUp"), T_BPROC, HashByte, &ndt4, Basetype(SP_proc), 0, 0, 0, Lmf, -265 };
size_t poisson_square(tkernel<glm::tvec2<T, P>> & kernel, const T min_dist, const unsigned int num_probes) { assert(kernel.depth() == 1); std::random_device RD; std::mt19937_64 generator(RD()); std::uniform_real_distribution<> radius_dist(min_dist, min_dist * 2.0); std::uniform_real_distribution<> angle_dist(0.0, 2.0 * glm::pi<T>()); std::uniform_int_distribution<> int_distribute(0, std::numeric_limits<int>::max()); auto occupancy = poisson_square_map<T, P>{ min_dist }; size_t k = 0; // number of valid/final points within the kernel kernel[k] = glm::tvec2<T, P>(0.5, 0.5); auto actives = std::list<size_t>(); actives.push_back(k); occupancy.mask(kernel[k], k); while (!actives.empty() && k < kernel.size() - 1) { // randomly pick an active point const auto pick = int_distribute(generator); auto pick_it = actives.begin(); std::advance(pick_it, pick % actives.size()); const auto active = kernel[*pick_it]; std::vector<std::tuple<glm::tvec2<T, P>, T>> probes{ num_probes }; #pragma omp parallel for for (int i = 0; i < static_cast<int>(num_probes); ++i) { const auto r = radius_dist(generator); const auto a = angle_dist(generator); auto probe = glm::tvec2<T, P>{ active.x + r * cos(a), active.y + r * sin(a) }; // within square? (tilable) if (probe.x < 0.0) probe.x += 1.0; else if (probe.x >= 1.0) probe.x -= 1.0; if (probe.y < 0.0) probe.y += 1.0; else if (probe.y >= 1.0) probe.y -= 1.0; // Note: do NOT make this optimization //if (!tilable && (probe.x < 0.0 || probe.x > 1.0 || probe.y < 0.0 || probe.y > 1.0)) // continue; // points within min_dist? const auto masked = occupancy.masked(probe, kernel); const auto delta = abs(active - probe); probes[i] = std::make_tuple<glm::tvec2<T, P>, T>(std::move(probe), (masked ? 
static_cast<T>(-1.0) : glm::dot(delta, delta))); } // pick nearest probe from sample set glm::vec2 nearest_probe; auto nearest_dist = 4 * min_dist * min_dist; auto nearest_found = false; for (int i = 0; i < static_cast<int>(num_probes); ++i) { // is this nearest point yet? - optimized by using square distance -> skipping sqrt const auto new_dist = std::get<1>(probes[i]); if (new_dist < 0.0 || nearest_dist < new_dist) continue; if (!nearest_found) nearest_found = true; nearest_dist = new_dist; nearest_probe = std::get<0>(probes[i]); } if (!nearest_found && (actives.size() > 0 || k > 1)) { actives.erase(pick_it); continue; } kernel[++k] = nearest_probe; actives.push_back(k); occupancy.mask(nearest_probe, k); } return k + 1; }
/******************************************************************************
 * Examples:
 * a:        Jim                 Jim                Jim
 * other:    Mary                Mary               Mary
 * in:       [happy-for Jim Mary] [sorry-for J. M.] [gloating J. M.]
 *           (positive-emotion)  (negative-emotion) (positive-emotion)
 * attitude: like-human          like-human         like-human
 * weight:   WEIGHT_DEFAULT      WEIGHT_DEFAULT     -WEIGHT_DEFAULT
 * o.e.c.:   positive-emotion    negative-emotion   negative-emotion
 * att:      [like-human J. M. WD] [like-human J. M. WD] [like-human J. M.
 *           -WD]
 *
 * todo: Generalize this to work for all the related like/love attitudes as well
 * as friends/enemies relations.
 * The intensity rules are more complex: something like
 * value of actor2's emotion = value of actor1's emotion x
 * value of how much actor2 likes actor1.
 ******************************************************************************/
/*
 * Understanding-agent rule for "fortunes of others" emotions (happy-for,
 * sorry-for, gloating).  <a> feels <in> (weighted by <weight>) about
 * <other>; <other_emot_class> is the emotion class thereby implied for
 * <other>.  Updates the actor context's relevance/sense/novelty scores
 * and may assert inferred attitudes/emotions via UA_Infer.
 */
void UA_Emotion_FortunesOfOthers(Actor *ac, Ts *ts, Obj *a, Obj *in, Obj *other, Float weight, Obj *other_emot_class)
{
  int found;
  Float weight1;
  Obj *other_emot_class1;
  ObjList *causes, *objs, *atts, *p, *q;

  /* Relate <a>'s emotion to <a>'s attitudes. */
  if (0.0 != (weight1 = UA_FriendAttitude(ts, a, other, 1, &atts))) {
    if (FloatSign(weight1) == FloatSign(weight)) {
      /* The input emotion agrees with known attitudes. */
      ContextSetRSN(ac->cx, RELEVANCE_TOTAL, SENSE_TOTAL, NOVELTY_HALF);
      ContextAddMakeSenseReasons(ac->cx, atts);
    } else {
      /* The input emotion disagrees with known attitudes. */
      ContextSetRSN(ac->cx, RELEVANCE_TOTAL, SENSE_LITTLE, NOVELTY_TOTAL);
      ContextAddNotMakeSenseReasons(ac->cx, atts);
    }
  } else {
    /* Attitude of <a> toward <other> is unknown: infer the attitude
     * [like-human a other weight] from the observed emotion. */
    ContextSetRSN(ac->cx, RELEVANCE_TOTAL, SENSE_MOSTLY, NOVELTY_MOSTLY);
    UA_Infer(ac->cx->dc, ac->cx, ts, L(N("like-human"), a, other,
                                       NumberToObj(weight), E), in);
  }

  /* Relate <a>'s emotion to <other>'s emotion.
   * found = <other>'s emotion implied by <a>'s emotion is already known,
   * excluding motivating emotions.
   */
  objs = RD(ts, L(other_emot_class, other, E), 0);
  found = 0;
  for (p = objs; p; p = p->next) {
    /* Skip motivating emotions (goal-driving ones). */
    if (ISA(N("motivation"), I(p->obj, 0))) continue;
    if ((causes = CAUSES(p->obj, ac->cx))) {
      for (q = causes; q; q = q->next) {
        /* Non-goal causes of <other>'s emotion make <a>'s emotion
         * make sense. */
        if (!ISA(N("active-goal"), I(q->obj, 0))) {
          found = 1;
          ContextSetRSN(ac->cx, RELEVANCE_TOTAL, SENSE_TOTAL, NOVELTY_HALF);
          ContextAddMakeSenseReason(ac->cx, q->obj);
        }
      }
    } else {
      /* Emotion known but cause unknown: it still corroborates. */
      found = 1;
      ContextSetRSN(ac->cx, RELEVANCE_TOTAL, SENSE_TOTAL, NOVELTY_HALF);
      ContextAddMakeSenseReason(ac->cx, p->obj);
    }
    ObjListFree(causes);
  }
  ObjListFree(objs);
  if (!found) {
    /* <other>'s emotion implied by <a>'s emotion is not yet known:
     * infer a concrete emotion instance for <other>.  Map the generic
     * positive/negative classes to happiness/sadness. */
    ContextSetRSN(ac->cx, RELEVANCE_TOTAL, SENSE_MOSTLY, NOVELTY_MOSTLY);
    if (other_emot_class == N("positive-emotion")) {
      other_emot_class1 = N("happiness");
    } else if (other_emot_class == N("negative-emotion")) {
      other_emot_class1 = N("sadness");
    } else {
      other_emot_class1 = other_emot_class;
    }
    UA_Infer(ac->cx->dc, ac->cx, ts,
             L(other_emot_class1, other, NumberToObj(FloatAbs(weight)), E),
             in);
  }
  /* todo: Relate <a>'s emotion to <other>'s goal. */
}
/*
 * This routine is called from both ptemwput and ptemwsrv to do the
 * actual work of dealing with mp. ptmewput will have already
 * dealt with high priority messages.
 *
 * Return 1 if the message was processed completely and 0 if not.
 */
static int
ptemwmsg(queue_t *q, mblk_t *mp)
{
	struct ptem *ntp = (struct ptem *)q->q_ptr;
	struct iocblk *iocp;	/* outgoing ioctl structure */
	struct termio *termiop;
	struct termios *termiosp;
	mblk_t *dack_ptr;	/* disconnect message ACK block */
	mblk_t *pckt_msgp;	/* message sent to the PCKT module */
	mblk_t *dp;		/* ioctl reply data */
	tcflag_t cflags;
	int error;

	switch (mp->b_datap->db_type) {

	case M_IOCTL:
		/*
		 * Note: for each "set" type operation a copy
		 * of the M_IOCTL message is made and passed
		 * downstream.  Eventually the PCKT module, if
		 * it has been pushed, should pick up this message.
		 * If the PCKT module has not been pushed the master
		 * side stream head will free it.
		 */
		iocp = (struct iocblk *)mp->b_rptr;
		switch (iocp->ioc_cmd) {

		case TCSETAF:
		case TCSETSF:
			/*
			 * Flush the read queue.
			 */
			if (putnextctl1(q, M_FLUSH, FLUSHR) == 0) {
				miocnak(q, mp, 0, EAGAIN);
				break;
			}
			/* FALLTHROUGH */
		case TCSETA:
		case TCSETAW:
		case TCSETS:
		case TCSETSW:
			/*
			 * Pull the termio/termios argument into one mblk,
			 * then cache the c_cflag word in the ptem state.
			 */
			switch (iocp->ioc_cmd) {

			case TCSETAF:
			case TCSETA:
			case TCSETAW:
				error = miocpullup(mp, sizeof (struct termio));
				if (error != 0) {
					miocnak(q, mp, 0, error);
					goto out;
				}
				cflags = ((struct termio *)
				    mp->b_cont->b_rptr)->c_cflag;
				/* termio only carries the low 16 bits;
				 * keep the upper half of the cached flags. */
				ntp->cflags =
				    (ntp->cflags & 0xffff0000 | cflags);
				break;

			case TCSETSF:
			case TCSETS:
			case TCSETSW:
				error =
				    miocpullup(mp, sizeof (struct termios));
				if (error != 0) {
					miocnak(q, mp, 0, error);
					goto out;
				}
				cflags = ((struct termios *)
				    mp->b_cont->b_rptr)->c_cflag;
				ntp->cflags = cflags;
				break;
			}

			if ((cflags & CBAUD) == B0) {
				/*
				 * Hang-up: Send a zero length message.
				 */
				dack_ptr = ntp->dack_ptr;

				if (dack_ptr) {
					ntp->dack_ptr = NULL;
					/*
					 * Send a zero length message
					 * downstream.
					 */
					putnext(q, dack_ptr);
				}
			} else {
				/*
				 * Make a copy of this message and pass it on
				 * to the PCKT module.
				 */
				if ((pckt_msgp = copymsg(mp)) == NULL) {
					miocnak(q, mp, 0, EAGAIN);
					break;
				}
				putnext(q, pckt_msgp);
			}
			/*
			 * Send ACK upstream.
			 */
			mioc2ack(mp, NULL, 0, 0);
			qreply(q, mp);
out:
			break;

		case TCGETA:
			/* Reply with the cached cflags as a termio. */
			dp = allocb(sizeof (struct termio), BPRI_MED);
			if (dp == NULL) {
				miocnak(q, mp, 0, EAGAIN);
				break;
			}
			termiop = (struct termio *)dp->b_rptr;
			termiop->c_cflag = (ushort_t)ntp->cflags;
			mioc2ack(mp, dp, sizeof (struct termio), 0);
			qreply(q, mp);
			break;

		case TCGETS:
			/* Reply with the cached cflags as a termios. */
			dp = allocb(sizeof (struct termios), BPRI_MED);
			if (dp == NULL) {
				miocnak(q, mp, 0, EAGAIN);
				break;
			}
			termiosp = (struct termios *)dp->b_rptr;
			termiosp->c_cflag = ntp->cflags;
			mioc2ack(mp, dp, sizeof (struct termios), 0);
			qreply(q, mp);
			break;

		case TCSBRK:
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			/*
			 * Need a copy of this message to pass it on to
			 * the PCKT module.
			 */
			if ((pckt_msgp = copymsg(mp)) == NULL) {
				miocnak(q, mp, 0, EAGAIN);
				break;
			}
			/*
			 * Send a copy of the M_IOCTL to the PCKT module.
			 */
			putnext(q, pckt_msgp);
			/*
			 * TCSBRK meaningful if data part of message is 0
			 * cf. termio(7).
			 */
			if (!(*(int *)mp->b_cont->b_rptr))
				(void) putnextctl(q, M_BREAK);
			/*
			 * ACK the ioctl.
			 */
			mioc2ack(mp, NULL, 0, 0);
			qreply(q, mp);
			break;

		case JWINSIZE:
		case TIOCGWINSZ:
		case TIOCSWINSZ:
			/* Window-size ioctls are handled by ptioc(). */
			ptioc(q, mp, WRSIDE);
			break;

		case TIOCSTI:
			/*
			 * Simulate typing of a character at the terminal.  In
			 * all cases, we acknowledge the ioctl and pass a copy
			 * of it along for the PCKT module to encapsulate.  If
			 * not in remote mode, we also process the ioctl
			 * itself, looping the character given as its argument
			 * back around to the read side.
			 */
			/*
			 * Need a copy of this message to pass on to the PCKT
			 * module.
			 */
			if ((pckt_msgp = copymsg(mp)) == NULL) {
				miocnak(q, mp, 0, EAGAIN);
				break;
			}
			if ((ntp->state & REMOTEMODE) == 0) {
				mblk_t *bp;

				error = miocpullup(mp, sizeof (char));
				if (error != 0) {
					freemsg(pckt_msgp);
					miocnak(q, mp, 0, error);
					break;
				}

				/*
				 * The permission checking has already been
				 * done at the stream head, since it has to be
				 * done in the context of the process doing
				 * the call.
				 */
				if ((bp = allocb(1, BPRI_MED)) == NULL) {
					freemsg(pckt_msgp);
					miocnak(q, mp, 0, EAGAIN);
					break;
				}
				/*
				 * XXX: Is EAGAIN really the right response to
				 * flow control blockage?
				 */
				if (!bcanputnext(RD(q), mp->b_band)) {
					freemsg(bp);
					freemsg(pckt_msgp);
					miocnak(q, mp, 0, EAGAIN);
					break;
				}
				*bp->b_wptr++ = *mp->b_cont->b_rptr;
				/* loop the character back to the read side */
				qreply(q, bp);
			}
			putnext(q, pckt_msgp);
			mioc2ack(mp, NULL, 0, 0);
			qreply(q, mp);
			break;

		case PTSSTTY:
			if (ntp->state & IS_PTSTTY) {
				miocnak(q, mp, 0, EEXIST);
			} else {
				ntp->state |= IS_PTSTTY;
				mioc2ack(mp, NULL, 0, 0);
				qreply(q, mp);
			}
			break;

		default:
			/*
			 * End of the line.  The slave driver doesn't see any
			 * ioctls that we don't explicitly pass along to it.
			 */
			miocnak(q, mp, 0, EINVAL);
			break;
		}
		break;

	case M_DELAY: /* tty delays not supported */
		freemsg(mp);
		break;

	case M_DATA:
		if ((mp->b_wptr - mp->b_rptr) < 0) {
			/*
			 * Free all bad length messages.
			 */
			freemsg(mp);
			break;
		} else if ((mp->b_wptr - mp->b_rptr) == 0) {
			/* Zero-length data only passes on a tty stream. */
			if (!(ntp->state & IS_PTSTTY)) {
				freemsg(mp);
				break;
			}
		}
		/* Output flow-controlled: tell the caller to requeue. */
		if (ntp->state & OFLOW_CTL)
			return (0);
		/* FALLTHROUGH: pass data downstream. */

	default:
		putnext(q, mp);
		break;
	}

	return (1);
}
/*
 * cvc_wput()
 *	cn driver does a strwrite of console output data to rconsvp which has
 *	been set by consconfig. The data enters the cvc stream at the streamhead
 *	and flows thru ttycompat and ldterm which have been pushed on the
 *	stream.  Console output data gets sent out either to cvcredir, if the
 *	network path is available and selected, or to IOSRAM otherwise.  Data is
 *	sent to cvcredir via its read queue (cvcoutput_q, which gets set in
 *	cvc_register()).  If the IOSRAM path is selected, or if previous mblks
 *	are currently queued up for processing, the new mblk will be queued
 *	and handled later on by cvc_wsrv.
 */
static int
cvc_wput(queue_t *q, mblk_t *mp)
{
	int		error = 0;

	/* All message handling runs under the cvc reader lock. */
	rw_enter(&cvclock, RW_READER);

	CVC_DBG2(CVC_DBG_WPUT, "mp 0x%x db_type 0x%x",
	    mp, mp->b_datap->db_type);

	switch (mp->b_datap->db_type) {

	case M_IOCTL:
	case M_CTL: {
		struct iocblk *iocp = (struct iocblk *)mp->b_rptr;

		switch (iocp->ioc_cmd) {
		/*
		 * These ioctls are only supposed to be
		 * processed after everything else that is
		 * already queued awaiting processing, so throw
		 * them on the queue and let cvc_wsrv handle
		 * them.
		 */
		case TCSETSW:
		case TCSETSF:
		case TCSETAW:
		case TCSETAF:
		case TCSBRK:
			putq(q, mp);
			break;

		default:
			cvc_ioctl(q, mp);
		}
		break;
	}

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			/*
			 * Flush our write queue.
			 */
			flushq(q, FLUSHDATA);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			/* Flush the read side and reflect the message. */
			flushq(RD(q), FLUSHDATA);
			qreply(q, mp);
		} else
			freemsg(mp);
		break;

	case M_STOP:
		/* Output stopped; cvc_wsrv will hold messages. */
		cvc_stopped = 1;
		freemsg(mp);
		break;

	case M_START:
		cvc_stopped = 0;
		freemsg(mp);
		qenable(q);  /* Start up delayed messages */
		break;

	case M_READ:
		/*
		 * ldterm handles this (VMIN/VTIME processing).
		 */
		freemsg(mp);
		break;

	/* Note: default deliberately precedes M_DATA in this switch. */
	default:
		cmn_err(CE_WARN, "cvc_wput: unexpected mblk type - mp ="
		    " 0x%p, type = 0x%x", mp, mp->b_datap->db_type);
		freemsg(mp);
		break;

	case M_DATA:
		/*
		 * If there are other mblks queued up for transmission,
		 * or we're using IOSRAM either because cvcredir hasn't
		 * registered yet or because we were configured that
		 * way, or cvc has been stopped or suspended, place this
		 * mblk on the input queue for future processing.
		 * Otherwise, hand it off to cvcredir for transmission
		 * via the network.
		 */
		if (q->q_first != NULL || cvcoutput_q == NULL || via_iosram ||
		    cvc_stopped == 1 || cvc_suspended == 1) {
			(void) putq(q, mp);
		} else {
			/*
			 * XXX - should canputnext be called here?
			 * Starfire's cvc doesn't do that, and it
			 * appears to work anyway.
			 */
			(void) putnext(cvcoutput_q, mp);
		}
		break;
	}
	rw_exit(&cvclock);
	return (error);
}
/*
 * Through message handle for write side stream
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : P
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 *
 * Forwards mp from the user node's write side to the active lower queue.
 * @q:        upper write queue the message arrived on
 * @mp:       the message
 * @pri_flag: MSU_HIGH to bypass flow control, otherwise normal priority
 * @rw:       mode to re-acquire uinst->lock with after an iocack
 *            (the lock is dropped around oplmsu_iocack and re-taken)
 * Returns SUCCESS, or FAILURE when normal-priority flow control forces
 * the message to be put back on the upper queue.
 */
int
oplmsu_wcmn_through_hndl(queue_t *q, mblk_t *mp, int pri_flag, krw_t rw)
{
	queue_t	*usr_queue = NULL, *dst_queue = NULL;
	ctrl_t	*ctrl;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	/* Snapshot the user control queue under c_lock. */
	mutex_enter(&oplmsu_uinst->c_lock);
	if ((ctrl = oplmsu_uinst->user_ctrl) != NULL) {
		usr_queue = ctrl->queue;
		mutex_exit(&oplmsu_uinst->c_lock);
	} else {
		mutex_exit(&oplmsu_uinst->c_lock);
		/* No user node: nak ioctls, drop everything else. */
		if (mp->b_datap->db_type == M_IOCTL) {
			rw_exit(&oplmsu_uinst->lock);
			oplmsu_iocack(q, mp, ENODEV);
			rw_enter(&oplmsu_uinst->lock, rw);
		} else {
			freemsg(mp);
		}
		return (SUCCESS);
	}

	if (oplmsu_uinst->lower_queue != NULL) {
		dst_queue = WR(oplmsu_uinst->lower_queue);
	} else {
		/* No active lower path: nak ioctls, drop everything else. */
		cmn_err(CE_WARN, "!oplmsu: through-lwq: "
		    "Active path doesn't exist");

		if (mp->b_datap->db_type == M_IOCTL) {
			rw_exit(&oplmsu_uinst->lock);
			oplmsu_iocack(q, mp, ENODEV);
			rw_enter(&oplmsu_uinst->lock, rw);
		} else {
			freemsg(mp);
		}
		return (SUCCESS);
	}

	/* Only the user node's own queue pair may send through here. */
	if ((usr_queue == WR(q)) || (usr_queue == RD(q))) {
		if (pri_flag == MSU_HIGH) {
			/* High priority: ignore flow control. */
			putq(dst_queue, mp);
		} else {
			if (canput(dst_queue)) {
				putq(dst_queue, mp);
			} else {
				/* Flow controlled: put back for cvc_wsrv-style
				 * retry by the caller. */
				oplmsu_wcmn_norm_putbq(WR(q), mp, dst_queue);
				return (FAILURE);
			}
		}
	} else {
		cmn_err(CE_WARN, "oplmsu: through-lwq: "
		    "Inappropriate message for this node");

		if (mp->b_datap->db_type == M_IOCTL) {
			rw_exit(&oplmsu_uinst->lock);
			oplmsu_iocack(q, mp, ENODEV);
			rw_enter(&oplmsu_uinst->lock, rw);
		} else {
			freemsg(mp);
		}
	}
	return (SUCCESS);
}
/*
 * sad_put: - STREAMS Administrative Driver write put procedure.
 *
 * Handles M_FLUSH, and the SAD_SAP/SAD_GAP/SAD_LAP/SAD_VML ioctls in both
 * I_STR (implied copyin/copyout) and TRANSPARENT (M_COPYIN/M_COPYOUT
 * round-trip) forms.  Per-ioctl progress is tracked in sad->iocstate:
 * state 1 = argument data available, state 2 = copyout acknowledged
 * (or, for SAD_VML, module list data available).  The I_STR entry points
 * jump directly into the state-1 handlers via the sad_*_state1 labels.
 *
 * Always consumes mp and returns 0.
 */
static streamscall int
sad_put(queue_t *q, mblk_t *mp)
{
	struct sad *sad = q->q_ptr;
	union ioctypes *ioc;
	int err = 0, rval = 0, count = 0;
	mblk_t *dp = mp->b_cont;	/* attached ioctl data, may be NULL */
	caddr_t sa_addr, sl_addr;	/* user addresses for copyin/copyout */
	size_t sa_size, sl_size;	/* strapush / str_list sizes */

	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		if (mp->b_rptr[0] & FLUSHW) {
			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(q, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(q, FLUSHDATA);
			mp->b_rptr[0] &= ~FLUSHW;
		}
		if (mp->b_rptr[0] & FLUSHR) {
			queue_t *rq = RD(q);

			if (mp->b_rptr[0] & FLUSHBAND)
				flushband(rq, mp->b_rptr[1], FLUSHDATA);
			else
				flushq(rq, FLUSHDATA);
			/* reflect the flush to the read side */
			qreply(q, mp);
			return (0);
		}
		break;
	case M_IOCTL:
		ioc = (typeof(ioc)) mp->b_rptr;
		/* Determine user buffer address and structure sizes,
		 * honoring 32-bit compatibility layouts when enabled. */
#ifdef WITH_32BIT_CONVERSION
		if (ioc->iocblk.ioc_flag == IOC_ILP32) {
			/* XXX: following pointer conversion does not work on all architectures. */
			sa_addr =
			    (caddr_t) (unsigned long) (uint32_t) *(unsigned long *) dp->b_rptr;
			sa_size = sizeof(struct strapush32);
			sl_addr = sa_addr;
			sl_size = sizeof(struct str_list32);
		} else
#endif
		{
			sa_addr = (caddr_t) *(unsigned long *) dp->b_rptr;
			sa_size = sizeof(struct strapush);
			sl_addr = sa_addr;
			sl_size = sizeof(struct str_list);
		}
		switch (ioc->iocblk.ioc_cmd) {
		case SAD_SAP:
			/* Setting autopush info requires root (uid 0). */
			err = -EPERM;
#ifdef HAVE_KMEMB_STRUCT_CRED_UID_VAL
			if (ioc->iocblk.ioc_uid.val != 0)
				goto nak;
#else
			if (ioc->iocblk.ioc_uid != 0)
				goto nak;
#endif
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				/* Transparent: ask the stream head to copy
				 * the strapush structure in first. */
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			/* I_STR: data already attached; process now. */
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_sap_state1;
		case SAD_GAP:
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_gap_state1;
		case SAD_LAP:
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sa_addr;
				ioc->copyreq.cq_size = sa_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_lap_state1;
		case SAD_VML:
			if (ioc->iocblk.ioc_count == TRANSPARENT) {
				/* SAD_VML copies in a str_list instead. */
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = sl_addr;
				ioc->copyreq.cq_size = sl_size;
				ioc->copyreq.cq_flag = 0;
				ioc->copyreq.cq_private = (mblk_t *) ioc->copyreq.cq_addr;
				sad->transparent = 1;
				sad->iocstate = 1;
				qreply(q, mp);
				return (0);
			}
			sad->transparent = 0;
			sad->iocstate = 1;
			goto sad_vml_state1;
		}
		err = -EINVAL;
		goto nak;
	case M_IOCDATA:
		ioc = (typeof(ioc)) mp->b_rptr;
		if (ioc->copyresp.cp_rval != (caddr_t) 0) {
			/* The copy operation failed; abort silently. */
			sad->transparent = 0;
			sad->iocstate = 0;
			goto abort;
		}
#ifdef WITH_32BIT_CONVERSION
		if (ioc->copyresp.cp_flag == IOC_ILP32) {
			sa_size = sizeof(struct strapush32);
			sl_size = sizeof(struct str_list32);
		} else
#endif
		{
			sa_size = sizeof(struct strapush);
			sl_size = sizeof(struct str_list);
		}
		switch (ioc->copyresp.cp_cmd) {
		case SAD_SAP:
			switch (sad->iocstate) {
			case 1:
			      sad_sap_state1:
				/* strapush data now available: apply it. */
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_set(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap = (typeof(sap)) dp->b_rptr;

					if ((err = apush_set(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					/* copy the (possibly updated)
					 * structure back to the user */
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:	/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_GAP:
			switch (sad->iocstate) {
			case 1:
			      sad_gap_state1:
				/* look up autopush info into the buffer */
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_get(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap;

					sap = (typeof(sap)) dp->b_rptr;
					if ((err = apush_get(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:	/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_LAP:
			switch (sad->iocstate) {
			case 1:
			      sad_lap_state1:
				/* list autopush info into the buffer */
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sa_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct strapush32 *sap32 = (typeof(sap32)) dp->b_rptr;
					struct strapush sa, *sap = &sa;

					sap32_convert(sap32, sap);
					if ((err = apush_lst(sap)))
						goto nak;
					sap32_revert(sap, sap32);
				} else
#endif
				{
					struct strapush *sap;

					sap = (typeof(sap)) dp->b_rptr;
					if ((err = apush_lst(sap)))
						goto nak;
				}
				if (sad->transparent == 1) {
					mp->b_datap->db_type = M_COPYOUT;
					ioc->copyreq.cq_addr = (caddr_t) ioc->copyresp.cp_private;
					ioc->copyreq.cq_size = sa_size;
					ioc->copyreq.cq_flag = 0;
					sad->transparent = 1;
					sad->iocstate = 2;
					qreply(q, mp);
					return (0);
				}
				/* use implied I_STR copyout */
				count = sa_size;
				goto ack;
			case 2:	/* done */
				goto ack;
			}
			err = -EIO;
			goto nak;
		case SAD_VML:
			switch (sad->iocstate) {
			case 1:
			      sad_vml_state1:
				/* str_list header available: validate the
				 * module count, then copy in the list. */
				err = -EFAULT;
				if (!dp || dp->b_wptr < dp->b_rptr + sl_size)
					goto nak;
#ifdef WITH_32BIT_CONVERSION
				if (ioc->copyresp.cp_flag == IOC_ILP32) {
					struct str_list32 *slp32 = (typeof(slp32)) dp->b_rptr;

					sad->sl.sl_nmods = slp32->sl_nmods;
					sad->sl.sl_modlist =
					    (struct str_mlist *) (unsigned long) slp32->sl_modlist;
				} else
#endif
				{
					struct str_list *slp = (typeof(slp)) dp->b_rptr;

					sad->sl.sl_nmods = slp->sl_nmods;
					sad->sl.sl_modlist = slp->sl_modlist;
				}
				err = -EINVAL;
				if (1 > sad->sl.sl_nmods || sad->sl.sl_nmods > MAXAPUSH)
					goto nak;
				mp->b_datap->db_type = M_COPYIN;
				ioc->copyreq.cq_addr = (caddr_t) sad->sl.sl_modlist;
				ioc->copyreq.cq_size = sad->sl.sl_nmods * sizeof(struct str_mlist);
				ioc->copyreq.cq_flag = 0;
				sad->iocstate = 2;
				qreply(q, mp);
				return (0);
			case 2:
				/* module list data available: validate it */
				err = -EFAULT;
				if (!dp
				    || dp->b_wptr < dp->b_rptr + sad->sl.sl_nmods * sizeof(struct str_mlist))
					goto nak;
				sad->sl.sl_modlist = (struct str_mlist *) dp->b_rptr;
				if ((err = apush_vml(&sad->sl)) < 0)
					goto nak;
				rval = err;	/* validation result */
				goto ack;
			}
			err = -EIO;
			goto nak;
		}
	}
      abort:
	freemsg(mp);
	return (0);
      nak:
	/* negative acknowledgement: report -err to the user */
	sad->iocstate = 0;
	mp->b_datap->db_type = M_IOCNAK;
	ioc->iocblk.ioc_count = 0;
	ioc->iocblk.ioc_rval = -1;
	ioc->iocblk.ioc_error = -err;
	sad->transparent = 0;
	sad->iocstate = 0;
	qreply(q, mp);
	return (0);
      ack:
	/* positive acknowledgement with count/rval */
	sad->iocstate = 0;
	mp->b_datap->db_type = M_IOCACK;
	ioc->iocblk.ioc_count = count;
	ioc->iocblk.ioc_rval = rval;
	ioc->iocblk.ioc_error = 0;
	sad->transparent = 0;
	sad->iocstate = 0;
	qreply(q, mp);
	return (0);
}
void main() { int len, fileSize, flag, comm; char string[50]; char command[10]; char name[40]; char *s, *s1; version(); initial(); flag = 1; while (flag){ cout << endl << " 模拟文件管理模拟系统" << endl; cout << endl << "CD 改变目录 CREATE 创建文件 DEL 删除文件 " << endl << "LSALL 显示目录 MD 创建目录 RD 删除目录" << endl << "exit 退出" << endl; cout << endl << "-----------------------------------------------" << endl; printf("%s:>#", path); gets(string); len = strlen(string); if (len == 0){ strcpy(command, "errer"); } else{ //获得命令 s = NULL; s = strchr(string, ' '); if (s != NULL){ *s = '\0'; } strcpy(command, string); //测试命令类型 if ((!strcmp(command, "CD")) || !strcmp(command, "cd")){ comm = 1; } else{ if ((!strcmp(command, "CREATE")) || !strcmp(command, "create")){ comm = 2; } else{ if ((!strcmp(command, "DEL")) || !strcmp(command, "del")){ comm = 3; } else{ if ((!strcmp(command, "LSALL")) || !strcmp(command, "lsall")){ comm = 4; } else{ if ((!strcmp(command, "MD")) || !strcmp(command, "md")){ comm = 5; } else{ if ((!strcmp(command, "RD")) || !strcmp(command, "rd")){ comm = 6; } else{ if ((!strcmp(command, "EXIT")) || !strcmp(command, "exit")){ comm = 0; } else{ comm = 100; } } } } } } } switch (comm){ case 1: //1 改变目录 strcpy(name, s + 1); CD(name); break; case 2: //2 创建文件 s1 = strchr(s + 1, ' '); *s1 = '\0'; strcpy(name, s + 1); fileSize = atoi(s1 + 1); CREATE(name, fileSize); break; case 3: //3 删除文件 strcpy(name, s + 1); DEL(name); break; case 4: //4 显示目录 LSALL(); break; case 5: //5 创建目录 strcpy(name, s + 1); MD(name); break; case 6: //6 删除目录 strcpy(name, s + 1); RD(name); break; case 0: //0 退出系统 flag = 0; break; default: cout << "命令错误" << endl; } } } }
/** A select_handler_T read_func for itrm_in.sock.  A slave process
 * calls this when the master sends it data to be displayed.  The
 * master process never calls this.
 *
 * Protocol: ordinary bytes are echoed to the terminal; a NUL byte
 * introduces an exec request encoded as
 *   <fg byte> <path NUL-terminated> <delete-file NUL-terminated>.
 * An empty path means the payload is a "special" command handled by
 * dispatch_special(); otherwise the path is executed on a thread,
 * optionally blocking the terminal while it runs in the foreground.
 */
static void
in_sock(struct itrm *itrm)
{
	struct string path;
	struct string delete_;
	char ch;
	int fg; /* enum term_exec */
	ssize_t bytes_read, i, p;
	unsigned char buf[ITRM_OUT_QUEUE_SIZE];

	bytes_read = safe_read(itrm->in.sock, buf, ITRM_OUT_QUEUE_SIZE);
	if (bytes_read <= 0) goto free_and_return;

qwerty:
	/* Plain display data runs straight through until a NUL byte. */
	for (i = 0; i < bytes_read; i++)
		if (!buf[i])
			goto has_nul_byte;

	safe_hard_write(itrm->out.std, buf, bytes_read);
	return;

has_nul_byte:
	/* Flush the display bytes preceding the NUL, then shift the
	 * remainder of the buffer down over the consumed prefix. */
	if (i) safe_hard_write(itrm->out.std, buf, i);
	i++;

	assert(ITRM_OUT_QUEUE_SIZE - i > 0);
	memmove(buf, buf + i, ITRM_OUT_QUEUE_SIZE - i);
	bytes_read -= i;
	p = 0;

/* Read one request byte: from the buffer if available, otherwise
 * directly from the socket (blocking). */
#define RD(xx) { \
	unsigned char cc; \
	\
	if (p < bytes_read) \
		cc = buf[p++]; \
	else if ((hard_read(itrm->in.sock, &cc, 1)) <= 0) \
		goto free_and_return; \
	xx = cc; \
}

	RD(fg);

	if (!init_string(&path)) goto free_and_return;

	/* NUL-terminated path string */
	while (1) {
		RD(ch);
		if (!ch) break;
		add_char_to_string(&path, ch);
	}

	if (!init_string(&delete_)) {
		done_string(&path);
		goto free_and_return;
	}

	/* NUL-terminated name of a file to delete afterwards */
	while (1) {
		RD(ch);
		if (!ch) break;
		add_char_to_string(&delete_, ch);
	}

#undef RD

	if (!*path.source) {
		/* Empty path: the payload is a special command. */
		dispatch_special(delete_.source);
	} else {
		int blockh;
		unsigned char *param;
		int path_len, del_len, param_len;

		/* TODO: Should this be changed to allow TERM_EXEC_NEWWIN
		 * in a blocked terminal?  There is similar code in
		 * exec_on_terminal().
		 * --KON, 2007 */
		if (is_blocked() && fg != TERM_EXEC_BG) {
			if (*delete_.source) unlink(delete_.source);
			goto nasty_thing;
		}

		/* Pack fg + both NUL-terminated strings for exec_thread. */
		path_len = path.length;
		del_len = delete_.length;
		param_len = path_len + del_len + 3;

		param = mem_alloc(param_len);
		if (!param) goto nasty_thing;

		param[0] = fg;
		memcpy(param + 1, path.source, path_len + 1);
		memcpy(param + 1 + path_len + 1, delete_.source, del_len + 1);

		if (fg == TERM_EXEC_FG) block_itrm();

		blockh = start_thread((void (*)(void *, int)) exec_thread,
				      param, param_len);
		mem_free(param);

		if (blockh == -1) {
			if (fg == TERM_EXEC_FG)
				unblock_itrm();

			goto nasty_thing;
		}

		/* Unblock (fg) or just close the handle (bg) when the
		 * child side of the pipe reports completion. */
		if (fg == TERM_EXEC_FG) {
			set_handlers(blockh, (select_handler_T) unblock_itrm_x,
				     NULL, (select_handler_T) unblock_itrm_x,
				     (void *) (long) blockh);

		} else {
			set_handlers(blockh, close_handle, NULL, close_handle,
				     (void *) (long) blockh);
		}
	}

nasty_thing:
	done_string(&path);
	done_string(&delete_);
	assert(ITRM_OUT_QUEUE_SIZE - p > 0);
	/* Consume the request bytes and reprocess what is left. */
	memmove(buf, buf + p, ITRM_OUT_QUEUE_SIZE - p);
	bytes_read -= p;

	goto qwerty;

free_and_return:
	free_itrm(itrm);
}
/*
 * netmap load balancer: receives packets on one netmap interface and
 * distributes them across groups of netmap pipes by packet-header hash,
 * using optional extra netmap buffers as per-pipe overflow queues.
 */
int main(int argc, char **argv)
{
	int ch;
	uint32_t i;
	int rv;
	unsigned int iter = 0;

	glob_arg.ifname[0] = '\0';
	glob_arg.output_rings = 0;
	glob_arg.batch = DEF_BATCH;
	glob_arg.syslog_interval = DEF_SYSLOG_INT;

	while ( (ch = getopt(argc, argv, "i:p:b:B:s:")) != -1) {
		switch (ch) {
		case 'i':
			D("interface is %s", optarg);
			if (strlen(optarg) > MAX_IFNAMELEN - 8) {
				D("ifname too long %s", optarg);
				return 1;
			}
			/* prepend "netmap:" unless already a netmap/vale name */
			if (strncmp(optarg, "netmap:", 7) && strncmp(optarg, "vale", 4)) {
				sprintf(glob_arg.ifname, "netmap:%s", optarg);
			} else {
				strcpy(glob_arg.ifname, optarg);
			}
			break;

		case 'p':
			if (parse_pipes(optarg)) {
				usage();
				return 1;
			}
			break;

		case 'B':
			glob_arg.extra_bufs = atoi(optarg);
			D("requested %d extra buffers", glob_arg.extra_bufs);
			break;

		case 'b':
			glob_arg.batch = atoi(optarg);
			D("batch is %d", glob_arg.batch);
			break;

		case 's':
			glob_arg.syslog_interval = atoi(optarg);
			D("syslog interval is %d", glob_arg.syslog_interval);
			break;

		default:
			D("bad option %c %s", ch, optarg);
			usage();
			return 1;
		}
	}

	if (glob_arg.ifname[0] == '\0') {
		D("missing interface name");
		usage();
		return 1;
	}

	/* extract the base name */
	char *nscan = strncmp(glob_arg.ifname, "netmap:", 7) ?
			glob_arg.ifname : glob_arg.ifname + 7;
	strncpy(glob_arg.base_name, nscan, MAX_IFNAMELEN);
	/* truncate at the first netmap name-suffix character */
	for (nscan = glob_arg.base_name; *nscan && !index("-*^{}/@", *nscan); nscan++)
		;
	*nscan = '\0';

	/* no -p given: set up the default single group */
	if (glob_arg.num_groups == 0)
		parse_pipes("");

	setlogmask(LOG_UPTO(LOG_INFO));
	openlog("lb", LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);

	uint32_t npipes = glob_arg.output_rings;

	pthread_t stat_thread;

	/* ports[0..npipes-1] are output pipes, ports[npipes] is the rx port */
	ports = calloc(npipes + 1, sizeof(struct port_des));
	if (!ports) {
		D("failed to allocate the stats array");
		return 1;
	}
	struct port_des *rxport = &ports[npipes];
	init_groups();

	if (pthread_create(&stat_thread, NULL, print_stats, NULL) == -1) {
		D("unable to create the stats thread: %s", strerror(errno));
		return 1;
	}

	/* we need base_req to specify pipes and extra bufs */
	struct nmreq base_req;
	memset(&base_req, 0, sizeof(base_req));

	base_req.nr_arg1 = npipes;
	base_req.nr_arg3 = glob_arg.extra_bufs;

	rxport->nmd = nm_open(glob_arg.ifname, &base_req, 0, NULL);

	if (rxport->nmd == NULL) {
		D("cannot open %s", glob_arg.ifname);
		return (1);
	} else {
		D("successfully opened %s (tx rings: %u)", glob_arg.ifname,
		  rxport->nmd->req.nr_tx_slots);
	}

	/* nr_arg3 reports how many extra buffers we actually got */
	uint32_t extra_bufs = rxport->nmd->req.nr_arg3;

	struct overflow_queue *oq = NULL;
	/* reference ring to access the buffers */
	rxport->ring = NETMAP_RXRING(rxport->nmd->nifp, 0);

	if (!glob_arg.extra_bufs)
		goto run;

	D("obtained %d extra buffers", extra_bufs);
	if (!extra_bufs)
		goto run;

	/* one overflow queue for each output pipe, plus one for the
	 * free extra buffers
	 */
	oq = calloc(npipes + 1, sizeof(struct overflow_queue));
	if (!oq) {
		D("failed to allocated overflow queues descriptors");
		goto run;
	}

	freeq = &oq[npipes];
	rxport->oq = freeq;

	freeq->slots = calloc(extra_bufs, sizeof(struct netmap_slot));
	if (!freeq->slots) {
		/* NOTE(review): execution continues with a NULL slots
		 * array here — looks like a missing `goto run`; verify. */
		D("failed to allocate the free list");
	}
	freeq->size = extra_bufs;
	snprintf(freeq->name, MAX_IFNAMELEN, "free queue");

	/*
	 * the list of buffers uses the first uint32_t in each buffer
	 * as the index of the next buffer.
	 */
	uint32_t scan;
	for (scan = rxport->nmd->nifp->ni_bufs_head;
	     scan;
	     scan = *(uint32_t *)NETMAP_BUF(rxport->ring, scan))
	{
		struct netmap_slot s;
		s.buf_idx = scan;
		ND("freeq <- %d", s.buf_idx);
		oq_enq(freeq, &s);
	}

	if (freeq->n != extra_bufs) {
		D("something went wrong: netmap reported %d extra_bufs, but the free list contained %d",
		  extra_bufs, freeq->n);
		return 1;
	}
	rxport->nmd->nifp->ni_bufs_head = 0;

run:
	/* we need to create the persistent vale ports */
	if (create_custom_ports(rxport->nmd->req.nr_arg2)) {
		free_buffers();
		return 1;
	}
	atexit(delete_custom_ports);
	atexit(free_buffers);

	int j, t = 0;
	for (j = 0; j < glob_arg.num_groups; j++) {
		struct group_des *g = &groups[j];
		int k;
		for (k = 0; k < g->nports; ++k) {
			struct port_des *p = &g->ports[k];
			char interface[25];
			/* "{N/xT" opens the master side of pipe N, tx only.
			 * NOTE(review): a long pipename can overflow this
			 * 25-byte buffer via sprintf — verify bounds. */
			sprintf(interface, "netmap:%s{%d/xT", g->pipename, g->first_id + k);
			D("opening pipe named %s", interface);

			p->nmd = nm_open(interface, NULL, 0, rxport->nmd);

			if (p->nmd == NULL) {
				D("cannot open %s", interface);
				return (1);
			} else {
				D("successfully opened pipe #%d %s (tx slots: %d)",
				  k + 1, interface, p->nmd->req.nr_tx_slots);
				p->ring = NETMAP_TXRING(p->nmd->nifp, 0);
			}
			D("zerocopy %s",
			  (rxport->nmd->mem == p->nmd->mem) ? "enabled" : "disabled");

			if (extra_bufs) {
				struct overflow_queue *q = &oq[t + k];
				q->slots = calloc(extra_bufs, sizeof(struct netmap_slot));
				if (!q->slots) {
					D("failed to allocate overflow queue for pipe %d", k);
					/* make all overflow queue management fail */
					extra_bufs = 0;
				}
				q->size = extra_bufs;
				snprintf(q->name, MAX_IFNAMELEN, "oq %s{%d", g->pipename, k);
				p->oq = q;
			}
		}
		t += g->nports;
	}

	/* an allocation failed above: tear down all overflow queues */
	if (glob_arg.extra_bufs && !extra_bufs) {
		if (oq) {
			for (i = 0; i < npipes + 1; i++) {
				free(oq[i].slots);
				oq[i].slots = NULL;
			}
			free(oq);
			oq = NULL;
		}
		D("*** overflow queues disabled ***");
	}

	sleep(2);

	struct pollfd pollfd[npipes + 1];
	memset(&pollfd, 0, sizeof(pollfd));
	signal(SIGINT, sigint_h);
	while (!do_abort) {
		u_int polli = 0;
		iter++;

		/* poll output pipes only when they hold pending packets */
		for (i = 0; i < npipes; ++i) {
			struct netmap_ring *ring = ports[i].ring;
			if (nm_ring_next(ring, ring->tail) == ring->cur) {
				/* no need to poll, there are no packets pending */
				continue;
			}
			pollfd[polli].fd = ports[i].nmd->fd;
			pollfd[polli].events = POLLOUT;
			pollfd[polli].revents = 0;
			++polli;
		}

		pollfd[polli].fd = rxport->nmd->fd;
		pollfd[polli].events = POLLIN;
		pollfd[polli].revents = 0;
		++polli;

		//RD(5, "polling %d file descriptors", polli+1);
		rv = poll(pollfd, polli, 10);
		if (rv <= 0) {
			if (rv < 0 && errno != EAGAIN && errno != EINTR)
				RD(1, "poll error %s", strerror(errno));
			continue;
		}

		if (oq) {
			/* try to push packets from the overflow queues
			 * to the corresponding pipes */
			for (i = 0; i < npipes; i++) {
				struct port_des *p = &ports[i];
				struct overflow_queue *q = p->oq;
				struct group_des *g = p->group;
				uint32_t j, lim;
				struct netmap_ring *ring;
				struct netmap_slot *slot;

				if (oq_empty(q))
					continue;
				ring = p->ring;
				lim = nm_ring_space(ring);
				if (!lim)
					continue;
				if (q->n < lim)
					lim = q->n;
				for (j = 0; j < lim; j++) {
					struct netmap_slot s = oq_deq(q), tmp;
					tmp.ptr = 0;
					slot = &ring->slot[ring->cur];
					/* a marked slot must first be forwarded
					 * to the next group before we reuse it */
					if (slot->ptr && !g->last) {
						tmp.buf_idx = forward_packet(g + 1, slot);
						/* the forwarding may have removed packets
						 * from the current queue */
						if (q->n < lim)
							lim = q->n;
					} else {
						tmp.buf_idx = slot->buf_idx;
					}
					/* recycle the displaced buffer */
					oq_enq(freeq, &tmp);
					*slot = s;
					slot->flags |= NS_BUF_CHANGED;
					ring->cur = nm_ring_next(ring, ring->cur);
				}
				ring->head = ring->cur;
				forwarded += lim;
				p->ctr.pkts += lim;
			}
		}

		int batch = 0;
		for (i = rxport->nmd->first_rx_ring; i <= rxport->nmd->last_rx_ring; i++) {
			struct netmap_ring *rxring = NETMAP_RXRING(rxport->nmd->nifp, i);

			//D("prepare to scan rings");
			int next_cur = rxring->cur;
			struct netmap_slot *next_slot = &rxring->slot[next_cur];
			const char *next_buf = NETMAP_BUF(rxring, next_slot->buf_idx);
			while (!nm_ring_empty(rxring)) {
				struct netmap_slot *rs = next_slot;
				struct group_des *g = &groups[0];
				// CHOOSE THE CORRECT OUTPUT PIPE
				uint32_t hash = pkt_hdr_hash((const unsigned char *)next_buf, 4, 'B');
				if (hash == 0) {
					non_ip++; // XXX ??
				}
				/* stash the hash in the slot; bit 32 appears to
				 * mark the slot as in-flight.  NOTE(review):
				 * 1UL << 32 is UB on 32-bit longs — verify this
				 * tool only targets LP64. */
				rs->ptr = hash | (1UL << 32);
				// prefetch the buffer for the next round
				next_cur = nm_ring_next(rxring, next_cur);
				next_slot = &rxring->slot[next_cur];
				next_buf = NETMAP_BUF(rxring, next_slot->buf_idx);
				__builtin_prefetch(next_buf);
				// 'B' is just a hashing seed
				rs->buf_idx = forward_packet(g, rs);
				rs->flags |= NS_BUF_CHANGED;
				rxring->head = rxring->cur = next_cur;

				batch++;
				if (unlikely(batch >= glob_arg.batch)) {
					ioctl(rxport->nmd->fd, NIOCRXSYNC, NULL);
					batch = 0;
				}
				ND(1,
				   "Forwarded Packets: %"PRIu64" Dropped packets: %"PRIu64"   Percent: %.2f",
				   forwarded, dropped,
				   ((float)dropped / (float)forwarded * 100));
			}
		}
	}

	pthread_join(stat_thread, NULL);

	printf("%"PRIu64" packets forwarded.  %"PRIu64" packets dropped. Total %"PRIu64"\n",
	       forwarded, dropped, forwarded + dropped);
	return 0;
}
/*
 * Competitive-programming solution (single translation unit).
 * Reads n, m, a, b and an n x m matrix via the file-scope reader RD(), then
 * greedily picks non-overlapping a x b rectangles minimising
 * (rectangle sum) - (rectangle minimum) * a * b, and prints the picks.
 * Relies on file-scope globals defined elsewhere in this file:
 * mtx, sum, stk, pos, mn, nd, rem, ans, ll, rr, anscnt, Node, cmp, RD().
 */
int main() {
    int n,m,a,b;n=RD();m=RD();a=RD();b=RD();
    /* read the matrix (1-based indexing throughout) */
    for(int i=1;i<=n;i++) for(int j=1;j<=m;j++) mtx[i][j]=RD();
    /* row suffix sums: sum[i][j] = mtx[i][j] + mtx[i][j+1] + ... */
    for(int i=n;i>=1;i--) for(int j=m;j>=1;j--) sum[i][j]=sum[i][j+1]+mtx[i][j];
    /* sum[i][j] = sum of the b elements of row i starting at column j */
    for(int i=n;i>=1;i--) for(int j=1;j<=m-b+1;j++) sum[i][j]-=sum[i][j+b];
    /* column suffix sums of those width-b windows */
    for(int i=n;i>=1;i--) for(int j=m;j>=1;j--) sum[i][j]=sum[i+1][j]+sum[i][j];
    /* sum[i][j] = sum of the a x b rectangle with top-left corner (i,j) */
    for(int i=1;i<=n-a+1;i++) for(int j=m;j>=1;j--) sum[i][j]-=sum[i+a][j];
    /* sliding-window minimum of width b along each row -> mn[i][*];
     * stk/pos form a monotonic deque with live indices ll..rr */
    for(int i=1;i<=n;i++) { ll=1,rr=0; for(int j=1;j<=b;j++) { while(ll<=rr && mtx[i][j]<stk[rr]) rr--; stk[++rr]=mtx[i][j];pos[rr]=j; } mn[i][1]=stk[ll]; for(int j=b+1;j<=m;j++) { if(pos[ll]<=j-b) ll++; while(ll<=rr && mtx[i][j]<stk[rr]) rr--; stk[++rr]=mtx[i][j];pos[rr]=j; mn[i][j-b+1]=stk[ll]; } }
    /* sliding-window minimum of height a down each column; the result is
     * written back into mtx[i][j] = min of the a x b rectangle at (i,j)
     * (mtx is no longer needed in its original role) */
    for(int j=1;j<=m;j++) { ll=1,rr=0; for(int i=1;i<=a;i++) { while(ll<=rr && mn[i][j]<stk[rr]) rr--; stk[++rr]=mn[i][j];pos[rr]=i; } mtx[1][j]=stk[ll]; for(int i=a+1;i<=n;i++) { if(pos[ll]<=i-a) ll++; while(ll<=rr && mn[i][j]<stk[rr]) rr--; stk[++rr]=mn[i][j];pos[rr]=i; mtx[i-a+1][j]=stk[ll]; } }
    /* build one candidate per rectangle position; val = cost of levelling the
     * rectangle down to its minimum */
    int cnt=0;
    for(int i=1;i<=n-a+1;i++) for(int j=1;j<=m-b+1;j++) nd[++cnt]=(Node){i,j,sum[i][j]-(long long)mtx[i][j]*a*b};
    std::sort(nd+1,nd+1+cnt,cmp);
    /* greedy: take candidates in sorted order; since all rectangles share the
     * same a x b size, checking the four corners against rem[][] suffices to
     * detect overlap with previously taken rectangles */
    for(int i=1;i<=cnt;i++) { int x=nd[i].x,y=nd[i].y; if(!rem[x][y] && !rem[x+a-1][y+b-1] && !rem[x+a-1][y] && !rem[x][y+b-1]) { ans[++anscnt]=i; for(int i=x;i<=x+a-1;i++) for(int j=y;j<=y+b-1;j++) rem[i][j]=1; } }
    printf("%d\n",anscnt);
    /* %I64d is the MSVC/old-judge 64-bit format -- presumably targeting such a
     * judge; would be %lld on a conforming platform (TODO confirm target) */
    for(int i=1;i<=anscnt;i++) printf("%d %d %I64d\n",nd[ans[i]].x,nd[ans[i]].y,nd[ans[i]].val);
}
/** * sl_w_ioctl: - process M_IOCTL message * @q: active queue (upper write queue) * @mp: the message * * Linking of streams: streams are linked under the multiplexing driver by opening an upper stream * and then linking a signalling link stream under the multiplexing driver. Then the SL_SETLINK * input-output control is used with the multiplexer id to set the global-PPA and CLEI associated * with the signalling link. The SL_GETLINK input-output control can be used at a later date to * determine the multiplexer id for a given signalling link stream. */ static struct int sl_w_ioctl(queue_t *q, mblk_t *mp) { struct iocblk *ioc = (struct iocblk *) mp->b_rptr; switch (ioc->ioc_cmd) { case I_LINK: case I_PLINK: { struct linkblk *l; if (!mp->b_cont) mi_copy_done(q, mp, EINVAL); l = (struct linkblk *) mp->b_cont->b_rptr; if (!(bot = kmem_alloc(sizeof(*bot), KM_NOSLEEP))) mi_copy_done(q, mp, ENOMEM); write_lock_str(&mux_lock, flags); bot->next = mux_links; bpt->prev = &mux_links; mux_links = bot; bot->dev = l->l_index; bot->rq = RD(l->l_qtop); bot->wq = l->l_qtop; bot->other = NULL; noenable(bot->rq); l->l_qtop->q_ptr = RD(l->l_qtop)->q_ptr = (void *) bot; write_unlock_str(&mux_lock, flags); mi_copy_done(q, mp, 0); return (0); } case I_UNLINK: case I_PUNLINK: { struct linkblk *l; if (!mp->b_cont) mi_copy_done(q, mp, EINVAL); l = (struct linkblk *) mp->b_cont->b_rptr; write_lock_str(&mux_lock, flags); for (bot = mux_list; bot; bot = bot->next) if (bot->dev == l->l_index) break; if (!bot) { write_unlock_str(&mux_lock, flags); mi_copy_done(q, mp, EINVAL); return (0); } /* Note that the lower multiplex driver put and service procedures must be prepared to be invoked event after the M_IOCACK for the I_UNLINK or I_PUNLINK ioctl has been returned. THis is because the setq(9) back to the Stream head is not performed until after the acknowledgement has been received. We set q->q_ptr to a null multiplex structure to keep the lower Stream functioning until the setq(9) is performed. 
*/ l->l_qtop->q_ptr = RD(l->l_qtop)->q_ptr = &no_mux; if ((*bot->prev = bot->next)) { bot->next = NULL; bot->prev = &bot->next; } bot->other = NULL; kmem_free(bot, sizeof(*bot)); /* hang up all upper streams that feed this lower stream */ for (top = mux_opens; top; top = top->next) { if (top->other == bot) { putnextctl(top->rq, M_HANGUP); top->other = NULL; } } write_unlock_str(&mux_lock, flags); mi_copy_done(q, mp, 0); return (0); } case SL_SETLINK: { struct sl_mux_ppa *sm; /* This input-output control is used to set the global-PPA and CLEI associated with a lower multiplex stream. The argument is an sl_mux_ppa structure that contains the multiplex id, the 32-bit PPA, and a CLEI string of up to 32 characters in length. */ mi_copyin(q, mp, NULL, sizeof(struct sl_mux_ppa)); return (0); } case SL_GETLINK: { /* This input-output control is used to obtain the multiplex-id assocated with a lower multiplex stream. The argument is an sl_mux_ppa structure that contains a 32-bit PPA or CLEI string of up to 32 characters in length. It returns the multiplex id in the same structure. */ mi_copyin(q, mp, NULL, sizeof(struct sl_mux_ppa)); return (0); } default: if (mux->other && mux->other->rq) { if (bcanputnext(mux->other->rq, mp->b_band)) { putnext(mux->other->rq, mp); return (0); } return (-EBUSY); } break; } mi_copy_done(q, mp, EINVAL); return (0); }
/* Fetch the opcode byte at the current program counter, then advance PC. */
INLINE UINT8 rdop( minx_state *minx )
{
	UINT8 opcode;

	opcode = RD( GET_MINX_PC );
	minx->PC += 1;
	return opcode;
}
/* Read a little-endian 16-bit value: low byte at offset, high byte at offset+1. */
INLINE UINT16 rd16( minx_state *minx, UINT32 offset )
{
	UINT16 lo = RD( offset );
	UINT16 hi = RD( offset + 1 );

	return lo | ( hi << 8 );
}
// An all-ones instruction word must decode its rd field as register 31
// (all five destination bits set).
TEST(RInstruction, ExtractRd1)
{
    const int32 inst = 0xFFFFFFFF;

    EXPECT_EQ(RD(inst), 31);
}
/*
 * SPARC assembler opcode table: "ld" variants for the v6 architecture --
 * integer loads (register+register, register+immediate, and the %g0 /
 * zero-offset aliases) followed by the FPU/coprocessor destination forms
 * (g = FSR, F = float reg, D = double alias, C = coprocessor reg).
 * NOTE(review): the table continues past this chunk; its terminator is not
 * visible here.
 */
struct sparc_opcode sparc_opcodes[] = { { "ld", F3(3, 0x00, 0), F3(~3, ~0x00, ~0), "[1+2],d", 0, v6 }, { "ld", F3(3, 0x00, 0), F3(~3, ~0x00, ~0)|RS2_G0, "[1],d", 0, v6 }, /* ld [rs1+%g0],d */ { "ld", F3(3, 0x00, 1), F3(~3, ~0x00, ~1), "[1+i],d", 0, v6 }, { "ld", F3(3, 0x00, 1), F3(~3, ~0x00, ~1), "[i+1],d", 0, v6 }, { "ld", F3(3, 0x00, 1), F3(~3, ~0x00, ~1)|RS1_G0, "[i],d", 0, v6 }, { "ld", F3(3, 0x00, 1), F3(~3, ~0x00, ~1)|SIMM13(~0), "[1],d", 0, v6 }, /* ld [rs1+0],d */ { "ld", F3(3, 0x20, 0), F3(~3, ~0x20, ~0), "[1+2],g", 0, v6 }, { "ld", F3(3, 0x20, 0), F3(~3, ~0x20, ~0)|RS2_G0, "[1],g", 0, v6 }, /* ld [rs1+%g0],d */ { "ld", F3(3, 0x20, 1), F3(~3, ~0x20, ~1), "[1+i],g", 0, v6 }, { "ld", F3(3, 0x20, 1), F3(~3, ~0x20, ~1), "[i+1],g", 0, v6 }, { "ld", F3(3, 0x20, 1), F3(~3, ~0x20, ~1)|RS1_G0, "[i],g", 0, v6 }, { "ld", F3(3, 0x20, 1), F3(~3, ~0x20, ~1)|SIMM13(~0), "[1],g", 0, v6 }, /* ld [rs1+0],d */ { "ld", F3(3, 0x21, 0), F3(~3, ~0x21, ~0)|RD(~0), "[1+2],F", 0, v6 }, { "ld", F3(3, 0x21, 0), F3(~3, ~0x21, ~0)|RS2_G0|RD(~0),"[1],F", 0, v6 }, /* ld [rs1+%g0],d */ { "ld", F3(3, 0x21, 1), F3(~3, ~0x21, ~1)|RD(~0), "[1+i],F", 0, v6 }, { "ld", F3(3, 0x21, 1), F3(~3, ~0x21, ~1)|RD(~0), "[i+1],F", 0, v6 }, { "ld", F3(3, 0x21, 1), F3(~3, ~0x21, ~1)|RS1_G0|RD(~0),"[i],F", 0, v6 }, { "ld", F3(3, 0x21, 1), F3(~3, ~0x21, ~1)|SIMM13(~0)|RD(~0),"[1],F", 0, v6 }, /* ld [rs1+0],d */ { "ld", F3(3, 0x30, 0), F3(~3, ~0x30, ~0), "[1+2],D", F_ALIAS, v6 }, { "ld", F3(3, 0x30, 0), F3(~3, ~0x30, ~0)|RS2_G0, "[1],D", F_ALIAS, v6 }, /* ld [rs1+%g0],d */ { "ld", F3(3, 0x30, 1), F3(~3, ~0x30, ~1), "[1+i],D", F_ALIAS, v6 }, { "ld", F3(3, 0x30, 1), F3(~3, ~0x30, ~1), "[i+1],D", F_ALIAS, v6 }, { "ld", F3(3, 0x30, 1), F3(~3, ~0x30, ~1)|RS1_G0, "[i],D", F_ALIAS, v6 }, { "ld", F3(3, 0x30, 1), F3(~3, ~0x30, ~1)|SIMM13(~0), "[1],D", F_ALIAS, v6 }, /* ld [rs1+0],d */ { "ld", F3(3, 0x31, 0), F3(~3, ~0x31, ~0), "[1+2],C", 0, v6 }, { "ld", F3(3, 0x31, 0), F3(~3, ~0x31, ~0)|RS2_G0, "[1],C", 0, v6 }, /* ld 
[rs1+%g0],d */ { "ld", F3(3, 0x31, 1), F3(~3, ~0x31, ~1), "[1+i],C", 0, v6 },
/*
 * log_wput: write-side put procedure for the log STREAMS driver.
 * Dispatches on the message type: handles flush requests, the
 * I_CONSLOG/I_TRCLOG/I_ERRLOG ioctls that register this stream as a
 * console/trace/error log reader, and M_PROTO/M_DATA payloads which are
 * reformatted with log_makemsg() and delivered via log_sendmsg().
 * The message is always consumed; always returns 0.
 */
static int log_wput(queue_t *q, mblk_t *mp)
{
	log_t *lp = (log_t *)q->q_ptr;
	struct iocblk *iocp;
	mblk_t *mp2;
	cred_t *cr = msg_getcred(mp, NULL);
	zoneid_t zoneid;

	/*
	 * Default to global zone if dblk doesn't have a valid cred.
	 * Calls to syslog() go through putmsg(), which does set up
	 * the cred.
	 */
	zoneid = (cr != NULL) ? crgetzoneid(cr) : GLOBAL_ZONEID;

	switch (DB_TYPE(mp)) {
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			/* flush our read side too, then bounce the message back */
			flushq(RD(q), FLUSHALL);
			qreply(q, mp);
			return (0);
		}
		break;

	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		if (lp->log_major != LOG_LOGMIN) {
			/* write-only device */
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		if (iocp->ioc_count == TRANSPARENT) {
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		if (lp->log_flags) {
			/* already registered as a log reader */
			miocnak(q, mp, 0, EBUSY);
			return (0);
		}
		/* take ownership of the ioctl payload as the filter data */
		freemsg(lp->log_data);
		lp->log_data = mp->b_cont;
		mp->b_cont = NULL;
		switch (iocp->ioc_cmd) {
		case I_CONSLOG:
			log_update(lp, RD(q), SL_CONSOLE, log_console);
			break;
		case I_TRCLOG:
			/* trace logging requires a filter payload */
			if (lp->log_data == NULL) {
				miocnak(q, mp, 0, EINVAL);
				return (0);
			}
			log_update(lp, RD(q), SL_TRACE, log_trace);
			break;
		case I_ERRLOG:
			log_update(lp, RD(q), SL_ERROR, log_error);
			break;
		default:
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		miocack(q, mp, 0, 0);
		return (0);

	case M_PROTO:
		if (MBLKL(mp) == sizeof (log_ctl_t) && mp->b_cont != NULL) {
			log_ctl_t *lc = (log_ctl_t *)mp->b_rptr;
			/* This code is used by savecore to log dump msgs */
			if (mp->b_band != 0 &&
			    secpolicy_sys_config(CRED(), B_FALSE) == 0) {
				(void) putq(log_consq, mp);
				return (0);
			}
			/* kernel-facility messages from userland become LOG_USER */
			if ((lc->pri & LOG_FACMASK) == LOG_KERN)
				lc->pri |= LOG_USER;
			mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, lc->level,
			    lc->flags, lc->pri, mp->b_cont->b_rptr,
			    MBLKL(mp->b_cont) + 1, 0);
			if (mp2 != NULL)
				log_sendmsg(mp2, zoneid);
		}
		break;

	case M_DATA:
		/* bare data: log it as a console message at LOG_USER|LOG_INFO */
		mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, 0, SL_CONSOLE,
		    LOG_USER | LOG_INFO, mp->b_rptr, MBLKL(mp) + 1, 0);
		if (mp2 != NULL)
			log_sendmsg(mp2, zoneid);
		break;
	}
	freemsg(mp);
	return (0);
}
/*
 * receiver_body: receiver thread main loop (pkt-gen).
 * Pins itself to the configured CPU, waits for the first packet, then drains
 * the RX rings (or the tap fd / pcap handle, depending on dev_type) until
 * cancelled or until a 1-second silence, accumulating the packet count in
 * targ->count.
 */
static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd fds[1];
	struct netmap_if *nifp = targ->nifp;
	struct netmap_ring *rxring;
	int i;
	uint64_t received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* setup poll(2) mechanism. */
	memset(fds, 0, sizeof(fds));
	fds[0].fd = targ->fd;
	fds[0].events = (POLLIN);

	/* unbounded wait for the first packet. */
	for (;;) {
		i = poll(fds, 1, 1000);
		if (i > 0 && !(fds[0].revents & POLLERR))
			break;
		RD(1, "waiting for initial packets, poll returns %d %d", i, fds[0].revents);
	}

	/* main loop, exit after 1s silence */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (targ->g->dev_type == DEV_TAP) {
		D("reading from %s fd %d", targ->g->ifname, targ->g->main_fd);
		while (!targ->cancel) {
			char buf[2048];
			/* XXX should we poll ? */
			if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
				targ->count++;
		}
#ifndef NO_PCAP
	} else if (targ->g->dev_type == DEV_PCAP) {
		while (!targ->cancel) {
			/* XXX should we poll ? */
			pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap, NULL);
		}
#endif /* !NO_PCAP */
	} else {
		int dump = targ->g->options & OPT_DUMP;
		while (!targ->cancel) {
			/* Once we started to receive packets, wait at most 1 seconds
			   before quitting. */
			if (poll(fds, 1, 1 * 1000) <= 0 && !targ->g->forever) {
				clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
				targ->toc.tv_sec -= 1; /* Subtract timeout time. */
				break;
			}
			if (fds[0].revents & POLLERR) {
				D("poll err");
				goto quit;
			}
			/* drain each of this thread's RX rings, up to burst packets each */
			for (i = targ->qfirst; i < targ->qlast; i++) {
				int m;
				rxring = NETMAP_RXRING(nifp, i);
				if (nm_ring_empty(rxring))
					continue;
				m = receive_packets(rxring, targ->g->burst, dump);
				received += m;
			}
			targ->count = received;
			// tell the card we have read the data
			//ioctl(fds[0].fd, NIOCRXSYNC, NULL);
		}
	}
	targ->completed = 1;
	targ->count = received;
quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}
// Read two values in sequence by delegating to the single-argument RD()
// overload declared elsewhere in this file.
template<class T>
inline void RD(T &x0, T &x1)
{
    RD(x0);
    RD(x1);
}
// Fetch the opcode byte at the current program counter, then advance PC.
uint8_t minx_cpu_device::rdop()
{
	const uint8_t opcode = RD( GET_MINX_PC );

	++m_PC;
	return opcode;
}
/*
 * pinger_body: sender side of the ping-mode latency test.
 * Sends one timestamped packet at a time on TX ring 0, polls for replies,
 * computes the round-trip delta per reply and reports count / min / average
 * (all in nanoseconds) roughly once per second.
 * NOTE(review): the reply scan loops over first_tx_ring..last_tx_ring while
 * opening the rings with NETMAP_RXRING -- confirm those bounds are intended.
 */
static void *
pinger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	int i, rx = 0, n = targ->g->npackets;
	void *frame;
	int size;
	uint32_t sent = 0;
	struct timespec ts, now, last_print;
	uint32_t count = 0, min = 1000000000, av = 0;

	/* frame starts inside targ->pkt, skipping the unused part of the
	 * virtio-net header */
	frame = &targ->pkt;
	frame += sizeof(targ->pkt.vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;
	if (targ->g->nthreads > 1) {
		D("can only ping with 1 thread");
		return NULL;
	}

	clock_gettime(CLOCK_REALTIME_PRECISE, &last_print);
	now = last_print;
	while (n == 0 || (int)sent < n) {
		struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);
		struct netmap_slot *slot;
		char *p;
		for (i = 0; i < 1; i++) { /* XXX why the loop for 1 pkt ? */
			slot = &ring->slot[ring->cur];
			slot->len = size;
			p = NETMAP_BUF(ring, slot->buf_idx);
			if (nm_ring_empty(ring)) {
				D("-- ouch, cannot send");
			} else {
				/* stamp sequence number at offset 42 and the TX
				 * timestamp at offset 46 inside the payload */
				nm_pkt_copy(frame, p, size);
				clock_gettime(CLOCK_REALTIME_PRECISE, &ts);
				bcopy(&sent, p+42, sizeof(sent));
				bcopy(&ts, p+46, sizeof(ts));
				sent++;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
			}
		}
		/* should use a parameter to decide how often to send */
		if (poll(&pfd, 1, 3000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me, strerror(errno));
			continue;
		}
		/* see what we got back */
		for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
			ring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(ring)) {
				uint32_t seq;
				slot = &ring->slot[ring->cur];
				p = NETMAP_BUF(ring, slot->buf_idx);
				/* delta = now - TX timestamp carried in the reply */
				clock_gettime(CLOCK_REALTIME_PRECISE, &now);
				bcopy(p+42, &seq, sizeof(seq));
				bcopy(p+46, &ts, sizeof(ts));
				ts.tv_sec = now.tv_sec - ts.tv_sec;
				ts.tv_nsec = now.tv_nsec - ts.tv_nsec;
				if (ts.tv_nsec < 0) {
					ts.tv_nsec += 1000000000;
					ts.tv_sec--;
				}
				if (1) D("seq %d/%d delta %d.%09d", seq, sent, (int)ts.tv_sec, (int)ts.tv_nsec);
				if (ts.tv_nsec < (int)min)
					min = ts.tv_nsec;
				count ++;
				av += ts.tv_nsec;
				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
				rx++;
			}
		}
		//D("tx %d rx %d", sent, rx);
		//usleep(100000);
		/* print the running stats about once per second */
		ts.tv_sec = now.tv_sec - last_print.tv_sec;
		ts.tv_nsec = now.tv_nsec - last_print.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000000000;
			ts.tv_sec--;
		}
		if (ts.tv_sec >= 1) {
			D("count %d min %d av %d", count, min, av/count);
			count = 0;
			av = 0;
			min = 100000000;
			last_print = now;
		}
	}
	return NULL;
}

/*
 * reply to ping requests: swap the ethernet addresses of every received
 * frame and retransmit it on TX ring 0.
 */
static void *
ponger_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp = targ->nmd->nifp;
	struct netmap_ring *txring, *rxring;
	int i, rx = 0, sent = 0, n = targ->g->npackets;

	if (targ->g->nthreads > 1) {
		D("can only reply ping with 1 thread");
		return NULL;
	}
	D("understood ponger %d but don't know how to do it", n);
	while (n == 0 || sent < n) {
		uint32_t txcur, txavail;
//#define BUSYWAIT
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCRXSYNC, NULL);
#else
		if (poll(&pfd, 1, 1000) <= 0) {
			D("poll error/timeout on queue %d: %s", targ->me, strerror(errno));
			continue;
		}
#endif
		txring = NETMAP_TXRING(nifp, 0);
		txcur = txring->cur;
		txavail = nm_ring_space(txring);
		/* see what we got back */
		for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
			rxring = NETMAP_RXRING(nifp, i);
			while (!nm_ring_empty(rxring)) {
				uint16_t *spkt, *dpkt;
				uint32_t cur = rxring->cur;
				struct netmap_slot *slot = &rxring->slot[cur];
				char *src, *dst;
				src = NETMAP_BUF(rxring, slot->buf_idx);
				//D("got pkt %p of size %d", src, slot->len);
				rxring->head = rxring->cur = nm_ring_next(rxring, cur);
				rx++;
				if (txavail == 0)
					continue; /* no TX room: count it but drop the reply */
				dst = NETMAP_BUF(txring, txring->slot[txcur].buf_idx);
				/* copy... */
				dpkt = (uint16_t *)dst;
				spkt = (uint16_t *)src;
				nm_pkt_copy(src, dst, slot->len);
				/* exchange the two 6-byte ethernet addresses,
				 * viewed as three 16-bit words each */
				dpkt[0] = spkt[3];
				dpkt[1] = spkt[4];
				dpkt[2] = spkt[5];
				dpkt[3] = spkt[0];
				dpkt[4] = spkt[1];
				dpkt[5] = spkt[2];
				txring->slot[txcur].len = slot->len;
				/* XXX swap src dst mac */
				txcur = nm_ring_next(txring, txcur);
				txavail--;
				sent++;
			}
		}
		txring->head = txring->cur = txcur;
		targ->count = sent;
#ifdef BUSYWAIT
		ioctl(pfd.fd, NIOCTXSYNC, NULL);
#endif
		//D("tx %d rx %d", sent, rx);
	}
	return NULL;
}

/* return 1 when *a >= *b, 0 otherwise (both assumed normalised) */
static __inline int
timespec_ge(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec > b->tv_sec)
		return (1);
	if (a->tv_sec < b->tv_sec)
		return (0);
	if (a->tv_nsec >= b->tv_nsec)
		return (1);
	return (0);
}

/* convert a timeval (microseconds) into a timespec (nanoseconds) */
static __inline struct timespec
timeval2spec(const struct timeval *a)
{
	struct timespec ts = {
		.tv_sec = a->tv_sec,
		.tv_nsec = a->tv_usec * 1000
	};
	return ts;
}

/* convert a timespec back into a timeval, truncating to microseconds */
static __inline struct timeval
timespec2val(const struct timespec *a)
{
	struct timeval tv = {
		.tv_sec = a->tv_sec,
		.tv_usec = a->tv_nsec / 1000
	};
	return tv;
}

/* a + b with tv_nsec renormalised into [0, 1e9) */
static __inline struct timespec
timespec_add(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };
	if (ret.tv_nsec >= 1000000000) {
		ret.tv_sec++;
		ret.tv_nsec -= 1000000000;
	}
	return ret;
}

/* a - b with tv_nsec renormalised into [0, 1e9) */
static __inline struct timespec
timespec_sub(struct timespec a, struct timespec b)
{
	struct timespec ret = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
	if (ret.tv_nsec < 0) {
		ret.tv_sec--;
		ret.tv_nsec += 1000000000;
	}
	return ret;
}

/*
 * wait until ts, either busy or sleeping if more than 1ms.
 * Return wakeup time.
 */
static struct timespec
wait_time(struct timespec ts)
{
	for (;;) {
		struct timespec w, cur;
		clock_gettime(CLOCK_REALTIME_PRECISE, &cur);
		w = timespec_sub(ts, cur);
		if (w.tv_sec < 0)
			return cur;
		else if (w.tv_sec > 0 || w.tv_nsec > 1000000)
			poll(NULL, 0, 1); /* sleep ~1ms; busy-spin the last millisecond */
	}
}

/*
 * sender_body: transmitter thread main loop (pkt-gen).
 * Sends npackets/nthreads packets through the tap fd, pcap handle, or the
 * netmap TX rings, optionally rate-limited in bursts of tx_period, and keeps
 * the running total in targ->count.
 */
static void *
sender_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLOUT };
	struct netmap_if *nifp;
	struct netmap_ring *txring;
	int i, n = targ->g->npackets / targ->g->nthreads;
	int64_t sent = 0;
	int options = targ->g->options | OPT_COPY;
	struct timespec nexttime = { 0, 0}; // XXX silence compiler
	int rate_limit = targ->g->tx_rate;
	struct pkt *pkt = &targ->pkt;
	void *frame;
	int size;

	/* frame starts inside targ->pkt, skipping the unused part of the
	 * virtio-net header */
	frame = pkt;
	frame += sizeof(pkt->vh) - targ->g->virt_header;
	size = targ->g->pkt_size + targ->g->virt_header;
	D("start");
	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* main loop.*/
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (rate_limit) {
		/* align the start time to a whole second ~2s from now */
		targ->tic = timespec_add(targ->tic, (struct timespec){2,0});
		targ->tic.tv_nsec = 0;
		wait_time(targ->tic);
		nexttime = targ->tic;
	}
	if (targ->g->dev_type == DEV_TAP) {
		D("writing to file desc %d", targ->g->main_fd);
		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (write(targ->g->main_fd, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
#ifndef NO_PCAP
	} else if (targ->g->dev_type == DEV_PCAP) {
		pcap_t *p = targ->g->p;
		for (i = 0; !targ->cancel && (n == 0 || sent < n); i++) {
			if (pcap_inject(p, frame, size) != -1)
				sent++;
			update_addresses(pkt, targ->g);
			if (i > 10000) {
				targ->count = sent;
				i = 0;
			}
		}
#endif /* NO_PCAP */
	} else {
		int tosend = 0;
		int frags = targ->g->frags;

		nifp = targ->nmd->nifp;
		while (!targ->cancel && (n == 0 || sent < n)) {
			if (rate_limit && tosend <= 0) {
				/* start of a new rate-limited burst */
				tosend = targ->g->burst;
				nexttime = timespec_add(nexttime, targ->g->tx_period);
				wait_time(nexttime);
			}
			/*
			 * wait for available room in the send queue(s)
			 */
			if (poll(&pfd, 1, 2000) <= 0) {
				if (targ->cancel)
					break;
				D("poll error/timeout on queue %d: %s", targ->me, strerror(errno));
				// goto quit;
			}
			if (pfd.revents & POLLERR) {
				D("poll error");
				goto quit;
			}
			/*
			 * scan our queues and send on those with room
			 */
			/* after a warm-up of 100k packets, drop the copy option
			 * unless the user explicitly asked for it */
			if (options & OPT_COPY && sent > 100000 && !(targ->g->options & OPT_COPY) ) {
				D("drop copy");
				options &= ~OPT_COPY;
			}
			for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
				int m, limit = rate_limit ?  tosend : targ->g->burst;
				if (n > 0 && n - sent < limit)
					limit = n - sent;
				txring = NETMAP_TXRING(nifp, i);
				if (nm_ring_empty(txring))
					continue;
				/* round the limit up to a whole number of fragments */
				if (frags > 1)
					limit = ((limit + frags - 1) / frags) * frags;
				m = send_packets(txring, pkt, frame, size, targ->g, limit, options, frags);
				ND("limit %d tail %d frags %d m %d", limit, txring->tail, frags, m);
				sent += m;
				targ->count = sent;
				if (rate_limit) {
					tosend -= m;
					if (tosend <= 0)
						break;
				}
			}
		}
		/* flush any remaining packets */
		ioctl(pfd.fd, NIOCTXSYNC, NULL);
		/* final part: wait all the TX queues to be empty. */
		for (i = targ->nmd->first_tx_ring; i <= targ->nmd->last_tx_ring; i++) {
			txring = NETMAP_TXRING(nifp, i);
			while (nm_tx_pending(txring)) {
				ioctl(pfd.fd, NIOCTXSYNC, NULL);
				usleep(1); /* wait 1 tick */
			}
		}
	} /* end DEV_NETMAP */

	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
	targ->completed = 1;
	targ->count = sent;
quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}

#ifndef NO_PCAP
/* pcap_dispatch() callback: count one received packet in *user */
static void
receive_pcap(u_char *user, const struct pcap_pkthdr * h,
	const u_char * bytes)
{
	int *count = (int *)user;
	(void)h;	/* UNUSED */
	(void)bytes;	/* UNUSED */
	(*count)++;
}
#endif /* !NO_PCAP */

/*
 * receive_packets: drain up to `limit` packets from one RX ring,
 * optionally dumping each payload; returns the number consumed.
 */
static int
receive_packets(struct netmap_ring *ring, u_int limit, int dump)
{
	u_int cur, rx, n;

	cur = ring->cur;
	n = nm_ring_space(ring);
	if (n < limit)
		limit = n;
	for (rx = 0; rx < limit; rx++) {
		struct netmap_slot *slot = &ring->slot[cur];
		char *p = NETMAP_BUF(ring, slot->buf_idx);

		if (dump)
			dump_payload(p, slot->len, ring, cur);
		cur = nm_ring_next(ring, cur);
	}
	ring->head = ring->cur = cur;
	return (rx);
}

/*
 * receiver_body: receiver thread main loop (pkt-gen, nmd variant).
 * Waits for the first packet, then drains the RX rings (or tap fd / pcap
 * handle) until cancelled or a 1-second silence; keeps the running packet
 * count in targ->count.
 */
static void *
receiver_body(void *data)
{
	struct targ *targ = (struct targ *) data;
	struct pollfd pfd = { .fd = targ->fd, .events = POLLIN };
	struct netmap_if *nifp;
	struct netmap_ring *rxring;
	int i;
	uint64_t received = 0;

	if (setaffinity(targ->thread, targ->affinity))
		goto quit;

	/* unbounded wait for the first packet. */
	for (;;) {
		i = poll(&pfd, 1, 1000);
		if (i > 0 && !(pfd.revents & POLLERR))
			break;
		RD(1, "waiting for initial packets, poll returns %d %d", i, pfd.revents);
	}

	/* main loop, exit after 1s silence */
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->tic);
	if (targ->g->dev_type == DEV_TAP) {
		D("reading from %s fd %d", targ->g->ifname, targ->g->main_fd);
		while (!targ->cancel) {
			char buf[2048];
			/* XXX should we poll ? */
			if (read(targ->g->main_fd, buf, sizeof(buf)) > 0)
				targ->count++;
		}
#ifndef NO_PCAP
	} else if (targ->g->dev_type == DEV_PCAP) {
		while (!targ->cancel) {
			/* XXX should we poll ? */
			pcap_dispatch(targ->g->p, targ->g->burst, receive_pcap, NULL);
		}
#endif /* !NO_PCAP */
	} else {
		int dump = targ->g->options & OPT_DUMP;

		nifp = targ->nmd->nifp;
		while (!targ->cancel) {
			/* Once we started to receive packets, wait at most 1 seconds
			   before quitting. */
			if (poll(&pfd, 1, 1 * 1000) <= 0 && !targ->g->forever) {
				clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);
				targ->toc.tv_sec -= 1; /* Subtract timeout time. */
				goto out;
			}
			if (pfd.revents & POLLERR) {
				D("poll err");
				goto quit;
			}
			for (i = targ->nmd->first_rx_ring; i <= targ->nmd->last_rx_ring; i++) {
				int m;

				rxring = NETMAP_RXRING(nifp, i);
				if (nm_ring_empty(rxring))
					continue;
				m = receive_packets(rxring, targ->g->burst, dump);
				received += m;
			}
			targ->count = received;
		}
	}
	clock_gettime(CLOCK_REALTIME_PRECISE, &targ->toc);

out:
	targ->completed = 1;
	targ->count = received;

quit:
	/* reset the ``used`` flag. */
	targ->used = 0;
	return (NULL);
}

/* very crude code to print a number in normalized form.
 * Caller has to make sure that the buffer is large enough.
 */
static const char *
norm(char *buf, double val)
{
	char *units[] = { "", "K", "M", "G", "T" };
	u_int i;

	/* scale down by 1000 until val fits the next unit */
	for (i = 0; val >=1000 && i < sizeof(units)/sizeof(char *) - 1; i++)
		val /= 1000;
	sprintf(buf, "%.2f %s", val, units[i]);
	return buf;
}

/* print TX summary: packet rate plus payload and raw (wire) bandwidth */
static void
tx_output(uint64_t sent, int size, double delta)
{
	double bw, raw_bw, pps;
	char b1[40], b2[80], b3[80];

	printf("Sent %llu packets, %d bytes each, in %.2f seconds.\n",
	       (unsigned long long)sent, size, delta);
	if (delta == 0)
		delta = 1e-6;
	if (size < 60)		/* correct for min packet size */
		size = 60;
	pps = sent / delta;
	bw = (8.0 * size * sent) / delta;
	/* raw packets have 4 bytes crc + 20 bytes framing */
	raw_bw = (8.0 * (size + 24) * sent) / delta;

	printf("Speed: %spps Bandwidth: %sbps (raw %sbps)\n",
		norm(b1, pps), norm(b2, bw), norm(b3, raw_bw) );
}

/* print RX summary: packet count and rate */
static void
rx_output(uint64_t received, double delta)
{
	double pps;
	char b1[40];

	printf("Received %llu packets, in %.2f seconds.\n",
		(unsigned long long) received, delta);

	if (delta == 0)
		delta = 1e-6;
	pps = received / delta;
	printf("Speed: %spps\n", norm(b1, pps));
}
// Assemble a little-endian 16-bit word from two consecutive byte reads.
uint16_t minx_cpu_device::rd16( uint32_t offset )
{
	const uint16_t low_byte  = RD( offset );
	const uint16_t high_byte = RD( offset + 1 );

	return low_byte | ( high_byte << 8 );
}
/*
 * ptioc: handle terminal ioctls for the ptem module.
 *
 * Message must be of type M_IOCTL or M_IOCDATA for this routine to be called.
 * qside indicates whether the message arrived on the read (RDSIDE) or write
 * (WRSIDE) side of the module.  Each case either replies with qreply() /
 * miocnak() or passes the message along; the message is always consumed or
 * forwarded.
 */
static void
ptioc(queue_t *q, mblk_t *mp, int qside)
{
	struct ptem *tp;
	struct iocblk *iocp;
	struct winsize *wb;
	struct jwinsize *jwb;
	mblk_t *tmp;
	mblk_t *pckt_msgp;	/* message sent to the PCKT module */
	int error;

	iocp = (struct iocblk *)mp->b_rptr;
	tp = (struct ptem *)q->q_ptr;

	switch (iocp->ioc_cmd) {

	case JWINSIZE:
		/*
		 * For compatibility: If all zeros, NAK the message for dumb
		 * terminals.
		 */
		if ((tp->wsz.ws_row == 0) && (tp->wsz.ws_col == 0) &&
		    (tp->wsz.ws_xpixel == 0) && (tp->wsz.ws_ypixel == 0)) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		tmp = allocb(sizeof (struct jwinsize), BPRI_MED);
		if (tmp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		if (iocp->ioc_count == TRANSPARENT)
			mcopyout(mp, NULL, sizeof (struct jwinsize), NULL, tmp);
		else
			mioc2ack(mp, tmp, sizeof (struct jwinsize), 0);

		/* after mcopyout/mioc2ack, b_cont is the reply buffer */
		jwb = (struct jwinsize *)mp->b_cont->b_rptr;
		jwb->bytesx = tp->wsz.ws_col;
		jwb->bytesy = tp->wsz.ws_row;
		jwb->bitsx = tp->wsz.ws_xpixel;
		jwb->bitsy = tp->wsz.ws_ypixel;

		qreply(q, mp);
		return;

	case TIOCGWINSZ:
		/*
		 * If all zeros NAK the message for dumb terminals.
		 */
		if ((tp->wsz.ws_row == 0) && (tp->wsz.ws_col == 0) &&
		    (tp->wsz.ws_xpixel == 0) && (tp->wsz.ws_ypixel == 0)) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		tmp = allocb(sizeof (struct winsize), BPRI_MED);
		if (tmp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		mioc2ack(mp, tmp, sizeof (struct winsize), 0);

		wb = (struct winsize *)mp->b_cont->b_rptr;
		wb->ws_row = tp->wsz.ws_row;
		wb->ws_col = tp->wsz.ws_col;
		wb->ws_xpixel = tp->wsz.ws_xpixel;
		wb->ws_ypixel = tp->wsz.ws_ypixel;

		qreply(q, mp);
		return;

	case TIOCSWINSZ:
		error = miocpullup(mp, sizeof (struct winsize));
		if (error != 0) {
			miocnak(q, mp, 0, error);
			return;
		}

		wb = (struct winsize *)mp->b_cont->b_rptr;
		/*
		 * Send a SIGWINCH signal if the row/col information has
		 * changed.
		 * BUGFIX: ws_ypixel was previously compared against
		 * wb->ws_xpixel, so a change in ypixel alone was silently
		 * ignored.
		 */
		if ((tp->wsz.ws_row != wb->ws_row) ||
		    (tp->wsz.ws_col != wb->ws_col) ||
		    (tp->wsz.ws_xpixel != wb->ws_xpixel) ||
		    (tp->wsz.ws_ypixel != wb->ws_ypixel)) {
			/*
			 * SIGWINCH is always sent upstream.
			 */
			if (qside == WRSIDE)
				(void) putnextctl1(RD(q), M_SIG, SIGWINCH);
			else if (qside == RDSIDE)
				(void) putnextctl1(q, M_SIG, SIGWINCH);
			/*
			 * Message may have come in as an M_IOCDATA; pass it
			 * to the master side as an M_IOCTL.
			 */
			mp->b_datap->db_type = M_IOCTL;
			if (qside == WRSIDE) {
				/*
				 * Need a copy of this message to pass on to
				 * the PCKT module, only if the M_IOCTL
				 * originated from the slave side.
				 */
				if ((pckt_msgp = copymsg(mp)) == NULL) {
					miocnak(q, mp, 0, EAGAIN);
					return;
				}
				putnext(q, pckt_msgp);
			}
			tp->wsz.ws_row = wb->ws_row;
			tp->wsz.ws_col = wb->ws_col;
			tp->wsz.ws_xpixel = wb->ws_xpixel;
			tp->wsz.ws_ypixel = wb->ws_ypixel;
		}

		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);
		return;

	case TIOCSIGNAL: {
		/*
		 * This ioctl can emanate from the master side in remote
		 * mode only.
		 */
		int	sig;

		if (DB_TYPE(mp) == M_IOCTL && iocp->ioc_count != TRANSPARENT) {
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				return;
			}
		}

		if (DB_TYPE(mp) == M_IOCDATA || iocp->ioc_count != TRANSPARENT)
			sig = *(int *)mp->b_cont->b_rptr;
		else
			sig = (int)*(intptr_t *)mp->b_cont->b_rptr;

		if (sig < 1 || sig >= NSIG) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		/*
		 * Send an M_PCSIG message up the slave's read side and
		 * respond back to the master with an ACK or NAK as
		 * appropriate.
		 */
		if (putnextctl1(q, M_PCSIG, sig) == 0) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);
		return;
	}

	case TIOCREMOTE: {
		int	onoff;
		mblk_t	*mctlp;

		if (DB_TYPE(mp) == M_IOCTL) {
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				return;
			}
		}

		onoff = *(int *)mp->b_cont->b_rptr;

		/*
		 * Send M_CTL up using the iocblk format.
		 */
		mctlp = mkiocb(onoff ? MC_NO_CANON : MC_DO_CANON);
		if (mctlp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}
		mctlp->b_datap->db_type = M_CTL;
		putnext(q, mctlp);

		/*
		 * ACK the ioctl.
		 */
		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);

		/*
		 * Record state change.
		 */
		if (onoff)
			tp->state |= REMOTEMODE;
		else
			tp->state &= ~REMOTEMODE;
		return;
	}

	default:
		putnext(q, mp);
		return;
	}
}
/*
 * rds_bind: handle a TPI T_BIND_REQ / O_T_BIND_REQ for an RDS stream.
 * Validates the requested IPv4 address/port, finds a free port (scanning the
 * bind fanout table under the bucket lock), records the binding in the rds_t,
 * and replies with a T_BIND_ACK; all failures are reported via rds_err_ack().
 *
 * NO locking protection here as sockfs will only send down
 * one bind operation at a time.
 */
static void
rds_bind(queue_t *q, mblk_t *mp)
{
	sin_t *sin;
	rds_t *rds;
	struct T_bind_req *tbr;
	in_port_t port;	/* Host byte order */
	in_port_t requested_port; /* Host byte order */
	struct T_bind_ack *tba;
	int count;
	rds_bf_t *rdsbf;
	in_port_t lport;	/* Network byte order */

	rds = (rds_t *)q->q_ptr;
	/* the whole T_bind_req header must be present in this block */
	if (((uintptr_t)mp->b_wptr - (uintptr_t)mp->b_rptr) < sizeof (*tbr)) {
		rds_err_ack(q, mp, TPROTO, 0);
		return;
	}

	/*
	 * We don't allow multiple binds
	 */
	if (rds->rds_state != TS_UNBND) {
		rds_err_ack(q, mp, TOUTSTATE, 0);
		return;
	}

	tbr = (struct T_bind_req *)(uintptr_t)mp->b_rptr;
	switch (tbr->ADDR_length) {
	case sizeof (sin_t):	/* Complete IPv4 address */
		sin = (sin_t *)(uintptr_t)mi_offset_param(mp, tbr->ADDR_offset,
		    sizeof (sin_t));
		if (sin == NULL || !OK_32PTR((char *)sin)) {
			rds_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (rds->rds_family != AF_INET_OFFLOAD ||
		    sin->sin_family != AF_INET_OFFLOAD) {
			rds_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		if (sin->sin_addr.s_addr == INADDR_ANY) {
			rds_err_ack(q, mp, TBADADDR, 0);
			return;
		}

		/*
		 * verify that the address is hosted on IB
		 * only exception is the loopback address.
		 */
		if ((sin->sin_addr.s_addr != INADDR_LOOPBACK) &&
		    !rds_verify_bind_address(sin->sin_addr.s_addr)) {
			rds_err_ack(q, mp, TBADADDR, 0);
			return;
		}

		port = ntohs(sin->sin_port);
		break;
	default:	/* Invalid request */
		rds_err_ack(q, mp, TBADADDR, 0);
		return;
	}

	requested_port = port;

	/*
	 * TPI only sends down T_BIND_REQ for AF_INET and AF_INET6
	 * since RDS socket is of type AF_INET_OFFLOAD a O_T_BIND_REQ
	 * will be sent down. Treat O_T_BIND_REQ as T_BIND_REQ
	 */

	if (requested_port == 0) {
		/*
		 * If the application passed in zero for the port number, it
		 * doesn't care which port number we bind to. Get one in the
		 * valid range.
		 */
		port = rds_update_next_port(rds_next_port_to_try);
	}

	ASSERT(port != 0);
	count = 0;
	for (;;) {
		rds_t		*rds1;
		ASSERT(sin->sin_addr.s_addr != INADDR_ANY);
		/*
		 * Walk through the list of rds streams bound to
		 * requested port with the same IP address.
		 */
		lport = htons(port);
		rdsbf = &rds_bind_fanout[RDS_BIND_HASH(lport)];
		mutex_enter(&rdsbf->rds_bf_lock);
		for (rds1 = rdsbf->rds_bf_rds; rds1 != NULL;
		    rds1 = rds1->rds_bind_hash) {
			if (lport != rds1->rds_port ||
			    rds1->rds_src != sin->sin_addr.s_addr ||
			    rds1->rds_zoneid != rds->rds_zoneid)
				continue;
			break;
		}

		if (rds1 == NULL) {
			/*
			 * No other stream has this IP address
			 * and port number. We can use it.
			 * (the bucket lock is intentionally still held here
			 * and released after the hash insert below)
			 */
			break;
		}
		mutex_exit(&rdsbf->rds_bf_lock);
		if (requested_port != 0) {
			/*
			 * We get here only when requested port
			 * is bound (and only first of the for()
			 * loop iteration).
			 *
			 * The semantics of this bind request
			 * require it to fail so we return from
			 * the routine (and exit the loop).
			 *
			 */
			rds_err_ack(q, mp, TADDRBUSY, 0);
			return;
		}

		port = rds_update_next_port(port + 1);

		if (++count >= loopmax) {
			/*
			 * We've tried every possible port number and
			 * there are none available, so send an error
			 * to the user.
			 */
			rds_err_ack(q, mp, TNOADDR, 0);
			return;
		}
	}

	/*
	 * Copy the source address into our rds structure.
	 */
	rds->rds_src = sin->sin_addr.s_addr;
	rds->rds_port = lport;	/* Network byte order */

	/*
	 * reset the next port if we choose the port
	 */
	if (requested_port == 0) {
		rds_next_port_to_try = port + 1;
	}

	rds->rds_state = TS_IDLE;
	rds_bind_hash_insert(rdsbf, rds);
	mutex_exit(&rdsbf->rds_bf_lock);

	/* Reset the message type in preparation for shipping it back. */
	mp->b_datap->db_type = M_PCPROTO;
	tba = (struct T_bind_ack *)(uintptr_t)mp->b_rptr;
	tba->PRIM_type = T_BIND_ACK;

	/* Increment the number of ports and set the port quota */
	RDS_INCR_NPORT();
	rds->rds_port_quota = RDS_CURRENT_PORT_QUOTA();
	RDS_SET_PORT_QUOTA(rds->rds_port_quota);
	(void) proto_set_rx_hiwat(RD(q), NULL,
	    rds->rds_port_quota * UserBufferSize);

	qreply(q, mp);
}
/**
 * Run a self-test on the driver/device. Unless fault injection is implemented
 * in hardware, this function only does a minimal test in which available
 * registers (if any) are written and read.
 *
 * With fault injection, all possible single-bit and double-bit errors are
 * injected, and checked to the extent possible, given the implemented hardware.
 *
 * @param	InstancePtr is a pointer to the XBram instance.
 * @param	IntMask is the interrupt mask to use. When testing
 *		with interrupts, this should be set to allow interrupt
 *		generation, otherwise it should be 0.
 *
 * @return
 *	- XST_SUCCESS if fault injection/detection is working properly OR
 *	  if ECC is Not Enabled in the HW.
 *	- XST_FAILURE if the injected fault is not correctly detected or
 *	  the Control Base Address is Zero when ECC is enabled.
 *	.
 *
 *	If the BRAM device is not present in the
 *	hardware a bus error could be generated. Other indicators of a
 *	bus error, such as registers in bridges or buses, may be
 *	necessary to determine if this function caused a bus error.
 *
 * @note	None.
 *
 ******************************************************************************/
int XBram_SelfTest(XBram *InstancePtr, u8 IntMask)
{
	Xil_AssertNonvoid(InstancePtr != NULL);
	Xil_AssertNonvoid(InstancePtr->IsReady == XIL_COMPONENT_IS_READY);

	/* Without ECC hardware there is nothing to exercise: trivially pass. */
	if (InstancePtr->Config.EccPresent == 0) {
		return (XST_SUCCESS);
	}

	/* ECC is present but the control registers are unreachable: fail. */
	if (InstancePtr->Config.CtrlBaseAddress == 0) {
		return (XST_FAILURE);
	}

	/*
	 * Only 32-bit data width is supported as of yet. 64-bit and 128-bit
	 * widths will be supported in future.
	 */
	if (InstancePtr->Config.DataWidth != 32)
		return (XST_SUCCESS);

	/*
	 * Read from the implemented readable registers in the hardware device.
	 * Values are discarded; the reads themselves verify the registers are
	 * accessible (a missing device may raise a bus error, per the header).
	 */
	if (InstancePtr->Config.CorrectableFailingRegisters) {
		(void) RD(CE_FFA_0_OFFSET);
	}
	if (InstancePtr->Config.CorrectableFailingDataRegs) {
		(void) RD(CE_FFD_0_OFFSET);
		(void) RD(CE_FFE_0_OFFSET);
	}
	if (InstancePtr->Config.UncorrectableFailingRegisters) {
		(void) RD(UE_FFA_0_OFFSET);
	}
	if (InstancePtr->Config.UncorrectableFailingDataRegs) {
		(void) RD(UE_FFD_0_OFFSET);
		(void) RD(UE_FFE_0_OFFSET);
	}

	/*
	 * Write and read the implemented read/write registers in the hardware
	 * device.
	 */
	if (InstancePtr->Config.EccStatusInterruptPresent) {
		WR(ECC_EN_IRQ_OFFSET, 0);
		if (RD(ECC_EN_IRQ_OFFSET) != 0) {
			return (XST_FAILURE);
		}
	}

	if (InstancePtr->Config.CorrectableCounterBits > 0) {
		u32 Value;

		/* Calculate counter max value */
		if (InstancePtr->Config.CorrectableCounterBits == 32) {
			/* (1 << 32) would be undefined; use the literal mask. */
			Value = 0xFFFFFFFF;
		} else {
			Value = (1 << InstancePtr->Config.CorrectableCounterBits) - 1;
		}

		/* Walk the CE counter: write max then zero, read each back. */
		WR(CE_CNT_OFFSET, Value);
		if (RD(CE_CNT_OFFSET) != Value) {
			return (XST_FAILURE);
		}
		WR(CE_CNT_OFFSET, 0);
		if (RD(CE_CNT_OFFSET) != 0) {
			return (XST_FAILURE);
		}
	}

	/*
	 * If fault injection is implemented, inject all possible single-bit
	 * and double-bit errors, and check all observable effects.
	 * Requires write access to the BRAM memory itself.
	 */
	if (InstancePtr->Config.FaultInjectionPresent &&
	    InstancePtr->Config.WriteAccess != 0) {
		/*
		 * Two word-aligned test locations: the first and last words
		 * of the BRAM range (low 2 address bits masked off).
		 */
		const u32 Addr[2] = {InstancePtr->Config.MemBaseAddress & 0xfffffffc,
				     InstancePtr->Config.MemHighAddress & 0xfffffffc};
		u32 SavedWords[2];	/* original contents, restored after test */
		u32 ActualData;		/* data word written by InjectErrors() */
		u32 ActualEcc;		/* ECC bits written by InjectErrors() */
		u32 CounterValue = 0;	/* CE counter snapshot before injection */
		u32 CounterMax;
		int WordIndex = 0;	/* alternates 0/1 between the two words */
		int Result = XST_SUCCESS;
		int Index1;
		int Index2;

		/*
		 * Seed for the pseudo-random data used by InjectErrors()
		 * (PrngResult is presumably the driver's file-scope PRNG
		 * state — defined elsewhere).
		 */
		PrngResult = 42; /* Random seed */

		/* Save two words in BRAM used for test */
		SavedWords[0] = XBram_In32(Addr[0]);
		SavedWords[1] = XBram_In32(Addr[1]);

		/* Calculate counter max value */
		if (InstancePtr->Config.CorrectableCounterBits == 32) {
			CounterMax = 0xFFFFFFFF;
		} else {
			CounterMax =(1 << InstancePtr->Config.CorrectableCounterBits) - 1;
		}

		/*
		 * Inject and check all single bit errors.
		 * NOTE(review): CHECK() appears to compare a register read
		 * against the expected value and latch failure into Result —
		 * macro defined elsewhere; confirm against the driver header.
		 */
		for (Index1 = 0; Index1 < TOTAL_BITS; Index1++) {
			/* Save counter value */
			if (InstancePtr->Config.CorrectableCounterBits > 0) {
				CounterValue = RD(CE_CNT_OFFSET);
			}

			/* Inject single bit error (same bit twice = one flip) */
			InjectErrors(InstancePtr, Addr[WordIndex], Index1,
				     Index1, &ActualData, &ActualEcc);

			/* Check that CE is set */
			if (InstancePtr->Config.EccStatusInterruptPresent) {
				CHECK(ECC_STATUS_OFFSET, XBRAM_IR_CE_MASK, Result);
			}

			/* Check that address, data, ECC are correct */
			if (InstancePtr->Config.CorrectableFailingRegisters) {
				CHECK(CE_FFA_0_OFFSET, Addr[WordIndex], Result);
			}
			/* Checks are only for LMB BRAM */
			if (InstancePtr->Config.CorrectableFailingDataRegs) {
				CHECK(CE_FFD_0_OFFSET, ActualData, Result);
				CHECK(CE_FFE_0_OFFSET, ActualEcc, Result);
			}

			/* Check that counter has incremented (unless saturated) */
			if (InstancePtr->Config.CorrectableCounterBits > 0 &&
			    CounterValue < CounterMax) {
				CHECK(CE_CNT_OFFSET, CounterValue + 1, Result);
			}

			/* Restore correct data in the used word */
			XBram_Out32(Addr[WordIndex], SavedWords[WordIndex]);

			/* Allow interrupts to occur */
			/* Clear status register */
			if (InstancePtr->Config.EccStatusInterruptPresent) {
				WR(ECC_EN_IRQ_OFFSET, IntMask);
				WR(ECC_STATUS_OFFSET, XBRAM_IR_ALL_MASK);
				WR(ECC_EN_IRQ_OFFSET, 0);
			}

			/* Switch to the other word */
			WordIndex = WordIndex ^ 1;

			if (Result != XST_SUCCESS)
				break;
		}

		if (Result != XST_SUCCESS) {
			return XST_FAILURE;
		}

		/* Inject and check all double-bit errors (Index1 != Index2). */
		for (Index1 = 0; Index1 < TOTAL_BITS; Index1++) {
			for (Index2 = 0; Index2 < TOTAL_BITS; Index2++) {
				if (Index1 != Index2) {
					/* Inject double bit error */
					InjectErrors(InstancePtr,
						     Addr[WordIndex],
						     Index1, Index2,
						     &ActualData, &ActualEcc);

					/* Check that UE is set */
					if (InstancePtr->Config.
					    EccStatusInterruptPresent) {
						CHECK(ECC_STATUS_OFFSET,
						      XBRAM_IR_UE_MASK, Result);
					}

					/* Check that address, data, ECC are correct */
					if (InstancePtr->Config.
					    UncorrectableFailingRegisters) {
						CHECK(UE_FFA_0_OFFSET,
						      Addr[WordIndex], Result);
						CHECK(UE_FFD_0_OFFSET,
						      ActualData, Result);
						CHECK(UE_FFE_0_OFFSET,
						      ActualEcc, Result);
					}

					/* Restore correct data in the used word */
					XBram_Out32(Addr[WordIndex],
						    SavedWords[WordIndex]);

					/* Allow interrupts to occur */
					/* Clear status register */
					if (InstancePtr->Config.
					    EccStatusInterruptPresent) {
						WR(ECC_EN_IRQ_OFFSET, IntMask);
						WR(ECC_STATUS_OFFSET,
						   XBRAM_IR_ALL_MASK);
						WR(ECC_EN_IRQ_OFFSET, 0);
					}

					/* Switch to the other word */
					WordIndex = WordIndex ^ 1;
				}
				if (Result != XST_SUCCESS)
					break;
			}
			if (Result != XST_SUCCESS)
				break;
		}

		/*
		 * Check saturation of correctable error counter: at max the
		 * counter must stick, not wrap.
		 */
		if (InstancePtr->Config.CorrectableCounterBits > 0 &&
		    Result == XST_SUCCESS) {
			WR(CE_CNT_OFFSET, CounterMax);
			InjectErrors(InstancePtr, Addr[WordIndex], 0, 0,
				     &ActualData, &ActualEcc);
			CHECK(CE_CNT_OFFSET, CounterMax, Result);
		}

		/* Restore the two words used for test */
		XBram_Out32(Addr[0], SavedWords[0]);
		XBram_Out32(Addr[1], SavedWords[1]);

		/* Clear the Status Register. */
		if (InstancePtr->Config.EccStatusInterruptPresent) {
			WR(ECC_STATUS_OFFSET, XBRAM_IR_ALL_MASK);
		}

		/* Set Correctable Counter to zero */
		if (InstancePtr->Config.CorrectableCounterBits > 0) {
			WR(CE_CNT_OFFSET, 0);
		}

		return (Result);
	}

	return (XST_SUCCESS);
}
/*
 * Fragment of an assembler opcode table (enclosing array is declared
 * elsewhere). Each entry: {mnemonic, encoded opcode/field bits built from
 * the OP()/CEX()/RD() macros, operand-format string, flags}.
 * NOTE(review): operand-format letters ("rD", "cN", "bJ", ...) are decoded
 * by the assembler's operand parser — presumably registers, constants and
 * branch targets respectively; confirm against the parser.
 */
{"bne", OP(0x26)|CEX(1), "rC,bB", F_BRANCH},
{"bnc", OP(0x27)|CEX(1), "rC,bB", F_BRANCH},
{"chrs", OP(0x33), "cN", 0},
{"cmp", OP(0x19), "rC,rA,rB", 0},
{"cmpi", OP(0x37), "rC,rA,cC", 0},
{"conb", OP(0x03), "rD,rA,rB", F_CEX},
{"conh", OP(0x04), "rD,rB,rA", F_CEX},
{"cop", OP(0x3C), "cP,cO", 0},
{"di", OP(0x15), "", 0},
{"ei", OP(0x16), "", 0},
{"exb", OP(0x30), "rD,rA,cN", F_CEX},
{"exbf", OP(0x1A), "rD,rA,rB", F_CEX},
{"exbfi", OP(0x3D), "rD,rA,cB,cb", 0},
{"exh", OP(0x31), "rD,rA,cn", F_CEX},
{"jal", OP(0x39), "bJ", F_BRANCH},
/* jalr hard-wires the link register via RD(0x1F) */
{"jalr", OP(0x35)|RD(0x1F), "rA", F_CEX},
{"jmp", OP(0x38), "bJ", F_BRANCH},
{"jmpr", OP(0x1B), "rA", F_CEX|F_BRANCH},
{"ld", OP(0x32), "rD,rA,cI", F_CEX},
{"lli", OP(0x3E), "rD,cX", 0},
{"lui", OP(0x3F), "rD,cX", 0},
{"mov", OP(0x13), "rD,rA", F_CEX},
{"movfc", OP(0x2C), "cN,rD,rP", F_CEX},
{"movtc", OP(0x36), "cN,rP,rA", F_CEX},
{"mulhi", OP(0x1D), "rD", F_CEX},
{"muli", OP(0x2E), "rD,rA,cI", F_CEX},
{"muls", OP(0x05), "rD,rA,rB", F_CEX},
{"muls_16", OP(0x08), "rD,rA,rB", F_CEX},
{"mulu", OP(0x06), "rD,rA,rB", F_CEX},
{"mulu_16", OP(0x09), "rD,rA,rB", F_CEX},
{"mulus", OP(0x07), "rD,rA,rB", F_CEX},
// Advance one bullet by delta_time (ms): ray-trace its path segment, move it,
// and apply air resistance plus gravity to its velocity.
// Returns false when the bullet is finished (out of range, out of the level
// bounding box, or below minimum speed); true to keep simulating it.
// NOTE(review): rq_spatial is not referenced in this body — presumably used
// by an overload or the callbacks; confirm.
bool CBulletManager::CalcBullet (collide::rq_results & rq_storage, xr_vector<ISpatial*>& rq_spatial, SBullet* bullet, u32 delta_time)
{
	VERIFY				(bullet);

	float delta_time_sec = float(delta_time)/1000.f;
	float range = bullet->speed*delta_time_sec;

	// Clamp the traced segment so the bullet never exceeds its max distance.
	float max_range = bullet->max_dist - bullet->fly_dist;
	if(range>max_range)
		range = max_range;

	// Remember the bullet's current direction, because inside RayQuery()
	// it can change due to ricochets and collisions with objects.
	Fvector cur_dir = bullet->dir;

	bullet_test_callback_data bullet_data;
	bullet_data.pBullet = bullet;
	bullet_data.bStopTracing = true;

	bullet->flags.ricochet_was = 0;

	collide::ray_defs RD	(bullet->pos, bullet->dir, range, CDB::OPT_CULL, collide::rqtBoth);
	BOOL result = FALSE;
	VERIFY					(!fis_zero(RD.dir.square_magnitude()));
	// firetrace_callback/test_callback may mutate bullet (ricochets) and
	// decide whether tracing stops at the first hit.
	result = Level().ObjectSpace.RayQuery(rq_storage, RD, firetrace_callback, &bullet_data, test_callback, NULL);

	if (result && bullet_data.bStopTracing) {
		// Shorten the travelled range to the last (closest stopping) hit.
		range = (rq_storage.r_begin()+rq_storage.r_count()-1)->range;
	}
	range = _max		(EPS_L,range);

	bullet->flags.skipped_frame = (Device.dwFrame >= bullet->frame_num);

	if(!bullet->flags.ricochet_was) {
		// Update the bullet position along the pre-trace direction.
		bullet->pos.mad(bullet->pos, cur_dir, range);
		bullet->fly_dist += range;

		if(bullet->fly_dist>=bullet->max_dist)
			return false;

		Fbox level_box = Level().ObjectSpace.GetBoundingVolume();
		/*
		if(!level_box.contains(bullet->pos))
			return false;
		*/
		// Manual bounds check; the upper Y test is deliberately disabled
		// (bullets fired upward stay alive above the level box).
		if(!((bullet->pos.x>=level_box.x1) &&
			 (bullet->pos.x<=level_box.x2) &&
			 (bullet->pos.y>=level_box.y1) &&
		//	 (bullet->pos.y<=level_box.y2) &&
			 (bullet->pos.z>=level_box.z1) &&
			 (bullet->pos.z<=level_box.z2))
		   )
			return false;

		// Update the bullet's speed and flight direction,
		// taking gravity into account.
		bullet->dir.mul(bullet->speed);			// dir becomes the velocity vector
		Fvector air_resistance = bullet->dir;
		if (GameID() == GAME_SINGLE)
			air_resistance.mul(-m_fAirResistanceK*delta_time_sec);
		else
			// Multiplayer: per-bullet drag scaled by current/max speed.
			air_resistance.mul(-bullet->air_resistance*(bullet->speed)/(bullet->max_speed)*delta_time_sec);

///		Msg("Speed - %f; ar - %f, %f", bullet->dir.magnitude(), air_resistance.magnitude(), air_resistance.magnitude()/bullet->dir.magnitude()*100);

		bullet->dir.add(air_resistance);
		bullet->dir.y -= m_fGravityConst*delta_time_sec;
		bullet->speed = bullet->dir.magnitude();

		VERIFY(_valid(bullet->speed));
		VERIFY(!fis_zero(bullet->speed));

		// by Real Wolf 11.07.2014.
		R_ASSERT(bullet->speed);

		// Instead of normalize(), so magnitude() is not computed twice.
		bullet->dir.x /= bullet->speed;
		bullet->dir.y /= bullet->speed;
		bullet->dir.z /= bullet->speed;
	}

	if(bullet->speed<m_fMinBulletSpeed)
		return false;

	return true;
}
// RD() must extract the destination-register field from an encoded
// R-type instruction word; 0x0399e021 carries rd = 28.
TEST(RInstruction, ExtractRd2)
{
	const int32 encoded(0x0399e021);
	EXPECT_EQ(RD(encoded), 28);
}
/*
 * txq[] has a batch of n packets that possibly need to be forwarded.
 */
/*
 * Drain port->q (cur_txq queued entries) into the TX rings of port->d.
 * Retries with NIOCTXSYNC up to 5 times when rings are full; any mbuf-backed
 * packets still left after that are freed and counted as leftovers.
 * Always resets port->cur_txq to 0. Returns 0.
 */
int
netmap_fwd(struct my_netmap_port *port)
{
	u_int dr;			/* destination ring */
	u_int i = 0;			/* index of next queued packet to place */
	const u_int n = port->cur_txq;	/* how many queued packets */
	struct txq_entry *x = port->q;
	int retry = 5;			/* max retries */
	struct nm_desc *dst = port->d;

	if (n == 0) {
		D("nothing to forward to %s", port->ifp.if_xname);
		return 0;
	}

again:
	/* scan all output rings; dr is the destination ring index */
	for (dr = dst->first_tx_ring; i < n && dr <= dst->last_tx_ring; dr++) {
		struct netmap_ring *ring = NETMAP_TXRING(dst->nifp, dr);

		__builtin_prefetch(ring);
		if (nm_ring_empty(ring))
			continue;
		/*
		 * We have different ways to transfer from src->dst
		 *
		 * src	dst	Now		Eventually (not done)
		 *
		 * PHYS	PHYS	buf swap
		 * PHYS	VIRT			NS_INDIRECT
		 * VIRT	PHYS	copy		NS_INDIRECT
		 * VIRT	VIRT			NS_INDIRECT
		 * MBUF	PHYS	copy		NS_INDIRECT
		 * MBUF	VIRT			NS_INDIRECT
		 *
		 * The "eventually" depends on implementing NS_INDIRECT
		 * on physical device drivers.
		 * Note we do not yet differentiate PHYS/VIRT.
		 */
		for (; i < n && !nm_ring_empty(ring); i++) {
			/*
			 * NOTE(review): this inner `dst` (a slot) shadows the
			 * outer `dst` (the nm_desc) — intentional here, but
			 * easy to misread.
			 */
			struct netmap_slot *dst, *src;

			dst = &ring->slot[ring->cur];
			if (x[i].flags == TXQ_IS_SLOT) {
				/* source is a netmap slot on another ring */
				struct netmap_ring *sr = x[i].ring_or_mbuf;

				src = &sr->slot[x[i].slot_idx];
				dst->len = src->len;

				if (port->can_swap_bufs) {
					/* zero-copy: exchange buffer indices */
					ND("pkt %d len %d", i, src->len);
					u_int tmp = dst->buf_idx;
					dst->flags = src->flags = NS_BUF_CHANGED;
					dst->buf_idx = src->buf_idx;
					src->buf_idx = tmp;
				} else if (port->peer->allocator_id == 1) { // no indirect
					/* different allocators: must copy payload */
					nm_pkt_copy(NETMAP_BUF(sr, src->buf_idx),
						NETMAP_BUF(ring, dst->buf_idx),
						dst->len);
				} else {
					/* point the slot at the source buffer */
					dst->ptr = (uintptr_t)NETMAP_BUF(sr, src->buf_idx);
					dst->flags = NS_INDIRECT;
				}
			} else if (x[i].flags == TXQ_IS_MBUF) {
				/* source is a host-stack mbuf: copy and free it */
				struct mbuf *m = (void *)x[i].ring_or_mbuf;

				ND("copy from mbuf");
				dst->len = m->__m_extlen;
				nm_pkt_copy(m->__m_extbuf,
					NETMAP_BUF(ring, dst->buf_idx),
					dst->len);
				FREE_PKT(m);
			} else {
				panic("bad slot");
			}
			x[i].flags = 0;
			/* advance the ring pointers to commit the slot */
			ring->head = ring->cur = nm_ring_next(ring, ring->cur);
		}
	}
	if (i < n) {
		if (retry-- > 0) {
			/* kick the NIC and rescan the rings */
			ioctl(port->d->fd, NIOCTXSYNC);
			goto again;
		}
		RD(1, "%d buffers leftover", n - i);
		/* out of retries: only mbuf-backed entries need freeing */
		for (;i < n; i++) {
			if (x[i].flags == TXQ_IS_MBUF) {
				FREE_PKT(x[i].ring_or_mbuf);
			}
		}
	}
	port->cur_txq = 0;
	return 0;
}
/*
 * Forward a client DNS query to an upstream server, tracking it in a request
 * table so the matching answer can be routed back. Two builds share this
 * body: the original event-driven proxy (!VBOX) and the VirtualBox NAT
 * variant (VBOX), which parses the query out of an mbuf and keeps per-socket
 * retransmit state.
 */
void
dnsproxy_query(PNATState pData, struct socket *so, struct mbuf *m, int iphlen)
#endif
{
#ifndef VBOX
	char buf[MAX_BUFSPACE];
	unsigned int fromlen = sizeof(fromaddr);
	struct timeval tv;
#else
	struct ip *ip;
	char *buf;
	int retransmit;		/* nonzero when resending an already-tracked query */
	struct udphdr *udp;
#endif
	struct sockaddr_in addr;
	struct request *req = NULL;
#ifndef VBOX
	struct sockaddr_in fromaddr;
#else
	struct sockaddr_in fromaddr = { 0, };
#endif
	int byte = 0;

	++all_queries;

#ifndef VBOX
	/* Reschedule event */
	event_add((struct event *)arg, NULL);

	/* read packet from socket */
	if ((byte = recvfrom(fd, buf, sizeof(buf), 0,
	    (struct sockaddr *)&fromaddr, &fromlen)) == -1) {
		LogRel(("recvfrom failed: %s\n", strerror(errno)));
		++dropped_queries;
		return;
	}

	/* check for minimum dns packet length (12-byte DNS header) */
	if (byte < 12) {
		LogRel(("query too short from %s\n",
		    inet_ntoa(fromaddr.sin_addr)));
		++dropped_queries;
		return;
	}

	/* allocate new request */
	if ((req = calloc(1, sizeof(struct request))) == NULL) {
		LogRel(("calloc failed\n"));
		++dropped_queries;
		return;
	}

	/*
	 * Track the client and its original transaction id (first 2 bytes of
	 * the DNS header) so the answer can be rewritten and returned.
	 * NOTE(review): QUERYID presumably yields a fresh proxy-side id —
	 * defined elsewhere; confirm.
	 */
	req->id = QUERYID;
	memcpy(&req->client, &fromaddr, sizeof(struct sockaddr_in));
	memcpy(&req->clientid, &buf[0], 2);

	/* where is this query coming from? */
	if (is_internal(pData, fromaddr.sin_addr)) {
		/* honour the RD (recursion desired) bit for internal clients */
		req->recursion = RD(buf);
		DPRINTF(("Internal query RD=%d\n", req->recursion));
	} else {
		/* no recursion for foreigners */
		req->recursion = 0;
		DPRINTF(("External query RD=%d\n", RD(buf)));
	}

	/* insert it into the hash table */
	hash_add_request(pData, req);

	/* overwrite the original query id */
	memcpy(&buf[0], &req->id, 2);

	if (req->recursion) {
		/* recursive queries timeout in 90s */
		event_set(&req->timeout, -1, 0, timeout, req);
		tv.tv_sec=recursive_timeout;
		tv.tv_usec=0;
		event_add(&req->timeout, &tv);

		/* send it to our recursive server */
		if ((byte = sendto(sock_answer, buf, (unsigned int)byte, 0,
		    (struct sockaddr *)&recursive_addr,
		    sizeof(struct sockaddr_in))) == -1) {
			LogRel(("sendto failed: %s\n", strerror(errno)));
			++dropped_queries;
			return;
		}

		++recursive_queries;
	} else {
		/* authoritative queries timeout in 10s */
		event_set(&req->timeout, -1, 0, timeout, req);
		tv.tv_sec=authoritative_timeout;
		tv.tv_usec=0;
		event_add(&req->timeout, &tv);

		/* send it to our authoritative server */
		if ((byte = sendto(sock_answer, buf, (unsigned int)byte, 0,
		    (struct sockaddr *)&authoritative_addr,
		    sizeof(struct sockaddr_in))) == -1) {
			LogRel(("sendto failed: %s\n", strerror(errno)));
			++dropped_queries;
			return;
		}

		++authoritative_queries;
	}
#else /* VBOX */
	AssertPtr(pData);

	/* m->m_data points to IP header */
#if 0
	/* XXX: for some reason it make gdb ill,
	 * it good to have this assert here with assumption above.
	 */
	M_ASSERTPKTHDR(m);
#endif
	ip = mtod(m, struct ip *);
	udp = (struct udphdr *)(m->m_data + iphlen);

	fromaddr.sin_addr.s_addr = ip->ip_src.s_addr;
	fromaddr.sin_port = udp->uh_sport;
	fromaddr.sin_family = AF_INET;

	/* iphlen equals to lenght of ip header */
	Assert(iphlen == sizeof(struct ip));
	iphlen += sizeof (struct udphdr);

	/* DNS payload starts after the IP + UDP headers */
	byte = m->m_len - iphlen;
	buf = m->m_data + iphlen;

	/* check for minimum dns packet length */
	if (byte < 12) {
		LogRel(("NAT: Query too short from %RTnaipv4\n", fromaddr.sin_addr));
		++dropped_queries;
		return;
	}

	/* first query on this socket allocates the request; later calls reuse it */
	req = so->so_timeout_arg;

	if (!req)
	{
		Assert(!so->so_timeout_arg);

		/* extra `byte` bytes hold a copy of the original query payload */
		if ((req = RTMemAllocZ(sizeof(struct request) + byte)) == NULL)
		{
			LogRel(("NAT: calloc failed\n"));
			++dropped_queries;
			return;
		}

		req->id = QUERYID;
		memcpy(&req->client, &fromaddr, sizeof(struct sockaddr_in));
		memcpy(&req->clientid, &buf[0], 2);
		req->dns_server = TAILQ_LAST(&pData->pDnsList, dns_list_head);
		req->dnsgen = pData->dnsgen;
		if (req->dns_server == NULL)
		{
			RTMemFree(req);
			return;
		}
		retransmit = 0;
		so->so_timeout = timeout;
		so->so_timeout_arg = req;
		req->nbyte = byte;
		memcpy(req->byte, buf, byte);        /* copying original request */
	}
	else
	{
		/* the DNS server list changed since the request was created: drop */
		if (req->dnsgen != pData->dnsgen)
		{
			/* XXX: Log2 */
			LogRel(("NAT: dnsproxy: query: req %p dnsgen %u != %u on %R[natsock]\n",
				req, req->dnsgen, pData->dnsgen, so));
			/*
			 * XXX: TODO: this probably requires more cleanup.
			 * Cf. XXX comment for sendto() failure below, but that
			 * error leg is probably untested since ~never taken.
			 */
			++dropped_queries;
			return;
		}
		retransmit = 1;
	}

	req->recursion = 0;

	DPRINTF(("External query RD=%d\n", RD(buf)));

	if (retransmit == 0)
		hash_add_request(pData, req);

	/* overwrite the original query id */
	memcpy(&buf[0], &req->id, 2);

	/* let's slirp to care about expiration */
	so->so_expire = curtime + recursive_timeout * 1000;

	memset(&addr, 0, sizeof(struct sockaddr_in));
	addr.sin_family = AF_INET;
	if (req->dns_server->de_addr.s_addr ==
	    (pData->special_addr.s_addr | RT_H2N_U32_C(CTL_ALIAS))) {
		/* undo loopback remapping done in get_dns_addr_domain() */
		addr.sin_addr.s_addr = RT_N2H_U32_C(INADDR_LOOPBACK);
	}
	else {
		addr.sin_addr.s_addr = req->dns_server->de_addr.s_addr;
	}
	addr.sin_port = htons(53);

	/* send it to our authoritative server */
	Log2(("NAT: request will be %ssent to %RTnaipv4 on %R[natsock]\n",
	      retransmit ? "re" : "", addr.sin_addr, so));

	byte = sendto(so->s, buf, (unsigned int)byte, 0,
		      (struct sockaddr *)&addr,
		      sizeof(struct sockaddr_in));
	if (byte == -1)
	{
		/* XXX: is it really enough? */
		LogRel(("NAT: sendto failed: %s\n", strerror(errno)));
		++dropped_queries;
		return;
	}

	so->so_state = SS_ISFCONNECTED; /* now it's selected */
	Log2(("NAT: request was %ssent to %RTnaipv4 on %R[natsock]\n",
	      retransmit ? "re" : "", addr.sin_addr, so));

	++authoritative_queries;

# if 0
	/* XXX: this stuff for _debugging_ only,
	 * first enforce guest to send next request
	 * and second for faster getting timeout callback
	 * other option is adding couple entries in resolv.conf with
	 * invalid nameservers.
	 *
	 * For testing purposes could be used
	 * namebench -S -q 10000 -m random or -m chunk
	 */
	/* RTThreadSleep(3000); */
	/* curtime += 300; */
# endif
#endif /* VBOX */
}