/*!
 * Register a callback, returning an integer value suitable for
 * passing to glutAddMenuEntry()
 *
 * \param cb     Callback function to be called.
 * \param client Data to be passed to the callback.
 *
 * \return integer callback id, or -1 if the callback table is full
 */
static int
callback(void (*cb)(int, int, void *client), void *client)
{
    if (ncb == 0) {
        int i;
        for (i = 0; i < NA(callbacks); ++i)
            callbacks[i].cb = NULL;
    }
    else if (ncb >= NA(callbacks)) {
        fprintf(stderr, "callback() out of callbacks, try changing MAX_CALLBACKS\n");
        return -1;  /* avoid writing past the end of the table */
    }

    callbacks[ncb].cb = cb;
    callbacks[ncb].client = client;
    return ncb++;
}
inline Logical Logical::operator&&(Logical other) const
{
    if (isFalse() || other.isFalse()) {
        return false;
    }
    if (isNA() || other.isNA()) {
        return NA();
    }
    return true;
}
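/* The operands above are combined in Kleene (three-valued) order: a definite
 * false on either side dominates, NA propagates otherwise, and only
 * true && true yields true. The following is a minimal, self-contained sketch
 * of that truth table; the TriBool class is a hypothetical stand-in for the
 * Logical class above, whose real constructors and NA() factory may differ.
 */
#include <cstdio>

struct TriBool {
    enum Value { False = 0, True = 1, Missing = 2 } v;
    TriBool(bool b) : v(b ? True : False) {}
    static TriBool NA() { TriBool t(false); t.v = Missing; return t; }
    bool isFalse() const { return v == False; }
    bool isNA() const { return v == Missing; }

    TriBool operator&&(TriBool other) const {
        if (isFalse() || other.isFalse()) return TriBool(false); // false dominates
        if (isNA() || other.isNA()) return NA();                 // otherwise NA propagates
        return TriBool(true);
    }
    const char *str() const { return v == True ? "true" : v == False ? "false" : "NA"; }
};

int main() {
    TriBool vals[3] = { TriBool(true), TriBool(false), TriBool::NA() };
    for (const TriBool &a : vals)
        for (const TriBool &b : vals)
            std::printf("%5s && %5s -> %s\n", a.str(), b.str(), (a && b).str());
    return 0;
}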
/* Replacement for the driver ndo_start_xmit() method.
 * When this function is invoked because of the dev_queue_xmit() call
 * in generic_xmit_frame() (e.g. because of a txsync on the NIC), we have
 * to call the original ndo_start_xmit() method.
 * In all the other cases (e.g. when the TX request comes from the network
 * stack) we intercept the packet and put it into the RX ring associated
 * to the host stack.
 */
static netdev_tx_t
generic_ndo_start_xmit(struct mbuf *m, struct ifnet *ifp)
{
    struct netmap_generic_adapter *gna =
                    (struct netmap_generic_adapter *)NA(ifp);

    if (likely(m->priority == NM_MAGIC_PRIORITY_TX))
        return gna->save_start_xmit(m, ifp); /* To the driver. */

    /* To a netmap RX ring. */
    return linux_netmap_start_xmit(m, ifp);
}
/* vsize */
static bool
vsize_insert_cell(struct statistic *s, const void *sample)
{
    const libtop_psamp_t *psamp = sample;
    char buf[7];

    if (top_prefs_get_mmr()) {
        if (top_uinteger_format_mem_result(buf, sizeof(buf), psamp->vsize,
                                           psamp->p_vsize, 0ULL)) {
            return true;
        }
    } else {
        NA(buf);
    }

    return generic_insert_cell(s, buf);
}
/* reg/mregions */
static bool
mregion_insert_cell(struct statistic *s, const void *sample)
{
    const libtop_psamp_t *psamp = sample;
    char buf[GENERIC_INT_SIZE];

    if (top_prefs_get_mmr()) {
        if (top_uinteger_format_result(buf, sizeof(buf), psamp->reg,
                                       psamp->p_reg, 0ULL)) {
            return true;
        }
    } else {
        NA(buf);
    }

    return generic_insert_cell(s, buf);
}
/* Replacement for the driver ndo_start_xmit() method.
 * When this function is invoked because of the dev_queue_xmit() call
 * in generic_xmit_frame() (e.g. because of a txsync on the NIC), we have
 * to call the original ndo_start_xmit() method.
 * In all the other cases (e.g. when the TX request comes from the network
 * stack) we intercept the packet and put it into the RX ring associated
 * to the host stack.
 */
static netdev_tx_t
generic_ndo_start_xmit(struct mbuf *m, struct ifnet *ifp)
{
    struct netmap_generic_adapter *gna =
                    (struct netmap_generic_adapter *)NA(ifp);

    if (likely(m->priority == NM_MAGIC_PRIORITY_TX)) {
        /* Reset priority, so that generic_netmap_tx_clean()
         * knows that it can reclaim this mbuf. */
        m->priority = 0;
        return gna->save_start_xmit(m, ifp); /* To the driver. */
    }

    /* To a netmap RX ring. */
    return linux_netmap_start_xmit(m, ifp);
}
static struct mbuf *
generic_qdisc_dequeue(struct Qdisc *qdisc)
{
    struct mbuf *m = qdisc_dequeue_head(qdisc);

    if (!m) {
        return NULL;
    }

    if (unlikely(m->priority == NM_MAGIC_PRIORITY_TXQE)) {
        /* nm_os_generic_xmit_frame() asked us an event on this mbuf.
         * We have to set the priority to the normal TX token, so that
         * generic_ndo_start_xmit can pass it to the driver. */
        m->priority = NM_MAGIC_PRIORITY_TX;
        ND(5, "Event met, notify %p", m);
        netmap_generic_irq(NA(qdisc_dev(qdisc)),
                           skb_get_queue_mapping(m), NULL);
    }

    ND(5, "Dequeuing mbuf, len %u", qdisc_qlen(qdisc));

    return m;
}
short* foreach(short* list, short** bk)
{
    if (*bk == NULL)
        *bk = list;

    short* peek = NULL;

    while (!EOL(**bk)) {
        if (!NA(**bk)) {
            *bk = *bk + 1;
            return *bk - 1;
        } else {
            /* move following */
            if (peek == NULL)
                peek = *bk + 1;
            else
                peek++;
            **bk = *peek;
            *peek = DEL_ELEMENT;
        }
    }

    *bk = NULL;
    return NULL;
}
EndPoint() : value(NA()), type(INCLUSIVE_END_POINT) {}
static int
alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
    int rc, cntxt_id;
    size_t len;
    struct adapter *sc = pi->adapter;
    struct netmap_adapter *na = NA(pi->nm_ifp);
    struct fw_eq_eth_cmd c;

    MPASS(na != NULL);
    MPASS(nm_txq->desc != NULL);

    len = na->num_tx_desc * EQ_ESIZE + spg_len;
    bzero(nm_txq->desc, len);

    bzero(&c, sizeof(c));
    c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
        F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
        V_FW_EQ_ETH_CMD_VFN(0));
    c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
        F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
    c.autoequiqe_to_viid = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->nm_viid));
    c.fetchszm_to_iqid =
        htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
            V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
            V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
    c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
        V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
        V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
    c.eqaddr = htobe64(nm_txq->ba);

    rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
    if (rc != 0) {
        device_printf(pi->dev,
            "failed to create netmap egress queue: %d\n", rc);
        return (rc);
    }

    nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
    cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
    if (cntxt_id >= sc->sge.neq)
        panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
            cntxt_id, sc->sge.neq - 1);
    sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

    nm_txq->pidx = nm_txq->cidx = 0;
    MPASS(nm_txq->sidx == na->num_tx_desc);
    nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

    nm_txq->doorbells = sc->doorbells;
    if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
        isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
        isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
        uint32_t s_qpp = sc->sge.eq_s_qpp;
        uint32_t mask = (1 << s_qpp) - 1;
        volatile uint8_t *udb;

        udb = sc->udbs_base + UDBS_DB_OFFSET;
        udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
        nm_txq->udb_qid = nm_txq->cntxt_id & mask;
        if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
            clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
        else {
            udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
            nm_txq->udb_qid = 0;
        }
        nm_txq->udb = (volatile void *)udb;
    }

    return (rc);
}
static int
alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
    int rc, cntxt_id;
    __be32 v;
    struct adapter *sc = pi->adapter;
    struct netmap_adapter *na = NA(pi->nm_ifp);
    struct fw_iq_cmd c;

    MPASS(na != NULL);
    MPASS(nm_rxq->iq_desc != NULL);
    MPASS(nm_rxq->fl_desc != NULL);

    bzero(nm_rxq->iq_desc, pi->qsize_rxq * IQ_ESIZE);
    bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + spg_len);

    bzero(&c, sizeof(c));
    c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
        F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
        V_FW_IQ_CMD_VFN(0));
    c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
        FW_LEN16(c));
    if (pi->flags & INTR_NM_RXQ) {
        KASSERT(nm_rxq->intr_idx < sc->intr_count,
            ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
        v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
    } else {
        CXGBE_UNIMPLEMENTED(__func__);  /* XXXNM: needs review */
        v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
            F_FW_IQ_CMD_IQANDST;
    }
    c.type_to_iqandstindex = htobe32(v |
        V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
        V_FW_IQ_CMD_VIID(pi->nm_viid) |
        V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
    c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
        F_FW_IQ_CMD_IQGTSMODE |
        V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
        V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
    c.iqsize = htobe16(pi->qsize_rxq);
    c.iqaddr = htobe64(nm_rxq->iq_ba);
    c.iqns_to_fl0congen |=
        htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
            F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
            (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0));
    c.fl0dcaen_to_fl0cidxfthresh =
        htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
            V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
    c.fl0size = htobe16(na->num_rx_desc + spg_len / EQ_ESIZE);
    c.fl0addr = htobe64(nm_rxq->fl_ba);

    rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
    if (rc != 0) {
        device_printf(sc->dev,
            "failed to create netmap ingress queue: %d\n", rc);
        return (rc);
    }

    nm_rxq->iq_cidx = 0;
    MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / IQ_ESIZE);
    nm_rxq->iq_gen = F_RSPD_GEN;
    nm_rxq->iq_cntxt_id = be16toh(c.iqid);
    nm_rxq->iq_abs_id = be16toh(c.physiqid);
    cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
    if (cntxt_id >= sc->sge.niq) {
        panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
            __func__, cntxt_id, sc->sge.niq - 1);
    }
    sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

    nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
    nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
    MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
    cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
    if (cntxt_id >= sc->sge.neq) {
        panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
            __func__, cntxt_id, sc->sge.neq - 1);
    }
    sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

    nm_rxq->fl_db_val = F_DBPRIO | V_QID(nm_rxq->fl_cntxt_id) | V_PIDX(0);
    if (is_t5(sc))
        nm_rxq->fl_db_val |= F_DBTYPE;

    t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
        V_SEINTARM(F_QINTR_CNT_EN) | V_INGRESSQID(nm_rxq->iq_cntxt_id));

    return (rc);
}
/// Definition of the ideal gas constant [J/(mol K)]
inline Real R() { return kB() * NA(); }
/// Definition of Faraday's constant [C/mol]
inline Real F() { return e() * NA(); }
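/* Both constants above are products of more fundamental ones: R = kB * NA and
 * F = e * NA. The standalone sketch below checks the expected magnitudes using
 * the 2019 SI (CODATA) values; it assumes the kB(), e() and NA() helpers used
 * above return these same SI values, which the snippets themselves do not show.
 */
#include <cstdio>

int main() {
    const double kB = 1.380649e-23;    // Boltzmann constant [J/K]
    const double e  = 1.602176634e-19; // elementary charge [C]
    const double NA = 6.02214076e23;   // Avogadro constant [1/mol]

    std::printf("R = kB * NA = %.4f J/(mol K)\n", kB * NA); // ~8.3145
    std::printf("F = e  * NA = %.2f C/mol\n", e * NA);      // ~96485.33
    return 0;
}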
/* CGAT
 * read and make tables
 * BA -> NA
 */
static void CGAT()
{
    FILE *IN = NULL, *IN_b = NULL, *IN_rev = NULL;
    int **table_value = NULL;
    unsigned short **table_num = NULL;
    int state_a2, state_b = 0, state_rev;
    Fasta *fst1 = fasta_new(), *fst2 = fasta_new(), *fst_rev = fasta_new();
    clock_t start, start1, end0, end1, end2;

    /*----- do BA and NA for each fasta-pair -----*/
    /* genomeB file open */
    IN_b = my_fopen_r(idata_b->seqname);
    par.OUT = my_fopen_w(par.outputfile);

    for (idata_b->cnt = 0; idata_b->cnt < idata_b->fstnum; idata_b->cnt++) {
        start = clock();

        /* MakeTable */
        read_multifasta(IN_b, fst2, FORWARD, &state_b);
        table_value = (int **)my_malloc(idata_b->blocknum[idata_b->cnt] * sizeof(int *), "table_b_value");
        table_num = (unsigned short **)my_malloc(idata_b->blocknum[idata_b->cnt] * sizeof(unsigned short *), "table_b_num");
        Make_SeedTable(fst2, table_value, table_num);

        end0 = clock();
        if (opt.debug)
            printf("MakeTable time: %.2f sec.\n", (double)(end0 - start) / CLOCKS_PER_SEC);

        IN = my_fopen_r(idata_a->seqname);
        IN_rev = my_fopen_r(idata_a->seqname);
        state_a2 = 0;
        state_rev = 0;

        for (idata_a->cnt = 0; idata_a->cnt < idata_a->fstnum; idata_a->cnt++) {
            printf("\ngenomeA-fasta%d (%d blocks) - genomeB-fasta%d (%d blocks)\n",
                   idata_a->cnt + 1, idata_a->blocknum[idata_a->cnt],
                   idata_b->cnt + 1, idata_b->blocknum[idata_b->cnt]);
            start1 = clock();

            /*--- BA: the results are stored in aln_for/rev ---*/
            BA(&aln_for, table_value, table_num, FORWARD);
            if (reverse)
                BA(&aln_rev, table_value, table_num, REVERSE);
            if (idata_a->cnt == idata_a->fstnum - 1)
                table_b_delete(table_value, table_num, idata_b->blocknum[idata_b->cnt]);

            end1 = clock();
            if (opt.debug)
                printf("BA time: %.2f sec.\n", (double)(end1 - start1) / CLOCKS_PER_SEC);

            /*--- (if -b is on) output BA result and skip NA ---*/
            if (block) {
                output_BAresult();
                continue;
            }

            /*--- NA: detailed alignment within colonies in bl ---*/
            NA(IN, fst1, fst2, &aln_for, FORWARD, &state_a2);
            if (reverse)
                NA(IN_rev, fst_rev, fst2, &aln_rev, REVERSE, &state_rev);

            end2 = clock();
            if (opt.debug)
                printf("NA time: %.2f sec.\n", (double)(end2 - end1) / CLOCKS_PER_SEC);
        }

        free(fst2->head);
        free(fst2->body);
        fclose(IN);
        fclose(IN_rev);
    }

    if (opt.boundary)
        output_fastaboundary();

    free(fst1);
    free(fst2);
    free(fst_rev);
    fclose(IN_b);
    fclose(par.OUT);
}
static Vector na() { return Vector(NA()); }
void unset_lower_bound() { lower_bound_.value = NA(); }
void unset_upper_bound() { upper_bound_.value = NA(); }
void EncodingEDSRSA(char *M_fname, char *nA_fname, char *eA_fname, char *dA_fname,
                    char *nB_fname, char *eB_fname, char *dB_fname)
{
    /* Pack the four 32-bit words of the message's MD5 digest into one big integer M. */
    std::ifstream in(M_fname);
    int *M_hash = (int *)md5(&in), i;
    BigInt M(intToChar(M_hash[3])), NA(nA_fname, false), EA(eA_fname, false), DA(dA_fname, false);
    M *= BigInt("10000000000"); M += BigInt(intToChar(M_hash[2]));
    M *= BigInt("10000000000"); M += BigInt(intToChar(M_hash[1]));
    M *= BigInt("10000000000"); M += BigInt(intToChar(M_hash[0]));

    BigInt NB(nB_fname, false), EB(eB_fname, false), DB(dB_fname, false);
    BigInt Signature("1"), Code("1"), Encode("1"), CheckSign("1");

    /* Precompute M^(2^i) mod NA and the powers of two used as exponent steps. */
    BigInt DegreeNet[RNet];
    DegreeNet[0] = M;
    DegreeNet[0] %= NA;
    for (i = 1; i < RNet; i++) {
        DegreeNet[i] = DegreeNet[i - 1] * DegreeNet[i - 1];
        DegreeNet[i] %= NA;
    }
    BigInt degreeNum[RNet];
    degreeNum[0] = BigInt("1");
    for (int i = 1; i < RNet; i++)
        degreeNum[i] = degreeNum[i - 1] * BigInt("2");

    /* Signature = M^DA mod NA (sign with A's private exponent). */
    BigInt I("0");
    for (int j = RNet - 1; j >= 0;) {
        if (DA >= I + degreeNum[j]) {
            Signature *= DegreeNet[j];
            Signature %= NA;
            I += degreeNum[j];
        } else
            j--;
    }

    /* Code = Signature^EB mod NB (encrypt with B's public exponent). */
    DegreeNet[0] = Signature;
    DegreeNet[0] %= NB;
    for (i = 1; i < RNet; i++) {
        DegreeNet[i] = DegreeNet[i - 1] * DegreeNet[i - 1];
        DegreeNet[i] %= NB;
    }
    I = BigInt("0");
    for (int j = RNet - 1; j >= 0;) {
        if (EB >= I + degreeNum[j]) {
            Code *= DegreeNet[j];
            Code %= NB;
            I += degreeNum[j];
        } else
            j--;
    }

    /* Encode = Code^DB mod NB (B decrypts with its private exponent). */
    DegreeNet[0] = Code;
    DegreeNet[0] %= NB;
    for (i = 1; i < RNet; i++) {
        DegreeNet[i] = DegreeNet[i - 1] * DegreeNet[i - 1];
        DegreeNet[i] %= NB;
    }
    I = BigInt("0");
    for (int j = RNet - 1; j >= 0;) {
        if (DB >= I + degreeNum[j]) {
            Encode *= DegreeNet[j];
            Encode %= NB;
            I += degreeNum[j];
        } else
            j--;
    }

    /* CheckSign = Encode^EA mod NA (verify with A's public exponent). */
    DegreeNet[0] = Encode;
    DegreeNet[0] %= NA;
    for (i = 1; i < RNet; i++) {
        DegreeNet[i] = DegreeNet[i - 1] * DegreeNet[i - 1];
        DegreeNet[i] %= NA;
    }
    I = BigInt("0");
    for (int j = RNet - 1; j >= 0;) {
        if (EA >= I + degreeNum[j]) {
            CheckSign *= DegreeNet[j];
            CheckSign %= NA;
            I += degreeNum[j];
        } else
            j--;
    }

    M.TextWrite("hash.txt");
    Code.TextWrite("code.txt");
    Encode.TextWrite("encode.txt");
    CheckSign.TextWrite("checksign.txt");

    if (M % NA == CheckSign)
        std::cout << "OK\n";
    else
        std::cout << "NOT OK\n";
}
SmartPointer<Batch> Batch::Merge(SmartPointer<Batch> _A, SmartPointer<Batch> _B)
{
    if (!_A) return _B;
    if (!_B) return _A;

    Batch* A = _A.get();
    Batch* B = _B.get();

    if (A->primitive != B->primitive) return SmartPointer<Batch>();
    if (A->ambient   != B->ambient  ) return SmartPointer<Batch>();
    if (A->diffuse   != B->diffuse  ) return SmartPointer<Batch>();
    if (A->specular  != B->specular ) return SmartPointer<Batch>();
    if (A->emission  != B->emission ) return SmartPointer<Batch>();
    if (A->shininess != B->shininess) return SmartPointer<Batch>();

    if ((A->vertices && !B->vertices) || (!A->vertices && B->vertices)) return SmartPointer<Batch>();
    if ((A->normals  && !B->normals ) || (!A->normals  && B->normals )) return SmartPointer<Batch>();
    if ((A->colors   && !B->colors  ) || (!A->colors   && B->colors  )) return SmartPointer<Batch>();

    bool ATex0 = (A->texture0) && (A->texture0coords);
    bool BTex0 = (B->texture0) && (B->texture0coords);
    if ((ATex0 && !BTex0) || (!ATex0 && BTex0) ||
        (ATex0 && BTex0 && A->texture0.get() != B->texture0.get()))
        return SmartPointer<Batch>();

    bool ATex1 = (A->texture1) && (A->texture1coords);
    bool BTex1 = (B->texture1) && (B->texture1coords);
    if ((ATex1 && !BTex1) || (!ATex1 && BTex1) ||
        (ATex1 && BTex1 && A->texture1.get() != B->texture1.get()))
        return SmartPointer<Batch>();

    SmartPointer<Batch> ret(new Batch());
    ret->matrix    = Mat4f();
    ret->primitive = A->primitive;
    ret->ambient   = A->ambient;
    ret->diffuse   = A->diffuse;
    ret->specular  = A->specular;
    ret->emission  = A->emission;
    ret->shininess = A->shininess;

    //vertices
    if (A->vertices)
    {
        Vector VA(*(A->vertices));
        {
            Mat4f T = A->matrix;
            float* p = VA.mem();
            for (int i = 0; i < VA.size(); i += 3, p += 3)
            {
                Vec3f V = T * Vec3f(p[0], p[1], p[2]);
                p[0] = V.x; p[1] = V.y; p[2] = V.z;
            }
        }

        Vector VB(*(B->vertices));
        {
            Mat4f T = B->matrix;
            float* p = VB.mem();
            for (int i = 0; i < VB.size(); i += 3, p += 3)
            {
                Vec3f V = T * Vec3f(p[0], p[1], p[2]);
                p[0] = V.x; p[1] = V.y; p[2] = V.z;
            }
        }

        ret->vertices.reset(new Vector(VA));
        ret->vertices->append(VB);
    }

    //normals
    if (A->normals)
    {
        Vector NA(*(A->normals));
        {
            Mat4f T = A->matrix.invert();
            float* p = NA.mem();
            for (int i = 0; i < NA.size(); i += 3, p += 3)
            {
                Vec4f _N = Vec4f(p[0], p[1], p[2], 0.0) * T;
                Vec3f N = Vec3f(_N.x, _N.y, _N.z).normalize();
                p[0] = N.x; p[1] = N.y; p[2] = N.z;
            }
        }

        Vector NB(*(B->normals));
        {
            Mat4f T = B->matrix.invert();
            float* p = NB.mem();
            for (int i = 0; i < NB.size(); i += 3, p += 3)
            {
                Vec4f _N = Vec4f(p[0], p[1], p[2], 0.0) * T;
                Vec3f N = Vec3f(_N.x, _N.y, _N.z).normalize();
                p[0] = N.x; p[1] = N.y; p[2] = N.z;
            }
        }

        ret->normals.reset(new Vector(NA));
        ret->normals->append(NB);
    }

    //colors
    if (A->colors)
    {
        ret->colors.reset(new Vector(*(A->colors)));
        ret->colors->append(*(B->colors));
    }

    //texture 0
    if (ATex0)
    {
        ret->texture0 = A->texture0;
        ret->texture0coords.reset(new Vector(*(A->texture0coords)));
        ret->texture0coords->append(*(B->texture0coords));
    }

    //texture 1
    if (ATex1)
    {
        ret->texture1 = A->texture1;
        ret->texture1coords.reset(new Vector(*(A->texture1coords)));
        ret->texture1coords->append(*(B->texture1coords));
    }

    return ret;
}
/** @brief NA aware equality operator.
 *
 * Returns NA if either or both operands are NA. Otherwise returns
 * whether or not the two values are equal.
 */
Logical equals(Logical other) const
{
    return (isNA() || other.isNA()) ? NA() : identical(other);
}
explicit constexpr Vector(NA) : data_(nullptr), size_(NA()) {}
static constexpr Bool na() { return Bool(NA()); }
static constexpr Vector na() { return Vector(NA()); }
/*!
 * Call the indexed callback.
 *
 * \param idx  Callback index.
 * \param data Data to be passed to the callback.
 */
static void call_callback(int idx, int data)
{
    if (idx >= 0 && idx < NA(callbacks) && callbacks[idx].cb != NULL)
        callbacks[idx].cb(idx, data, callbacks[idx].client);
}
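/* callback() and call_callback() together form a small dispatch table keyed by
 * the integer id that, per the comment on callback(), is meant to be handed to
 * glutAddMenuEntry() and carried back to a menu handler. The sketch below
 * re-creates that pattern in a self-contained form: the NA() macro,
 * MAX_CALLBACKS, the callbacks table, greet() and main() are illustrative
 * stand-ins defined here only for this example, not taken from the original file.
 */
#include <cstdio>

#define MAX_CALLBACKS 16
#define NA(a) ((int)(sizeof(a) / sizeof((a)[0])))

struct Entry { void (*cb)(int, int, void *); void *client; };
static Entry callbacks[MAX_CALLBACKS];
static int ncb;

static int callback(void (*cb)(int, int, void *), void *client)
{
    if (ncb >= NA(callbacks))
        return -1;                  // table full
    callbacks[ncb].cb = cb;
    callbacks[ncb].client = client;
    return ncb++;                   // id suitable for a menu entry value
}

static void call_callback(int idx, int data)
{
    if (idx >= 0 && idx < NA(callbacks) && callbacks[idx].cb != NULL)
        callbacks[idx].cb(idx, data, callbacks[idx].client);
}

static void greet(int idx, int data, void *client)
{
    std::printf("callback %d fired, data=%d, client=\"%s\"\n",
                idx, data, static_cast<const char *>(client));
}

int main()
{
    static char label[] = "menu entry";
    int id = callback(greet, label);   // register
    call_callback(id, 42);             // dispatch, e.g. from a GLUT menu handler
    return 0;
}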
explicit Vector(NA) : is_direct_(true), size_(NA()), data_(nullptr) {}
// Inline definitions of operators.
inline Logical Logical::operator!() const
{
    if (isNA()) {
        return NA();
    }
    return Logical(1 - m_value);
}