/* Function:  p7_omx_FDeconvert()
 * Synopsis:  Convert an optimized DP matrix to a generic one.
 * Incept:    SRE, Tue Aug 19 17:58:13 2008 [Janelia]
 *
 * Purpose:   Convert the 32-bit float values in optimized DP matrix
 *            <ox> to a generic one <gx>. Caller provides <gx> with sufficient
 *            space to hold the <ox->M> by <ox->L> matrix.
 *
 *            This function is used to gain access to the
 *            somewhat more powerful debugging and display
 *            tools available for generic DP matrices.
 */
int
p7_omx_FDeconvert(P7_OMX *ox, P7_GMX *gx)
{
  int Q = p7O_NQF(ox->M);
  int i, q, r, k;
  union { __m128 v; float p[4]; } u;
  float **dp  = gx->dp;
  float  *xmx = gx->xmx;

  for (i = 0; i <= ox->L; i++)
    {
      MMX(i,0) = DMX(i,0) = IMX(i,0) = -eslINFINITY;
      for (q = 0; q < Q; q++)
	{
	  u.v = MMO(ox->dpf[i],q);  for (r = 0; r < 4; r++) { k = (Q*r)+q+1; if (k <= ox->M) MMX(i, (Q*r)+q+1) = u.p[r]; }
	  u.v = DMO(ox->dpf[i],q);  for (r = 0; r < 4; r++) { k = (Q*r)+q+1; if (k <= ox->M) DMX(i, (Q*r)+q+1) = u.p[r]; }
	  u.v = IMO(ox->dpf[i],q);  for (r = 0; r < 4; r++) { k = (Q*r)+q+1; if (k <= ox->M) IMX(i, (Q*r)+q+1) = u.p[r]; }
	}
      XMX(i,p7G_E) = ox->xmx[i*p7X_NXCELLS+p7X_E];
      XMX(i,p7G_N) = ox->xmx[i*p7X_NXCELLS+p7X_N];
      XMX(i,p7G_J) = ox->xmx[i*p7X_NXCELLS+p7X_J];
      XMX(i,p7G_B) = ox->xmx[i*p7X_NXCELLS+p7X_B];
      XMX(i,p7G_C) = ox->xmx[i*p7X_NXCELLS+p7X_C];
    }
  gx->L = ox->L;
  gx->M = ox->M;
  return eslOK;
}
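/* A minimal standalone illustration (not part of HMMER proper) of the
 * striped index mapping that the deconversion loops above depend on:
 * slot r of striped vector q in a row of Q float vectors corresponds to
 * model position k = Q*r + q + 1, and any k > M is unused padding.
 * The driver below is guarded by a hypothetical compile-time flag so it
 * has no effect on normal builds; show_striped_mapping() is an
 * illustrative name, not a HMMER function.
 */
#ifdef p7IMPL_STRIPING_EXAMPLE
#include <stdio.h>

static void
show_striped_mapping(int M)
{
  int Q = p7O_NQF(M);           /* number of float vectors per striped row */
  int q, r, k;

  printf("M = %d, Q = %d\n", M, Q);
  for (q = 0; q < Q; q++)
    for (r = 0; r < 4; r++)
      {
	k = Q*r + q + 1;        /* same mapping used in p7_omx_FDeconvert() above */
	if (k <= M) printf("vector %d, slot %d -> k = %d\n",  q, r, k);
	else        printf("vector %d, slot %d -> padding\n", q, r);
      }
}
#endif /* p7IMPL_STRIPING_EXAMPLE */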
/* Function:  p7_OptimalAccuracy()
 * Synopsis:  DP fill of an optimal accuracy alignment calculation.
 * Incept:    SRE, Mon Aug 18 11:04:48 2008 [Janelia]
 *
 * Purpose:   Calculates the fill step of the optimal accuracy decoding
 *            algorithm \citep{Kall05}.
 *
 *            Caller provides the posterior decoding matrix <pp>,
 *            which was calculated by Forward/Backward on a target sequence
 *            of length <pp->L> using the query model <om>.
 *
 *            Caller also provides a DP matrix <ox>, allocated for a full
 *            <om->M> by <L> comparison. The routine fills this in
 *            with OA scores.
 *
 * Args:      om    - query profile
 *            pp    - posterior decoding matrix created by <p7_GPosteriorDecoding()>
 *            ox    - RESULT: caller provided DP matrix for <om->M> by <L>
 *            ret_e - RETURN: expected number of correctly decoded positions
 *
 * Returns:   <eslOK> on success, and <*ret_e> contains the final OA
 *            score, which is the expected number of correctly decoded
 *            positions in the target sequence (up to <L>).
 *
 * Throws:    (no abnormal error conditions)
 */
int
p7_OptimalAccuracy(const P7_OPROFILE *om, const P7_OMX *pp, P7_OMX *ox, float *ret_e)
{
  vector float  mpv, dpv, ipv;    /* previous row values                                       */
  vector float  sv;               /* temp storage of 1 curr row value in progress              */
  vector float  xEv;              /* E state: keeps max for Mk->E as we go                     */
  vector float  xBv;              /* B state: splatted vector of B[i-1] for B->Mk calculations */
  vector float  dcv;
  float        *xmx = ox->xmx;
  vector float *dpc = ox->dpf[0]; /* current row, for use in {MDI}MO(dpp,q) access macro       */
  vector float *dpp;              /* previous row, for use in {MDI}MO(dpp,q) access macro      */
  vector float *ppp;              /* quads in the <pp> posterior probability matrix            */
  vector float *tp;               /* quads in the <om->tfv> transition scores                  */
  vector float  zerov;
  vector float  infv;
  int M = om->M;
  int Q = p7O_NQF(M);
  int q;
  int j;
  int i;
  float t1, t2;

  zerov = (vector float) vec_splat_u32(0);
  infv  = esl_vmx_set_float(-eslINFINITY);

  ox->M = om->M;
  ox->L = pp->L;

  for (q = 0; q < Q; q++) MMO(dpc, q) = IMO(dpc,q) = DMO(dpc,q) = infv;
  XMXo(0, p7X_E) = -eslINFINITY;
  XMXo(0, p7X_N) = 0.;
  XMXo(0, p7X_J) = -eslINFINITY;
  XMXo(0, p7X_B) = 0.;
  XMXo(0, p7X_C) = -eslINFINITY;

  for (i = 1; i <= pp->L; i++)
    {
      dpp = dpc;           /* previous DP row in OA matrix                             */
      dpc = ox->dpf[i];    /* current DP row in OA matrix                              */
      ppp = pp->dpf[i];    /* current row in the posterior probabilities per position  */
      tp  = om->tfv;       /* transition probabilities                                 */
      dcv = infv;
      xEv = infv;
      xBv = esl_vmx_set_float(XMXo(i-1, p7X_B));

      mpv = vec_sld(infv, MMO(dpp,Q-1), 12);  /* Right shifts by 4 bytes. 4,8,12,x becomes x,4,8,12. */
      dpv = vec_sld(infv, DMO(dpp,Q-1), 12);
      ipv = vec_sld(infv, IMO(dpp,Q-1), 12);
      for (q = 0; q < Q; q++)
	{
	  sv  =              vec_and(vec_cmpgt(*tp, zerov), xBv);   tp++;
	  sv  = vec_max(sv,  vec_and(vec_cmpgt(*tp, zerov), mpv));  tp++;
	  sv  = vec_max(sv,  vec_and(vec_cmpgt(*tp, zerov), ipv));  tp++;
	  sv  = vec_max(sv,  vec_and(vec_cmpgt(*tp, zerov), dpv));  tp++;
	  sv  = vec_add(sv, *ppp);                                  ppp += 2;
	  xEv = vec_max(xEv, sv);

	  mpv = MMO(dpp,q);
	  dpv = DMO(dpp,q);
	  ipv = IMO(dpp,q);

	  MMO(dpc,q) = sv;
	  DMO(dpc,q) = dcv;

	  dcv = vec_and(vec_cmpgt(*tp, zerov), sv);   tp++;

	  sv         =             vec_and(vec_cmpgt(*tp, zerov), mpv);   tp++;
	  sv         = vec_max(sv, vec_and(vec_cmpgt(*tp, zerov), ipv));  tp++;
	  IMO(dpc,q) = vec_add(sv, *ppp);                                 ppp++;
	}

      /* dcv has carried through from end of q loop above; store it
       * in first pass, we add M->D and D->D path into DMX
       */
      dcv = vec_sld(infv, dcv, 12);
      tp  = om->tfv + 7*Q;    /* set tp to start of the DD's */
      for (q = 0; q < Q; q++)
	{
	  DMO(dpc, q) = vec_max(dcv, DMO(dpc, q));
	  dcv         = vec_and(vec_cmpgt(*tp, zerov), DMO(dpc,q));  tp++;
	}

      /* fully serialized D->D; can optimize later */
      for (j = 1; j < 4; j++)
	{
	  dcv = vec_sld(infv, dcv, 12);
	  tp  = om->tfv + 7*Q;
	  for (q = 0; q < Q; q++)
	    {
	      DMO(dpc, q) = vec_max(dcv, DMO(dpc, q));
	      dcv         = vec_and(vec_cmpgt(*tp, zerov), dcv);  tp++;
	    }
	}

      /* D->E paths */
      for (q = 0; q < Q; q++) xEv = vec_max(xEv, DMO(dpc,q));

      /* Specials */
      XMXo(i,p7X_E) = esl_vmx_hmax_float(xEv);

      t1 = ( (om->xf[p7O_J][p7O_LOOP] == 0.0) ? 0.0 : ox->xmx[(i-1)*p7X_NXCELLS+p7X_J] + pp->xmx[i*p7X_NXCELLS+p7X_J]);
      t2 = ( (om->xf[p7O_E][p7O_LOOP] == 0.0) ? 0.0 : ox->xmx[ i   *p7X_NXCELLS+p7X_E]);
      ox->xmx[i*p7X_NXCELLS+p7X_J] = ESL_MAX(t1, t2);

      t1 = ( (om->xf[p7O_C][p7O_LOOP] == 0.0) ? 0.0 : ox->xmx[(i-1)*p7X_NXCELLS+p7X_C] + pp->xmx[i*p7X_NXCELLS+p7X_C]);
      t2 = ( (om->xf[p7O_E][p7O_MOVE] == 0.0) ? 0.0 : ox->xmx[ i   *p7X_NXCELLS+p7X_E]);
      ox->xmx[i*p7X_NXCELLS+p7X_C] = ESL_MAX(t1, t2);

      ox->xmx[i*p7X_NXCELLS+p7X_N] = ((om->xf[p7O_N][p7O_LOOP] == 0.0) ? 0.0 : ox->xmx[(i-1)*p7X_NXCELLS+p7X_N] + pp->xmx[i*p7X_NXCELLS+p7X_N]);

      t1 = ( (om->xf[p7O_N][p7O_MOVE] == 0.0) ? 0.0 : ox->xmx[i*p7X_NXCELLS+p7X_N]);
      t2 = ( (om->xf[p7O_J][p7O_MOVE] == 0.0) ? 0.0 : ox->xmx[i*p7X_NXCELLS+p7X_J]);
      ox->xmx[i*p7X_NXCELLS+p7X_B] = ESL_MAX(t1, t2);
    }

  *ret_e = ox->xmx[pp->L*p7X_NXCELLS+p7X_C];
  return eslOK;
}
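/* How the OA fill step above is meant to be used: the caller runs Forward
 * and Backward first, converts them into a posterior decoding matrix, then
 * calls p7_OptimalAccuracy() and finally a traceback. The sketch below is
 * a hedged outline, not a verbatim HMMER driver: the companion routine
 * names and argument orders (p7_Forward, p7_Backward, p7_Decoding,
 * p7_OATrace, p7_omx_Create/Destroy) are assumed from the surrounding
 * codebase and may differ between versions, error handling is omitted,
 * and the guard macro is hypothetical.
 */
#ifdef p7IMPL_OA_PIPELINE_SKETCH
static int
oa_pipeline_sketch(const ESL_DSQ *dsq, int L, const P7_OPROFILE *om, P7_TRACE *tr)
{
  P7_OMX *fwd = p7_omx_Create(om->M, L, L);   /* full Forward matrix        */
  P7_OMX *bck = p7_omx_Create(om->M, L, L);   /* full Backward matrix       */
  P7_OMX *pp  = p7_omx_Create(om->M, L, L);   /* posterior decoding matrix  */
  P7_OMX *oa  = p7_omx_Create(om->M, L, L);   /* OA fill matrix             */
  float   fsc, bsc, e;

  p7_Forward (dsq, L, om, fwd, &fsc);         /* Forward fill               */
  p7_Backward(dsq, L, om, fwd, bck, &bsc);    /* Backward fill              */
  p7_Decoding(om, fwd, bck, pp);              /* pp now holds posteriors    */
  p7_OptimalAccuracy(om, pp, oa, &e);         /* the fill step defined above */
  p7_OATrace (om, pp, oa, tr);                /* traceback of the OA path   */

  p7_omx_Destroy(fwd);  p7_omx_Destroy(bck);
  p7_omx_Destroy(pp);   p7_omx_Destroy(oa);
  return eslOK;
}
#endif /* p7IMPL_OA_PIPELINE_SKETCH */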
static int
backward_engine(int do_full, const ESL_DSQ *dsq, int L, const P7_OPROFILE *om, const P7_OMX *fwd, P7_OMX *bck, float *opt_sc)
{
  register __m128 mpv, ipv, dpv;    /* previous row values                                        */
  register __m128 mcv, dcv;         /* current row values                                         */
  register __m128 tmmv, timv, tdmv; /* tmp vars for accessing rotated transition scores           */
  register __m128 xBv;              /* collects B->Mk components of B(i)                          */
  register __m128 xEv;              /* splatted E(i)                                              */
  __m128   zerov;                   /* splatted 0.0's in a vector                                 */
  float    xN, xE, xB, xC, xJ;      /* special states' scores                                     */
  int      i;                       /* counter over sequence positions 0,1..L                     */
  int      q;                       /* counter over quads 0..Q-1                                  */
  int      Q = p7O_NQF(om->M);      /* segment length: # of vectors                               */
  int      j;                       /* DD segment iteration counter (4 = full serialization)      */
  __m128  *dpc;                     /* current DP row                                             */
  __m128  *dpp;                     /* next ("previous") DP row                                   */
  __m128  *rp;                      /* will point into om->rfv[x] for residue x[i+1]              */
  __m128  *tp;                      /* will point into (and step thru) om->tfv transition scores  */

  /* initialize the L row. */
  bck->M = om->M;
  bck->L = L;
  bck->has_own_scales = FALSE;      /* backwards scale factors are *usually* given by <fwd> */
  dpc    = bck->dpf[L * do_full];
  xJ     = 0.0;
  xB     = 0.0;
  xN     = 0.0;
  xC     = om->xf[p7O_C][p7O_MOVE];      /* C<-T          */
  xE     = xC * om->xf[p7O_E][p7O_MOVE]; /* E<-C, no tail */
  xEv    = _mm_set1_ps(xE);
  zerov  = _mm_setzero_ps();
  dcv    = zerov;                   /* solely to silence a compiler warning */
  for (q = 0; q < Q; q++) MMO(dpc,q) = DMO(dpc,q) = xEv;
  for (q = 0; q < Q; q++) IMO(dpc,q) = zerov;

  /* init row L's DD paths, 1) first segment includes xE, from DMO(q) */
  tp  = om->tfv + 8*Q - 1;                               /* <*tp> now the [4 8 12 x] TDD quad         */
  dpv = _mm_move_ss(DMO(dpc,Q-1), zerov);                /* start leftshift: [1 5 9 13] -> [x 5 9 13] */
  dpv = _mm_shuffle_ps(dpv, dpv, _MM_SHUFFLE(0,3,2,1));  /* finish leftshift:[x 5 9 13] -> [5 9 13 x] */
  for (q = Q-1; q >= 0; q--)
    {
      dcv        = _mm_mul_ps(dpv, *tp); tp--;
      DMO(dpc,q) = _mm_add_ps(DMO(dpc,q), dcv);
      dpv        = DMO(dpc,q);
    }
  /* 2) three more passes, only extending DD component (dcv only; no xE contrib from DMO(q)) */
  for (j = 1; j < 4; j++)
    {
      tp  = om->tfv + 8*Q - 1;                               /* <*tp> now the [4 8 12 x] TDD quad         */
      dcv = _mm_move_ss(dcv, zerov);                         /* start leftshift: [1 5 9 13] -> [x 5 9 13] */
      dcv = _mm_shuffle_ps(dcv, dcv, _MM_SHUFFLE(0,3,2,1));  /* finish leftshift:[x 5 9 13] -> [5 9 13 x] */
      for (q = Q-1; q >= 0; q--)
	{
	  dcv        = _mm_mul_ps(dcv, *tp); tp--;
	  DMO(dpc,q) = _mm_add_ps(DMO(dpc,q), dcv);
	}
    }
  /* now MD init */
  tp  = om->tfv + 7*Q - 3;                               /* <*tp> now the [4 8 12 x] Mk->Dk+1 quad    */
  dcv = _mm_move_ss(DMO(dpc,0), zerov);                  /* start leftshift: [1 5 9 13] -> [x 5 9 13] */
  dcv = _mm_shuffle_ps(dcv, dcv, _MM_SHUFFLE(0,3,2,1));  /* finish leftshift:[x 5 9 13] -> [5 9 13 x] */
  for (q = Q-1; q >= 0; q--)
    {
      MMO(dpc,q) = _mm_add_ps(MMO(dpc,q), _mm_mul_ps(dcv, *tp)); tp -= 7;
      dcv        = DMO(dpc,q);
    }

  /* Sparse rescaling: same scale factors as fwd matrix */
  if (fwd->xmx[L*p7X_NXCELLS+p7X_SCALE] > 1.0)
    {
      xE  = xE / fwd->xmx[L*p7X_NXCELLS+p7X_SCALE];
      xN  = xN / fwd->xmx[L*p7X_NXCELLS+p7X_SCALE];
      xC  = xC / fwd->xmx[L*p7X_NXCELLS+p7X_SCALE];
      xJ  = xJ / fwd->xmx[L*p7X_NXCELLS+p7X_SCALE];
      xB  = xB / fwd->xmx[L*p7X_NXCELLS+p7X_SCALE];
      xEv = _mm_set1_ps(1.0 / fwd->xmx[L*p7X_NXCELLS+p7X_SCALE]);
      for (q = 0; q < Q; q++)
	{
	  MMO(dpc,q) = _mm_mul_ps(MMO(dpc,q), xEv);
	  DMO(dpc,q) = _mm_mul_ps(DMO(dpc,q), xEv);
	  IMO(dpc,q) = _mm_mul_ps(IMO(dpc,q), xEv);
	}
    }
  bck->xmx[L*p7X_NXCELLS+p7X_SCALE] = fwd->xmx[L*p7X_NXCELLS+p7X_SCALE];
  bck->totscale                     = log(bck->xmx[L*p7X_NXCELLS+p7X_SCALE]);

  /* Stores */
  bck->xmx[L*p7X_NXCELLS+p7X_E] = xE;
  bck->xmx[L*p7X_NXCELLS+p7X_N] = xN;
  bck->xmx[L*p7X_NXCELLS+p7X_J] = xJ;
  bck->xmx[L*p7X_NXCELLS+p7X_B] = xB;
  bck->xmx[L*p7X_NXCELLS+p7X_C] = xC;

#if p7_DEBUGGING
  if (bck->debugging) p7_omx_DumpFBRow(bck, TRUE, L, 9, 4, xE, xN, xJ, xB, xC);  /* logify=TRUE, <rowi>=L, width=9, precision=4 */
#endif

  /* main recursion */
  for (i = L-1; i >= 1; i--)    /* backwards stride */
    {
      /* phase 1. B(i) collected. Old row destroyed, new row contains
       * complete I(i,k), partial {MD}(i,k) w/ no {MD}->{DE} paths yet.
       */
      dpc = bck->dpf[i     * do_full];
      dpp = bck->dpf[(i+1) * do_full];
      rp  = om->rfv[dsq[i+1]] + Q-1;   /* <*rp> is now the [4 8 12 x] match emission quad  */
      tp  = om->tfv + 7*Q - 1;         /* <*tp> is now the [4 8 12 x] TII transition quad  */

      /* leftshift the first transition quads */
      tmmv = _mm_move_ss(om->tfv[1], zerov); tmmv = _mm_shuffle_ps(tmmv, tmmv, _MM_SHUFFLE(0,3,2,1));
      timv = _mm_move_ss(om->tfv[2], zerov); timv = _mm_shuffle_ps(timv, timv, _MM_SHUFFLE(0,3,2,1));
      tdmv = _mm_move_ss(om->tfv[3], zerov); tdmv = _mm_shuffle_ps(tdmv, tdmv, _MM_SHUFFLE(0,3,2,1));

      mpv = _mm_mul_ps(MMO(dpp,0), om->rfv[dsq[i+1]][0]); /* precalc M(i+1,k+1) * e(M_k+1, x_{i+1}) */
      mpv = _mm_move_ss(mpv, zerov);
      mpv = _mm_shuffle_ps(mpv, mpv, _MM_SHUFFLE(0,3,2,1));

      xBv = zerov;
      for (q = Q-1; q >= 0; q--)       /* backwards stride */
	{
	  ipv        = IMO(dpp,q);     /* assumes emission odds ratio of 1.0; i+1's IMO(q) now free */
	  IMO(dpc,q) = _mm_add_ps(_mm_mul_ps(ipv, *tp), _mm_mul_ps(mpv, timv));  tp--;
	  DMO(dpc,q) = _mm_mul_ps(mpv, tdmv);
	  mcv        = _mm_add_ps(_mm_mul_ps(ipv, *tp), _mm_mul_ps(mpv, tmmv));  tp -= 2;

	  mpv        = _mm_mul_ps(MMO(dpp,q), *rp);  rp--;  /* obtain mpv for next q. i+1's MMO(q) is freed */
	  MMO(dpc,q) = mcv;

	  tdmv = *tp;  tp--;
	  timv = *tp;  tp--;
	  tmmv = *tp;  tp--;

	  xBv = _mm_add_ps(xBv, _mm_mul_ps(mpv, *tp));  tp--;
	}

      /* phase 2: now that we have accumulated the B->Mk transitions in xBv, we can do the specials */
      /* this incantation is a horiz sum of xBv elements: (_mm_hadd_ps() would require SSE3) */
      xBv = _mm_add_ps(xBv, _mm_shuffle_ps(xBv, xBv, _MM_SHUFFLE(0, 3, 2, 1)));
      xBv = _mm_add_ps(xBv, _mm_shuffle_ps(xBv, xBv, _MM_SHUFFLE(1, 0, 3, 2)));
      _mm_store_ss(&xB, xBv);

      xC = xC * om->xf[p7O_C][p7O_LOOP];
      xJ = (xB * om->xf[p7O_J][p7O_MOVE]) + (xJ * om->xf[p7O_J][p7O_LOOP]);  /* must come after xB     */
      xN = (xB * om->xf[p7O_N][p7O_MOVE]) + (xN * om->xf[p7O_N][p7O_LOOP]);  /* must come after xB     */
      xE = (xC * om->xf[p7O_E][p7O_MOVE]) + (xJ * om->xf[p7O_E][p7O_LOOP]);  /* must come after xJ, xC */
      xEv = _mm_set1_ps(xE);  /* splat */

      /* phase 3: {MD}->E paths and one step of the D->D paths */
      tp  = om->tfv + 8*Q - 1;    /* <*tp> now the [4 8 12 x] TDD quad */
      dpv = _mm_add_ps(DMO(dpc,0), xEv);
      dpv = _mm_move_ss(dpv, zerov);
      dpv = _mm_shuffle_ps(dpv, dpv, _MM_SHUFFLE(0,3,2,1));
      for (q = Q-1; q >= 0; q--)
	{
	  dcv        = _mm_mul_ps(dpv, *tp); tp--;
	  DMO(dpc,q) = _mm_add_ps(DMO(dpc,q), _mm_add_ps(dcv, xEv));
	  dpv        = DMO(dpc,q);
	  MMO(dpc,q) = _mm_add_ps(MMO(dpc,q), xEv);
	}

      /* phase 4: finish extending the DD paths */
      /* fully serialized for now */
      for (j = 1; j < 4; j++)     /* three passes: we've already done 1 segment, we need 4 total */
	{
	  dcv = _mm_move_ss(dcv, zerov);
	  dcv = _mm_shuffle_ps(dcv, dcv, _MM_SHUFFLE(0,3,2,1));
	  tp  = om->tfv + 8*Q - 1;    /* <*tp> now the [4 8 12 x] TDD quad */
	  for (q = Q-1; q >= 0; q--)
	    {
	      dcv        = _mm_mul_ps(dcv, *tp); tp--;
	      DMO(dpc,q) = _mm_add_ps(DMO(dpc,q), dcv);
	    }
	}

      /* phase 5: add M->D paths */
      dcv = _mm_move_ss(DMO(dpc,0), zerov);
      dcv = _mm_shuffle_ps(dcv, dcv, _MM_SHUFFLE(0,3,2,1));
      tp  = om->tfv + 7*Q - 3;    /* <*tp> is now the [4 8 12 x] Mk->Dk+1 quad */
      for (q = Q-1; q >= 0; q--)
	{
	  MMO(dpc,q) = _mm_add_ps(MMO(dpc,q), _mm_mul_ps(dcv, *tp)); tp -= 7;
	  dcv        = DMO(dpc,q);
	}

      /* Sparse rescaling */
      /* In rare cases [J3/119] scale factors from <fwd> are
       * insufficient and backwards will overflow. In this case, we
       * switch on the fly to using our own scale factors, different
       * from those in <fwd>. This will complicate subsequent
       * posterior decoding routines.
       */
      if (xB > 1.0e16) bck->has_own_scales = TRUE;
      if      (bck->has_own_scales) bck->xmx[i*p7X_NXCELLS+p7X_SCALE] = (xB > 1.0e4) ? xB : 1.0;
      else                          bck->xmx[i*p7X_NXCELLS+p7X_SCALE] = fwd->xmx[i*p7X_NXCELLS+p7X_SCALE];

      if (bck->xmx[i*p7X_NXCELLS+p7X_SCALE] > 1.0)
	{
	  xE /= bck->xmx[i*p7X_NXCELLS+p7X_SCALE];
	  xN /= bck->xmx[i*p7X_NXCELLS+p7X_SCALE];
	  xJ /= bck->xmx[i*p7X_NXCELLS+p7X_SCALE];
	  xB /= bck->xmx[i*p7X_NXCELLS+p7X_SCALE];
	  xC /= bck->xmx[i*p7X_NXCELLS+p7X_SCALE];
	  xBv = _mm_set1_ps(1.0 / bck->xmx[i*p7X_NXCELLS+p7X_SCALE]);
	  for (q = 0; q < Q; q++)
	    {
	      MMO(dpc,q) = _mm_mul_ps(MMO(dpc,q), xBv);
	      DMO(dpc,q) = _mm_mul_ps(DMO(dpc,q), xBv);
	      IMO(dpc,q) = _mm_mul_ps(IMO(dpc,q), xBv);
	    }
	  bck->totscale += log(bck->xmx[i*p7X_NXCELLS+p7X_SCALE]);
	}

      /* Stores are separate only for pedagogical reasons: easy to
       * turn this into a more memory efficient version just by
       * deleting the stores.
       */
      bck->xmx[i*p7X_NXCELLS+p7X_E] = xE;
      bck->xmx[i*p7X_NXCELLS+p7X_N] = xN;
      bck->xmx[i*p7X_NXCELLS+p7X_J] = xJ;
      bck->xmx[i*p7X_NXCELLS+p7X_B] = xB;
      bck->xmx[i*p7X_NXCELLS+p7X_C] = xC;

#if p7_DEBUGGING
      if (bck->debugging) p7_omx_DumpFBRow(bck, TRUE, i, 9, 4, xE, xN, xJ, xB, xC);  /* logify=TRUE, <rowi>=i, width=9, precision=4 */
#endif
    } /* thus ends the loop over sequence positions i */

  /* Termination at i=0, where we can only reach N,B states. */
  dpp = bck->dpf[1 * do_full];
  tp  = om->tfv;          /* <*tp> is now the [1 5 9 13] TBMk transition quad */
  rp  = om->rfv[dsq[1]];  /* <*rp> is now the [1 5 9 13] match emission quad  */
  xBv = zerov;
  for (q = 0; q < Q; q++)
    {
      mpv = _mm_mul_ps(MMO(dpp,q), *rp);  rp++;
      mpv = _mm_mul_ps(mpv, *tp);         tp += 7;
      xBv = _mm_add_ps(xBv, mpv);
    }
  /* horizontal sum of xBv */
  xBv = _mm_add_ps(xBv, _mm_shuffle_ps(xBv, xBv, _MM_SHUFFLE(0, 3, 2, 1)));
  xBv = _mm_add_ps(xBv, _mm_shuffle_ps(xBv, xBv, _MM_SHUFFLE(1, 0, 3, 2)));
  _mm_store_ss(&xB, xBv);

  xN = (xB * om->xf[p7O_N][p7O_MOVE]) + (xN * om->xf[p7O_N][p7O_LOOP]);

  bck->xmx[p7X_B]     = xB;
  bck->xmx[p7X_C]     = 0.0;
  bck->xmx[p7X_J]     = 0.0;
  bck->xmx[p7X_N]     = xN;
  bck->xmx[p7X_E]     = 0.0;
  bck->xmx[p7X_SCALE] = 1.0;

#if p7_DEBUGGING
  dpc = bck->dpf[0];
  for (q = 0; q < Q; q++) /* Not strictly necessary, but if someone's looking at DP matrices, this is nice to do: */
    MMO(dpc,q) = DMO(dpc,q) = IMO(dpc,q) = zerov;
  if (bck->debugging) p7_omx_DumpFBRow(bck, TRUE, 0, 9, 4, bck->xmx[p7X_E], bck->xmx[p7X_N], bck->xmx[p7X_J], bck->xmx[p7X_B], bck->xmx[p7X_C]);  /* logify=TRUE, <rowi>=0, width=9, precision=4 */
#endif

  if      (isnan(xN))        ESL_EXCEPTION(eslERANGE, "backward score is NaN");
  else if (L>0 && xN == 0.0) ESL_EXCEPTION(eslERANGE, "backward score underflow (is 0.0)");      /* if L==0, xN *should* be 0.0 [J5/118] */
  else if (isinf(xN) == 1)   ESL_EXCEPTION(eslERANGE, "backward score overflow (is infinity)");

  if (opt_sc != NULL) *opt_sc = bck->totscale + log(xN);
  return eslOK;
}
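/* The two-shuffle "incantation" used above (and again in forward_engine())
 * is a pre-SSE3 horizontal sum of a vector's four lanes. Written out on its
 * own as a small standalone illustration; hsum_ps() is a hypothetical helper
 * name, not a HMMER function, and the guard macro keeps it out of normal
 * builds.
 */
#ifdef p7IMPL_HSUM_EXAMPLE
static float
hsum_ps(__m128 v)                    /* v = [a b c d] (lanes 0..3)          */
{
  float sum;
  v = _mm_add_ps(v, _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 3, 2, 1)));
                                     /* now [a+b  b+c  c+d  d+a]            */
  v = _mm_add_ps(v, _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 0, 3, 2)));
                                     /* every lane now holds a+b+c+d        */
  _mm_store_ss(&sum, v);             /* extract lane 0                      */
  return sum;
}
#endif /* p7IMPL_HSUM_EXAMPLE */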
static int
forward_engine(int do_full, const ESL_DSQ *dsq, int L, const P7_OPROFILE *om, P7_OMX *ox, float *opt_sc)
{
  register __m128 mpv, dpv, ipv;   /* previous row values                                        */
  register __m128 sv;              /* temp storage of 1 curr row value in progress               */
  register __m128 dcv;             /* delayed storage of D(i,q+1)                                */
  register __m128 xEv;             /* E state: keeps max for Mk->E as we go                      */
  register __m128 xBv;             /* B state: splatted vector of B[i-1] for B->Mk calculations  */
  __m128   zerov;                  /* splatted 0.0's in a vector                                 */
  float    xN, xE, xB, xC, xJ;     /* special states' scores                                     */
  int i;                           /* counter over sequence positions 1..L                       */
  int q;                           /* counter over quads 0..nq-1                                 */
  int j;                           /* counter over DD iterations (4 is full serialization)       */
  int Q = p7O_NQF(om->M);          /* segment length: # of vectors                               */
  __m128 *dpc = ox->dpf[0];        /* current row, for use in {MDI}MO(dpp,q) access macro        */
  __m128 *dpp;                     /* previous row, for use in {MDI}MO(dpp,q) access macro       */
  __m128 *rp;                      /* will point at om->rfv[x] for residue x[i]                  */
  __m128 *tp;                      /* will point into (and step thru) om->tfv                    */

  /* Initialization. */
  ox->M  = om->M;
  ox->L  = L;
  ox->has_own_scales = TRUE;       /* all forward matrices control their own scalefactors */
  zerov  = _mm_setzero_ps();
  for (q = 0; q < Q; q++)
    MMO(dpc,q) = IMO(dpc,q) = DMO(dpc,q) = zerov;
  xE = ox->xmx[p7X_E] = 0.;
  xN = ox->xmx[p7X_N] = 1.;
  xJ = ox->xmx[p7X_J] = 0.;
  xB = ox->xmx[p7X_B] = om->xf[p7O_N][p7O_MOVE];
  xC = ox->xmx[p7X_C] = 0.;

  ox->xmx[p7X_SCALE] = 1.0;
  ox->totscale       = 0.0;

#if p7_DEBUGGING
  if (ox->debugging) p7_omx_DumpFBRow(ox, TRUE, 0, 9, 5, xE, xN, xJ, xB, xC);  /* logify=TRUE, <rowi>=0, width=9, precision=5 */
#endif

  for (i = 1; i <= L; i++)
    {
      dpp = dpc;
      dpc = ox->dpf[do_full * i];  /* avoid conditional, use do_full as kronecker delta */
      rp  = om->rfv[dsq[i]];
      tp  = om->tfv;
      dcv = _mm_setzero_ps();
      xEv = _mm_setzero_ps();
      xBv = _mm_set1_ps(xB);

      /* Right shifts by 4 bytes. 4,8,12,x becomes x,4,8,12. Shift zeros on. */
      mpv = esl_sse_rightshift_ps(MMO(dpp,Q-1), zerov);
      dpv = esl_sse_rightshift_ps(DMO(dpp,Q-1), zerov);
      ipv = esl_sse_rightshift_ps(IMO(dpp,Q-1), zerov);

      for (q = 0; q < Q; q++)
	{
	  /* Calculate new MMO(i,q); don't store it yet, hold it in sv. */
	  sv  =                _mm_mul_ps(xBv, *tp);   tp++;
	  sv  = _mm_add_ps(sv, _mm_mul_ps(mpv, *tp));  tp++;
	  sv  = _mm_add_ps(sv, _mm_mul_ps(ipv, *tp));  tp++;
	  sv  = _mm_add_ps(sv, _mm_mul_ps(dpv, *tp));  tp++;
	  sv  = _mm_mul_ps(sv, *rp);                   rp++;
	  xEv = _mm_add_ps(xEv, sv);

	  /* Load {MDI}(i-1,q) into mpv, dpv, ipv;
	   * {MDI}MX(q) is then the current, not the prev row
	   */
	  mpv = MMO(dpp,q);
	  dpv = DMO(dpp,q);
	  ipv = IMO(dpp,q);

	  /* Do the delayed stores of {MD}(i,q) now that memory is usable */
	  MMO(dpc,q) = sv;
	  DMO(dpc,q) = dcv;

	  /* Calculate the next D(i,q+1) partially: M->D only;
	   * delay storage, holding it in dcv
	   */
	  dcv = _mm_mul_ps(sv, *tp);  tp++;

	  /* Calculate and store I(i,q); assumes odds ratio for emission is 1.0 */
	  sv         = _mm_mul_ps(mpv, *tp);                  tp++;
	  IMO(dpc,q) = _mm_add_ps(sv, _mm_mul_ps(ipv, *tp));  tp++;
	}

      /* Now the DD paths. We would rather not serialize them but
       * in an accurate Forward calculation, we have few options.
       */
      /* dcv has carried through from end of q loop above; store it
       * in first pass, we add M->D and D->D path into DMX
       */
      /* We're almost certainly obligated to do at least one complete
       * DD path to be sure:
       */
      dcv        = esl_sse_rightshift_ps(dcv, zerov);
      DMO(dpc,0) = zerov;
      tp         = om->tfv + 7*Q;    /* set tp to start of the DD's */
      for (q = 0; q < Q; q++)
	{
	  DMO(dpc,q) = _mm_add_ps(dcv, DMO(dpc,q));
	  dcv        = _mm_mul_ps(DMO(dpc,q), *tp); tp++;  /* extend DMO(q), so we include M->D and D->D paths */
	}

      /* now. on small models, it seems best (empirically) to just go
       * ahead and serialize. on large models, we can do a bit better,
       * by testing for when dcv (DD path) accrued to DMO(q) is below
       * machine epsilon for all q, in which case we know DMO(q) are all
       * at their final values. The tradeoff point is (empirically) somewhere around M=100,
       * at least on my desktop. We don't worry about the conditional here;
       * it's outside any inner loops.
       */
      if (om->M < 100)
	{ /* Fully serialized version */
	  for (j = 1; j < 4; j++)
	    {
	      dcv = esl_sse_rightshift_ps(dcv, zerov);
	      tp  = om->tfv + 7*Q;    /* set tp to start of the DD's */
	      for (q = 0; q < Q; q++)
		{ /* note, extend dcv, not DMO(q); only adding DD paths now */
		  DMO(dpc,q) = _mm_add_ps(dcv, DMO(dpc,q));
		  dcv        = _mm_mul_ps(dcv, *tp);  tp++;
		}
	    }
	}
      else
	{ /* Slightly parallelized version, but which incurs some overhead */
	  for (j = 1; j < 4; j++)
	    {
	      register __m128 cv;    /* keeps track of whether any DD's change DMO(q) */

	      dcv = esl_sse_rightshift_ps(dcv, zerov);
	      tp  = om->tfv + 7*Q;    /* set tp to start of the DD's */
	      cv  = zerov;
	      for (q = 0; q < Q; q++)
		{ /* using cmpgt below tests if DD changed any DMO(q) *without* conditional branch */
		  sv         = _mm_add_ps(dcv, DMO(dpc,q));
		  cv         = _mm_or_ps(cv, _mm_cmpgt_ps(sv, DMO(dpc,q)));
		  DMO(dpc,q) = sv;                           /* store new DMO(q)             */
		  dcv        = _mm_mul_ps(dcv, *tp);  tp++;  /* note, extend dcv, not DMO(q) */
		}
	      if (! _mm_movemask_ps(cv)) break;  /* DD's didn't change any DMO(q)? Then done, break out. */
	    }
	}

      /* Add D's to xEv */
      for (q = 0; q < Q; q++) xEv = _mm_add_ps(DMO(dpc,q), xEv);

      /* Finally the "special" states, which start from Mk->E (->C, ->J->B) */
      /* The following incantation is a horizontal sum of xEv's elements */
      /* These must follow DD calculations, because D's contribute to E in Forward
       * (as opposed to Viterbi)
       */
      xEv = _mm_add_ps(xEv, _mm_shuffle_ps(xEv, xEv, _MM_SHUFFLE(0, 3, 2, 1)));
      xEv = _mm_add_ps(xEv, _mm_shuffle_ps(xEv, xEv, _MM_SHUFFLE(1, 0, 3, 2)));
      _mm_store_ss(&xE, xEv);

      xN =  xN * om->xf[p7O_N][p7O_LOOP];
      xC = (xC * om->xf[p7O_C][p7O_LOOP]) + (xE * om->xf[p7O_E][p7O_MOVE]);
      xJ = (xJ * om->xf[p7O_J][p7O_LOOP]) + (xE * om->xf[p7O_E][p7O_LOOP]);
      xB = (xJ * om->xf[p7O_J][p7O_MOVE]) + (xN * om->xf[p7O_N][p7O_MOVE]);
      /* and now xB will carry over into next i, and xC carries over after i=L */

      /* Sparse rescaling. xE above threshold? trigger a rescaling event. */
      if (xE > 1.0e4)  /* that's a little less than e^10, ~10% of our dynamic range */
	{
	  xN  = xN / xE;
	  xC  = xC / xE;
	  xJ  = xJ / xE;
	  xB  = xB / xE;
	  xEv = _mm_set1_ps(1.0 / xE);
	  for (q = 0; q < Q; q++)
	    {
	      MMO(dpc,q) = _mm_mul_ps(MMO(dpc,q), xEv);
	      DMO(dpc,q) = _mm_mul_ps(DMO(dpc,q), xEv);
	      IMO(dpc,q) = _mm_mul_ps(IMO(dpc,q), xEv);
	    }
	  ox->xmx[i*p7X_NXCELLS+p7X_SCALE] = xE;
	  ox->totscale += log(xE);
	  xE = 1.0;
	}
      else ox->xmx[i*p7X_NXCELLS+p7X_SCALE] = 1.0;

      /* Storage of the specials. We could've stored these already
       * but using xE, etc. variables makes it easy to convert this
       * code to O(M) memory versions just by deleting storage steps.
       */
      ox->xmx[i*p7X_NXCELLS+p7X_E] = xE;
      ox->xmx[i*p7X_NXCELLS+p7X_N] = xN;
      ox->xmx[i*p7X_NXCELLS+p7X_J] = xJ;
      ox->xmx[i*p7X_NXCELLS+p7X_B] = xB;
      ox->xmx[i*p7X_NXCELLS+p7X_C] = xC;

#if p7_DEBUGGING
      if (ox->debugging) p7_omx_DumpFBRow(ox, TRUE, i, 9, 5, xE, xN, xJ, xB, xC);  /* logify=TRUE, <rowi>=i, width=9, precision=5 */
#endif
    } /* end loop over sequence residues 1..L */

  /* finally C->T, and flip total score back to log space (nats) */
  /* On overflow, xC is inf or nan (nan arises because inf*0 = nan). */
  /* On an underflow (which shouldn't happen), we counterintuitively return infinity:
   * the effect of this is to force the caller to rescore us with full range.
   */
  if      (isnan(xC))        ESL_EXCEPTION(eslERANGE, "forward score is NaN");
  else if (L>0 && xC == 0.0) ESL_EXCEPTION(eslERANGE, "forward score underflow (is 0.0)");      /* if L==0, xC *should* be 0.0; J5/118 */
  else if (isinf(xC) == 1)   ESL_EXCEPTION(eslERANGE, "forward score overflow (is infinity)");

  if (opt_sc != NULL) *opt_sc = ox->totscale + log(xC * om->xf[p7O_C][p7O_MOVE]);
  return eslOK;
}
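/* forward_engine() is designed to be driven through thin wrappers that pick
 * the memory mode via <do_full>: with do_full=TRUE every row i=0..L is kept
 * (so Backward/posterior decoding can reuse the matrix), while with
 * do_full=FALSE the expression dpf[do_full*i] always resolves to row 0, so
 * the row is overwritten in place and only O(M) DP memory is touched. The
 * sketch below illustrates that wrapping under stated assumptions: the
 * wrapper names and the guard macro are hypothetical, and the real public
 * entry points also validate that <ox> was allocated large enough, which is
 * omitted here.
 */
#ifdef p7IMPL_FWD_WRAPPER_SKETCH
static int
forward_full_sketch(const ESL_DSQ *dsq, int L, const P7_OPROFILE *om, P7_OMX *ox, float *opt_sc)
{
  /* do_full = TRUE: store all rows, O(ML) memory */
  return forward_engine(TRUE,  dsq, L, om, ox, opt_sc);
}

static int
forward_parser_sketch(const ESL_DSQ *dsq, int L, const P7_OPROFILE *om, P7_OMX *ox, float *opt_sc)
{
  /* do_full = FALSE: reuse row 0 for every i, O(M) memory; only the
   * specials (xmx) and the final score survive                        */
  return forward_engine(FALSE, dsq, L, om, ox, opt_sc);
}
#endif /* p7IMPL_FWD_WRAPPER_SKETCH */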