void afLib::loop(void) {
    if (isIdle() && (queueGet(&_request.messageType, &_request.requestId, &_request.attrId,
                              &_request.valueLen, &_request.p_value) == afSUCCESS)) {
        switch (_request.messageType) {
            case MSG_TYPE_GET:
                doGetAttribute(_request.requestId, _request.attrId);
                break;

            case MSG_TYPE_SET:
                doSetAttribute(_request.requestId, _request.attrId, _request.valueLen, _request.p_value);
                break;

            case MSG_TYPE_UPDATE:
                doUpdateAttribute(_request.requestId, _request.attrId, 0, _request.valueLen, _request.p_value);
                break;

            default:
                _theLog->println("loop: unknown request type!");
        }
    }

    if (_request.p_value != NULL) {
        delete (_request.p_value);
        _request.p_value = NULL;
    }

    checkInterrupt();
}
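/* Usage sketch (not part of afLib itself): loop() does no blocking work, so the
   enclosing Arduino sketch must call it on every pass of its own loop().  The
   instance name af_lib below is an assumption for illustration only. */
void loop() {
    // service queued GET/SET/UPDATE requests and re-check the interrupt flag
    af_lib->loop();
}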
bool readTouchInputs() {
    if (!checkInterrupt()) {
        // read the touch state from the MPR121
        Wire.requestFrom(0x5A, 2);
        byte LSB = Wire.read();
        byte MSB = Wire.read();
        uint16_t touched = ((MSB << 8) | LSB); // 16 bits that make up the touch states

        for (int i = 0; i < 12; i++) { // check which electrodes were pressed
            if (touched & (1 << i)) {
                if (touchStates[i] == 0) {
                    // electrode i was just touched
                    touchStates[i] = 1;
                    return true;
                }
                touchStates[i] = 1;
            } else {
                touchStates[i] = 0;
            }
        }
    }
    return false;
}
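/* Usage sketch (not from the original source): poll readTouchInputs() from the main
   Arduino loop() and react only to newly detected touches.  The serial output and
   the 10 ms poll interval are assumptions for illustration; Wire and the MPR121 are
   assumed to have been initialized in setup(). */
void loop() {
    if (readTouchInputs()) {           // true only when a new electrode press is seen
        Serial.println("new touch detected");
    }
    delay(10);
}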
SEXP rph_gff_featureBits(SEXP gffListP, SEXP orP, SEXP returnGffP) {
  int numGff, i, j, or, returnGff;
  long numbit = 0;
  List *gfflist;
  GFF_Set *gff, *newgff = NULL;
  GFF_Feature *feat, *newfeat;
  SEXP rv;

  numGff = length(gffListP);
  gfflist = lst_new_ptr(numGff);
  // Rf_PrintValue(gffListP);
  for (i = 0; i < numGff; i++) {
    gff = (GFF_Set*)EXTPTR_PTR(VECTOR_ELT(gffListP, i));
    lst_push_ptr(gfflist, gff);
    gff_register_protect(gff);
  }
  or = LOGICAL_VALUE(orP);
  returnGff = LOGICAL_VALUE(returnGffP);

  if (!or && numGff >= 2) {
    newgff = gff_overlap_gff(lst_get_ptr(gfflist, 0), lst_get_ptr(gfflist, 1),
                             1, -1.0, FALSE, TRUE, NULL);
    numbit = gff_flatten_mergeAll(newgff);
    for (i = 2; i < numGff; i++) {
      checkInterrupt();
      gff = gff_overlap_gff(newgff, lst_get_ptr(gfflist, i),
                            1, -1.0, FALSE, TRUE, NULL);
      numbit = gff_flatten_mergeAll(gff);
      gff_free_set(newgff);
      newgff = gff;
    }
  } else {
    newgff = gff_new_set();
    for (i = 0; i < numGff; i++) {
      gff = (GFF_Set*)lst_get_ptr(gfflist, i);
      for (j = 0; j < lst_size(gff->features); j++) {
        checkInterruptN(j, 1000);
        feat = lst_get_ptr(gff->features, j);
        newfeat = gff_new_feature_copy(feat);
        lst_push_ptr(newgff->features, newfeat);
      }
    }
    numbit = gff_flatten_mergeAll(newgff);
  }

  if (returnGff)
    return rph_gff_new_extptr(newgff);

  if (numbit > INT_MAX) {
    PROTECT(rv = allocVector(REALSXP, 1));
    REAL(rv)[0] = numbit;
  } else {
    PROTECT(rv = allocVector(INTSXP, 1));
    INTEGER(rv)[0] = numbit;
  }
  UNPROTECT(1);
  return rv;
}
void ms_print(FILE *F, MS *ms) {
  int i;
  for (i = 0; i < ms->nseqs; i++) {
    checkInterrupt();
    fprintf(F, " Name %s\n", ms->names[i]);
    fprintf(F, " Offset %d\n", ms->idx_offsets[i]);
    fprintf(F, " Seq %s\n", ms->seqs[i]);
    if (i != (ms->nseqs - 1))
      fprintf(F, "\n");
  }
}
void ms_print_fasta(FILE *F, MS *ms) {
  int i, j, k, seqLen;
  for (i = 0; i < ms->nseqs; i++) {
    checkInterrupt();
    fprintf(F, ">%s\n", ms->names[i]);
    seqLen = (int)strlen(ms->seqs[i]);
    for (j = 0; j < seqLen; j += OUTPUT_LINE_LEN) {
      checkInterruptN(j, 100);
      for (k = 0; k < OUTPUT_LINE_LEN && j + k < seqLen; k++)
        fprintf(F, "%c", ms->seqs[i][j + k]);
      fprintf(F, "\n");
    }
  }
}
/* Perform a GERP-like computation for each feature.  Computes expected
   number of subst. under neutrality (feat_nneut), expected number after
   rescaling by ML (feat_nobs), expected number of rejected substitutions
   (feat_nrejected), and number of species with data (feat_nspec).  If any
   arrays are NULL, values will not be retained.  Gaps and missing data are
   handled by working with the induced subtree. */
void ff_gerp(TreeModel *mod, MSA *msa, GFF_Set *gff, mode_type mode,
             double *feat_nneut, double *feat_nobs, double *feat_nrejected,
             double *feat_nspec, FILE *logf) {
  int i, j, nspec = 0;
  double nneut, scale, lnl;
  int *has_data = smalloc(mod->tree->nnodes * sizeof(int));
  FeatFitData *d;

  /* init FeatFitData */
  d = ff_init_fit_data(mod, msa, ALL, NNEUT, FALSE);

  /* iterate through features */
  for (i = 0; i < lst_size(gff->features); i++) {
    GFF_Feature *f = lst_get_ptr(gff->features, i);
    checkInterrupt();
    ff_find_missing_branches(mod, msa, f, has_data, &nspec);

    if (nspec < 3)
      nneut = scale = 0;
    else {
      vec_set(d->cdata->params, 0, d->cdata->init_scale);
      d->feat = f;

      opt_newton_1d(ff_likelihood_wrapper_1d, &d->cdata->params->data[0], d,
                    &lnl, SIGFIGS, d->cdata->lb->data[0], d->cdata->ub->data[0],
                    logf, NULL, NULL);
      /* turns out to be faster to use numerical rather than exact
         derivatives (judging by col case) */

      scale = d->cdata->params->data[0];
      for (j = 1, nneut = 0; j < mod->tree->nnodes; j++)  /* node 0 is root */
        if (has_data[j])
          nneut += ((TreeNode*)lst_get_ptr(mod->tree->nodes, j))->dparent;
    }

    if (feat_nspec != NULL) feat_nspec[i] = (double)nspec;
    if (feat_nneut != NULL) feat_nneut[i] = nneut;
    if (feat_nobs != NULL) feat_nobs[i] = scale * nneut;
    if (feat_nrejected != NULL) {
      feat_nrejected[i] = nneut * (1 - scale);
      if (mode == ACC) feat_nrejected[i] *= -1;
      else if (mode == NNEUT) feat_nrejected[i] = fabs(feat_nrejected[i]);
    }
  }
  ff_free_fit_data(d);
  sfree(has_data);
}
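/* Hypothetical driver sketch for ff_gerp (not part of the library): allocate one
   output slot per feature for each statistic and run the computation in CONACC
   mode with no optimization log.  mod, msa and gff are assumed to have been
   loaded elsewhere. */
static void run_gerp_example(TreeModel *mod, MSA *msa, GFF_Set *gff) {
  int nfeats = lst_size(gff->features);
  double *nneut = smalloc(nfeats * sizeof(double));
  double *nobs  = smalloc(nfeats * sizeof(double));
  double *nrej  = smalloc(nfeats * sizeof(double));
  double *nspec = smalloc(nfeats * sizeof(double));
  /* one value per feature is written into each array; pass NULL to skip a statistic */
  ff_gerp(mod, msa, gff, CONACC, nneut, nobs, nrej, nspec, NULL);
  sfree(nneut); sfree(nobs); sfree(nrej); sfree(nspec);
}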
void GroveMultiTouch::readTouchInputs() {
    if (!checkInterrupt()) {
        // read the touch state from the MPR121
        Wire.requestFrom(0x5A, 2);
        byte LSB = Wire.read();
        byte MSB = Wire.read();
        uint16_t touched = ((MSB << 8) | LSB); // 16 bits that make up the touch states

        for (int i = 0; i < 12; i++) { // check which electrodes were pressed
            if (touched & (1 << i)) {
                /*
                if (touchStates[i] == 0) {
                    // pin i was just touched
                    Serial.print("pin ");
                    Serial.print(i);
                    Serial.println(" was just touched");
                } else if (touchStates[i] == 1) {
                    // pin i is still being touched
                }
                */
                _touchStates[i] = 1;
            } else {
                /*
                if (touchStates[i] == 1) {
                    Serial.print("pin ");
                    Serial.print(i);
                    Serial.println(" is no longer being touched");
                    // pin i is no longer being touched
                }
                */
                _touchStates[i] = 0;
            }
        }
    }
}
/* Based on a local pairwise alignment object, estimate the coordinate in
   the target sequence corresponding to the specified coordinate in the
   query sequence.  Currently assumes alignment blocks, and gapless
   alignments within them, are in sorted order (wrt query sequence).  Also
   currently does a linear search (should use binary search).  A value of
   -1 is returned if no reasonable estimate is possible.  This routine
   should only be used with relatively close, generally orthologous
   sequences, having good synteny. */
int la_get_target_coord(LocalPwAlignment *lpwa, int query_coord, adjust_dir adjust) {
  int i, j;
  int q1 = -1, t1 = -1, t2 = -1;
  // int q2 = -2; //set but not used
  AlignmentBlock *last_ab = NULL;
  GaplessAlignment *last_ga = NULL;

  /* find query coords q1 and q2 bracketing the position in question, and
     corresponding target coords t1 and t2 */
  for (i = 0; i < lst_size(lpwa->alignment_blocks); i++) {
    AlignmentBlock *ab = lst_get_ptr(lpwa->alignment_blocks, i);
    checkInterrupt();
    if (!(last_ab == NULL || last_ab->query_end < query_coord))
      die("ERROR la_get_target_coord: bad value for last_ab\n");

    if (ab->query_beg > query_coord) {
      /* coord falls between alignment blocks */
      if (last_ab == NULL) {    /* occurs at beginning */
        q1 = t1 = 0;
        // q2 = ab->query_beg;
        t2 = ab->target_beg;    /* t2 is a target coordinate */
      }
      else {
        q1 = last_ab->query_end;
        // q2 = ab->query_beg;
        t1 = last_ab->target_end;
        t2 = ab->target_beg;
      }
      break;
    }
    else if (ab->query_end >= query_coord) {
      /* coord falls within an alignment block; need to look at the gapless
         alignments */
      for (j = 0; j < lst_size(ab->gapless_alns); j++) {
        GaplessAlignment *ga = lst_get_ptr(ab->gapless_alns, j);
        if (!(last_ga == NULL || last_ga->query_end < query_coord))
          die("ERROR la_get_target_coord: bad value for last_ga\n");

        if (ga->query_beg > query_coord) {
          q1 = last_ga->query_end;
          // q2 = ga->query_beg;
          t1 = last_ga->target_end;
          t2 = ga->target_beg;
          break;
        }
        else if (ga->query_end >= query_coord)
          /* coord falls within gapless alignment -- this case is easy */
          return (query_coord - ga->query_beg + ga->target_beg);

        last_ga = ga;           /* keep looking */
      }
      if (q1 == -1)
        die("ERROR la_get_target_coord: bad coords\n");
      /* coords must be assigned above; otherwise the coords for the block
         must have been wrong */
      break;
    }

    last_ab = ab;               /* keep looking */
  }

  if (q1 == -1) {               /* coord must occur *beyond* all alignment blocks */
    q1 = last_ab->query_end;
    // q2 = lpwa->query_len-1;
    t1 = last_ab->target_end;
    t2 = lpwa->target_len-1;
  }

  if (t2 < t1) return -1;

  return (adjust == ADJUSTRIGHT ? t2 : t1);
}
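/* Hypothetical usage sketch (not part of the library): map a single query
   coordinate onto the target sequence, preferring the right edge of the
   bracketing interval.  lpwa and the coordinate 12345 are assumptions for
   illustration only. */
static void map_coord_example(LocalPwAlignment *lpwa) {
  int target_pos = la_get_target_coord(lpwa, 12345, ADJUSTRIGHT);
  if (target_pos == -1)
    fprintf(stderr, "no reasonable estimate for query coordinate 12345\n");
  else
    printf("query 12345 -> target %d\n", target_pos);
}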
/* If query_seq and target_seq are NULL, an attempt will be made to read
   them from the filenames contained in the LocalPwAlignment object (FASTA
   format is assumed). */
MSA* la_to_msa(LocalPwAlignment *lpwa, int force_global) {
  int i, j, k, len;
  char **names = (char**)smalloc(2 * sizeof(char*));
  char **seqs = (char**)smalloc(2 * sizeof(char*));
  String *query_seq = lpwa->query_seq, *target_seq = lpwa->target_seq;
  String *qseq = str_new(query_seq->length);
  String *tseq = str_new(target_seq->length);
  GaplessAlignment *lga = NULL;

  names[0] = copy_charstr(lpwa->query_name->chars);
  names[1] = copy_charstr(lpwa->target_name->chars);

  for (i = 0; i < lst_size(lpwa->alignment_blocks); i++) {
    AlignmentBlock* b = lst_get_ptr(lpwa->alignment_blocks, i);
    checkInterrupt();
    for (j = 0; j < lst_size(b->gapless_alns); j++) {
      GaplessAlignment *ga = lst_get_ptr(b->gapless_alns, j);

      if (lga == NULL) {
        for (k = 0; k < ga->query_beg-1; k++) {
          str_append_char(qseq, query_seq->chars[k]);
          str_append_char(tseq, GAP_CHAR);
        }
        if (force_global) {
          for (k = 0; k < ga->target_beg-1; k++) {
            str_append_char(qseq, GAP_CHAR);
            str_append_char(tseq, target_seq->chars[k]);
          }
        }
      }
      else {
        if (lga->query_end >= ga->query_beg ||
            (force_global && lga->target_end >= ga->target_beg)) {
          die("ERROR: overlapping alignment segments.\n");
        }

        if (j > 0 && lga->query_end == ga->query_beg-1) { /* gap in query seq */
          for (k = lga->target_end; k < ga->target_beg-1; k++) {
            str_append_char(qseq, GAP_CHAR);
            str_append_char(tseq, target_seq->chars[k]);
          }
        }
        else {                                            /* gap in target seq */
          for (k = lga->query_end; k < ga->query_beg-1; k++) {
            str_append_char(qseq, query_seq->chars[k]);
            str_append_char(tseq, GAP_CHAR);
          }
          if (force_global) {
            for (k = lga->target_end; k < ga->target_beg-1; k++) {
              str_append_char(qseq, GAP_CHAR);
              str_append_char(tseq, target_seq->chars[k]);
            }
          }
        }
      }

      for (k = 0; k < ga->query_end - ga->query_beg + 1; k++) {
        str_append_char(qseq, query_seq->chars[ga->query_beg + k - 1]);
        str_append_char(tseq, target_seq->chars[ga->target_beg + k - 1]);
      }

      lga = ga;
    }
  }

  for (k = lga->query_end; k < query_seq->length; k++) {
    str_append_char(qseq, query_seq->chars[k]);
    str_append_char(tseq, GAP_CHAR);
  }
  if (force_global) {
    for (k = lga->target_end; k < target_seq->length; k++) {
      str_append_char(qseq, GAP_CHAR);
      str_append_char(tseq, target_seq->chars[k]);
    }
  }

  seqs[0] = qseq->chars;
  seqs[1] = tseq->chars;
  qseq->chars = NULL;
  tseq->chars = NULL;
  len = qseq->length;
  str_free(qseq);
  str_free(tseq);
  return msa_new(seqs, names, 2, len, NULL);
}
/* Subtree version of score test */
void ff_score_tests_sub(TreeModel *mod, MSA *msa, GFF_Set *gff, mode_type mode,
                        double *feat_pvals, double *feat_null_scales,
                        double *feat_derivs, double *feat_sub_derivs,
                        double *feat_teststats, FILE *logf) {
  int i;
  FeatFitData *d, *d2;
  Vector *grad = vec_new(2);
  Matrix *fim = mat_new(2, 2);
  double lnl, teststat;
  FimGrid *grid;
  List *inside = NULL, *outside = NULL;
  TreeModel *modcpy = tm_create_copy(mod);  /* need separate copy of tree model
                                               with different internal scaling
                                               data for supertree/subtree case */

  /* init FeatFitData -- one for null model, one for alt */
  d = ff_init_fit_data(modcpy, msa, ALL, NNEUT, FALSE);
  d2 = ff_init_fit_data(mod, msa, SUBTREE, NNEUT, FALSE);
  /* mod has the subtree info, modcpy does not */

  /* precompute Fisher information matrices for a grid of scale values */
  grid = col_fim_grid_sub(mod);

  /* prepare lists of leaves inside and outside root, for use in checking
     for informative substitutions */
  if (mod->subtree_root != NULL) {
    inside = lst_new_ptr(mod->tree->nnodes);
    outside = lst_new_ptr(mod->tree->nnodes);
    tr_partition_leaves(mod->tree, mod->subtree_root, inside, outside);
  }

  /* iterate through features */
  for (i = 0; i < lst_size(gff->features); i++) {
    checkInterrupt();
    d->feat = lst_get_ptr(gff->features, i);

    /* first check for informative substitution data in feature; if none,
       don't waste time computing likelihoods */
    if (!ff_has_data_sub(mod, msa, d->feat, inside, outside)) {
      teststat = 0;
      vec_zero(grad);
    }
    else {
      vec_set(d->cdata->params, 0, d->cdata->init_scale);
      opt_newton_1d(ff_likelihood_wrapper_1d, &d->cdata->params->data[0], d,
                    &lnl, SIGFIGS, d->cdata->lb->data[0], d->cdata->ub->data[0],
                    logf, NULL, NULL);
      /* turns out to be faster to use numerical rather than exact
         derivatives (judging by col case) */

      d2->feat = d->feat;
      d2->cdata->mod->scale = d->cdata->params->data[0];
      d2->cdata->mod->scale_sub = 1;
      tm_set_subst_matrices(d2->cdata->mod);
      ff_scale_derivs_subtree(d2, grad, NULL, d2->cdata->fels_scratch);

      fim = col_get_fim_sub(grid, d2->cdata->mod->scale);
      mat_scale(fim, d->feat->end - d->feat->start + 1);
      /* scale column-by-column FIM by length of feature
         (expected values are additive) */

      teststat = grad->data[1]*grad->data[1] /
        (fim->data[1][1] - fim->data[0][1]*fim->data[1][0]/fim->data[0][0]);

      if (teststat < 0) {
        fprintf(stderr, "WARNING: teststat < 0 (%f)\n", teststat);
        teststat = 0;
      }

      if ((mode == ACC && grad->data[1] < 0) ||
          (mode == CON && grad->data[1] > 0))
        teststat = 0;           /* derivative points toward boundary;
                                   truncate at 0 */
      mat_free(fim);
    }

    if (feat_pvals != NULL) {
      if (mode == NNEUT || mode == CONACC)
        feat_pvals[i] = chisq_cdf(teststat, 1, FALSE);
      else
        feat_pvals[i] = half_chisq_cdf(teststat, 1, FALSE);
      /* assumes 50:50 mix of chisq and point mass at zero */

      if (feat_pvals[i] < 1e-20)
        feat_pvals[i] = 1e-20;  /* approx limit of eval of tail prob;
                                   pvals of 0 cause problems */

      if (mode == CONACC && grad->data[1] > 0)
        feat_pvals[i] *= -1;    /* mark as acceleration */
    }

    /* store scales, derivatives, and test statistics if necessary */
    if (feat_null_scales != NULL) feat_null_scales[i] = d->cdata->params->data[0];
    if (feat_derivs != NULL) feat_derivs[i] = grad->data[0];
    if (feat_sub_derivs != NULL) feat_sub_derivs[i] = grad->data[1];
    if (feat_teststats != NULL) feat_teststats[i] = teststat;
  }

  ff_free_fit_data(d);
  ff_free_fit_data(d2);
  vec_free(grad);
  modcpy->estimate_branchlens = TM_BRANCHLENS_ALL;
  /* have to revert for tm_free to work correctly */
  tm_free(modcpy);
  col_free_fim_grid(grid);
  if (inside != NULL) lst_free(inside);
  if (outside != NULL) lst_free(outside);
}
/* Score test */
void ff_score_tests(TreeModel *mod, MSA *msa, GFF_Set *gff, mode_type mode,
                    double *feat_pvals, double *feat_derivs,
                    double *feat_teststats) {
  int i;
  FeatFitData *d;
  double first_deriv, teststat, fim;

  /* init FeatFitData */
  d = ff_init_fit_data(mod, msa, ALL, NNEUT, FALSE);

  /* precompute FIM */
  fim = col_estimate_fim(mod);
  if (fim < 0)
    die("ERROR: negative fisher information in ff_score_tests\n");

  /* iterate through features */
  for (i = 0; i < lst_size(gff->features); i++) {
    checkInterrupt();
    d->feat = lst_get_ptr(gff->features, i);

    /* first check for actual substitution data in feature; if none, don't
       waste time computing likelihoods */
    if (!ff_has_data(mod, msa, d->feat)) {
      teststat = 0;
      first_deriv = 1;
    }
    else {
      ff_scale_derivs(d, &first_deriv, NULL, d->cdata->fels_scratch);

      teststat = first_deriv*first_deriv /
        ((d->feat->end - d->feat->start + 1) * fim);
      /* scale column-by-column FIM by length of feature
         (expected values are additive) */

      if ((mode == ACC && first_deriv < 0) ||
          (mode == CON && first_deriv > 0))
        teststat = 0;           /* derivative points toward boundary;
                                   truncate at 0 */
    }

    if (feat_pvals != NULL) {
      if (mode == NNEUT || mode == CONACC)
        feat_pvals[i] = chisq_cdf(teststat, 1, FALSE);
      else
        feat_pvals[i] = half_chisq_cdf(teststat, 1, FALSE);
      /* assumes 50:50 mix of chisq and point mass at zero */

      if (feat_pvals[i] < 1e-20)
        feat_pvals[i] = 1e-20;  /* approx limit of eval of tail prob;
                                   pvals of 0 cause problems */

      if (mode == CONACC && first_deriv > 0)
        feat_pvals[i] *= -1;    /* mark as acceleration */
    }

    /* store derivatives and test statistics if necessary */
    if (feat_derivs != NULL) feat_derivs[i] = first_deriv;
    if (feat_teststats != NULL) feat_teststats[i] = teststat;
  }
  ff_free_fit_data(d);
}
/* Subtree version of LRT */
void ff_lrts_sub(TreeModel *mod, MSA *msa, GFF_Set *gff, mode_type mode,
                 double *feat_pvals, double *feat_null_scales,
                 double *feat_scales, double *feat_sub_scales,
                 double *feat_llrs, FILE *logf) {
  int i;
  FeatFitData *d, *d2;
  double null_lnl, alt_lnl, delta_lnl;
  TreeModel *modcpy;
  List *inside = NULL, *outside = NULL;

  modcpy = tm_create_copy(mod);   /* need separate copy of tree model with
                                     different internal scaling data for
                                     supertree/subtree case */

  /* init FeatFitData -- one for null model, one for alt */
  modcpy->estimate_branchlens = TM_BRANCHLENS_ALL;
  modcpy->subtree_root = NULL;
  d = ff_init_fit_data(modcpy, msa, ALL, NNEUT, FALSE);
  d2 = ff_init_fit_data(mod, msa, SUBTREE, mode, FALSE);
  /* mod has the subtree info, modcpy does not */

  /* prepare lists of leaves inside and outside root, for use in checking
     for informative substitutions */
  if (mod->subtree_root != NULL) {
    inside = lst_new_ptr(mod->tree->nnodes);
    outside = lst_new_ptr(mod->tree->nnodes);
    tr_partition_leaves(mod->tree, mod->subtree_root, inside, outside);
  }

  /* iterate through features */
  for (i = 0; i < lst_size(gff->features); i++) {
    GFF_Feature *f = lst_get_ptr(gff->features, i);
    checkInterrupt();

    /* first check for informative substitution data in feature; if none,
       don't waste time computing likelihoods */
    if (!ff_has_data_sub(mod, msa, f, inside, outside)) {
      delta_lnl = 0;
      d->cdata->params->data[0] = d2->cdata->params->data[0] =
        d2->cdata->params->data[1] = 1;
    }
    else {
      /* compute log likelihoods under null and alt hypotheses */
      d->feat = f;
      vec_set(d->cdata->params, 0, d->cdata->init_scale);
      opt_newton_1d(ff_likelihood_wrapper_1d, &d->cdata->params->data[0], d,
                    &null_lnl, SIGFIGS, d->cdata->lb->data[0],
                    d->cdata->ub->data[0], logf, NULL, NULL);
      null_lnl *= -1;

      d2->feat = f;
      vec_set(d2->cdata->params, 0, d->cdata->params->data[0]);
      /* init to previous estimate to save time */
      vec_set(d2->cdata->params, 1, d2->cdata->init_scale_sub);
      // vec_set(d2->cdata->params, 1, 0.01);
      if (opt_bfgs(ff_likelihood_wrapper, d2->cdata->params, d2, &alt_lnl,
                   d2->cdata->lb, d2->cdata->ub, logf, NULL,
                   OPT_HIGH_PREC, NULL, NULL) != 0)
        ;  /* do nothing; nonzero exit typically occurs when max iterations
              is reached; a warning is printed to the log */
      alt_lnl *= -1;
      delta_lnl = alt_lnl - null_lnl;

      /* This is a hack; it would be better to figure out why the
         optimization sometimes fails here.  If we get a significantly
         negative lnL, re-initialize params so that they are identical to
         the null model params and re-start. */
      if (delta_lnl <= -0.05) {
        d2->feat = f;
        vec_set(d2->cdata->params, 0, d->cdata->params->data[0]);
        vec_set(d2->cdata->params, 1, 1.0);
        if (opt_bfgs(ff_likelihood_wrapper, d2->cdata->params, d2, &alt_lnl,
                     d2->cdata->lb, d2->cdata->ub, logf, NULL,
                     OPT_HIGH_PREC, NULL, NULL) != 0)
          ;  /* do nothing, as above */
        alt_lnl *= -1;
        delta_lnl = alt_lnl - null_lnl;
        if (delta_lnl <= -0.1)
          die("ERROR ff_lrts_sub: delta_lnl (%f) <= -0.1\n", delta_lnl);
      }
      if (delta_lnl < 0) delta_lnl = 0;
    }

    /* compute p-vals via chi-sq */
    if (feat_pvals != NULL) {
      if (mode == NNEUT || mode == CONACC)
        feat_pvals[i] = chisq_cdf(2*delta_lnl, 1, FALSE);
      else
        feat_pvals[i] = half_chisq_cdf(2*delta_lnl, 1, FALSE);
      /* assumes 50:50 mix of chisq and point mass at zero, due to bounding
         of param */

      if (feat_pvals[i] < 1e-20)
        feat_pvals[i] = 1e-20;  /* approx limit of eval of tail prob;
                                   pvals of 0 cause problems */

      if (mode == CONACC && d2->cdata->params->data[1] > 1)
        feat_pvals[i] *= -1;    /* mark as acceleration */
    }

    /* store scales and log likelihood ratios if necessary */
    if (feat_null_scales != NULL) feat_null_scales[i] = d->cdata->params->data[0];
    if (feat_scales != NULL) feat_scales[i] = d2->cdata->params->data[0];
    if (feat_sub_scales != NULL) feat_sub_scales[i] = d2->cdata->params->data[1];
    if (feat_llrs != NULL) feat_llrs[i] = delta_lnl;
  }

  ff_free_fit_data(d);
  ff_free_fit_data(d2);
  modcpy->estimate_branchlens = TM_BRANCHLENS_ALL;
  /* have to revert for tm_free to work correctly */
  tm_free(modcpy);
  if (inside != NULL) lst_free(inside);
  if (outside != NULL) lst_free(outside);
}
/* Perform a likelihood ratio test for each feature in a GFF, comparing the
   given null model with an alternative model that has a free scaling
   parameter for all branches.  Assumes a 0th order model, leaf-to-sequence
   mapping already available, prob matrices computed, sufficient stats
   available.  Computes p-values using the chi-sq distribution and stores
   them in feat_pvals.  Will optionally store the individual scale factors
   in feat_scales and raw log likelihood ratios in feat_llrs if these
   variables are non-NULL.  Must define mode as CON (for 0 <= scale <= 1),
   ACC (for 1 <= scale), NNEUT (0 <= scale), or CONACC (0 <= scale). */
void ff_lrts(TreeModel *mod, MSA *msa, GFF_Set *gff, mode_type mode,
             double *feat_pvals, double *feat_scales, double *feat_llrs,
             FILE *logf) {
  int i;
  FeatFitData *d;
  double null_lnl, alt_lnl, delta_lnl, this_scale = 1;

  /* init FeatFitData */
  d = ff_init_fit_data(mod, msa, ALL, mode, FALSE);

  /* iterate through features */
  for (i = 0; i < lst_size(gff->features); i++) {
    GFF_Feature *f = lst_get_ptr(gff->features, i);
    checkInterrupt();

    /* first check for actual substitution data in feature; if none, don't
       waste time computing likelihoods */
    if (!ff_has_data(mod, msa, f)) {
      delta_lnl = 0;
      this_scale = 1;
    }
    else {
      mod->scale = 1;
      tm_set_subst_matrices(mod);

      /* compute log likelihoods under null and alt hypotheses */
      null_lnl = ff_compute_log_likelihood(mod, msa, f,
                                           d->cdata->fels_scratch[0]);

      vec_set(d->cdata->params, 0, d->cdata->init_scale);
      d->feat = f;

      opt_newton_1d(ff_likelihood_wrapper_1d, &d->cdata->params->data[0], d,
                    &alt_lnl, SIGFIGS, d->cdata->lb->data[0],
                    d->cdata->ub->data[0], logf, NULL, NULL);
      /* turns out to be faster to use numerical rather than exact
         derivatives (judging by col case) */

      alt_lnl *= -1;
      this_scale = d->cdata->params->data[0];

      delta_lnl = alt_lnl - null_lnl;
      if (delta_lnl <= -0.01)
        die("ERROR ff_lrts: delta_lnl (%f) <= -0.01\n", delta_lnl);
      if (delta_lnl < 0) delta_lnl = 0;
    }

    /* compute p-vals via chi-sq */
    if (feat_pvals != NULL) {
      if (mode == NNEUT || mode == CONACC)
        feat_pvals[i] = chisq_cdf(2*delta_lnl, 1, FALSE);
      else
        feat_pvals[i] = half_chisq_cdf(2*delta_lnl, 1, FALSE);
      /* assumes 50:50 mix of chisq and point mass at zero, due to bounding
         of param */

      if (feat_pvals[i] < 1e-20)
        feat_pvals[i] = 1e-20;  /* approx limit of eval of tail prob;
                                   pvals of 0 cause problems */

      if (mode == CONACC && this_scale > 1)
        feat_pvals[i] *= -1;    /* mark as acceleration */
    }

    /* store scales and log likelihood ratios if necessary */
    if (feat_scales != NULL) feat_scales[i] = this_scale;
    if (feat_llrs != NULL) feat_llrs[i] = delta_lnl;
  }
  ff_free_fit_data(d);
  sfree(d);  // above function doesn't actually free object
}
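/* Hypothetical driver sketch for ff_lrts (not part of the library): run the test
   in CONACC mode and keep p-values, per-feature scales and log likelihood ratios.
   mod, msa and gff are assumed to be set up elsewhere; a negative p-value marks an
   accelerated feature, per the convention in the function above. */
static void run_lrts_example(TreeModel *mod, MSA *msa, GFF_Set *gff) {
  int nfeats = lst_size(gff->features);
  double *pvals  = smalloc(nfeats * sizeof(double));
  double *scales = smalloc(nfeats * sizeof(double));
  double *llrs   = smalloc(nfeats * sizeof(double));
  ff_lrts(mod, msa, gff, CONACC, pvals, scales, llrs, NULL);
  sfree(pvals); sfree(scales); sfree(llrs);
}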
void check_user_interrupt_master() {
  if (!is_aborted() && checkInterrupt()) {
    // REprintf("detected User interruption...\n");
    abort();
  }
}
SEXP rph_gff_dataframe(SEXP gffPtr) {
  GFF_Set *gff;
  GFF_Feature *feat;
  SEXP result, names, src, feature, start, end, score, strand, frame,
    attribute, header;
  int i, len, listlen, *intp;
  double *doublep;
  char strandStr[2];
  // first five columns are required; others may not be defined
  char gffCols[9][20] = {"seqname", "src", "feature", "start", "end",
                         "score", "strand", "frame", "attribute"};
  int have[9] = {1, 1, 1, 1, 1, 0, 0, 0, 0};
  int scorePos = 5, strandPos = 6, framePos = 7, attributePos = 8;
  SEXP vec[9];

  gff = (GFF_Set*)EXTPTR_PTR(gffPtr);
  gff_register_protect(gff);

  len = lst_size(gff->features);

  // first five columns are required: name, src, feature, start, end
  PROTECT(names = allocVector(STRSXP, len));
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    SET_STRING_ELT(names, i, mkChar(feat->seqname->chars));
  }
  vec[0] = names;
  checkInterrupt();

  PROTECT(src = allocVector(STRSXP, len));
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    SET_STRING_ELT(src, i, mkChar(feat->source->chars));
  }
  vec[1] = src;
  checkInterrupt();

  PROTECT(feature = allocVector(STRSXP, len));
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    SET_STRING_ELT(feature, i, mkChar(feat->feature->chars));
  }
  vec[2] = feature;
  checkInterrupt();

  PROTECT(start = NEW_INTEGER(len));
  intp = INTEGER_POINTER(start);
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    intp[i] = feat->start;
  }
  vec[3] = start;
  checkInterrupt();

  PROTECT(end = NEW_INTEGER(len));
  intp = INTEGER_POINTER(end);
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    intp[i] = feat->end;
  }
  vec[4] = end;
  checkInterrupt();

  PROTECT(score = NEW_NUMERIC(len));
  doublep = NUMERIC_POINTER(score);
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    if (feat->score_is_null)
      doublep[i] = NA_REAL;  // may have to include R_ext/Arith.h
    else {
      doublep[i] = feat->score;
      have[scorePos] = 1;
    }
  }
  vec[5] = score;
  checkInterrupt();

  PROTECT(strand = allocVector(STRSXP, len));
  strandStr[1] = '\0';
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    strandStr[0] = feat->strand;
    SET_STRING_ELT(strand, i, mkChar(strandStr));
    if (feat->strand != '.') have[strandPos] = 1;
  }
  vec[6] = strand;
  checkInterrupt();

  PROTECT(frame = NEW_INTEGER(len));
  intp = INTEGER_POINTER(frame);
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    if (feat->frame == GFF_NULL_FRAME)
      intp[i] = NA_INTEGER;
    else {
      have[framePos] = 1;
      intp[i] = feat->frame;
      if (feat->frame == 0) intp[i] = 0;
      else if (feat->frame == 1) intp[i] = 2;
      else if (feat->frame == 2) intp[i] = 1;
      else die("invalid frame %i in GFF", feat->frame);
    }
  }
  vec[7] = frame;
  checkInterrupt();

  PROTECT(attribute = allocVector(STRSXP, len));
  for (i = 0; i < len; i++) {
    feat = (GFF_Feature*)lst_get_ptr(gff->features, i);
    // suspect mkChar is not dealing well with empty string?
    // SET_STRING_ELT(attribute, i, mkChar(feat->attribute->chars));
    if (feat->attribute->length != 0) {
      have[attributePos] = 1;
      SET_STRING_ELT(attribute, i, mkChar(feat->attribute->chars));
    } else
      SET_STRING_ELT(attribute, i, mkChar("."));
  }
  vec[8] = attribute;
  checkInterrupt();

  listlen = 0;
  for (i = 0; i < 9; i++) listlen += have[i];

  PROTECT(header = allocVector(STRSXP, listlen));
  PROTECT(result = allocVector(VECSXP, listlen));
  listlen = 0;
  for (i = 0; i < 9; i++) {
    if (have[i]) {
      SET_STRING_ELT(header, listlen, mkChar(gffCols[i]));
      SET_VECTOR_ELT(result, listlen++, vec[i]);
    }
  }
  SET_NAMES(result, header);
  UNPROTECT(11);
  return result;
}