/*
 * The chosen [encryption | MAC | compression] algorithm to each
 * direction MUST be the first algorithm on the client's list
 * that is also on the server's list (RFC 4253 negotiation rule,
 * evaluated here from the client side).
 *
 * buf        - buffer positioned at the server's comma-separated name-list
 * localalgos - our (client) preference-ordered list, NULL-name terminated
 * goodguess  - out: set to 1 iff both sides' first choice matched
 *              (i.e. the peer's already-sent guessed packet is usable)
 *
 * Returns a pointer into localalgos[] for the chosen algorithm,
 * or NULL if there is no usable match.
 */
algo_type * cli_buf_match_algo(buffer* buf, algo_type localalgos[], int *goodguess) {
	unsigned char * algolist = NULL;
	unsigned char * remotealgos[MAX_PROPOSED_ALGO];
	unsigned int len;
	unsigned int count, i, j;
	algo_type * ret = NULL;

	*goodguess = 0;

	/* get the comma-separated list from the buffer ie "algo1,algo2,algo3"
	 * (buf_getstring allocates; freed at "out") */
	algolist = buf_getstring(buf, &len);
	TRACE(("cli_buf_match_algo: %s", algolist))
	if (len > MAX_PROPOSED_ALGO*(MAX_NAME_LEN+1)) {
		goto out; /* just a sanity check, no other use */
	}

	/* remotealgos will contain a list of the strings parsed out */
	/* We will have at least one string (even if it's just "") */
	remotealgos[0] = algolist;
	count = 1;
	/* Iterate through, replacing ','s with NULs, to split it into
	 * words. The split is done in place on the allocated copy. */
	for (i = 0; i < len; i++) {
		if (algolist[i] == '\0') {
			/* someone is trying something strange: an embedded NUL
			 * would desynchronise the parsed list, so reject it */
			goto out;
		}
		if (algolist[i] == ',') {
			algolist[i] = '\0';
			remotealgos[count] = &algolist[i+1];
			count++;
		}
		/* cap the number of parsed names; any remainder is ignored */
		if (count >= MAX_PROPOSED_ALGO) {
			break;
		}
	}

	/* iterate and find the first match: outer loop is OUR preference
	 * order, as required for the client side of the negotiation */
	for (j = 0; localalgos[j].name != NULL; j++) {
		if (localalgos[j].usable) {
			len = strlen(localalgos[j].name);
			for (i = 0; i < count; i++) {
				/* length check first so strncmp can't match a prefix */
				if (len == strlen(remotealgos[i])
						&& strncmp(localalgos[j].name, remotealgos[i], len) == 0) {
					if (i == 0 && j == 0) {
						/* was a good guess: both sides' first choice agreed */
						*goodguess = 1;
					}
					ret = &localalgos[j];
					goto out;
				}
			}
		}
	}

out:
	m_free(algolist);
	return ret;
}
/*
 * Dispose of a lexer string buffer: release the source text when this
 * buffer owns it (free_len > 0), then release the buffer object itself.
 */
static void str_buf_free(mp_lexer_str_buf_t *sb) {
    /* a zero free_len means the source memory belongs to the caller */
    if (sb->free_len > 0)
        m_free((char *)sb->src_beg, sb->free_len);

    m_del_obj(mp_lexer_str_buf_t, sb);
}
/*
 * Release the heap-allocated members of a tseg_text_t.
 * Note: the tseg_text_t structure itself is NOT freed here.
 */
void tseg_free(tseg_text_t *ts)
{
    m_free(ts->text);   /* the raw text buffer        */
    m_free(ts->m_seg);  /* the segment array          */
    m_free(ts->m_attr); /* the per-segment attributes */
}
/*
 * _mbufInsertBuf - wrap a user buffer <buf> in an mbuf cluster and insert
 * it into <mbufId> at <offset> bytes from <mbufSeg>.
 *
 * A descriptor is taken from the interrupt-locked free list (or a new batch
 * of MBUF_DESC_INC descriptors is allocated if the list is empty), an mBlk
 * and cluster block are obtained from the network pool, and the cluster is
 * pointed at <buf> with <freeRtn>/<freeArg> recorded for later release via
 * _mbufBufFree.
 *
 * RETURNS: the inserted mbuf segment, or NULL on any failure (errno set to
 * S_mbufLib_LENGTH_INVALID for a non-positive <len>).
 */
MBUF_SEG _mbufInsertBuf
    (
    MBUF_ID	mbufId,		/* mbuf ID which buffer is inserted */
    MBUF_SEG	mbufSeg,	/* mbuf base for <offset> */
    int		offset,		/* relative byte offset */
    caddr_t	buf,		/* user buffer for mbuf cluster */
    int		len,		/* number of bytes to insert */
    VOIDFUNCPTR	freeRtn,	/* user free routine */
    int		freeArg		/* argument to free routine */
    )
    {
    MBUF_ID	mbufIdNew;	/* mbuf ID containing <buf> */
    MBUF_DESC	mbufDesc;	/* desc for <buf> cluster */
    MBUF_SEG	mbufNew;	/* mbuf for <buf> cluster */
    CL_BLK_ID	pClBlk;		/* pointer to cluster blk */
    int		lockKey;	/* int lock cookie */
    int		ix;		/* counter for list init */

    if (len <= 0)				/* have to insert some bytes */
	{
	errno = S_mbufLib_LENGTH_INVALID;
	return (NULL);
	}

    MBUF_ID_CREATE (mbufIdNew);			/* create new mbuf ID for buf */
    if (mbufIdNew == NULL)
	return (NULL);

    /* pop a buffer descriptor off the free list; the list is shared with
     * interrupt context, so it is guarded by intLock()/intUnlock() */
    lockKey = intLock ();
    if ((mbufDesc = mbufDescHead) != NULL)	/* free list empty ? */
	{
	mbufDescHead = mbufDesc->mbufDescNext;	/* pop first desc off list */
	intUnlock (lockKey);
	}
    else					/* list is empty */
	{
	intUnlock (lockKey);
	if ((mbufDesc = (MBUF_DESC) KHEAP_ALLOC((sizeof (struct mbufDesc) *
	    MBUF_DESC_INC))) != NULL)		/* alloc more desc's */
	    {
	    /* chain the new batch together; the first element is consumed
	     * by this call, the remainder are spliced onto the free list */
	    for (ix = 0; ix < (MBUF_DESC_INC - 1); ix++)
		mbufDesc[ix].mbufDescNext = &mbufDesc[ix + 1];

	    lockKey = intLock ();
	    mbufDesc[ix].mbufDescNext = mbufDescHead;
	    mbufDescHead = mbufDesc->mbufDescNext;/* hook head onto new list */
	    intUnlock (lockKey);
	    }
	}

    if (mbufDesc == NULL)			/* able to get a new desc ? */
	{
	MBUF_ID_DELETE_EMPTY(mbufIdNew);
	return (NULL);
	}

    mbufDesc->buf = buf;			/* remember the user buffer */

    /* get mbuf for cluster */
    if ( (mbufNew = mBlkGet (_pNetDpool, M_WAIT, MT_DATA)) == NULL)
	{
	/* release on fail.
	 * NOTE(review): this re-push assumes mbufDesc->mbufDescNext still
	 * points at the current head (true only if nothing else popped a
	 * descriptor since our pop above) -- confirm against callers. */
	lockKey = intLock ();
	mbufDescHead = mbufDesc;
	intUnlock (lockKey);
	MBUF_ID_DELETE_EMPTY (mbufIdNew);
	return (NULL);
	}

    pClBlk = clBlkGet (_pNetDpool, M_WAIT);
    if (pClBlk == NULL)				/* out of cl Blks */
	{
	m_free (mbufNew);
	lockKey = intLock ();
	mbufDescHead = mbufDesc;
	intUnlock (lockKey);
	MBUF_ID_DELETE_EMPTY (mbufIdNew);
	return (NULL);
	}

    mbufNew->pClBlk = pClBlk;

    /* build <buf> into an mbuf cluster: the mbuf's data points directly at
     * the user memory, and the ext fields record how to release it later */
    mbufNew->m_data		= buf;
    mbufNew->m_len		= len;
    mbufNew->m_flags		|= M_EXT;
    mbufNew->m_extBuf		= buf;
    mbufNew->m_extSize		= len;
    mbufNew->m_extFreeRtn	= (FUNCPTR) _mbufBufFree;
    mbufNew->m_extRefCnt	= 1;
    mbufNew->m_extArg1		= (int) mbufDesc;
    mbufNew->m_extArg2		= (int) freeRtn;
    mbufNew->m_extArg3		= freeArg;

    mbufIdNew->mbufHead = mbufNew;		/* put cluster into new ID */

    /* insert the new mbuf ID with <buf> into <mbufId> */
    if ((mbufSeg = _mbufInsert (mbufId, mbufSeg, offset, mbufIdNew)) == NULL)
	{
	mbufNew->m_extArg2 = (int)NULL;		/* don't call freeRtn on fail */
	MBUF_ID_DELETE(mbufIdNew);
	}

    return (mbufSeg);				/* return inserted mbuf */
    }
/** Trains the ghmm_dmodel with a set of annotated sequences till convergence
    using gradient descent.
    Model must not have silent states. (checked in Python wrapper)

    The learning rate eta is adapted: halved when the model degenerates
    (positive log-performance), grown by 1.07 on significant improvement,
    and shrunk (.85 / .9) when a step gives no improvement.

    @return trained model / NULL pointer  success/error
    @param mo:  pointer to a ghmm_dmodel
    @param sq:  struct of annotated sequences
    @param eta: initial parameter eta (learning rate)
    @param no_steps  number of training steps */
ghmm_dmodel* ghmm_dmodel_label_gradient_descent (ghmm_dmodel* mo, ghmm_dseq * sq,
                                                 double eta, int no_steps)
{
#define CUR_PROC "ghmm_dmodel_label_gradient_descent"
  char * str;
  int runs = 0;
  double cur_perf, last_perf;
  ghmm_dmodel *last;          /* best model seen so far (rollback copy) */

  last = ghmm_dmodel_copy(mo);
  last_perf = compute_performance (last, sq);

  while (eta > GHMM_EPS_PREC && runs < no_steps) {
    runs++;
    if (-1 == gradient_descent_onestep(mo, sq, eta)) {
      ghmm_dmodel_free(&last);
      return NULL;
    }
    cur_perf = compute_performance(mo, sq);

    /* performance is a log-value: larger (less negative) is better */
    if (last_perf < cur_perf) {
      /* if model is degenerated, lower eta and try again */
      if (cur_perf > 0.0) {
        str = ighmm_mprintf(NULL, 0, "current performance = %g", cur_perf);
        GHMM_LOG(LINFO, str);
        m_free(str);
        ghmm_dmodel_free(&mo);
        mo = ghmm_dmodel_copy(last);
        eta *= .5;
      }
      else {
        /* Improvement insignificant, assume convergence */
        if (fabs (last_perf - cur_perf) < cur_perf * (-1e-8)) {
          ghmm_dmodel_free(&last);
          str = ighmm_mprintf(NULL, 0, "convergence after %d steps.", runs);
          GHMM_LOG(LINFO, str);
          m_free(str);
          /* BUG FIX: convergence is success -- return the trained model.
             Previously this returned 0 (NULL), which the documented
             contract reserves for errors. */
          return mo;
        }

        if (runs < 175 || 0 == runs % 50) {
          str = ighmm_mprintf(NULL, 0, "Performance: %g\t improvement: %g\t step %d",
                              cur_perf, cur_perf - last_perf, runs);
          GHMM_LOG(LINFO, str);
          m_free(str);
        }
        /* significant improvement, next iteration */
        ghmm_dmodel_free(&last);
        last = ghmm_dmodel_copy(mo);
        last_perf = cur_perf;
        eta *= 1.07;
      }
    }
    /* no improvement */
    else {
      if (runs < 175 || 0 == runs % 50) {
        str = ighmm_mprintf(NULL, 0, "Performance: %g\t !IMPROVEMENT: %g\t step %d",
                            cur_perf, cur_perf - last_perf, runs);
        GHMM_LOG(LINFO, str);
        m_free(str);
      }

      /* try another training step with a smaller eta (counts as a step) */
      runs++;
      eta *= .85;
      if (-1 == gradient_descent_onestep(mo, sq, eta)) {
        ghmm_dmodel_free(&last);
        return NULL;
      }
      cur_perf = compute_performance (mo, sq);
      str = ighmm_mprintf(NULL, 0, "Performance: %g\t ?Improvement: %g\t step %d",
                          cur_perf, cur_perf - last_perf, runs);
      GHMM_LOG(LINFO, str);
      m_free(str);

      /* improvement, save and proceed with next iteration */
      if (last_perf < cur_perf && cur_perf < 0.0) {
        ghmm_dmodel_free (&last);
        last = ghmm_dmodel_copy(mo);
        last_perf = cur_perf;
      }
      /* still no improvement, revert to saved model */
      else {
        runs--;
        ghmm_dmodel_free(&mo);
        mo = ghmm_dmodel_copy(last);
        eta *= .9;
      }
    }
  }
  ghmm_dmodel_free(&last);
  return mo;
#undef CUR_PROC
}
/*
 * Compress mbuf chain m into the socket buffer sb following mbuf tailm.
 * If tailm is null, the buffer is presumed empty. Also, as a side-effect,
 * increment the sockbuf counts for each mbuf in the chain.
 *
 * Mbufs that are emptied or coalesced are NOT freed inline; they are pushed
 * onto free_chain and released after the loop, because m_free() can block
 * and would break the atomicy of the sockbuf.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *tailm)
{
	int eor = 0;
	struct mbuf *free_chain = NULL;

	mbuftrackid(m, 23);

	sbcheck(sb);
	while (m) {
		struct mbuf *o;

		/* accumulate M_EOR so it can be re-applied to the final mbuf */
		eor |= m->m_flags & M_EOR;
		/*
		 * Disregard empty mbufs as long as we don't encounter
		 * an end-of-record or there is a trailing mbuf of
		 * the same type to propagate the EOR flag to.
		 *
		 * Defer the m_free() call because it can block and break
		 * the atomicy of the sockbuf.
		 */
		if (m->m_len == 0 && (eor == 0 ||
		    (((o = m->m_next) || (o = tailm)) &&
		     o->m_type == m->m_type))) {
			o = m->m_next;
			m->m_next = free_chain;
			free_chain = m;
			m = o;
			continue;
		}

		/* See if we can coalesce with preceding mbuf. */
		if (tailm && !(tailm->m_flags & M_EOR) && M_WRITABLE(tailm) &&
		    m->m_len <= MCLBYTES / 4 &&	/* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(tailm) &&
		    tailm->m_type == m->m_type) {
			u_long mbcnt_sz;

			/* append m's data into tailm's trailing space */
			bcopy(mtod(m, caddr_t),
			      mtod(tailm, caddr_t) + tailm->m_len,
			      (unsigned)m->m_len);
			tailm->m_len += m->m_len;
			sb->sb_cc += m->m_len;		/* update sb counter */
			/*
			 * Fix the wrongly updated mbcnt_prealloc: the coalesced
			 * mbuf is going away, so back out its accounting.
			 */
			mbcnt_sz = MSIZE;
			if (m->m_flags & M_EXT)
				mbcnt_sz += m->m_ext.ext_size;
			atomic_subtract_long(&sb->sb_mbcnt_prealloc, mbcnt_sz);

			/* defer the free of the drained mbuf */
			o = m->m_next;
			m->m_next = free_chain;
			free_chain = m;
			m = o;
			continue;
		}

		/* Insert whole mbuf. */
		if (tailm == NULL) {
			KASSERT(sb->sb_mb == NULL, ("sbcompress: sb_mb not NULL"));
			sb->sb_mb = m;		/* only mbuf in sockbuf */
			sb->sb_lastrecord = m;	/* new last record */
		} else {
			tailm->m_next = m;	/* tack m on following tailm */
		}
		sb->sb_lastmbuf = m;	/* update last mbuf hint */

		tailm = m;		/* just inserted mbuf becomes the new tail */
		m = m->m_next;		/* advance to next mbuf */
		tailm->m_next = NULL;	/* split inserted mbuf off from chain */

		/* update sb counters for just added mbuf */
		sballoc(sb, tailm);

		/* clear EOR on intermediate mbufs */
		tailm->m_flags &= ~M_EOR;
	}

	/*
	 * Propagate EOR to the last mbuf
	 */
	if (eor) {
		if (tailm)
			tailm->m_flags |= eor;
		else
			kprintf("semi-panic: sbcompress");
	}

	/*
	 * Clean up any defered frees.
	 */
	while (free_chain)
		free_chain = m_free(free_chain);
	sbcheck(sb);
}
static void connect_try_next(struct dropbear_progress_connection *c) { struct addrinfo *r; int res = 0; int fastopen = 0; #ifdef DROPBEAR_CLIENT_TCP_FAST_OPEN struct msghdr message; #endif for (r = c->res_iter; r; r = r->ai_next) { dropbear_assert(c->sock == -1); c->sock = socket(c->res_iter->ai_family, c->res_iter->ai_socktype, c->res_iter->ai_protocol); if (c->sock < 0) { continue; } ses.maxfd = MAX(ses.maxfd, c->sock); set_sock_nodelay(c->sock); setnonblocking(c->sock); #ifdef DROPBEAR_CLIENT_TCP_FAST_OPEN fastopen = (c->writequeue != NULL); if (fastopen) { memset(&message, 0x0, sizeof(message)); message.msg_name = r->ai_addr; message.msg_namelen = r->ai_addrlen; /* 6 is arbitrary, enough to hold initial packets */ unsigned int iovlen = 6; /* Linux msg_iovlen is a size_t */ struct iovec iov[6]; packet_queue_to_iovec(c->writequeue, iov, &iovlen); message.msg_iov = iov; message.msg_iovlen = iovlen; res = sendmsg(c->sock, &message, MSG_FASTOPEN); /* Returns EINPROGRESS if FASTOPEN wasn't available */ if (res < 0) { if (errno != EINPROGRESS) { m_free(c->errstring); c->errstring = m_strdup(strerror(errno)); /* Not entirely sure which kind of errors are normal - 2.6.32 seems to return EPIPE for any (nonblocking?) sendmsg(). just fall back */ TRACE(("sendmsg tcp_fastopen failed, falling back. %s", strerror(errno))); /* No kernel MSG_FASTOPEN support. Fall back below */ fastopen = 0; /* Set to NULL to avoid trying again */ c->writequeue = NULL; } } else { packet_queue_consume(c->writequeue, res); } } #endif /* Normal connect(), used as fallback for TCP fastopen too */ if (!fastopen) { res = connect(c->sock, r->ai_addr, r->ai_addrlen); } if (res < 0 && errno != EINPROGRESS) { /* failure */ m_free(c->errstring); c->errstring = m_strdup(strerror(errno)); close(c->sock); c->sock = -1; continue; } else { /* new connection was successful, wait for it to complete */ break; } } if (r) { c->res_iter = r->ai_next; } else { c->res_iter = NULL; } }
/* Remove and free every entry in the uthash table `users`.
 * HASH_ITER keeps `tmp` one step ahead, so deleting the current
 * element while iterating is safe. */
HASH_ITER(hh, users, user, tmp) {
    HASH_DEL(users, user);  /* delete; users advances to next */
    m_free(user);
}
int * breakout(double * ts, int n, int min_size, double beta, int degree, int *ol){ if (!ts || min_size < 5 || n < 2 * min_size || !ol){ return NULL; } double (*G)(double); switch(degree){ case 1 : G = Linear; break; case 2 : G = Quadratic; break; default : G = Const; break; } int * prev = (int*)malloc(sizeof(int) * (n + 1)); memset(prev, 0, sizeof(int) * (n + 1)); int * num = (int*)malloc(sizeof(int) * (n + 1)); memset(num, 0, sizeof(int) * (n + 1)); double * F = (double*)malloc(sizeof(double) * (n + 1)); memset(F, 0, sizeof(double) * (n + 1)); MTrace * m1 = m_create((CMP_FN)cmp_fn, NULL); MTrace * m2 = m_create((CMP_FN)cmp_fn, NULL); for (int s = 2 * min_size; s < n + 1; ++s){ m_clear(m1); m_clear(m2); for (int i = 0; i < min_size - 1; ++i){ m_add(m1, ts + i); } for (int i = min_size - 1; i < s; ++i){ m_add(m2, ts + i); } for (int t = min_size; t < s - min_size + 1; ++t){ m_add(m1, ts + t - 1); m_remove(m2, ts + t - 1); if (prev[t] > prev[t - 1]){ for (int i = prev[t - 1]; i < prev[t]; ++i){ m_remove(m1, ts + i); } } if (prev[t] < prev[t - 1]){ for (int i = prev[t]; i < prev[t - 1]; ++i){ m_add(m1, ts + i); } } double lm = *(double*)get_median(m1); double rm = *(double*)get_median(m2); double normalize = ((t - prev[t]) * (s - t)) \ / ((double)(s - prev[t]) * (s - prev[t])); double tmp_s = F[t] + normalize * (lm - rm) * (lm - rm) - beta * G(num[t]); if (tmp_s > F[s]){ num[s] = num[t] + 1; F[s] = tmp_s; prev[s] = t; } } } int k = num[n]; *ol = k; int * re = (int*)malloc(sizeof(int) * k); memset(re, 0, sizeof(int) * k); int i = n; while(i > 0){ if (prev[i]) re[--k] = prev[i]; i = prev[i]; } free(prev); prev = NULL; free(num); num = NULL; free(F); F = NULL; m_free(m1); m1 = NULL; m_free(m2); m2 = NULL; return re; }
void ellipsetrack(avi_t *video, double *xc0, double *yc0, int Nc, int R, int Np, int Nf) {
	/*
	% ELLIPSETRACK tracks cells in the movie specified by 'video', at
	%  locations 'xc0'/'yc0' with radii R using an ellipse with Np discrete
	%  points, starting at frame number one and stopping at frame number 'Nf'.
	%
	% INPUTS:
	%   video.......pointer to avi video object
	%   xc0,yc0.....initial center location (Nc entries)
	%   Nc..........number of cells
	%   R...........initial radius
	%   Np..........nbr of snaxels points per snake
	%   Nf..........nbr of frames in which to track
	%
	% Matlab code written by: DREW GILLIAM (based on code by GANG DONG /
	%                                                        NILANJAN RAY)
	% Ported to C by: MICHAEL BOYER
	*/

	int i, j;

	// Compute angle parameter: Np evenly spaced angles around the ellipse
	double *t = (double *) malloc(sizeof(double) * Np);
	double increment = (2.0 * PI) / (double) Np;
	for (i = 0; i < Np; i++) {
		t[i] = increment * (double) i ;
	}

	// Allocate space for a snake for each cell in each frame
	// (index 0 holds the initial state, hence Nf + 1 slots)
	double **xc = alloc_2d_double(Nc, Nf + 1);
	double **yc = alloc_2d_double(Nc, Nf + 1);
	double ***r = alloc_3d_double(Nc, Np, Nf + 1);
	double ***x = alloc_3d_double(Nc, Np, Nf + 1);
	double ***y = alloc_3d_double(Nc, Np, Nf + 1);

	// Save the first snake for each cell
	for (i = 0; i < Nc; i++) {
		xc[i][0] = xc0[i];
		yc[i][0] = yc0[i];
		for (j = 0; j < Np; j++) {
			r[i][j][0] = (double) R;
		}
	}

	// Generate ellipse points for each cell
	for (i = 0; i < Nc; i++) {
		for (j = 0; j < Np; j++) {
			x[i][j][0] = xc[i][0] + (r[i][j][0] * cos(t[j]));
			y[i][j][0] = yc[i][0] + (r[i][j][0] * sin(t[j]));
		}
	}

	// Allocate arrays so we can break up the per-cell for loop below
	// (one slot per cell, reused every frame)
	double *xci = (double *) malloc(sizeof(double) * Nc);
	double *yci = (double *) malloc(sizeof(double) * Nc);
	double **ri = alloc_2d_double(Nc, Np);
	double *ycavg = (double *) malloc(sizeof(double) * Nc);
	int *u1 = (int *) malloc(sizeof(int) * Nc);
	int *u2 = (int *) malloc(sizeof(int) * Nc);
	int *v1 = (int *) malloc(sizeof(int) * Nc);
	int *v2 = (int *) malloc(sizeof(int) * Nc);
	MAT **Isub = (MAT **) malloc(sizeof(MAT *) * Nc);
	MAT **Ix = (MAT **) malloc(sizeof(MAT *) * Nc);
	MAT **Iy = (MAT **) malloc(sizeof(MAT *) * Nc);
	MAT **IE = (MAT **) malloc(sizeof(MAT *) * Nc);

	// Keep track of the total time spent on computing
	//  the MGVF matrix and evolving the snakes
	long long MGVF_time = 0;
	long long snake_time = 0;

	// Process each frame
	int frame_num;
	for (frame_num = 1; frame_num <= Nf; frame_num++) {
		printf("\rProcessing frame %d / %d", frame_num, Nf);
		//fflush(stdout);

		// Get the current video frame and its dimensions
		// NOTE(review): I is never freed in this loop -- looks like a
		// per-frame leak unless get_frame returns cached storage; confirm.
		MAT *I = get_frame(video, frame_num, 0, 1);
		int Ih = I->m;
		int Iw = I->n;

		// Set the current positions equal to the previous positions
		for (i = 0; i < Nc; i++) {
			xc[i][frame_num] = xc[i][frame_num - 1];
			yc[i][frame_num] = yc[i][frame_num - 1];
			for (j = 0; j < Np; j++) {
				r[i][j][frame_num] = r[i][j][frame_num - 1];
			}
		}

		// Split the work among multiple threads, if OPEN is defined

		// Track each cell
		int cell_num;
		for (cell_num = 0; cell_num < Nc; cell_num++) {
			// Make copies of the current cell's location
			xci[cell_num] = xc[cell_num][frame_num];
			yci[cell_num] = yc[cell_num][frame_num];
			for (j = 0; j < Np; j++) {
				ri[cell_num][j] = r[cell_num][j][frame_num];
			}

			// Add up the last ten y-values for this cell
			//  (or fewer if there are not yet ten previous frames)
			ycavg[cell_num] = 0.0;
			for (i = (frame_num > 10 ? frame_num - 10 : 0); i < frame_num; i++) {
				ycavg[cell_num] += yc[cell_num][i];
			}
			// Compute the average of the last ten y-values
			//  (this represents the expected y-location of the cell)
			ycavg[cell_num] = ycavg[cell_num] / (double) (frame_num > 10 ? 10 : frame_num);

			// Determine the range of the subimage surrounding the current position
			// (+0.5 / +1.5 provide rounding; clamped to the image bounds)
			u1[cell_num] = max(xci[cell_num] - 4.0 * R + 0.5, 0 );
			u2[cell_num] = min(xci[cell_num] + 4.0 * R + 0.5, Iw - 1);
			v1[cell_num] = max(yci[cell_num] - 2.0 * R + 1.5, 0 );
			v2[cell_num] = min(yci[cell_num] + 2.0 * R + 1.5, Ih - 1);

			// Extract the subimage
			Isub[cell_num] = m_get(v2[cell_num] - v1[cell_num] + 1, u2[cell_num] - u1[cell_num] + 1);
			for (i = v1[cell_num]; i <= v2[cell_num]; i++) {
				for (j = u1[cell_num]; j <= u2[cell_num]; j++) {
					m_set_val(Isub[cell_num], i - v1[cell_num], j - u1[cell_num], m_get_val(I, i, j));
				}
			}

			// Compute the subimage gradient magnitude
			Ix[cell_num] = gradient_x(Isub[cell_num]);
			Iy[cell_num] = gradient_y(Isub[cell_num]);
			IE[cell_num] = m_get(Isub[cell_num]->m, Isub[cell_num]->n);
			for (i = 0; i < Isub[cell_num]->m; i++) {
				for (j = 0; j < Isub[cell_num]->n; j++) {
					double temp_x = m_get_val(Ix[cell_num], i, j);
					double temp_y = m_get_val(Iy[cell_num], i, j);
					m_set_val(IE[cell_num], i, j, sqrt((temp_x * temp_x) + (temp_y * temp_y)));
				}
			}
		}

		// Compute the motion gradient vector flow (MGVF) edgemaps
		long long MGVF_start_time = get_time();
		MAT **IMGVF = MGVF_f(IE, 1, 1, Nc);
		MGVF_time += get_time() - MGVF_start_time;

		// Sequentially determine the new location of each cell
		for (cell_num = 0; cell_num < Nc; cell_num++) {
			// Determine the position of the cell in the subimage
			xci[cell_num] = xci[cell_num] - (double) u1[cell_num];
			yci[cell_num] = yci[cell_num] - (double) (v1[cell_num] - 1);
			ycavg[cell_num] = ycavg[cell_num] - (double) (v1[cell_num] - 1);

			// Evolve the snake
			long long snake_start_time = get_time();
			ellipseevolve(IMGVF[cell_num], &(xci[cell_num]), &(yci[cell_num]), ri[cell_num], t, Np, (double) R, ycavg[cell_num]);
			//ellipseevolve(IMGVF, &(xci[cell_num]), &(yci[cell_num]), ri[cell_num], t, Np, (double) R, ycavg[cell_num]);
			snake_time += get_time() - snake_start_time;

			// Compute the cell's new position in the full image
			xci[cell_num] = xci[cell_num] + u1[cell_num];
			yci[cell_num] = yci[cell_num] + (v1[cell_num] - 1);

			// Store the new location of the cell and the snake
			xc[cell_num][frame_num] = xci[cell_num];
			yc[cell_num][frame_num] = yci[cell_num];
			for (j = 0; j < Np; j++) {
				r[cell_num][j][frame_num] = 0;
				r[cell_num][j][frame_num] = ri[cell_num][j];
				x[cell_num][j][frame_num] = xc[cell_num][frame_num] + (ri[cell_num][j] * cos(t[j]));
				y[cell_num][j][frame_num] = yc[cell_num][frame_num] + (ri[cell_num][j] * sin(t[j]));
			}

			// Output the updated center of each cell
			//printf("%d,%f,%f\n", cell_num, xci[cell_num], yci[cell_num]);

			// Free temporary memory
			m_free(Isub[cell_num]);
			m_free(Ix[cell_num]);
			m_free(Iy[cell_num]);
			m_free(IE[cell_num]);
			m_free(IMGVF[cell_num]);
		}

		// Free temporary memory (the array of MGVF matrix pointers)
		free(IMGVF);

		// Output a new line to visually distinguish the output from different frames
		//printf("\n");
	}

	// Free temporary memory
	free_2d_double(xc);
	free_2d_double(yc);
	free_3d_double(r);
	free_3d_double(x);
	free_3d_double(y);
	free(t);
	free(xci);
	free(yci);
	free_2d_double(ri);
	free(ycavg);
	free(u1);
	free(u2);
	free(v1);
	free(v2);
	free(Isub);
	free(Ix);
	free(Iy);
	free(IE);

	// Report average processing time per frame
	printf("\n\nTracking runtime (average per frame):\n");
	printf("------------------------------------\n");
	printf("MGVF computation: %.5f seconds\n", ((float) (MGVF_time)) / (float) (1000*1000*Nf));
	printf(" Snake evolution: %.5f seconds\n", ((float) (snake_time)) / (float) (1000*1000*Nf));
}
void ellipseevolve(MAT *f, double *xc0, double *yc0, double *r0, double *t, int Np, double Er, double Ey) {
	/*
	% ELLIPSEEVOLVE evolves a parametric snake according
	%  to some energy constraints.
	%
	% INPUTS:
	%   f............potential surface
	%   xc0,yc0......initial center position
	%   r0,t.........initial radii & angle vectors (with Np elements each)
	%   Np...........number of snaxel points per snake
	%   Er...........expected radius
	%   Ey...........expected y position
	%
	% OUTPUTS
	%   xc0,yc0.......final center position
	%   r0...........final radii
	%
	% Matlab code written by: DREW GILLIAM (based on work by GANG DONG /
	%                                                        NILANJAN RAY)
	% Ported to C by: MICHAEL BOYER
	*/

	// Constants (step sizes, regularization weights, stopping criteria)
	double deltax = 0.2;
	double deltay = 0.2;
	double deltar = 0.2;
	double converge = 0.1;
	double lambdaedge = 1;
	double lambdasize = 0.2;
	double lambdapath = 0.05;
	int iterations = 1000;  // maximum number of iterations

	int i, j;

	// Initialize variables (work on local copies of center and radii)
	double xc = *xc0;
	double yc = *yc0;
	double *r = (double *) malloc(sizeof(double) * Np);
	for (i = 0; i < Np; i++) r[i] = r0[i];

	// Compute the x- and y-gradients of the MGVF matrix
	MAT *fx = gradient_x(f);
	MAT *fy = gradient_y(f);

	// Normalize the gradients to unit vectors
	// NOTE(review): fmag can be 0 where both gradients vanish, producing
	// a 0/0 division (NaN) -- confirm whether upstream data rules this out.
	int fh = f->m, fw = f->n;
	for (i = 0; i < fh; i++) {
		for (j = 0; j < fw; j++) {
			double temp_x = m_get_val(fx, i, j);
			double temp_y = m_get_val(fy, i, j);
			double fmag = sqrt((temp_x * temp_x) + (temp_y * temp_y));
			m_set_val(fx, i, j, temp_x / fmag);
			m_set_val(fy, i, j, temp_y / fmag);
		}
	}

	double *r_old = (double *) malloc(sizeof(double) * Np);
	VEC *x = v_get(Np);
	VEC *y = v_get(Np);

	// Evolve the snake until convergence or the iteration cap
	int iter = 0;
	double snakediff = 1.0;
	while (iter < iterations && snakediff > converge) {

		// Save the values from the previous iteration
		double xc_old = xc, yc_old = yc;
		for (i = 0; i < Np; i++) {
			r_old[i] = r[i];
		}

		// Compute the locations of the snaxels (polar -> cartesian)
		for (i = 0; i < Np; i++) {
			v_set_val(x, i, xc + r[i] * cos(t[i]));
			v_set_val(y, i, yc + r[i] * sin(t[i]));
		}

		// See if any of the points in the snake are off the edge of the image;
		// if so, stop evolving (interpolation below would go out of bounds)
		double min_x = v_get_val(x, 0), max_x = v_get_val(x, 0);
		double min_y = v_get_val(y, 0), max_y = v_get_val(y, 0);
		for (i = 1; i < Np; i++) {
			double x_i = v_get_val(x, i);
			if (x_i < min_x) min_x = x_i;
			else if (x_i > max_x) max_x = x_i;
			double y_i = v_get_val(y, i);
			if (y_i < min_y) min_y = y_i;
			else if (y_i > max_y) max_y = y_i;
		}
		if (min_x < 0.0 || max_x > (double) fw - 1.0 || min_y < 0 || max_y > (double) fh - 1.0) break;

		// Compute the length of the snake (closed polygon perimeter)
		double L = 0.0;
		for (i = 0; i < Np - 1; i++) {
			double diff_x = v_get_val(x, i + 1) - v_get_val(x, i);
			double diff_y = v_get_val(y, i + 1) - v_get_val(y, i);
			L += sqrt((diff_x * diff_x) + (diff_y * diff_y));
		}
		double diff_x = v_get_val(x, 0) - v_get_val(x, Np - 1);
		double diff_y = v_get_val(y, 0) - v_get_val(y, Np - 1);
		L += sqrt((diff_x * diff_x) + (diff_y * diff_y));

		// Compute the potential surface at each snaxel
		MAT *vf = linear_interp2(f, x, y);
		MAT *vfx = linear_interp2(fx, x, y);
		MAT *vfy = linear_interp2(fy, x, y);

		// Compute the average potential surface around the snake
		double vfmean = sum_m(vf ) / L;
		double vfxmean = sum_m(vfx) / L;
		double vfymean = sum_m(vfy) / L;

		// Compute the radial potential surface
		int m = vf->m, n = vf->n;
		MAT *vfr = m_get(m, n);
		for (i = 0; i < n; i++) {
			double vf_val = m_get_val(vf, 0, i);
			double vfx_val = m_get_val(vfx, 0, i);
			double vfy_val = m_get_val(vfy, 0, i);
			double x_val = v_get_val(x, i);
			double y_val = v_get_val(y, i);
			double new_val = (vf_val + vfx_val * (x_val - xc) + vfy_val * (y_val - yc) - vfmean) / L;
			m_set_val(vfr, 0, i, new_val);
		}

		// Update the snake center and snaxels (implicit regularization in
		// the denominators pulls toward the expected y-position and radius)
		xc = xc + (deltax * lambdaedge * vfxmean);
		yc = (yc + (deltay * lambdaedge * vfymean) + (deltay * lambdapath * Ey)) / (1.0 + deltay * lambdapath);
		double r_diff = 0.0;
		for (i = 0; i < Np; i++) {
			r[i] = (r[i] + (deltar * lambdaedge * m_get_val(vfr, 0, i)) + (deltar * lambdasize * Er)) /
			       (1.0 + deltar * lambdasize);
			r_diff += fabs(r[i] - r_old[i]);
		}

		// Test for convergence (total movement of center + radii)
		snakediff = fabs(xc - xc_old) + fabs(yc - yc_old) + r_diff;

		// Free temporary matrices
		m_free(vf);
		m_free(vfx);
		m_free(vfy);
		m_free(vfr);

		iter++;
	}

	// Set the return values
	*xc0 = xc;
	*yc0 = yc;
	for (i = 0; i < Np; i++) r0[i] = r[i];

	// Free memory
	free(r);
	free(r_old);
	v_free( x);
	v_free( y);
	m_free(fx);
	m_free(fy);
}
void check_variography(const VARIOGRAM **v, int n_vars)
/*
 * check for intrinsic correlation, linear model of coregionalisation
 * or else (with warning) Cauchy Swartz
 *
 * v       - lower-triangular array of (cross-)variograms, indexed via LTI()
 * n_vars  - number of variables; nothing to check for fewer than 2
 *
 * Logs which of the three cases holds; if none holds and gl_nocheck is 0,
 * aborts via ErrMsg().
 */
{
	int i, j, k, ic = 0, lmc, posdef = 1;
	MAT **a = NULL;
	double b;
	char *reason = NULL;

	if (n_vars <= 1)
		return;
	/*
	 * find out if lmc (linear model of coregionalization) hold:
	 * all models must have equal base models (sequence and range)
	 *
	 * NOTE(review): the loop bound is get_n_vgms() while the matrices
	 * below are sized by n_vars -- confirm these agree for all callers.
	 */
	for (i = 1, lmc = 1; lmc && i < get_n_vgms(); i++) {
		if (v[0]->n_models != v[i]->n_models) {
			reason = "number of models differ";
			lmc = 0;
		}
		for (k = 0; lmc && k < v[0]->n_models; k++) {
			if (v[0]->part[k].model != v[i]->part[k].model) {
				reason = "model types differ";
				lmc = 0;
			}
			if (v[0]->part[k].range[0] != v[i]->part[k].range[0]) {
				reason = "ranges differ";
				lmc = 0;
			}
		}
		/* anisotropy must match structure-by-structure as well */
		for (k = 0; lmc && k < v[0]->n_models; k++)
			if (v[0]->part[k].tm_range != NULL) {
				if (v[i]->part[k].tm_range == NULL) {
					reason = "anisotropy for part of models";
					lmc = 0;
				} else if (
					v[0]->part[k].tm_range->ratio[0] != v[i]->part[k].tm_range->ratio[0] ||
					v[0]->part[k].tm_range->ratio[1] != v[i]->part[k].tm_range->ratio[1] ||
					v[0]->part[k].tm_range->angle[0] != v[i]->part[k].tm_range->angle[0] ||
					v[0]->part[k].tm_range->angle[1] != v[i]->part[k].tm_range->angle[1] ||
					v[0]->part[k].tm_range->angle[2] != v[i]->part[k].tm_range->angle[2]
					) {
					reason = "anisotropy parameters are not equal";
					lmc = 0;
				}
			} else if (v[i]->part[k].tm_range != NULL) {
				reason = "anisotropy for part of models";
				lmc = 0;
			}
	}
	if (lmc) {
		/*
		 * check for ic: build one sill matrix per model structure
		 */
		a = (MAT **) emalloc(v[0]->n_models * sizeof(MAT *));
		for (k = 0; k < v[0]->n_models; k++)
			a[k] = m_get(n_vars, n_vars);
		for (i = 0; i < n_vars; i++) {
			for (j = 0; j < n_vars; j++) {
				/* for all variogram triplets: */
				for (k = 0; k < v[0]->n_models; k++)
					ME(a[k], i, j) = v[LTI(i,j)]->part[k].sill;
			}
		}
		/* for ic: a's must be scaled versions of each other: */
		ic = 1;
		for (k = 1, ic = 1; ic && k < v[0]->n_models; k++) {
			b = ME(a[0], 0, 0)/ME(a[k], 0, 0);
			for (i = 0; ic && i < n_vars; i++)
				for (j = 0; ic && j < n_vars; j++)
					if (fabs(ME(a[0], i, j) / ME(a[k], i, j) - b) > EPSILON)
						ic = 0;
		}
		/* check posdef matrices: every structure's sill matrix must be
		 * positive definite for a valid lmc */
		for (i = 0, lmc = 1, posdef = 1; i < v[0]->n_models; i++) {
			posdef = is_posdef(a[i]);
			if (posdef == 0) {
				reason = "coefficient matrix not positive definite";
				if (DEBUG_COV) {
					printlog("non-positive definite coefficient matrix %d:\n", i);
					m_logoutput(a[i]);
				}
				ic = lmc = 0;
			}
			if (! posdef)
				printlog(
				"non-positive definite coefficient matrix in structure %d", i+1);
		}
		for (k = 0; k < v[0]->n_models; k++)
			m_free(a[k]);
		efree(a);
		if (ic) {
			printlog("Intrinsic Correlation found. Good.\n");
			return;
		} else if (lmc) {
			printlog("Linear Model of Coregionalization found. Good.\n");
			return;
		}
	}
	/*
	 * lmc does not hold: check on Cauchy Swartz
	 */
	pr_warning("No Intrinsic Correlation or Linear Model of Coregionalization found\nReason: %s", reason ? reason : "unknown");
	if (gl_nocheck == 0) {
		pr_warning("[add `set = list(nocheck = 1)' to the gstat() or krige() to ignore the following error]\n");
		ErrMsg(ER_IMPOSVAL, "variograms do not satisfy a legal model");
	}
	printlog("Now checking for Cauchy-Schwartz inequalities:\n");
	for (i = 0; i < n_vars; i++)
		for (j = 0; j < i; j++)
			if (is_valid_cs(v[LTI(i,i)], v[LTI(j,j)], v[LTI(i,j)])) {
				printlog("variogram(%s,%s) passed Cauchy-Schwartz\n",
					name_identifier(j), name_identifier(i));
			} else
				pr_warning("Cauchy-Schwartz inequality found for variogram(%s,%s)",
					name_identifier(j), name_identifier(i) );
	return;
}
/* Parse pubkey options and set ses.authstate.pubkey_options accordingly.
 * Returns DROPBEAR_SUCCESS if key is ok for auth, DROPBEAR_FAILURE otherwise.
 * On failure the partially-built options struct (including any parsed
 * forced_command) is freed and the pointer reset to NULL. */
int svr_add_pubkey_options(buffer *options_buf, int line_num, const char* filename) {
	int ret = DROPBEAR_FAILURE;

	TRACE(("enter addpubkeyoptions"))

	/* m_malloc gives zeroed memory, so all option flags and the
	 * forced_command pointer start out cleared */
	ses.authstate.pubkey_options = (struct PubKeyOptions*)m_malloc(sizeof( struct PubKeyOptions ));

	buf_setpos(options_buf, 0);
	while (options_buf->pos < options_buf->len) {
		if (match_option(options_buf, "no-port-forwarding") == DROPBEAR_SUCCESS) {
			dropbear_log(LOG_WARNING, "Port forwarding disabled.");
			ses.authstate.pubkey_options->no_port_forwarding_flag = 1;
			goto next_option;
		}
#ifdef ENABLE_SVR_AGENTFWD
		if (match_option(options_buf, "no-agent-forwarding") == DROPBEAR_SUCCESS) {
			dropbear_log(LOG_WARNING, "Agent forwarding disabled.");
			ses.authstate.pubkey_options->no_agent_forwarding_flag = 1;
			goto next_option;
		}
#endif
#ifdef ENABLE_X11FWD
		if (match_option(options_buf, "no-X11-forwarding") == DROPBEAR_SUCCESS) {
			dropbear_log(LOG_WARNING, "X11 forwarding disabled.");
			ses.authstate.pubkey_options->no_x11_forwarding_flag = 1;
			goto next_option;
		}
#endif
		if (match_option(options_buf, "no-pty") == DROPBEAR_SUCCESS) {
			dropbear_log(LOG_WARNING, "Pty allocation disabled.");
			ses.authstate.pubkey_options->no_pty_flag = 1;
			goto next_option;
		}
		if (match_option(options_buf, "command=\"") == DROPBEAR_SUCCESS) {
			int escaped = 0;
			const unsigned char* command_start = buf_getptr(options_buf, 0);

			/* scan for the unescaped closing quote */
			while (options_buf->pos < options_buf->len) {
				const char c = buf_getbyte(options_buf);
				if (!escaped && c == '"') {
					/* command_len counts the command text plus the closing
					 * quote, so the buffer is exactly text + NUL */
					const int command_len = buf_getptr(options_buf, 0) - command_start;
					ses.authstate.pubkey_options->forced_command = m_malloc(command_len);
					memcpy(ses.authstate.pubkey_options->forced_command,
							command_start, command_len-1);
					ses.authstate.pubkey_options->forced_command[command_len-1] = '\0';
					dropbear_log(LOG_WARNING, "Forced command '%s'",
						ses.authstate.pubkey_options->forced_command);
					goto next_option;
				}
				escaped = (!escaped && c == '\\');
			}
			dropbear_log(LOG_WARNING, "Badly formatted command= authorized_keys option");
			goto bad_option;
		}

next_option:
		/*
		 * Skip the comma, and move to the next option
		 * (or break out if there are no more).
		 */
		if (options_buf->pos < options_buf->len
				&& buf_getbyte(options_buf) != ',') {
			goto bad_option;
		}
		/* Process the next option. */
	}
	/* parsed all options with no problem */
	ret = DROPBEAR_SUCCESS;
	goto end;

bad_option:
	ret = DROPBEAR_FAILURE;
	/* BUG FIX: free any forced_command parsed before the bad option was
	 * hit; freeing only the struct leaked the command string. The pointer
	 * is NULL (zeroed m_malloc) when no command= option was seen, and
	 * m_free(NULL) is a no-op. */
	m_free(ses.authstate.pubkey_options->forced_command);
	m_free(ses.authstate.pubkey_options);
	ses.authstate.pubkey_options = NULL;
	dropbear_log(LOG_WARNING, "Bad public key options at %s:%d", filename, line_num);

end:
	TRACE(("leave addpubkeyoptions"))
	return ret;
}
/* Free potential public key options, including the forced_command string
 * allocated by svr_add_pubkey_options. */
void svr_pubkey_options_cleanup() {

	if (ses.authstate.pubkey_options) {
		/* BUG FIX: forced_command is a separately m_malloc'd string inside
		 * the struct; freeing only the struct leaked it. The pointer is
		 * NULL when no command= option was parsed (m_malloc zeroes), and
		 * m_free(NULL) is a no-op. */
		m_free(ses.authstate.pubkey_options->forced_command);
		m_free(ses.authstate.pubkey_options);
		ses.authstate.pubkey_options = NULL;
	}
}
void m_freem(struct mbuf *bp) { while (bp) bp = m_free(bp); }
/*
 * Take incoming datagram fragment and try to
 * reassemble it into whole datagram. If a chain for
 * reassembly of this datagram already exists, then it
 * is given as fp; otherwise have to make a chain.
 *
 * Returns the reassembled packet when complete, or 0 while more
 * fragments are still expected (or on drop).
 */
struct ip *ip_reass(struct ipasfrag *ip, struct ipq *fp)
{
	struct mbuf *m = dtom(ip);
	struct ipasfrag *q;
	int hlen = ip->ip_hl << 2;
	int i, next;

	DEBUG_CALL("ip_reass");
	DEBUG_ARG("ip = %lx", (long)ip);
	DEBUG_ARG("fp = %lx", (long)fp);
	DEBUG_ARG("m = %lx", (long)m);

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 * Fragment m_data is concatenated.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == 0) {
		struct mbuf *t;
		if ((t = m_get()) == NULL) goto dropfrag;
		fp = mtod(t, struct ipq *);
		insque_32(fp, &ipq);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		/* Empty circular fragment list: head points at itself. */
		fp->ipq_next = fp->ipq_prev = (ipasfragp_32)fp;
		fp->ipq_src = ((struct ip *)ip)->ip_src;
		fp->ipq_dst = ((struct ip *)ip)->ip_dst;
		q = (struct ipasfrag *)fp;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (q = (struct ipasfrag *)fp->ipq_next; q != (struct ipasfrag *)fp;
	     q = (struct ipasfrag *)q->ipf_next)
		if (q->ip_off > ip->ip_off)
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already. If so, drop the data from the incoming
	 * segment. If it provides all of our data, drop us.
	 */
	if (q->ipf_prev != (ipasfragp_32)fp) {
		i = ((struct ipasfrag *)(q->ipf_prev))->ip_off +
			((struct ipasfrag *)(q->ipf_prev))->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			/* Trim the overlapping front bytes off this fragment. */
			m_adj(dtom(ip), i);
			ip->ip_off += i;
			ip->ip_len -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q != (struct ipasfrag *)fp && ip->ip_off + ip->ip_len > q->ip_off) {
		i = (ip->ip_off + ip->ip_len) - q->ip_off;
		if (i < q->ip_len) {
			/* Partial overlap: shrink the successor and stop. */
			q->ip_len -= i;
			q->ip_off += i;
			m_adj(dtom(q), i);
			break;
		}
		/* Fully covered: advance first, then free/dequeue the old q. */
		q = (struct ipasfrag *) q->ipf_next;
		m_freem(dtom((struct ipasfrag *) q->ipf_prev));
		ip_deq((struct ipasfrag *) q->ipf_prev);
	}

insert:
	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 */
	ip_enq(ip, (struct ipasfrag *) q->ipf_prev);
	next = 0;
	for (q = (struct ipasfrag *) fp->ipq_next; q != (struct ipasfrag *)fp;
	     q = (struct ipasfrag *) q->ipf_next) {
		if (q->ip_off != next)
			return (0);	/* gap found: not complete yet */
		next += q->ip_len;
	}
	/* Last fragment must not have "more fragments" set. */
	if (((struct ipasfrag *)(q->ipf_prev))->ipf_mff & 1)
		return (0);

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	q = (struct ipasfrag *) fp->ipq_next;
	m = dtom(q);
	q = (struct ipasfrag *) q->ipf_next;
	while (q != (struct ipasfrag *)fp) {
		struct mbuf *t;
		t = dtom(q);
		q = (struct ipasfrag *) q->ipf_next;
		m_cat(m, t);
	}

	/*
	 * Create header for new ip packet by
	 * modifying header of first packet;
	 * dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip = (struct ipasfrag *) fp->ipq_next;

	/*
	 * If the fragments concatenated to an mbuf that's
	 * bigger than the total size of the fragment, then and
	 * m_ext buffer was alloced. But fp->ipq_next points to
	 * the old buffer (in the mbuf), so we must point ip
	 * into the new buffer.
	 */
	if (m->m_flags & M_EXT) {
		int delta;
		delta = (char *)ip - m->m_dat;
		ip = (struct ipasfrag *)(m->m_ext + delta);
	}

	/* DEBUG_ARG("ip = %lx", (long)ip);
	 * ip=(struct ipasfrag *)m->m_data; */

	ip->ip_len = next;
	ip->ipf_mff &= ~1;
	((struct ip *)ip)->ip_src = fp->ipq_src;
	((struct ip *)ip)->ip_dst = fp->ipq_dst;
	remque_32(fp);
	(void) m_free(dtom(fp));
	m = dtom(ip);
	/* Re-expose the IP header that was hidden at function entry. */
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);

	return ((struct ip *)ip);

dropfrag:
	ipstat.ips_fragdropped++;
	m_freem(m);
	return (0);
}
/*
 * Send a packet
 * We choose a packet based on it's position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Otherwise we choose the first packet from the
 * batchq and send it.  the next packet chosen will be from the session
 * after this one, then the session after that one, and so on..  So,
 * for example, if there are 3 ftp session's fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third, then back
 * to the first, etc. etc.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_get_clock_ns(rt_clock);
    bool from_batchq, next_from_batchq;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    /* Guard against re-entry: if_encap below may call back into if_start. */
    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Pick the first candidate: fastq has absolute priority. */
    if (slirp->if_fastq.ifq_next != &slirp->if_fastq) {
        ifm_next = slirp->if_fastq.ifq_next;
        next_from_batchq = false;
    } else if (slirp->next_m != &slirp->if_batchq) {
        /* Nothing on fastq, pick up from batchq via next_m */
        ifm_next = slirp->next_m;
        next_from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        /* check if we can really output */
        if (!slirp_can_output(slirp->opaque)) {
            break;
        }

        ifm = ifm_next;
        from_batchq = next_from_batchq;

        /* Compute the successor before ifm is dequeued/freed. */
        ifm_next = ifm->ifq_next;
        if (ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = slirp->next_m;
            next_from_batchq = true;
        }
        if (ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP resolution */
            continue;
        }

        if (ifm == slirp->next_m) {
            /* Set which packet to send on next iteration */
            slirp->next_m = ifm->ifq_next;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                /* Next packet in fastq is from the same session */
                ifm_next = next;
                next_from_batchq = false;
            } else if (slirp->next_m == &slirp->if_batchq) {
                /* Set next_m and ifm_next if the session packet is now the
                 * only one on batchq */
                slirp->next_m = ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}
/*
 * Ip input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 *
 * Consumes <m> on every path: it is either handed to a protocol input
 * routine, queued for reassembly, or freed.
 */
void ip_input(struct mbuf *m)
{
	struct ip *ip;
	int hlen;

	DEBUG_CALL("ip_input");
	DEBUG_ARG("m = %lx", (long)m);
	DEBUG_ARG("m_len = %d", m->m_len);

	ipstat.ips_total++;

	if (m->m_len < sizeof (struct ip)) {
		ipstat.ips_toosmall++;
		return;
	}

	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen<sizeof(struct ip ) || hlen>m->m_len) {/* min header length */
	  ipstat.ips_badhlen++;                     /* or packet too short */
	  goto bad;
	}

        /* keep ip header intact for ICMP reply
	 * ip->ip_sum = cksum(m, hlen);
	 * if (ip->ip_sum) {
	 */
	if(cksum(m,hlen)) {
	  ipstat.ips_badsum++;
	  goto bad;
	}

	/*
	 * Convert fields to host representation.
	 */
	NTOHS(ip->ip_len);
	if (ip->ip_len < hlen) {
		ipstat.ips_badlen++;
		goto bad;
	}
	NTOHS(ip->ip_id);
	NTOHS(ip->ip_off);

	/*
	 * Check that the amount of data in the buffers
	 * is as at least much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_len < ip->ip_len) {
		ipstat.ips_tooshort++;
		goto bad;
	}
	/* Should drop packet if mbuf too long? hmmm... */
	if (m->m_len > ip->ip_len)
	   m_adj(m, ip->ip_len - m->m_len);

	/* check ip_ttl for a correct ICMP reply */
	if(ip->ip_ttl==0 || ip->ip_ttl==1) {
	  icmp_error(m, ICMP_TIMXCEED,ICMP_TIMXCEED_INTRANS, 0,"ttl");
	  goto bad;
	}

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
/* We do no IP options */
/*	if (hlen > sizeof (struct ip) && ip_dooptions(m))
 *		goto next;
 */
	/*
	 * If offset or IP_MF are set, must reassemble.
	 * Otherwise, nothing need be done.
	 * (We could look in the reassembly queue to see
	 * if the packet was previously fragmented,
	 * but it's not worth the time; just let them time out.)
	 *
	 * XXX This should fail, don't fragment yet
	 */
	if (ip->ip_off &~ IP_DF) {
	  register struct ipq *fp;
		/*
		 * Look for queue of fragments
		 * of this datagram.
		 */
		for (fp = (struct ipq *) ipq.next; fp != &ipq;
		     fp = (struct ipq *) fp->next)
		  if (ip->ip_id == fp->ipq_id &&
		      ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		      ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
		      ip->ip_p == fp->ipq_p)
		    goto found;
		fp = 0;
	found:

		/*
		 * Adjust ip_len to not reflect header,
		 * set ip_mff if more fragments are expected,
		 * convert offset of this to bytes.
		 */
		ip->ip_len -= hlen;
		if (ip->ip_off & IP_MF)
		  ((struct ipasfrag *)ip)->ipf_mff |= 1;
		else
		  ((struct ipasfrag *)ip)->ipf_mff &= ~1;

		/* Fragment offset field is in units of 8 bytes. */
		ip->ip_off <<= 3;

		/*
		 * If datagram marked as having more fragments
		 * or if this is not the first fragment,
		 * attempt reassembly; if it succeeds, proceed.
		 */
		if (((struct ipasfrag *)ip)->ipf_mff & 1 || ip->ip_off) {
			ipstat.ips_fragments++;
			ip = ip_reass((struct ipasfrag *)ip, fp);
			if (ip == 0)
				return;	/* reassembly still incomplete */
			ipstat.ips_reassembled++;
			m = dtom(ip);
		} else
			if (fp)
		   	   ip_freef(fp);

	} else
		ip->ip_len -= hlen;

	/*
	 * Switch out to protocol's input routine.
	 */
	ipstat.ips_delivered++;
	switch (ip->ip_p) {
	 case IPPROTO_TCP:
		tcp_input(m, hlen, (struct socket *)NULL);
		break;
	 case IPPROTO_UDP:
		udp_input(m, hlen);
		break;
	 case IPPROTO_ICMP:
		icmp_input(m, hlen);
		break;
	 default:
		ipstat.ips_noproto++;
		m_free(m);
	}
	return;
bad:
	m_freem(m);
	return;
}
/*
 * recvfrom() a UDP socket
 *
 * Handles two cases: an ICMP "ping" reply socket (reflect or report
 * unreachable, then detach), and a regular UDP socket (read a datagram
 * into a fresh mbuf and forward it with udp_output).
 */
void sorecvfrom(struct socket *so)
{
	struct sockaddr_in addr;
	socklen_t addrlen = sizeof(struct sockaddr_in);

	DEBUG_CALL("sorecvfrom");
	DEBUG_ARG("so = %p", so);

	if (so->so_type == IPPROTO_ICMP) {   /* This is a "ping" reply */
	  char buff[256];
	  int len;

	  len = recvfrom(so->s, buff, 256, 0,
			 (struct sockaddr *)&addr, &addrlen);
	  /* XXX Check if reply is "correct"? */

	  if(len == -1 || len == 0) {
	    u_char code=ICMP_UNREACH_PORT;

	    /* Map the socket error to the closest ICMP unreachable code. */
	    if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
	    else if(errno == ENETUNREACH) code=ICMP_UNREACH_NET;

	    DEBUG_MISC((dfd," udp icmp rx errno = %d-%s\n",
			errno,strerror(errno)));
	    icmp_error(so->so_m, ICMP_UNREACH,code, 0,strerror(errno));
	  } else {
	    icmp_reflect(so->so_m);
	    so->so_m = NULL; /* Don't m_free() it again! */
	  }
	  /* No need for this socket anymore, udp_detach it */
	  udp_detach(so);
	} else {                            	/* A "normal" UDP packet */
	  struct mbuf *m;
	  int len;
#ifdef _WIN32
	  unsigned long n;
#else
	  int n;
#endif

	  m = m_get(so->slirp);
	  if (!m) {
	      return;
	  }
	  /* Reserve room for the link-layer header to be prepended later. */
	  m->m_data += IF_MAXLINKHDR;

	  /*
	   * XXX Shouldn't FIONREAD packets destined for port 53,
	   * but I don't know the max packet size for DNS lookups
	   */
	  len = M_FREEROOM(m);
	  /* if (so->so_fport != htons(53)) { */
	  ioctlsocket(so->s, FIONREAD, &n);

	  if (n > len) {
	    /* Grow the mbuf so the whole pending datagram fits. */
	    n = (m->m_data - m->m_dat) + m->m_len + n + 1;
	    m_inc(m, n);
	    len = M_FREEROOM(m);
	  }
	  /* } */

	  m->m_len = recvfrom(so->s, m->m_data, len, 0,
			      (struct sockaddr *)&addr, &addrlen);
	  DEBUG_MISC((dfd, " did recvfrom %d, errno = %d-%s\n",
		      m->m_len, errno,strerror(errno)));
	  if(m->m_len<0) {
	    u_char code=ICMP_UNREACH_PORT;

	    if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
	    else if(errno == ENETUNREACH) code=ICMP_UNREACH_NET;

	    DEBUG_MISC((dfd," rx error, tx icmp ICMP_UNREACH:%i\n", code));
	    icmp_error(so->so_m, ICMP_UNREACH,code, 0,strerror(errno));
	    m_free(m);
	  } else {
	  /*
	   * Hack: domain name lookup will be used the most for UDP,
	   * and since they'll only be used once there's no need
	   * for the 4 minute (or whatever) timeout... So we time them
	   * out much quicker (10 seconds  for now...)
	   */
	    if (so->so_expire) {
	      if (so->so_fport == htons(53))
		so->so_expire = curtime + SO_EXPIREFAST;
	      else
		so->so_expire = curtime + SO_EXPIRE;
	    }

	    /*
	     * If this packet was destined for CTL_ADDR,
	     * make it look like that's where it came from, done by udp_output
	     */
	    udp_output(so, m, &addr);
	  } /* rx error */
	} /* if ping packet */
}
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 *
 * Returns the (possibly new) head of the chain, or NULL on allocation
 * failure — in which case the whole original chain has been freed.
 */
struct mbuf *
m_clone(struct mbuf *m0)
{
	struct mbuf *m, *mprev;

	KASSERT(m0 != NULL, ("m_clone: null mbuf"));

	mprev = NULL;
	/* NB: m's successor is re-read each iteration via mprev->m_next
	 * because the current mbuf may be replaced or coalesced away. */
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
				newipsecstat.ips_mbcoalesced++;
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Cluster'd mbufs are left alone (for now).
		 */
		if (!MEXT_IS_REF(m)) {
			mprev = m;
			continue;
		}
		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		KASSERT(m->m_flags & M_EXT,
			("m_clone: m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster */
		if (mprev == NULL || (mprev->m_flags & M_EXT) == 0 ||
		    m->m_len > M_TRAILINGSPACE(mprev)) {
			struct mbuf *n;

			/*
			 * Allocate a new page, copy the data to the front
			 * and release the reference to the old page.
			 */
			n = m_getcl(M_DONTWAIT, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			if (mprev == NULL && (m->m_flags & M_PKTHDR))
				M_COPY_PKTHDR(n, m);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t), m->m_len);
			n->m_len = m->m_len;
			n->m_next = m->m_next;
			if (mprev == NULL)
				m0 = n;			/* new head of chain */
			else
				mprev->m_next = n;	/* replace old mbuf */
			m_free(m);			/* release old mbuf */
			mprev = n;
			newipsecstat.ips_clcopied++;
		} else {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			newipsecstat.ips_clcoalesced++;
		}
	}
	return (m0);
}
void load_all_hostkeys() { int i; int disable_unset_keys = 1; int any_keys = 0; svr_opts.hostkey = new_sign_key(); for (i = 0; i < svr_opts.num_hostkey_files; i++) { char *hostkey_file = svr_opts.hostkey_files[i]; loadhostkey(hostkey_file, 1); m_free(hostkey_file); } #ifdef DROPBEAR_RSA loadhostkey(RSA_PRIV_FILENAME, 0); #endif #ifdef DROPBEAR_DSS loadhostkey(DSS_PRIV_FILENAME, 0); #endif #ifdef DROPBEAR_ECDSA loadhostkey(ECDSA_PRIV_FILENAME, 0); #endif #ifdef DROPBEAR_DELAY_HOSTKEY if (svr_opts.delay_hostkey) { disable_unset_keys = 0; } #endif #ifdef DROPBEAR_RSA if (disable_unset_keys && !svr_opts.hostkey->rsakey) { disablekey(DROPBEAR_SIGNKEY_RSA); } else { any_keys = 1; } #endif #ifdef DROPBEAR_DSS if (disable_unset_keys && !svr_opts.hostkey->dsskey) { disablekey(DROPBEAR_SIGNKEY_DSS); } else { any_keys = 1; } #endif #ifdef DROPBEAR_ECDSA #ifdef DROPBEAR_ECC_256 if ((disable_unset_keys || ECDSA_DEFAULT_SIZE != 256) && !svr_opts.hostkey->ecckey256) { disablekey(DROPBEAR_SIGNKEY_ECDSA_NISTP256); } else { any_keys = 1; } #endif #ifdef DROPBEAR_ECC_384 if ((disable_unset_keys || ECDSA_DEFAULT_SIZE != 384) && !svr_opts.hostkey->ecckey384) { disablekey(DROPBEAR_SIGNKEY_ECDSA_NISTP384); } else { any_keys = 1; } #endif #ifdef DROPBEAR_ECC_521 if ((disable_unset_keys || ECDSA_DEFAULT_SIZE != 521) && !svr_opts.hostkey->ecckey521) { disablekey(DROPBEAR_SIGNKEY_ECDSA_NISTP521); } else { any_keys = 1; } #endif #endif /* DROPBEAR_ECDSA */ if (!any_keys) { dropbear_exit("No hostkeys available. 'dropbear -R' may be useful or run dropbearkey."); } }
static void clear(void) { if(len_txt>LIM_T) {m_free(text); text=(char*)m_alloc(len_txt=LEN_T,sizeof(char));} windup(); }
/*
 * Cut <len> bytes out of the mbuf chain starting <offset> bytes past
 * <mbufSeg>.  A negative <len> cuts to the end of the chain.  Returns
 * the mbuf following the cut, MBUF_NONE when the cut ran off the end,
 * or NULL on error (errno set).
 */
MBUF_SEG _mbufCut
    (
    MBUF_ID 	mbufId,		/* mbuf ID from which bytes are cut */
    MBUF_SEG 	mbufSeg,	/* mbuf base for <offset> */
    int 	offset,		/* relative byte offset */
    int 	len		/* number of bytes to cut */
    )
    {
    MBUF_ID	mbufIdNew;	/* dummy ID for dup operation */
    MBUF_SEG *	pMbufPrev;	/* mbuf prev deleted mbuf */
    int		length;		/* length of each cut */

    /* find the mbuf ptr previous to the cut */

    if ((pMbufPrev = _mbufSegFindPrev (mbufId, mbufSeg, &offset)) == NULL)
	return (NULL);

    if ((mbufSeg = *pMbufPrev) == NULL)		/* find cut mbuf */
        {
	errno = S_mbufLib_SEGMENT_NOT_FOUND;
	return (NULL);
	}

    if (len < 0)				/* negative = rest of chain */
        len = INT_MAX;

    while (len && (mbufSeg != NULL))		/* while more to cut */
	{
	/* Bytes to remove from this mbuf: bounded by what remains of it. */
	length = min (len, (mbufSeg->m_len - offset));	/* num for cut */
        len -= length;

	if (offset != 0)			/* if !cutting off front... */
	    {
	    if (mbufSeg->m_len != (offset + length))/* cut from middle*/
		{
                /* duplicate portion remaining after bytes to be cut */

		if ((mbufIdNew = _mbufDup (mbufId, mbufSeg, offset + length,
		    mbufSeg->m_len - offset - length)) == NULL)
		    return (NULL);

		/* Splice the duplicated tail in after the shortened mbuf. */
		mbufIdNew->mbufHead->m_next = mbufSeg->m_next;
		mbufSeg->m_next = mbufIdNew->mbufHead;/* hook in saved data */
		mbufSeg->m_len = offset;	/* shorten to later portion */
		MBUF_ID_DELETE_EMPTY(mbufIdNew);/* delete dup ID */
		return (mbufSeg->m_next);	/* return next real mbuf */
		}
	    else				/* cut to end */
		{
		mbufSeg->m_len -= length;	/* decrease by len deleted */
		pMbufPrev = &mbufSeg->m_next;	/* update previous */
		mbufSeg = mbufSeg->m_next;	/* bump current mbuf to next */
		}

	    offset = 0;				/* no more offset */
	    }
	else					/* cutting off front... */
	    {
	    if (length == mbufSeg->m_len)	/* cutting whole mbuf ? */
		{
		mbufSeg = m_free (mbufSeg);	/* free and get next mbuf */
		*pMbufPrev = mbufSeg;		/* hook prev to next mbuf */
		}
	    else				/* cut off front portion */
		{
		mbufSeg->m_data += length;	/* bump up offset */
		mbufSeg->m_len -= length;	/* taken from front */
		}
	    }
	}

    if (mbufSeg == NULL)			/* special case - cut off end */
	return (MBUF_NONE);

    return (mbufSeg);				/* return next real mbuf */
    }
void tty_detached(struct ttys *ttyp, int exiting) { struct ttys *ttyp_tmp, *ttyp_last = 0; DEBUG_CALL("tty_detached"); DEBUG_ARG("ttyp = %lx", (long)ttyp); DEBUG_ARG("exiting = %d", exiting); /* First, remove ttyp from the queue */ if (ttyp == ttys) { ttys = ttys->next; } else { for (ttyp_tmp = ttys; ttyp_tmp; ttyp_tmp = ttyp_tmp->next) { if (ttyp_tmp == ttyp) break; ttyp_last = ttyp_tmp; } if (!ttyp_last) { /* XXX */ /* Can't find it *shrug* */ return; } ttyp_last->next = ttyp->next; } term_restore(ttyp); #ifdef FULL_BOLT fd_block(ttyp->fd); #endif /* Restore device mode */ if (ttyp->mode) fchmod(ttyp->fd, ttyp->mode); /* Bring the link down */ #ifdef USE_PPP /* * Call lcp_lowerdown if it's ppp */ if (ttyp->proto == PROTO_PPP) { lcp_lowerdown(ttyp->unit); phase = PHASE_DEAD; /* XXXXX */ } #endif /* * Kill the guardian, if it exists */ if (ttyp->pid) kill(ttyp->pid, SIGQUIT); /* * If this was the last tty and we're not restarting, exit */ if (!ttys && slirp_socket < 0 && !exiting) slirp_exit(0); close(ttyp->fd); if (ttyp->m) m_free(ttyp->m); /* * If this was the controlling tty, call ctty_detached */ if ((ttyp->flags & TTY_CTTY) && !exiting) ctty_detached(); #ifdef USE_PPP /* Deallocate compress data */ ppp_ccp_closed(ttyp); #endif ttys_unit[ttyp->unit] = 0; /* * If you love it, set it free() ... * If it comes back, we have a memory leak */ free(ttyp); detach_time = curtime; }
/* free the buffer's data and the buffer itself */ void buf_free(buffer* buf) { m_free(buf->data) m_free(buf); }
/*============================================================================*/
/* K-best labeling: returns the most probable label sequence for <o_seq>
 * (length seq_len), keeping at most <k> hypotheses per state at each
 * position.  *log_p receives the log probability of the returned labeling,
 * or 1.0 (representing -INF) when no valid path exists.  The returned
 * array of length seq_len is malloc'd; caller frees.  NULL is returned
 * for empty input or when no path was found. */
int *ghmm_dmodel_label_kbest (ghmm_dmodel * mo, int *o_seq, int seq_len, int k,
                              double *log_p)
{
#define CUR_PROC "ghmm_dl_kbest"
  int i, t, c, l, m;            /* counters */
  int no_oldHyps;               /* number of hypotheses until position t-1 */
  int b_index, i_id;            /* index for addressing states' b arrays */
  int no_labels = 0;
  int exists, g_nr;
  int *states_wlabel;
  int *label_max_out;
  char *str;

  /* logarithmized transition matrix A, log(a(i,j)) => log_a[i*N+j],
     1.0 for zero probability */
  double **log_a;

  /* matrix of hypotheses, holds for every position in the sequence a list
     of hypotheses */
  hypoList **h;
  hypoList *hP;

  /* vectors for rows in the matrices */
  int *hypothesis;

  /* pointer & prob. of the k most probable hypotheses for each state
     - matrices of dimensions #states x k:  argm(i,l) => argmaxs[i*k+l] */
  double *maxima;
  hypoList **argmaxs;

  /* pointer to & probability of most probable hypothesis in a certain state */
  hypoList *argmax;
  double sum;

  /* break if sequence empty or k<1: */
  if (seq_len <= 0 || k <= 0)
    return NULL;

  ARRAY_CALLOC (h, seq_len);

  /* 1. Initialization (extend empty hypothesis to #labels hypotheses of
     length 1): */

  /* get number of labels (= maximum label + 1) and add one to the maximum
     number of states with those labels */
  ARRAY_CALLOC (states_wlabel, mo->N);
  ARRAY_CALLOC (label_max_out, mo->N);
  for (i = 0; i < mo->N; i++) {
    c = mo->label[i];
    states_wlabel[c]++;
    if (c > no_labels)
      no_labels = c;
    if (mo->s[i].out_states > label_max_out[c])
      label_max_out[c] = mo->s[i].out_states;
  }
  /* add one to the maximum label to get the number of labels */
  no_labels++;
  ARRAY_REALLOC (states_wlabel, no_labels);
  ARRAY_REALLOC (label_max_out, no_labels);

  /* initialize h: one hypothesis per label reachable at t=0 */
  hP = h[0];
  for (i = 0; i < mo->N; i++) {
    if (mo->s[i].pi > KBEST_EPS) {
      /* printf("Found State %d with initial probability %f\n", i, mo->s[i].pi); */
      exists = 0;
      while (hP != NULL) {
        if (hP->hyp_c == mo->label[i]) {
          /* add entry to the gamma list */
          g_nr = hP->gamma_states;
          hP->gamma_id[g_nr] = i;
          hP->gamma_a[g_nr] =
            log (mo->s[i].pi) +
            log (mo->s[i].b[get_emission_index (mo, i, o_seq[0], 0)]);
          hP->gamma_states = g_nr + 1;
          exists = 1;
          break;
        }
        else
          hP = hP->next;
      }
      if (!exists) {
        ighmm_hlist_insert (&(h[0]), mo->label[i], NULL);
        /* initiallize gamma-array with safe size (number of states) and
           add the first entry */
        ARRAY_MALLOC (h[0]->gamma_a, states_wlabel[mo->label[i]]);
        ARRAY_MALLOC (h[0]->gamma_id, states_wlabel[mo->label[i]]);
        h[0]->gamma_id[0] = i;
        h[0]->gamma_a[0] =
          log (mo->s[i].pi) +
          log (mo->s[i].b[get_emission_index (mo, i, o_seq[0], 0)]);
        h[0]->gamma_states = 1;
        h[0]->chosen = 1;
      }
      hP = h[0];
    }
  }
  /* reallocating the gamma list to the real size */
  hP = h[0];
  while (hP != NULL) {
    ARRAY_REALLOC (hP->gamma_a, hP->gamma_states);
    ARRAY_REALLOC (hP->gamma_id, hP->gamma_states);
    hP = hP->next;
  }

  /* calculate transition matrix with logarithmic values: */
  log_a = kbest_buildLogMatrix (mo->s, mo->N);

  /* initialize temporary arrays: */
  ARRAY_MALLOC (maxima, mo->N * k);                  /* for each state save k */
  ARRAY_MALLOC (argmaxs, mo->N * k);


  /*------ Main loop: Cycle through the sequence: ------*/
  for (t = 1; t < seq_len; t++) {

    /* put o_seq[t-1] in emission history: */
    update_emission_history (mo, o_seq[t - 1]);

    /* 2. Propagate hypotheses forward and update gamma: */
    no_oldHyps =
      ighmm_hlist_prop_forward (mo, h[t - 1], &(h[t]), no_labels,
                                states_wlabel, label_max_out);
    /* printf("t = %d (%d), no of old hypotheses = %d\n", t, seq_len, no_oldHyps); */

    /*-- calculate new gamma: --*/
    hP = h[t];
    /* cycle through list of hypotheses */
    while (hP != NULL) {

      for (i = 0; i < hP->gamma_states; i++) {
        /* if hypothesis hP ends with label of state i:
           gamma(i,c):= log(sum(exp(a(j,i)*exp(oldgamma(j,old_c)))))
           + log(b[i](o_seq[t]))
           else: gamma(i,c):= -INF (represented by 1.0) */
        i_id = hP->gamma_id[i];
        hP->gamma_a[i] = ighmm_log_gamma_sum (log_a[i_id], &mo->s[i_id], hP->parent);
        b_index = get_emission_index (mo, i_id, o_seq[t], t);
        if (b_index < 0) {
          hP->gamma_a[i] = 1.0;
          /* Inside the warm-up window of a higher-order state this is
             expected; otherwise it is a real error worth logging. */
          if (mo->order[i_id] > t)
            continue;
          else {
            str = ighmm_mprintf (NULL, 0,
                          "i_id: %d, o_seq[%d]=%d\ninvalid emission index!\n",
                          i_id, t, o_seq[t]);
            GHMM_LOG(LCONVERTED, str);
            m_free (str);
          }
        }
        else
          hP->gamma_a[i] += log (mo->s[i_id].b[b_index]);
        /*printf("%g = %g\n", log(mo->s[i_id].b[b_index]), hP->gamma_a[i]); */
        if (hP->gamma_a[i] > 0.0) {
          GHMM_LOG(LCONVERTED, "gamma to large. ghmm_dl_kbest failed\n");
          exit (1);
        }
      }
      hP = hP->next;
    }

    /* 3. Choose the k most probable hypotheses for each state and discard
       all hypotheses that were not chosen: */

    /* initialize temporary arrays: */
    for (i = 0; i < mo->N * k; i++) {
      maxima[i] = 1.0;
      argmaxs[i] = NULL;
    }

    /* cycle through hypotheses & calculate the k most probable hypotheses
       for each state: */
    hP = h[t];
    while (hP != NULL) {
      for (i = 0; i < hP->gamma_states; i++) {
        i_id = hP->gamma_id[i];
        if (hP->gamma_a[i] > KBEST_EPS)
          continue;
        /* find first best hypothesis that is worse than current hypothesis: */
        for (l = 0;
             l < k && maxima[i_id * k + l] < KBEST_EPS
             && maxima[i_id * k + l] > hP->gamma_a[i]; l++);
        if (l < k) {
          /* for each m>l: m'th best hypothesis becomes (m+1)'th best */
          for (m = k - 1; m > l; m--) {
            argmaxs[i_id * k + m] = argmaxs[i_id * k + m - 1];
            maxima[i_id * k + m] = maxima[i_id * k + m - 1];
          }
          /* save new l'th best hypothesis: */
          maxima[i_id * k + l] = hP->gamma_a[i];
          argmaxs[i_id * k + l] = hP;
        }
      }
      hP = hP->next;
    }

    /* set 'chosen' for all hypotheses from argmaxs array: */
    for (i = 0; i < mo->N * k; i++)
      /* only choose hypotheses whose prob. is at least threshold*max_prob */
      /* NOTE(review): maxima[(i % mo->N) * k] looks suspicious — the best
         value for the state owning slot i should be at maxima[(i / k) * k].
         Confirm against the intended argm(i,l) => argmaxs[i*k+l] layout. */
      if (maxima[i] != 1.0
          && maxima[i] >= KBEST_THRESHOLD + maxima[(i % mo->N) * k])
        argmaxs[i]->chosen = 1;

    /* remove hypotheses that were not chosen from the lists: */
    /* remove all hypotheses till the first chosen one */
    while (h[t] != NULL && 0 == h[t]->chosen)
      ighmm_hlist_remove (&(h[t]));
    /* remove all other not chosen hypotheses */
    if (!h[t]) {
      GHMM_LOG(LCONVERTED, "No chosen hypothesis. ghmm_dl_kbest failed\n");
      exit (1);
    }
    hP = h[t];
    while (hP->next != NULL) {
      if (1 == hP->next->chosen)
        hP = hP->next;
      else
        ighmm_hlist_remove (&(hP->next));
    }
  }

  /* dispose of temporary arrays: */
  m_free(states_wlabel);
  m_free(label_max_out);
  m_free(argmaxs);
  m_free(maxima);
  /* transition matrix is no longer needed from here on */
  for (i=0; i<mo->N; i++)
    m_free(log_a[i]);
  m_free(log_a);

  /* 4. Save the hypothesis with the highest probability over all states: */
  hP = h[seq_len - 1];
  argmax = NULL;
  *log_p = 1.0;                 /* log_p will store log of maximum summed probability */
  while (hP != NULL) {
    /* sum probabilities for each hypothesis over all states: */
    sum = ighmm_cvector_log_sum (hP->gamma_a, hP->gamma_states);
    /* and select maximum sum */
    if (sum < KBEST_EPS && (*log_p == 1.0 || sum > *log_p)) {
      *log_p = sum;
      argmax = hP;
    }
    hP = hP->next;
  }

  /* found a valid path? */
  if (*log_p < KBEST_EPS) {
    /* yes: extract chosen hypothesis by walking parent links backwards: */
    ARRAY_MALLOC (hypothesis, seq_len);
    for (i = seq_len - 1; i >= 0; i--) {
      hypothesis[i] = argmax->hyp_c;
      argmax = argmax->parent;
    }
  }
  else
    /* no: return 1.0 representing -INF and an empty hypothesis */
    hypothesis = NULL;

  /* dispose of calculation matrices: */
  hP = h[seq_len - 1];
  while (hP != NULL)
    ighmm_hlist_remove (&hP);
  free (h);
  return hypothesis;
STOP:     /* Label STOP from ARRAY_[CM]ALLOC */
  GHMM_LOG(LCONVERTED, "ghmm_dl_kbest failed\n");
  exit (1);
#undef CUR_PROC
}
/*============================================================================*/
/* Pair-HMM Viterbi with variable traceback start.  Aligns sequences X and Y,
 * writes the log probability of the best path to *log_p (or +1 representing
 * -INF when no path exists) and its length to *path_length, and returns the
 * malloc'd state sequence (caller frees).  start_traceback_with == -1 picks
 * the best end state; otherwise traceback is forced to start there (used by
 * the divide & conquer algorithm). */
int *ghmm_dpmodel_viterbi_variable_tb(ghmm_dpmodel *mo, ghmm_dpseq * X,
				      ghmm_dpseq * Y, double *log_p,
				      int *path_length,
				      int start_traceback_with)
{
#define CUR_PROC "ghmm_dpmodel_viterbi"
  int u, v, j, i, off_x, off_y, current_state_index;
  double value, max_value, previous_prob;
  plocal_store_t *pv;
  int *state_seq = NULL;
  int emission;
  double log_b_i, log_in_a_ij;
  double (*log_in_a)(plocal_store_t*, int, int, ghmm_dpseq*, ghmm_dpseq*,
		     int, int);

  /* printf("---- viterbi -----\n"); */
  i_list * state_list;
  state_list = ighmm_list_init_list();
  log_in_a = &sget_log_in_a;
  /* int len_path  = mo->N*len; the length of the path is not known apriori */

/*   if (mo->model_type & kSilentStates &&  */
/*       mo->silent != NULL &&  */
/*       mo->topo_order == NULL) { */
/*     ghmm_dmodel_topo_order( mo );  */
/*   } */
  /* Allocate the matrices log_in_a, log_b,Vektor phi, phi_new, Matrix psi */
  pv = pviterbi_alloc(mo, X->length, Y->length);
  if (!pv) {
    GHMM_LOG_QUEUED(LCONVERTED);
    goto STOP;
  }

  /* Precomputing the log(a_ij) and log(bj(ot)) */
  pviterbi_precompute(mo, pv);
  /* Initialize the lookback matrix (for positions [-offsetX,0], [-1, len_y]*/
  init_phi(pv, X, Y);

  /* u > max_offset_x , v starts -1 to allow states with offset_x == 0
     which corresponds to a series of gap states before reading the first
     character of x at position x=0, y=v */
  /** THIS IS THE MAIN RECURRENCE **/
  for (u = mo->max_offset_x + 1; u < X->length; u++) {
    for (v = -mo->max_offset_y; v < Y->length; v++) {
      /* +1 is the log-domain sentinel for "probability zero". */
      for (j = 0; j < mo->N; j++) {
	/** initialization of phi (lookback matrix), psi (traceback) **/
	set_phi(pv, u, v, j, +1);
	set_psi(pv, u, v, j, -1);
      }

      for (i = 0; i < mo->N; i++) {
	/* Determine the maximum */
	/* max_phi = phi[i] + log_in_a[j][i] ... */
	if (!(mo->model_type & GHMM_kSilentStates) || !mo->silent[i] ) {
	  max_value = -DBL_MAX;
	  set_psi(pv, u, v, i, -1);
	  for (j = 0; j < mo->s[i].in_states; j++) {
	    /* look back in the phi matrix at the offsets */
	    previous_prob = get_phi(pv, u, v, mo->s[i].offset_x,
				    mo->s[i].offset_y, mo->s[i].in_id[j]);
	    log_in_a_ij = (*log_in_a)(pv, i, j, X, Y, u, v);
	    if ( previous_prob != +1 && log_in_a_ij != +1) {
	      value = previous_prob + log_in_a_ij;
	      if (value > max_value) {
		max_value = value;
		set_psi(pv, u, v, i, mo->s[i].in_id[j]);
	      }
	    }
	    else
	      {;} /* fprintf(stderr, " %d --> %d = %f, \n", i,i,v->log_in_a[i][i]); */
	  }

	  emission = ghmm_dpmodel_pair(ghmm_dpseq_get_char(X, mo->s[i].alphabet, u),
				       ghmm_dpseq_get_char(Y, mo->s[i].alphabet, v),
				       mo->size_of_alphabet[mo->s[i].alphabet],
				       mo->s[i].offset_x, mo->s[i].offset_y);
#ifdef DEBUG
	  if (emission > ghmm_dpmodel_emission_table_size(mo, i)){
	    printf("State %i\n", i);
	    ghmm_dpmodel_state_print(&(mo->s[i]));
	    printf("charX: %i charY: %i alphabet size: %i emission table: %i emission index: %i\n",
		   ghmm_dpseq_get_char(X, mo->s[i].alphabet, u),
		   ghmm_dpseq_get_char(Y, mo->s[i].alphabet, v),
		   mo->size_of_alphabet[mo->s[i].alphabet],
		   ghmm_dpmodel_emission_table_size(mo, i), emission);
	  }
#endif
	  log_b_i = log_b(pv, i, ghmm_dpmodel_pair(ghmm_dpseq_get_char(X, mo->s[i].alphabet, u),
						   ghmm_dpseq_get_char(Y, mo->s[i].alphabet, v),
						   mo->size_of_alphabet[mo->s[i].alphabet],
						   mo->s[i].offset_x, mo->s[i].offset_y));

	  /* No maximum found (that is, state never reached)
	     or the output O[t] = 0.0: */
	  if (max_value == -DBL_MAX ||/* and then also: (v->psi[t][j] == -1) */
	      log_b_i == +1 ) {
	    set_phi(pv, u, v, i, +1);
	  }
	  else
	    set_phi(pv, u, v, i, max_value + log_b_i);
	}
      } /* complete time step for emitting states */

	/* last_osc = osc; */
        /* save last transition class */

      /*if (mo->model_type & kSilentStates) {
	p__viterbi_silent( mo, t, v );
	}*/ /* complete time step for silent states */

      /**************
    for (j = 0; j < mo->N; j++)
      {
	printf("\npsi[%d],in:%d, phi=%f\n", t, v->psi[t][j], v->phi[j]);
       }

    for (i = 0; i < mo->N; i++){
      printf("%d\t", former_matchcount[i]);
    }
    for (i = 0; i < mo->N; i++){
      printf("%d\t", recent_matchcount[i]);
    }
      ****************/
    } /* End for v in Y */
    /* Next character in X */
    /* Rotate the phi window so row u becomes the lookback row. */
    push_back_phi(pv, Y->length);
  } /* End for u in X */

  /* Termination */
  max_value = -DBL_MAX;
  ighmm_list_append(state_list, -1);
  /* if start_traceback_with is -1 (it is by default) search for the most
     likely state at the end of both sequences */
  if (start_traceback_with == -1) {
    for (j = 0; j < mo->N; j++){
      /* NOTE(review): the loop above left u == X->length; these get_phi
	 calls use u while the assignment uses X->length-1 — confirm the
	 intended row given the push_back_phi window semantics. */
#ifdef DEBUG
      printf("phi(len_x)(len_y)(%i)=%f\n", j, get_phi(pv, u, Y->length-1, 0, 0, j));
#endif
      if ( get_phi(pv, u, Y->length-1, 0, 0, j) != +1 &&
	   get_phi(pv, u, Y->length-1, 0, 0, j) > max_value) {
	max_value = get_phi(pv, X->length-1, Y->length-1, 0, 0, j);
	state_list->last->val = j;
      }
    }
  }
  /* this is the special traceback mode for the d & c algorithm that also
     connects the traceback to the first state of the rest of the path */
  else {
#ifdef DEBUG
    printf("D & C traceback from state %i!\n", start_traceback_with);
    printf("Last characters emitted X: %i, Y: %i\n",
	   ghmm_dpseq_get_char(X, mo->s[start_traceback_with].alphabet,
			       X->length-1),
	   ghmm_dpseq_get_char(Y, mo->s[start_traceback_with].alphabet,
			       Y->length-1));
    for (j = 0; j < mo->N; j++){
      printf("phi(len_x)(len_y)(%i)=%f\n", j, get_phi(pv, X->length-1,
						      Y->length-1, 0, 0, j));
    }
#endif
    max_value = get_phi(pv, X->length-1, Y->length-1, 0, 0,
			start_traceback_with);
    if (max_value != 1 && max_value > -DBL_MAX)
      state_list->last->val = start_traceback_with;
  }
  if (max_value == -DBL_MAX) {
    /* Sequence can't be generated from the model! */
    *log_p = +1;
    /* Backtracing doesn't work, because state_seq[*] allocated with -1 */
    /* for (t = len - 2; t >= 0; t--)
       state_list->last->val = -1;    */
  }
  else {
    /* Backtracing, should put DEL path nicely */
    *log_p = max_value;
    /* removed the handling of silent states here */
    /* start trace back at the end of both sequences */
    u = X->length - 1;
    v = Y->length - 1;
    current_state_index = state_list->first->val;
    off_x = mo->s[current_state_index].offset_x;
    off_y = mo->s[current_state_index].offset_y;
    while (u - off_x >= -1 && v - off_y >= -1 && current_state_index != -1) {
      /* while (u > 0 && v > 0) { */
      /* look up the preceding state and save it in the first position of the
	 state list */
      /* printf("Current state %i at (%i,%i) -> preceding state %i\n",
	 current_state_index, u, v, get_psi(pv, u, v, current_state_index)); */
      /* update the current state */
      current_state_index = get_psi(pv, u, v, current_state_index);
      if (current_state_index != -1)
	ighmm_list_insert(state_list, current_state_index);
      /* move in the alignment matrix */
      u -= off_x;
      v -= off_y;
      /* get the next offsets */
      off_x = mo->s[current_state_index].offset_x;
      off_y = mo->s[current_state_index].offset_y;
    }
  }

  /* Free the memory space */
  pviterbi_free(&pv, mo->N, X->length, Y->length, mo->max_offset_x ,
		mo->max_offset_y);
  /* printf("After traceback: last state = %i\n", state_list->last->val); */
  state_seq = ighmm_list_to_array(state_list);
  *path_length = state_list->length;
  /* PRINT PATH */

/*   fprintf(stderr, "Viterbi path: " ); */
/*   int t; */
/*   for(t=0; t < *path_length; t++) */
/*     if (state_seq[t] >= 0) fprintf(stderr, " %d ",  state_seq[t]); */
/*   fprintf(stderr, "\n Freeing ... \n"); */
  return (state_seq);
STOP:     /* Label STOP from ARRAY_[CM]ALLOC */
  /* Free the memory space */
  pviterbi_free(&pv, mo->N, X->length, Y->length, mo->max_offset_x,
		mo->max_offset_y);
  m_free(state_seq);
  ighmm_list_free(state_list);
  return NULL;
#undef CUR_PROC
} /* viterbi */
int CreateRecentItemMenu(char *pszFileName, char *pszCommand, char *pszTitle, char *pszIcon, int nKeep, int nSort, bool bBeginEnd){ static ItemList *KeepItemList; static ItemList *SortItemList; char szFilePath[MAX_PATH]; char buf[MAX_PATH]; ItemList *iln = (ItemList*)m_alloc(sizeof(ItemList)); // make path (compliant with relative-path) make_bb_path(szFilePath, pszFileName); // Newer Item if (pszIcon && *pszIcon) _snprintf(buf, MAX_PATH, "\t[exec] (%s) {%s} <%s>", pszTitle, pszCommand, pszIcon); else _snprintf(buf, MAX_PATH, "\t[exec] (%s) {%s}", pszTitle, pszCommand); _strcpy(iln->szItem, buf); iln->nFrequency = 1; append_node(&KeepItemList, iln); int cnt_k = 0; // keep items counter int cnt_s = 0; // sort items counter if (FILE *fp = fopen(szFilePath, "r")){ // read Recent-menu while(fgets(buf, MAX_PATH, fp)){ if (strstr(buf, "[end]")) break; else if (strstr(buf, "[exec]")){ if (buf[strlen(buf)-1] == '\n') buf[strlen(buf)-1] = '\0'; char *p0 = strchr(buf, '{'); // command char *p1 = strchr(buf, '}'); // end of command if (p0 && p1){ UINT nFrequency = 0; if (char *p = strrchr(buf, '#')){ nFrequency = atoi(p+1); while(*(p--) == ' '); // delete space of eol *p = '\0'; } if (0 != strnicmp(pszCommand, p0+1 , p1 - p0 - 1)){ // ignore duplicated item ItemList *il = (ItemList*)m_alloc(sizeof(ItemList)); _strcpy(il->szItem, buf); il->nFrequency = imin(nFrequency, UINT_MAX); if (cnt_k++ < nKeep - 1) append_node(&KeepItemList, il); else if (cnt_s++ < nSort) append_node(&SortItemList, il); else m_free(il); } else{ // if duplicate, increment freq iln->nFrequency += imin(nFrequency, UINT_MAX - 1); } } } } fclose(fp); } bool isSortList = listlen(SortItemList) != 0; bool isKeepList = listlen(KeepItemList) != 0; if (isSortList) SortItemList = sortlist(SortItemList); if (FILE *fp = fopen(szFilePath, "w")){ // write Recent-menu if (bBeginEnd) fprintf(fp, "[begin] (Recent Item)\n"); // most recent item ItemList *p; dolist (p, KeepItemList){ fprintf(fp, "%s #%u\n", p->szItem, 
p->nFrequency); } if (isKeepList && isSortList) fprintf(fp, "\t[separator]\n"); dolist (p, SortItemList){ fprintf(fp, "%s #%u\n", p->szItem, p->nFrequency); }
/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 *
 * Parameters:
 *   m      - mbuf containing the received IP datagram.  NULL means we are
 *            resuming a deferred non-blocking connect (see cont_conn:),
 *            in which case the saved mbuf/header are taken from inso.
 *   iphlen - length of the IP header in front of the TCP header; IP
 *            options, if any, are stripped below.
 *   inso   - only used when m == NULL: the socket whose connect completed.
 *
 * Ownership: the mbuf is consumed on every path out of this function
 * (appended to a socket buffer, reused for a reply segment, saved on the
 * socket, or freed) -- callers must not touch m afterwards.
 */
void
tcp_input(struct mbuf *m, int iphlen, struct socket *inso)
{
    struct ip save_ip, *ip;
    register struct tcpiphdr *ti;
    caddr_t optp = NULL;
    int optlen = 0;
    int len, tlen, off;
    register struct tcpcb *tp = NULL;
    register int tiflags;
    struct socket *so = NULL;
    int todrop, acked, ourfinisacked, needoutput = 0;
    int iss = 0;
    u_long tiwin;
    int ret;
    struct ex_list *ex_ptr;
    Slirp *slirp;

    DEBUG_CALL("tcp_input");
    DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
                (long )m, iphlen, (long )inso ));

    /*
     * If called with m == 0, then we're continuing the connect
     */
    if (m == NULL) {
        so = inso;
        slirp = so->slirp;

        /* Re-set a few variables from the state saved when the
         * connect was started (so_m/so_ti stashed below). */
        tp = sototcpcb(so);
        m = so->so_m;
        so->so_m = NULL;
        ti = so->so_ti;
        tiwin = ti->ti_win;
        tiflags = ti->ti_flags;

        goto cont_conn;
    }
    slirp = m->slirp;

    /*
     * Get IP and TCP header together in first mbuf.
     * Note: IP leaves IP header in first mbuf.
     */
    ti = mtod(m, struct tcpiphdr *);
    if (iphlen > sizeof(struct ip )) {
        ip_stripoptions(m, (struct mbuf *)0);
        iphlen=sizeof(struct ip );
    }
    /* XXX Check if too short */

    /*
     * Save a copy of the IP header in case we want restore it
     * for sending an ICMP error message in response.
     */
    ip=mtod(m, struct ip *);
    save_ip = *ip;
    save_ip.ip_len+= iphlen;

    /*
     * Checksum extended TCP header and data.
     * The IP overlay fields are zeroed so the buffer checksums as a
     * pseudo-header + TCP segment.
     */
    tlen = ((struct ip *)ti)->ip_len;
    tcpiphdr2qlink(ti)->next = tcpiphdr2qlink(ti)->prev = NULL;
    memset(&ti->ti_i.ih_mbuf, 0 , sizeof(struct mbuf_ptr));
    ti->ti_x1 = 0;
    ti->ti_len = htons((u_int16_t)tlen);
    len = sizeof(struct ip ) + tlen;
    if(cksum(m, len)) {
        goto drop;
    }

    /*
     * Check that TCP offset makes sense,
     * pull out TCP options and adjust length.              XXX
     */
    off = ti->ti_off << 2;
    if (off < sizeof (struct tcphdr) || off > tlen) {
        goto drop;
    }
    tlen -= off;
    ti->ti_len = tlen;
    if (off > sizeof (struct tcphdr)) {
        optlen = off - sizeof (struct tcphdr);
        optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
    }
    tiflags = ti->ti_flags;

    /*
     * Convert TCP protocol specific fields to host format.
     */
    NTOHL(ti->ti_seq);
    NTOHL(ti->ti_ack);
    NTOHS(ti->ti_win);
    NTOHS(ti->ti_urp);

    /*
     * Drop TCP, IP headers and TCP options.
     * (Reversed byte-for-byte on the ICMP error path below.)
     */
    m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
    m->m_len  -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);

    /* In restricted mode only destinations on the exec list are allowed. */
    if (slirp->restricted) {
        for (ex_ptr = slirp->exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
            if (ex_ptr->ex_fport == ti->ti_dport &&
                ti->ti_dst.s_addr == ex_ptr->ex_addr.s_addr) {
                break;
            }
        }
        if (!ex_ptr)
            goto drop;
    }

    /*
     * Locate pcb for segment.
     * tcp_last_so is a one-entry cache in front of solookup().
     */
findso:
    so = slirp->tcp_last_so;
    if (so->so_fport != ti->ti_dport ||
        so->so_lport != ti->ti_sport ||
        so->so_laddr.s_addr != ti->ti_src.s_addr ||
        so->so_faddr.s_addr != ti->ti_dst.s_addr) {
        so = solookup(&slirp->tcb, ti->ti_src, ti->ti_sport,
                      ti->ti_dst, ti->ti_dport);
        if (so)
            slirp->tcp_last_so = so;
    }

    /*
     * If the state is CLOSED (i.e., TCB does not exist) then
     * all data in the incoming segment is discarded.
     * If the TCB exists but is in CLOSED state, it is embryonic,
     * but should either do a listen or a connect soon.
     *
     * state == CLOSED means we've done socreate() but haven't
     * attached it to a protocol yet...
     *
     * XXX If a TCB does not exist, and the TH_SYN flag is
     * the only flag set, then create a session, mark it
     * as if it was LISTENING, and continue...
     */
    if (so == NULL) {
        if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
            goto dropwithreset;

        if ((so = socreate(slirp)) == NULL)
            goto dropwithreset;
        if (tcp_attach(so) < 0) {
            free(so); /* Not sofree (if it failed, it's not insqued) */
            goto dropwithreset;
        }

        sbreserve(&so->so_snd, TCP_SNDSPACE);
        sbreserve(&so->so_rcv, TCP_RCVSPACE);

        so->so_laddr = ti->ti_src;
        so->so_lport = ti->ti_sport;
        so->so_faddr = ti->ti_dst;
        so->so_fport = ti->ti_dport;

        if ((so->so_iptos = tcp_tos(so)) == 0)
            so->so_iptos = ((struct ip *)ti)->ip_tos;

        tp = sototcpcb(so);
        tp->t_state = TCPS_LISTEN;
    }

    /*
     * If this is a still-connecting socket, this probably
     * a retransmit of the SYN. Whether it's a retransmit SYN
     * or something else, we nuke it.
     */
    if (so->so_state & SS_ISFCONNECTING)
        goto drop;

    tp = sototcpcb(so);

    /* XXX Should never fail */
    if (tp == NULL)
        goto dropwithreset;
    if (tp->t_state == TCPS_CLOSED)
        goto drop;

    tiwin = ti->ti_win;

    /*
     * Segment received on connection.
     * Reset idle time and keep-alive timer.
     */
    tp->t_idle = 0;
    if (SO_OPTIONS)
        tp->t_timer[TCPT_KEEP] = TCPTV_KEEPINTVL;
    else
        tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_IDLE;

    /*
     * Process options if not in LISTEN state,
     * else do it below (after getting remote address).
     */
    if (optp && tp->t_state != TCPS_LISTEN)
        tcp_dooptions(tp, (u_char *)optp, optlen, ti);

    /*
     * Header prediction: check for the two common cases
     * of a uni-directional data xfer.  If the packet has
     * no control flags, is in-sequence, the window didn't
     * change and we're not retransmitting, it's a
     * candidate.  If the length is zero and the ack moved
     * forward, we're the sender side of the xfer.  Just
     * free the data acked & wake any higher level process
     * that was blocked waiting for space.  If the length
     * is non-zero and the ack didn't move, we're the
     * receiver side.  If we're getting packets in-order
     * (the reassembly queue is empty), add the data to
     * the socket buffer and note that we need a delayed ack.
     *
     * XXX Some of these tests are not needed
     * eg: the tiwin == tp->snd_wnd prevents many more
     * predictions.. with no *real* advantage..
     */
    if (tp->t_state == TCPS_ESTABLISHED &&
        (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
        ti->ti_seq == tp->rcv_nxt &&
        tiwin && tiwin == tp->snd_wnd &&
        tp->snd_nxt == tp->snd_max) {
        if (ti->ti_len == 0) {
            if (SEQ_GT(ti->ti_ack, tp->snd_una) &&
                SEQ_LEQ(ti->ti_ack, tp->snd_max) &&
                tp->snd_cwnd >= tp->snd_wnd) {
                /*
                 * this is a pure ack for outstanding data.
                 */
                if (tp->t_rtt &&
                    SEQ_GT(ti->ti_ack, tp->t_rtseq))
                    tcp_xmit_timer(tp, tp->t_rtt);
                acked = ti->ti_ack - tp->snd_una;
                sbdrop(&so->so_snd, acked);
                tp->snd_una = ti->ti_ack;
                m_freem(m);

                /*
                 * If all outstanding data are acked, stop
                 * retransmit timer, otherwise restart timer
                 * using current (possibly backed-off) value.
                 * If process is waiting for space,
                 * wakeup/selwakeup/signal.  If data
                 * are ready to send, let tcp_output
                 * decide between more output or persist.
                 */
                if (tp->snd_una == tp->snd_max)
                    tp->t_timer[TCPT_REXMT] = 0;
                else if (tp->t_timer[TCPT_PERSIST] == 0)
                    tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;

                /*
                 * This is called because sowwakeup might have
                 * put data into so_snd.  Since we don't so sowwakeup,
                 * we don't need this.. XXX???
                 */
                if (so->so_snd.sb_cc)
                    (void) tcp_output(tp);

                return;
            }
        } else if (ti->ti_ack == tp->snd_una &&
                   tcpfrag_list_empty(tp) &&
                   ti->ti_len <= sbspace(&so->so_rcv)) {
            /*
             * this is a pure, in-sequence data packet
             * with nothing on the reassembly queue and
             * we have enough buffer space to take it.
             */
            tp->rcv_nxt += ti->ti_len;
            /*
             * Add data to socket buffer.
             * (tcp_emu may consume/modify the data for emulated
             * protocols; it returns nonzero when m should still
             * be appended.)
             */
            if (so->so_emu) {
                if (tcp_emu(so,m)) sbappend(so, m);
            } else
                sbappend(so, m);

            /*
             * If this is a short packet, then ACK now - with Nagel
             * congestion avoidance sender won't send more until
             * he gets an ACK.
             *
             * It is better to not delay acks at all to maximize
             * TCP throughput.  See RFC 2581.
             */
            tp->t_flags |= TF_ACKNOW;
            tcp_output(tp);
            return;
        }
    } /* header prediction */

    /*
     * Calculate amount of space in receive window,
     * and then do TCP input processing.
     * Receive window is amount of space in rcv queue,
     * but not less than advertised window.
     */
    {
        int win;
        win = sbspace(&so->so_rcv);
        if (win < 0)
            win = 0;
        tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
    }

    switch (tp->t_state) {

    /*
     * If the state is LISTEN then ignore segment if it contains an RST.
     * If the segment contains an ACK then it is bad and send a RST.
     * If it does not contain a SYN then it is not interesting; drop it.
     * Don't bother responding if the destination was a broadcast.
     * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
     * tp->iss, and send a segment:
     *     <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
     * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
     * Fill in remote peer address fields if not previously specified.
     * Enter SYN_RECEIVED state, and process any other fields of this
     * segment in this state.
     */
    case TCPS_LISTEN: {

        if (tiflags & TH_RST)
            goto drop;
        if (tiflags & TH_ACK)
            goto dropwithreset;
        if ((tiflags & TH_SYN) == 0)
            goto drop;

        /*
         * This has way too many gotos...
         * But a bit of spaghetti code never hurt anybody :)
         */

        /*
         * If this is destined for the control address, then flag to
         * tcp_ctl once connected, otherwise connect
         */
        if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) ==
            slirp->vnetwork_addr.s_addr) {
            if (so->so_faddr.s_addr != slirp->vhost_addr.s_addr &&
                so->so_faddr.s_addr != slirp->vnameserver_addr.s_addr) {
                /* May be an add exec */
                for (ex_ptr = slirp->exec_list; ex_ptr;
                     ex_ptr = ex_ptr->ex_next) {
                    if(ex_ptr->ex_fport == so->so_fport &&
                       so->so_faddr.s_addr == ex_ptr->ex_addr.s_addr) {
                        so->so_state |= SS_CTL;
                        break;
                    }
                }
                if (so->so_state & SS_CTL) {
                    goto cont_input;
                }
            }
            /* CTL_ALIAS: Do nothing, tcp_fconnect will be called on it */
        }

        if (so->so_emu & EMU_NOCONNECT) {
            so->so_emu &= ~EMU_NOCONNECT;
            goto cont_input;
        }

        if((tcp_fconnect(so) == -1) &&
           (errno != EINPROGRESS) && (errno != EWOULDBLOCK)) {
            u_char code=ICMP_UNREACH_NET;
            DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
                        errno,strerror(errno)));
            if(errno == ECONNREFUSED) {
                /* ACK the SYN, send RST to refuse the connection */
                tcp_respond(tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
                            TH_RST|TH_ACK);
            } else {
                if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
                HTONL(ti->ti_seq);             /* restore tcp header */
                HTONL(ti->ti_ack);
                HTONS(ti->ti_win);
                HTONS(ti->ti_urp);
                /* undo the header-stripping done earlier so the mbuf
                 * again holds a full IP packet for icmp_error() */
                m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
                m->m_len  += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
                *ip=save_ip;
                icmp_error(m, ICMP_UNREACH,code, 0,strerror(errno));
            }
            tp = tcp_close(tp);
            m_free(m);
        } else {
            /*
             * Haven't connected yet, save the current mbuf
             * and ti, and return
             * XXX Some OS's don't tell us whether the connect()
             * succeeded or not.  So we must time it out.
             */
            so->so_m = m;
            so->so_ti = ti;
            tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
            tp->t_state = TCPS_SYN_RECEIVED;
        }
        return;

    cont_conn:
        /* m==NULL
         * Check if the connect succeeded
         */
        if (so->so_state & SS_NOFDREF) {
            tp = tcp_close(tp);
            goto dropwithreset;
        }
    cont_input:
        tcp_template(tp);

        if (optp)
            tcp_dooptions(tp, (u_char *)optp, optlen, ti);

        if (iss)
            tp->iss = iss;
        else
            tp->iss = slirp->tcp_iss;
        slirp->tcp_iss += TCP_ISSINCR/2;
        tp->irs = ti->ti_seq;
        tcp_sendseqinit(tp);
        tcp_rcvseqinit(tp);
        tp->t_flags |= TF_ACKNOW;
        tp->t_state = TCPS_SYN_RECEIVED;
        tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
        goto trimthenstep6;
    } /* case TCPS_LISTEN */

    /*
     * If the state is SYN_SENT:
     *	if seg contains an ACK, but not for our SYN, drop the input.
     *	if seg contains a RST, then drop the connection.
     *	if seg does not contain SYN, then drop it.
     *	Otherwise this is an acceptable SYN segment
     *	initialize tp->rcv_nxt and tp->irs
     *	if seg contains ack then advance tp->snd_una
     *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
     *	arrange for segment to be acked (eventually)
     *	continue processing rest of data/controls, beginning with URG
     */
    case TCPS_SYN_SENT:
        if ((tiflags & TH_ACK) &&
            (SEQ_LEQ(ti->ti_ack, tp->iss) ||
             SEQ_GT(ti->ti_ack, tp->snd_max)))
            goto dropwithreset;

        if (tiflags & TH_RST) {
            if (tiflags & TH_ACK)
                tp = tcp_drop(tp,0); /* XXX Check t_softerror! */
            goto drop;
        }

        if ((tiflags & TH_SYN) == 0)
            goto drop;
        if (tiflags & TH_ACK) {
            tp->snd_una = ti->ti_ack;
            if (SEQ_LT(tp->snd_nxt, tp->snd_una))
                tp->snd_nxt = tp->snd_una;
        }

        tp->t_timer[TCPT_REXMT] = 0;
        tp->irs = ti->ti_seq;
        tcp_rcvseqinit(tp);
        tp->t_flags |= TF_ACKNOW;
        if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
            soisfconnected(so);
            tp->t_state = TCPS_ESTABLISHED;

            (void) tcp_reass(tp, (struct tcpiphdr *)0,
                             (struct mbuf *)0);
            /*
             * if we didn't have to retransmit the SYN,
             * use its rtt as our initial srtt & rtt var.
             */
            if (tp->t_rtt)
                tcp_xmit_timer(tp, tp->t_rtt);
        } else
            tp->t_state = TCPS_SYN_RECEIVED;

    trimthenstep6:
        /*
         * Advance ti->ti_seq to correspond to first data byte.
         * If data, trim to stay within window,
         * dropping FIN if necessary.
         */
        ti->ti_seq++;
        if (ti->ti_len > tp->rcv_wnd) {
            todrop = ti->ti_len - tp->rcv_wnd;
            m_adj(m, -todrop);
            ti->ti_len = tp->rcv_wnd;
            tiflags &= ~TH_FIN;
        }
        tp->snd_wl1 = ti->ti_seq - 1;
        tp->rcv_up = ti->ti_seq;
        goto step6;
    } /* switch tp->t_state */

    /*
     * States other than LISTEN or SYN_SENT.
     * Check that at least some bytes of segment are within
     * receive window.  If segment begins before rcv_nxt,
     * drop leading data (and SYN); if nothing left, just ack.
     */
    todrop = tp->rcv_nxt - ti->ti_seq;
    if (todrop > 0) {
        if (tiflags & TH_SYN) {
            tiflags &= ~TH_SYN;
            ti->ti_seq++;
            if (ti->ti_urp > 1)
                ti->ti_urp--;
            else
                tiflags &= ~TH_URG;
            todrop--;
        }
        /*
         * Following if statement from Stevens, vol. 2, p. 960.
         */
        if (todrop > ti->ti_len
            || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) {
            /*
             * Any valid FIN must be to the left of the window.
             * At this point the FIN must be a duplicate or out
             * of sequence; drop it.
             */
            tiflags &= ~TH_FIN;
            /*
             * Send an ACK to resynchronize and drop any data.
             * But keep on processing for RST or ACK.
             */
            tp->t_flags |= TF_ACKNOW;
            todrop = ti->ti_len;
        }
        m_adj(m, todrop);
        ti->ti_seq += todrop;
        ti->ti_len -= todrop;
        if (ti->ti_urp > todrop)
            ti->ti_urp -= todrop;
        else {
            tiflags &= ~TH_URG;
            ti->ti_urp = 0;
        }
    }
    /*
     * If new data are received on a connection after the
     * user processes are gone, then RST the other end.
     */
    if ((so->so_state & SS_NOFDREF) &&
        tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) {
        tp = tcp_close(tp);
        goto dropwithreset;
    }

    /*
     * If segment ends after window, drop trailing data
     * (and PUSH and FIN); if nothing left, just ACK.
     */
    todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
    if (todrop > 0) {
        if (todrop >= ti->ti_len) {
            /*
             * If a new connection request is received
             * while in TIME_WAIT, drop the old connection
             * and start over if the sequence numbers
             * are above the previous ones.
             */
            if (tiflags & TH_SYN &&
                tp->t_state == TCPS_TIME_WAIT &&
                SEQ_GT(ti->ti_seq, tp->rcv_nxt)) {
                iss = tp->rcv_nxt + TCP_ISSINCR;
                tp = tcp_close(tp);
                goto findso;
            }
            /*
             * If window is closed can only take segments at
             * window edge, and have to drop data and PUSH from
             * incoming segments.  Continue processing, but
             * remember to ack.  Otherwise, drop segment
             * and ack.
             */
            if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) {
                tp->t_flags |= TF_ACKNOW;
            } else {
                goto dropafterack;
            }
        }
        m_adj(m, -todrop);
        ti->ti_len -= todrop;
        tiflags &= ~(TH_PUSH|TH_FIN);
    }

    /*
     * If the RST bit is set examine the state:
     *    SYN_RECEIVED STATE:
     *	If passive open, return to LISTEN state.
     *	If active open, inform user that connection was refused.
     *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
     *	Inform user that connection was reset, and close tcb.
     *    CLOSING, LAST_ACK, TIME_WAIT STATES
     *	Close the tcb.
     */
    if (tiflags&TH_RST) switch (tp->t_state) {

    case TCPS_SYN_RECEIVED:
    case TCPS_ESTABLISHED:
    case TCPS_FIN_WAIT_1:
    case TCPS_FIN_WAIT_2:
    case TCPS_CLOSE_WAIT:
        tp->t_state = TCPS_CLOSED;
        tp = tcp_close(tp);
        goto drop;

    case TCPS_CLOSING:
    case TCPS_LAST_ACK:
    case TCPS_TIME_WAIT:
        tp = tcp_close(tp);
        goto drop;
    }

    /*
     * If a SYN is in the window, then this is an
     * error and we send an RST and drop the connection.
     */
    if (tiflags & TH_SYN) {
        tp = tcp_drop(tp,0);
        goto dropwithreset;
    }

    /*
     * If the ACK bit is off we drop the segment and return.
     */
    if ((tiflags & TH_ACK) == 0) goto drop;

    /*
     * Ack processing.
     */
    switch (tp->t_state) {
    /*
     * In SYN_RECEIVED state if the ack ACKs our SYN then enter
     * ESTABLISHED state and continue processing, otherwise
     * send an RST.  una<=ack<=max
     */
    case TCPS_SYN_RECEIVED:

        if (SEQ_GT(tp->snd_una, ti->ti_ack) ||
            SEQ_GT(ti->ti_ack, tp->snd_max))
            goto dropwithreset;
        tp->t_state = TCPS_ESTABLISHED;
        /*
         * The sent SYN is ack'ed with our sequence number +1
         * The first data byte already in the buffer will get
         * lost if no correction is made.  This is only needed for
         * SS_CTL since the buffer is empty otherwise.
         * tp->snd_una++; or:
         */
        tp->snd_una=ti->ti_ack;
        if (so->so_state & SS_CTL) {
            /* So tcp_ctl reports the right state */
            ret = tcp_ctl(so);
            if (ret == 1) {
                soisfconnected(so);
                so->so_state &= ~SS_CTL;   /* success XXX */
            } else if (ret == 2) {
                so->so_state &= SS_PERSISTENT_MASK;
                so->so_state |= SS_NOFDREF; /* CTL_CMD */
            } else {
                needoutput = 1;
                tp->t_state = TCPS_FIN_WAIT_1;
            }
        } else {
            soisfconnected(so);
        }

        (void) tcp_reass(tp, (struct tcpiphdr *)0, (struct mbuf *)0);
        tp->snd_wl1 = ti->ti_seq - 1;
        /* Avoid ack processing; snd_una==ti_ack  =>  dup ack */
        goto synrx_to_est;
        /* fall into ... */

    /*
     * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
     * ACKs.  If the ack is in the range
     *	tp->snd_una < ti->ti_ack <= tp->snd_max
     * then advance tp->snd_una to ti->ti_ack and drop
     * data from the retransmission queue.  If this ACK reflects
     * more up to date window information we update our window information.
     */
    case TCPS_ESTABLISHED:
    case TCPS_FIN_WAIT_1:
    case TCPS_FIN_WAIT_2:
    case TCPS_CLOSE_WAIT:
    case TCPS_CLOSING:
    case TCPS_LAST_ACK:
    case TCPS_TIME_WAIT:

        if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) {
            if (ti->ti_len == 0 && tiwin == tp->snd_wnd) {
                DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
                            (long )m, (long )so));
                /*
                 * If we have outstanding data (other than
                 * a window probe), this is a completely
                 * duplicate ack (ie, window info didn't
                 * change), the ack is the biggest we've
                 * seen and we've seen exactly our rexmt
                 * threshold of them, assume a packet
                 * has been dropped and retransmit it.
                 * Kludge snd_nxt & the congestion
                 * window so we send only this one
                 * packet.
                 *
                 * We know we're losing at the current
                 * window size so do congestion avoidance
                 * (set ssthresh to half the current window
                 * and pull our congestion window back to
                 * the new ssthresh).
                 *
                 * Dup acks mean that packets have left the
                 * network (they're now cached at the receiver)
                 * so bump cwnd by the amount in the receiver
                 * to keep a constant cwnd packets in the
                 * network.
                 */
                if (tp->t_timer[TCPT_REXMT] == 0 ||
                    ti->ti_ack != tp->snd_una)
                    tp->t_dupacks = 0;
                else if (++tp->t_dupacks == TCPREXMTTHRESH) {
                    /* fast retransmit */
                    tcp_seq onxt = tp->snd_nxt;
                    u_int win =
                        min(tp->snd_wnd, tp->snd_cwnd) / 2 /
                        tp->t_maxseg;

                    if (win < 2)
                        win = 2;
                    tp->snd_ssthresh = win * tp->t_maxseg;
                    tp->t_timer[TCPT_REXMT] = 0;
                    tp->t_rtt = 0;
                    tp->snd_nxt = ti->ti_ack;
                    tp->snd_cwnd = tp->t_maxseg;
                    (void) tcp_output(tp);
                    tp->snd_cwnd = tp->snd_ssthresh +
                        tp->t_maxseg * tp->t_dupacks;
                    if (SEQ_GT(onxt, tp->snd_nxt))
                        tp->snd_nxt = onxt;
                    goto drop;
                } else if (tp->t_dupacks > TCPREXMTTHRESH) {
                    /* fast recovery: inflate cwnd per extra dup ack */
                    tp->snd_cwnd += tp->t_maxseg;
                    (void) tcp_output(tp);
                    goto drop;
                }
            } else
                tp->t_dupacks = 0;
            break;
        }
    synrx_to_est:
        /*
         * If the congestion window was inflated to account
         * for the other side's cached packets, retract it.
         */
        if (tp->t_dupacks > TCPREXMTTHRESH &&
            tp->snd_cwnd > tp->snd_ssthresh)
            tp->snd_cwnd = tp->snd_ssthresh;
        tp->t_dupacks = 0;
        if (SEQ_GT(ti->ti_ack, tp->snd_max)) {
            goto dropafterack;
        }
        acked = ti->ti_ack - tp->snd_una;

        /*
         * If transmit timer is running and timed sequence
         * number was acked, update smoothed round trip time.
         * Since we now have an rtt measurement, cancel the
         * timer backoff (cf., Phil Karn's retransmit alg.).
         * Recompute the initial retransmit timer.
         */
        if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
            tcp_xmit_timer(tp,tp->t_rtt);

        /*
         * If all outstanding data is acked, stop retransmit
         * timer and remember to restart (more output or persist).
         * If there is more data to be acked, restart retransmit
         * timer, using current (possibly backed-off) value.
         */
        if (ti->ti_ack == tp->snd_max) {
            tp->t_timer[TCPT_REXMT] = 0;
            needoutput = 1;
        } else if (tp->t_timer[TCPT_PERSIST] == 0)
            tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
        /*
         * When new data is acked, open the congestion window.
         * If the window gives us less than ssthresh packets
         * in flight, open exponentially (maxseg per packet).
         * Otherwise open linearly: maxseg per window
         * (maxseg^2 / cwnd per packet).
         */
        {
            register u_int cw = tp->snd_cwnd;
            register u_int incr = tp->t_maxseg;

            if (cw > tp->snd_ssthresh)
                incr = incr * incr / cw;
            tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
        }
        if (acked > so->so_snd.sb_cc) {
            /* ack covers our FIN as well as all buffered data */
            tp->snd_wnd -= so->so_snd.sb_cc;
            sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
            ourfinisacked = 1;
        } else {
            sbdrop(&so->so_snd, acked);
            tp->snd_wnd -= acked;
            ourfinisacked = 0;
        }
        tp->snd_una = ti->ti_ack;
        if (SEQ_LT(tp->snd_nxt, tp->snd_una))
            tp->snd_nxt = tp->snd_una;

        switch (tp->t_state) {

        /*
         * In FIN_WAIT_1 STATE in addition to the processing
         * for the ESTABLISHED state if our FIN is now acknowledged
         * then enter FIN_WAIT_2.
         */
        case TCPS_FIN_WAIT_1:
            if (ourfinisacked) {
                /*
                 * If we can't receive any more
                 * data, then closing user can proceed.
                 * Starting the timer is contrary to the
                 * specification, but if we don't get a FIN
                 * we'll hang forever.
                 */
                if (so->so_state & SS_FCANTRCVMORE) {
                    tp->t_timer[TCPT_2MSL] = TCP_MAXIDLE;
                }
                tp->t_state = TCPS_FIN_WAIT_2;
            }
            break;

        /*
         * In CLOSING STATE in addition to the processing for
         * the ESTABLISHED state if the ACK acknowledges our FIN
         * then enter the TIME-WAIT state, otherwise ignore
         * the segment.
         */
        case TCPS_CLOSING:
            if (ourfinisacked) {
                tp->t_state = TCPS_TIME_WAIT;
                tcp_canceltimers(tp);
                tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
            }
            break;

        /*
         * In LAST_ACK, we may still be waiting for data to drain
         * and/or to be acked, as well as for the ack of our FIN.
         * If our FIN is now acknowledged, delete the TCB,
         * enter the closed state and return.
         */
        case TCPS_LAST_ACK:
            if (ourfinisacked) {
                tp = tcp_close(tp);
                goto drop;
            }
            break;

        /*
         * In TIME_WAIT state the only thing that should arrive
         * is a retransmission of the remote FIN.  Acknowledge
         * it and restart the finack timer.
         */
        case TCPS_TIME_WAIT:
            tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
            goto dropafterack;
        }
    } /* switch(tp->t_state) */

step6:
    /*
     * Update window information.
     * Don't look at window if no ACK: TAC's send garbage on first SYN.
     */
    if ((tiflags & TH_ACK) &&
        (SEQ_LT(tp->snd_wl1, ti->ti_seq) ||
         (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) ||
          (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) {
        tp->snd_wnd = tiwin;
        tp->snd_wl1 = ti->ti_seq;
        tp->snd_wl2 = ti->ti_ack;
        if (tp->snd_wnd > tp->max_sndwnd)
            tp->max_sndwnd = tp->snd_wnd;
        needoutput = 1;
    }

    /*
     * Process segments with URG.
     */
    if ((tiflags & TH_URG) && ti->ti_urp &&
        TCPS_HAVERCVDFIN(tp->t_state) == 0) {
        /*
         * This is a kludge, but if we receive and accept
         * random urgent pointers, we'll crash in
         * soreceive.  It's hard to imagine someone
         * actually wanting to send this much urgent data.
         */
        if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) {
            ti->ti_urp = 0;
            tiflags &= ~TH_URG;
            goto dodata;
        }
        /*
         * If this segment advances the known urgent pointer,
         * then mark the data stream.  This should not happen
         * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
         * a FIN has been received from the remote side.
         * In these states we ignore the URG.
         *
         * According to RFC961 (Assigned Protocols),
         * the urgent pointer points to the last octet
         * of urgent data.  We continue, however,
         * to consider it to indicate the first octet
         * of data past the urgent section as the original
         * spec states (in one of two places).
         */
        if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) {
            tp->rcv_up = ti->ti_seq + ti->ti_urp;
            so->so_urgc = so->so_rcv.sb_cc +
                (tp->rcv_up - tp->rcv_nxt); /* -1; */
            tp->rcv_up = ti->ti_seq + ti->ti_urp;
        }
    } else
        /*
         * If no out of band data is expected,
         * pull receive urgent pointer along
         * with the receive window.
         */
        if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
            tp->rcv_up = tp->rcv_nxt;
dodata:

    /*
     * Process the segment text, merging it into the TCP sequencing queue,
     * and arranging for acknowledgment of receipt if necessary.
     * This process logically involves adjusting tp->rcv_wnd as data
     * is presented to the user (this happens in tcp_usrreq.c,
     * case PRU_RCVD).  If a FIN has already been received on this
     * connection then we just ignore the text.
     */
    if ((ti->ti_len || (tiflags&TH_FIN)) &&
        TCPS_HAVERCVDFIN(tp->t_state) == 0) {
        TCP_REASS(tp, ti, m, so, tiflags);
        /*
         * Note the amount of data that peer has sent into
         * our window, in order to estimate the sender's
         * buffer size.
         */
        len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
    } else {
        m_free(m);
        tiflags &= ~TH_FIN;
    }

    /*
     * If FIN is received ACK the FIN and let the user know
     * that the connection is closing.
     */
    if (tiflags & TH_FIN) {
        if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
            /*
             * If we receive a FIN we can't send more data,
             * set it SS_FDRAIN
             * Shutdown the socket if there is no rx data in the
             * buffer.
             * soread() is called on completion of shutdown() and
             * will got to TCPS_LAST_ACK, and use tcp_output()
             * to send the FIN.
             */
            sofwdrain(so);

            tp->t_flags |= TF_ACKNOW;
            tp->rcv_nxt++;
        }
        switch (tp->t_state) {

        /*
         * In SYN_RECEIVED and ESTABLISHED STATES
         * enter the CLOSE_WAIT state.
         */
        case TCPS_SYN_RECEIVED:
        case TCPS_ESTABLISHED:
            if(so->so_emu == EMU_CTL)        /* no shutdown on socket */
                tp->t_state = TCPS_LAST_ACK;
            else
                tp->t_state = TCPS_CLOSE_WAIT;
            break;

        /*
         * If still in FIN_WAIT_1 STATE FIN has not been acked so
         * enter the CLOSING state.
         */
        case TCPS_FIN_WAIT_1:
            tp->t_state = TCPS_CLOSING;
            break;

        /*
         * In FIN_WAIT_2 state enter the TIME_WAIT state,
         * starting the time-wait timer, turning off the other
         * standard timers.
         */
        case TCPS_FIN_WAIT_2:
            tp->t_state = TCPS_TIME_WAIT;
            tcp_canceltimers(tp);
            tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
            break;

        /*
         * In TIME_WAIT state restart the 2 MSL time_wait timer.
         */
        case TCPS_TIME_WAIT:
            tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
            break;
        }
    }

    /*
     * If this is a small packet, then ACK now - with Nagel
     *      congestion avoidance sender won't send more until
     *      he gets an ACK.
     *
     * See above.
     *
     * NOTE(review): first_char == 27 looks like a heuristic to ACK
     * interactive keystrokes beginning with ASCII ESC (e.g. telnet
     * escape sequences) promptly -- not established by this file;
     * confirm against the tcpiphdr_2 definition.
     */
    if (ti->ti_len && (unsigned)ti->ti_len <= 5 &&
        ((struct tcpiphdr_2 *)ti)->first_char == (char)27) {
        tp->t_flags |= TF_ACKNOW;
    }

    /*
     * Return any desired output.
     */
    if (needoutput || (tp->t_flags & TF_ACKNOW)) {
        (void) tcp_output(tp);
    }
    return;

dropafterack:
    /*
     * Generate an ACK dropping incoming segment if it occupies
     * sequence space, where the ACK reflects our state.
     */
    if (tiflags & TH_RST)
        goto drop;
    m_freem(m);
    tp->t_flags |= TF_ACKNOW;
    (void) tcp_output(tp);
    return;

dropwithreset:
    /* reuses m if m!=NULL, m_free() unnecessary */
    if (tiflags & TH_ACK)
        tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
    else {
        if (tiflags & TH_SYN) ti->ti_len++;
        tcp_respond(tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
                    TH_RST|TH_ACK);
    }

    return;

drop:
    /*
     * Drop space held by incoming segment and return.
     */
    m_free(m);
    return;
}
/*
 * UDP input routine.
 *
 * On entry:
 *   m->m_data  points at ip packet header
 *   m->m_len   length ip packet
 *   ip->ip_len length data (IPDU)
 *
 * Validates the datagram (length, optional checksum), short-circuits the
 * built-in BOOTP/DHCP and TFTP services, finds or creates the socket for
 * the (source address, source port) pair, and forwards the payload with
 * sosendto().  The mbuf is consumed on every path: either freed (bad:)
 * or stashed on the socket as so_m for a later ICMP error.
 */
void
udp_input(register struct mbuf *m, int iphlen)
{
    Slirp *slirp = m->slirp;
    register struct ip *ip;
    register struct udphdr *uh;
    int len;
    struct ip save_ip;
    struct socket *so;

    DEBUG_CALL("udp_input");
    DEBUG_ARG("m = %lx", (long)m);
    DEBUG_ARG("iphlen = %d", iphlen);

    /*
     * Strip IP options, if any; should skip this,
     * make available to user, and use on returned packets,
     * but we don't yet have a way to check the checksum
     * with options still present.
     */
    if(iphlen > sizeof(struct ip)) {
        ip_stripoptions(m, (struct mbuf *)0);
        iphlen = sizeof(struct ip);
    }

    /*
     * Get IP and UDP header together in first mbuf.
     */
    ip = mtod(m, struct ip *);
    uh = (struct udphdr *)((caddr_t)ip + iphlen);

    /*
     * Make mbuf data length reflect UDP length.
     * If not enough data to reflect UDP length, drop.
     */
    len = ntohs((uint16_t)uh->uh_ulen);

    if (ip->ip_len != len) {
        if (len > ip->ip_len) {
            /* claimed UDP length exceeds what actually arrived */
            goto bad;
        }
        m_adj(m, len - ip->ip_len);
        ip->ip_len = len;
    }

    /*
     * Save a copy of the IP header in case we want restore it
     * for sending an ICMP error message in response.
     */
    save_ip = *ip;
    save_ip.ip_len+= iphlen;         /* tcp_input subtracts this */

    /*
     * Checksum extended UDP header and data.
     * The IP header is overlaid as a pseudo-header (ipovly) so the
     * whole buffer checksums as pseudo-header + UDP datagram.
     * A zero uh_sum means the sender did not compute a checksum.
     */
    if (uh->uh_sum) {
        memset(&((struct ipovly *)ip)->ih_mbuf, 0, sizeof(struct mbuf_ptr));
        ((struct ipovly *)ip)->ih_x1 = 0;
        ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
        if(cksum(m, len + sizeof(struct ip))) {
            goto bad;
        }
    }

    /*
     * handle DHCP/BOOTP
     */
    if (ntohs(uh->uh_dport) == BOOTP_SERVER &&
        (ip->ip_dst.s_addr == slirp->vhost_addr.s_addr ||
         ip->ip_dst.s_addr == 0xffffffff)) {
        bootp_input(m);
        goto bad;   /* mbuf no longer needed; 'bad' just frees it */
    }

    /*
     * handle TFTP
     */
    if (ntohs(uh->uh_dport) == TFTP_SERVER &&
        ip->ip_dst.s_addr == slirp->vhost_addr.s_addr) {
        tftp_input(m);
        goto bad;
    }

    /* In restricted mode, only the built-in services above are reachable. */
    if (slirp->restricted) {
        goto bad;
    }

    /*
     * Locate pcb for datagram.
     * udp_last_so is a one-entry cache in front of the udb list scan;
     * lookup is keyed on local (guest) address/port only.
     */
    so = slirp->udp_last_so;
    if (so->so_lport != uh->uh_sport ||
        so->so_laddr.s_addr != ip->ip_src.s_addr) {
        struct socket *tmp;

        for (tmp = slirp->udb.so_next; tmp != &slirp->udb;
             tmp = tmp->so_next) {
            if (tmp->so_lport == uh->uh_sport &&
                tmp->so_laddr.s_addr == ip->ip_src.s_addr) {
                so = tmp;
                break;
            }
        }
        if (tmp == &slirp->udb) {
            so = NULL;
        } else {
            slirp->udp_last_so = so;
        }
    }

    if (so == NULL) {
        /*
         * If there's no socket for this packet,
         * create one
         */
        so = socreate(slirp);
        if (!so) {
            goto bad;
        }
        if(udp_attach(so) == -1) {
            DEBUG_MISC((dfd," udp_attach errno = %d-%s\n",
                        errno,strerror(errno)));
            sofree(so);
            goto bad;
        }

        /*
         * Setup fields
         */
        so->so_laddr = ip->ip_src;
        so->so_lport = uh->uh_sport;

        if ((so->so_iptos = udp_tos(so)) == 0)
            so->so_iptos = ip->ip_tos;

        /*
         * XXXXX Here, check if it's in udpexec_list,
         * and if it is, do the fork_exec() etc.
         */
    }

    /* refresh the foreign endpoint on every datagram */
    so->so_faddr = ip->ip_dst;   /* XXX */
    so->so_fport = uh->uh_dport; /* XXX */

    /* advance past the IP + UDP headers so m holds only the payload */
    iphlen += sizeof(struct udphdr);
    m->m_len -= iphlen;
    m->m_data += iphlen;

    /*
     * Now we sendto() the packet.
     */
    if(sosendto(so,m) == -1) {
        /* restore full packet before handing it to icmp_error */
        m->m_len += iphlen;
        m->m_data -= iphlen;
        *ip=save_ip;
        DEBUG_MISC((dfd,"udp tx errno = %d-%s\n",errno,strerror(errno)));
        icmp_error(m, ICMP_UNREACH,ICMP_UNREACH_NET, 0,strerror(errno));
    }

    m_free(so->so_m);   /* used for ICMP if error on sorecvfrom */

    /* restore the orig mbuf packet and keep it on the socket so a
     * later receive error can be reported via ICMP */
    m->m_len += iphlen;
    m->m_data -= iphlen;
    *ip=save_ip;
    so->so_m=m;         /* ICMP backup */

    return;
bad:
    m_free(m);
}