void iq_test(void) {
  int i ;
  for(i=1;i<10000000;i++) {
    iq_t iq = iq_create(name) ;
    if ((i % 1) == 0) {		/* note: always true as written */
      eprintf("IQ:test(%d)\n", i) ;
    }
    while (1) {
      /* Four cases below, so draw from [0,4); the flattened original
       * passed 2, which would leave cases 2 and 3 unreachable.
       */
      switch(util_random(4)) {
      case 0: {
	marsh_t marsh = marsh_create(NULL) ;
	marsh_buf_t buf = marsh_to_buf(name, marsh) ;
	iovec_t iov = iovec_empty(name) ;	/* restored to match the other cases */
	marsh_free(marsh) ;
	if (!iq_assign(iq, util_random(IQ_CHECK_SIZE), buf, iov)) {
	  iovec_free(iov) ;
	  marsh_buf_free(buf) ;
	}
      } break ;
      case 1: {
	marsh_buf_t buf ;
	seqno_t seqno ;
	iovec_t iov ;
	if (!iq_get_prefix(iq, &seqno, &buf, &iov)) {
	  break ;
	}
	//eprintf("seqno=%d\n", seqno) ;
	iovec_free(iov) ;
	marsh_buf_free(buf) ;
	if (seqno >= IQ_CHECK_SIZE - 1) {
	  goto out ;
	}
      } break ;
      case 2: {
	marsh_t marsh = marsh_create(NULL) ;
	marsh_buf_t buf = marsh_to_buf(name, marsh) ;
	iovec_t iov = iovec_empty(name) ;
	marsh_free(marsh) ;
	iq_add(iq, buf, iov) ;
      } break ;
      case 3: {
	marsh_t marsh = marsh_create(NULL) ;
	marsh_buf_t buf = marsh_to_buf(name, marsh) ;
	iovec_t iov = iovec_empty(name) ;
	marsh_free(marsh) ;
	if (!iq_opt_insert_check_doread(iq, util_random(IQ_CHECK_SIZE), buf, iov)) {
	  iovec_free(iov) ;
	  marsh_buf_free(buf) ;
	}
      } break ;
      }
    }
  out:
    iq_free(iq) ;
  }
}
ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) {
  struct http_data *cookie = io_getcookie( sock );
  char *header;
  int i;
  size_t header_size, size = iovec_length( &iovec_entries, &iovector );
  tai6464 t;

  /* No cookie? Bad socket. Leave. */
  if( !cookie ) {
    iovec_free( &iovec_entries, &iovector );
    HTTPERROR_500;
  }

  /* If this socket collected its request in a buffer, free it now */
  array_reset( &cookie->request );

  /* If we came here, the wait for the answer is over */
  cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;

  /* Our answers never are 0 vectors. Return an error. */
  if( !iovec_entries ) {
    HTTPERROR_500;
  }

  /* Prepare space for http header */
  header = malloc( SUCCESS_HTTP_HEADER_LENGTH + SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING );
  if( !header ) {
    iovec_free( &iovec_entries, &iovector );
    HTTPERROR_500;
  }

  if( cookie->flag & STRUCT_HTTP_FLAG_GZIP )
    header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: gzip\r\nContent-Length: %zd\r\n\r\n", size );
  else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 )
    header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: bzip2\r\nContent-Length: %zd\r\n\r\n", size );
  else
    header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r\n", size );

  iob_reset( &cookie->batch );
  iob_addbuf_free( &cookie->batch, header, header_size );

  /* Will move to ot_iovec.c */
  for( i=0; i<iovec_entries; ++i )
    iob_addbuf_munmap( &cookie->batch, iovector[i].iov_base, iovector[i].iov_len );
  free( iovector );

  /* writeable sockets timeout after 10 minutes */
  taia_now( &t );
  taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND );
  io_timeout( sock, t );
  io_dontwantread( sock );
  io_wantwrite( sock );
  return 0;
}
static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) {
  char *r;

  *iovec_entries = 0;
  *iovector = NULL;
  if( !( r = iovec_increase( iovec_entries, iovector, OT_STATS_TMPSIZE ) ) )
    return;

  switch( mode & TASK_TASK_MASK ) {
    case TASK_STATS_TORRENTS:   r += stats_torrents_mrtg( r );     break;
    case TASK_STATS_PEERS:      r += stats_peers_mrtg( r );        break;
    case TASK_STATS_SLASH24S:   r += stats_slash24s_txt( r, 128 ); break;
    case TASK_STATS_TOP10:      r += stats_top_txt( r, 10 );       break;
    case TASK_STATS_TOP100:
      r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE );
      if( !r ) return;
      r += stats_top_txt( r, 100 );
      break;
    case TASK_STATS_EVERYTHING: r += stats_return_everything( r ); break;
#ifdef WANT_SPOT_WOODPECKER
    case TASK_STATS_WOODPECKERS:
      r += stats_return_woodpeckers( r, 128 );
      break;
#endif
#ifdef WANT_FULLLOG_NETWORKS
    case TASK_STATS_FULLLOG:
      stats_return_fulllog( iovec_entries, iovector, r );
      return;
#endif
    default:
      iovec_free( iovec_entries, iovector );
      return;
  }
  iovec_fixlast( iovec_entries, iovector, r );
}
static void linop_matrix_del(const linop_data_t* _data)
{
	const struct operator_matrix_s* data = CAST_DOWN(operator_matrix_s, _data);

	iovec_free(data->mat_iovec);
	iovec_free(data->mat_gram_iovec);
	iovec_free(data->domain_iovec);
	iovec_free(data->codomain_iovec);

	free((void*)data->max_dims);

	md_free((void*)data->mat);
	md_free((void*)data->mat_conj);
	md_free((void*)data->mat_gram);

	free((void*)data);
}
static void identity_free(const linop_data_t* _data)
{
	const struct identity_data_s* data = CAST_DOWN(identity_data_s, _data);

	iovec_free(data->domain);
	free((void*)data);
}
static void * stats_worker( void * args ) {
  int iovec_entries;
  struct iovec *iovector;

  (void) args;

  while( 1 ) {
    ot_tasktype tasktype = TASK_STATS;
    ot_taskid   taskid   = mutex_workqueue_poptask( &tasktype );
    stats_make( &iovec_entries, &iovector, tasktype );
    if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) )
      iovec_free( &iovec_entries, &iovector );
  }
  return NULL;
}
/*
 * Copyin an iovec.  If the iovec array fits, use the preallocated small
 * iovec structure.  If it is too big, dynamically allocate an iovec array
 * of sufficient size.
 *
 * MPSAFE
 */
int
iovec_copyin(const struct iovec *uiov, struct iovec **kiov, struct iovec *siov,
	     int iov_cnt, size_t *iov_len)
{
	struct iovec *iovp;
	int error, i;
	size_t len;

	if ((u_int)iov_cnt > UIO_MAXIOV)
		return EMSGSIZE;
	if (iov_cnt > UIO_SMALLIOV) {
		*kiov = kmalloc(sizeof(struct iovec) * iov_cnt, M_IOV,
				M_WAITOK);
	} else {
		*kiov = siov;
	}
	error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec));
	if (error == 0) {
		*iov_len = 0;
		for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++) {
			/*
			 * Check for both *iov_len overflows and out of
			 * range iovp->iov_len's.  We limit to the
			 * capabilities of signed integers.
			 *
			 * GCC4 - overflow check opt requires assign/test.
			 */
			len = *iov_len + iovp->iov_len;
			if (len < *iov_len)
				error = EINVAL;
			*iov_len = len;
		}
	}

	/*
	 * From userland disallow iovec's which exceed the signed size
	 * limit as the system calls return ssize_t.
	 *
	 * NOTE: Internal kernel interfaces can handle the unsigned
	 *	 limit.
	 */
	if (error == 0 && (ssize_t)*iov_len < 0)
		error = EINVAL;

	if (error)
		iovec_free(kiov, siov);
	return (error);
}
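/*
 * Hypothetical caller sketch (not part of the source above): it shows how
 * iovec_copyin() and iovec_free() are meant to be paired around a small
 * preallocated on-stack array.  The function name and locals
 * (example_copy_user_iovec, aiov, iov, len) are illustrative only; on
 * error iovec_copyin() has already released any dynamic allocation, so
 * the caller frees only on the success path.
 */
static int
example_copy_user_iovec(const struct iovec *uiov, int iovcnt)
{
	struct iovec aiov[UIO_SMALLIOV];	/* small preallocated array */
	struct iovec *iov;
	size_t len;
	int error;

	error = iovec_copyin(uiov, &iov, aiov, iovcnt, &len);
	if (error)
		return (error);		/* nothing left to free here */

	/* ... hand iov/len to the actual I/O path ... */

	iovec_free(&iov, aiov);		/* frees iov only if it != aiov */
	return (0);
}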
/* This is the entry point into this worker thread.
   It grabs tasks from mutex_tasklist and delivers results back. */
static void * fullscrape_worker( void * args ) {
  int iovec_entries;
  struct iovec *iovector;

  (void) args;

  while( 1 ) {
    ot_tasktype tasktype = TASK_FULLSCRAPE;
    ot_taskid   taskid   = mutex_workqueue_poptask( &tasktype );
    fullscrape_make( &iovec_entries, &iovector, tasktype );
    if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) )
      iovec_free( &iovec_entries, &iovector );
    if( !g_opentracker_running )
      return NULL;
  }
  return NULL;
}
static void identity_free(const void* data)
{
	iovec_free((const struct iovec_s*)data);
}