int main() {
    int time = 0;

    initialize();
    write_frame(time);
    for (time = 1; time < nsteps; time++) {
        update();
        if (time % wstep == 0)
            write_frame(time);
    }
    free(u);
    free(u_new);
    return 0;
}
int main(int argc, char *argv[]) {
    int i, j;
#ifdef _CIVL
    // elaborating nx, ny, NPROCSX and NPROCSY...
    //$elaborate(NPROCSY);
    //$elaborate(nx);
    //$elaborate(ny);
    //$elaborate(NPROCSX);
#endif
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &nprocs);
    initialization(argc, argv);
    initData();
    for (i = 0; i < nsteps; i++) {
        if (nxl != 0 && nyl != 0) {
            if (i % wstep == 0)
                write_frame(i);
            exchange();
            update();
        }
    }
    for (i = 0; i < nyl + 2; i++) {
        free(u_curr[i]);
        free(u_next[i]);
    }
    free(u_curr);
    free(u_next);
    if (rank == 0)
        free(recvbuf);
    return 0;
}
int lpd8806_init(lpd8806_buffer *buf, int leds) {
    buf->leds = leds;
    buf->size = (leds + 3) * sizeof(lpd8806_color);
    buf->buffer = (lpd8806_color *)malloc(buf->size);
    if (buf->buffer == NULL) {
        return -1;
    }
    buf->pixels = buf->buffer + 3;
    write_frame(buf->buffer, 0x00, 0x00, 0x00);
    write_frame(buf->buffer + 1, 0x00, 0x00, 0x00);
    write_frame(buf->buffer + 2, 0x00, 0x00, 0x00);
    return 0;
}
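A minimal usage sketch for the lpd8806_buffer API above. It assumes write_frame(p, r, g, b) fills one lpd8806_color frame, as lpd8806_init itself does, and that the caller releases buf.buffer directly since no dedicated free routine appears in these snippets.

/* Sketch only: allocate a 32-LED strip, blank every pixel, then release it.
 * The LED count and the direct free(strip.buffer) are assumptions for
 * illustration; they are not taken from the original code. */
static int example_blank_strip(void) {
    lpd8806_buffer strip;
    int i;

    if (lpd8806_init(&strip, 32) != 0)
        return -1;                              /* allocation failed */

    for (i = 0; i < strip.leds; i++)
        write_frame(strip.pixels + i, 0x00, 0x00, 0x00);

    /* ... transfer strip.buffer to the device over SPI ... */

    free(strip.buffer);
    return 0;
}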
/* handle_screenshot appends the screenshot to the video buffer */
int handle_screenshot(AVFormatContext *oc, AVStream *st, struct SwsContext *s_ctx,
                      int frame_count, long time_interval, char *filepath,
                      TouchActualizer *ta, int fps, long base_time) {
    FFMPEG_tmp *tmp;
    AVFrame *in_frame;

    tmp = picture_to_frame(filepath);
    in_frame = tmp->frame;
#ifdef DEBUG_FRAME
    printf("Begin writing picture\nCurrent frame: %d\n", frame_count);
#endif
    frame_count = write_frame(oc, st, s_ctx, in_frame, ta, fps, frame_count,
                              time_interval, base_time);
    tmp_free(tmp);
#ifdef DEBUG_FRAME
    printf("End writing picture\nCurrent frame: %d\n", frame_count);
#endif
#ifdef DEBUG_FRAME
    printf("Read file %s and wrote interval %ld\n", filepath, time_interval);
    printf("Written %d frames\n", interval_to_frames(time_interval, 25));
#endif
    return frame_count;
}
void integrate_psd() {
    /* Copy lines until Mot: line */
    while (!gzeof(infile)) {
        next_line(line, MAX_LINE);
        if (output == ICP) fputs(line, outfile);
        if (strncmp(line, " Mot:", 5) == 0) break;
    }

    /* Copy column header line */
    next_line(line, MAX_LINE);
    if (output == ICP) fputs(line, outfile);

    /* Process data */
    next_line(line, MAX_LINE);
    if (!gzeof(infile)) {
        points++;
        if (output == ICP) fputs(line, outfile);
        while (!gzeof(infile)) {
            /* process really ugly 2-D repr */
            next_frame();
            accumulate_bins();
            write_frame();
            if (line[0] != '\0') {
                points++;
                if (output == ICP) fputs(line, outfile);
            }
        }
    }
}
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost) {
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = { 0 };

    c = ost->enc;
    frame = get_video_frame(ost);
    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
/**
 * Service a Tx FIFO Empty interrupt.
 *
 * @brief Interrupt based transfer on SPI.
 * @param [in] spi Which SPI to transfer to.
 */
static __inline__ void handle_tx_interrupt(const qm_spi_t spi) {
    qm_spi_reg_t *const controller = QM_SPI[spi];
    const qm_spi_async_transfer_t *const transfer = spi_async_transfer[spi];

    /* Jump to the right position of TX buffer.
     * If no bytes were transmitted before, we start from the beginning,
     * otherwise we jump to the next frame to be sent. */
    const uint8_t *tx_buffer = transfer->tx + (tx_counter[spi] * dfs[spi]);

    int frames = SPI_FIFOS_DEPTH - controller->txflr - controller->rxflr - 1;

    while (frames > 0) {
        write_frame(spi, tx_buffer);
        tx_buffer += dfs[spi];
        tx_counter[spi]++;
        frames--;

        if (transfer->tx_len == tx_counter[spi]) {
            controller->txftlr = 0;
            break;
        }
    }
}
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost) {
    AVCodecContext *c;
    AVPacket pkt = { 0 }; /* data and size must be 0 */
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->st->codec;

    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format,
         * using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(
            swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
            c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally; make sure we do not overwrite it here */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx, ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count,
                                  (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n", av_err2str(ret));
            exit(1);
        }
    }

    return (frame || got_packet) ? 0 : 1;
}
int readflic(FILE *f1, int flag) {
    int a;
    long len, l, ll;

    len = dos_getw(f1);
    len += (long)dos_getw(f1) << 16;
    dos_getw(f1);
    pics = dos_getw(f1);
    wid = dos_getw(f1);
    hig = dos_getw(f1);
    fseek(f1, 0x80L, SEEK_SET);
    /*printf("\n\n\n\n\n\n\n\n\n\n\n");*/
    for (;;) {
        l = ftell(f1);
        l += (long)dos_getw(f1);
        l += (long)dos_getw(f1) << 16;
        a = dos_getw(f1);
        if (feof(f1)) break;
        if (a == 0xf1fa) {
            int a;
            blocksleft = dos_getw(f1);
            for (a = 0; a < 8; a++) getc(f1);
            ll = l;
        } else {
            doblock(f1, (unsigned)a);
            fseek(f1, l, SEEK_SET);
            blocksleft--;
            if (blocksleft == 0) {
                fseek(f1, ll, SEEK_SET);
                {
                    int x, y;
                    unsigned u;
                    for (y = 0; y < hig; y++) {
                        u = y * 320;
                        for (x = 0; x < wid; x++)
                            cnt[vram[u++]]++;
                    }
                }
                if (savefcp) {
                    static int framecnt = 0;
                    int x, y;
                    {
                        // outportb(0x3c8,7);
                        // outportb(0x3c9,47);
                        // outportb(0x3c9,47);
                        // outportb(0x3c9,47);
                        write_frame(vram, 1, 64000, out);
                    }
                }
                //if(flag&1) getch();
            }
        }
    }
    return 0;
}
// Report that a child has exited
void child_cb(EV_P_ ev_child *w, int revents) {
    char str[256];
    char format[] = "{\"processTerminated\" : "
                    " {\"pid\" : %d, \"returnCode\" : %d, \"bySignal\" : %d, \"endTime\" : %ld }}";
    struct timeval tp;
    gettimeofday(&tp, NULL);
    // End time in milliseconds since the epoch
    long int time = (long int) tp.tv_sec * 1000 + tp.tv_usec / 1000;
    snprintf(str, sizeof(str), format, w->rpid, WEXITSTATUS(w->rstatus),
             WSTOPSIG(w->rstatus), time);
    socket_t *s = w->data;
    ev_child_stop(EV_A_ w);

    frame_t *frame = malloc(sizeof(frame_t));
    frame->fin = 1;
    frame->opcode = WS_OP_BIN;
    frame->len = strlen(str);
    frame->payload = str;

    int fr_len;
    char *fr_str = write_frame(frame, &fr_len);
    socket_write(s, fr_str, fr_len);

    free(frame);
    free(fr_str);
    ilist_remove(processes, w->rpid);
}
void set_tag(const char *file_name, ID3v2_tag *tag) {
    if (tag == NULL) {
        return;
    }

    int padding = 2048;
    int old_size = tag->tag_header->tag_size;

    // Set the new tag header
    tag->tag_header = new_header();
    memcpy(tag->tag_header->tag, "ID3", 3);
    tag->tag_header->major_version = '\x03';
    tag->tag_header->minor_version = '\x00';
    tag->tag_header->flags = '\x00';
    tag->tag_header->tag_size = get_tag_size(tag) + padding;

    // Create temp file and prepare to write
    FILE *file;
    FILE *temp_file;
    file = fopen(file_name, "r+b");
    temp_file = tmpfile();

    // Write to file
    write_header(tag->tag_header, temp_file);
    ID3v2_frame_list *frame_list = tag->frames->start;
    while (frame_list != NULL) {
        write_frame(frame_list->frame, temp_file);
        frame_list = frame_list->next;
    }

    // Write padding
    int i;
    for (i = 0; i < padding; i++) {
        putc('\x00', temp_file);
    }

    fseek(file, old_size + 10, SEEK_SET);
    int c;
    while ((c = getc(file)) != EOF) {
        putc(c, temp_file);
    }

    // Write temp file data back to original file
    fseek(temp_file, 0, SEEK_SET);
    fseek(file, 0, SEEK_SET);
    while ((c = getc(temp_file)) != EOF) {
        putc(c, file);
    }

    fclose(file);
    fclose(temp_file);
}
static gint write_frame_cb (mowgli_dictionary_elem_t *elem, void *user) {
    WriteState *state = user;
    gint size;

    if (! write_frame (state->file, elem->data, &size))
        return -1;

    state->written_size += size;
    return 0;
}
/* Executes the simulation. */
int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);
    initialize();
    write_frame();
    printf("nx = %d\n", nx);
    for (time = 1; time < nsteps; time++) {
        exchange_ghost_cells();
        update();
        if (time % wstep == 0)
            write_frame();
    }
    MPI_Finalize();
    free(u);
    free(u_new);
    if (rank == 0)
        free(buf);
    return 0;
}
void write_gamma_color(lpd8806_color *p, uint8_t red, uint8_t green, uint8_t blue) {
    uint8_t flag;
    uint8_t gamma_corrected_red = gamma_table_red[red];
    uint8_t gamma_corrected_green = gamma_table_green[green];
    uint8_t gamma_corrected_blue = gamma_table_blue[blue];
    // flag = make_flag(gamma_corrected_red,gamma_corrected_green,gamma_corrected_blue);
    write_frame(p, gamma_corrected_red, gamma_corrected_green, gamma_corrected_blue);
}
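The gamma_table_red/green/blue lookup tables are not defined in these snippets; a common way to build such tables is a power-law curve. The sketch below is an assumption for illustration, using 8-bit input and output and a gamma of 2.5; the tables actually used above may be generated differently or tuned per channel.

#include <math.h>
#include <stdint.h>

/* Sketch: fill a 256-entry gamma lookup table with a power-law curve.
 * The exponent and output range are assumptions, not taken from the code above. */
static void build_gamma_table(uint8_t table[256], double gamma) {
    for (int i = 0; i < 256; i++)
        table[i] = (uint8_t)(pow(i / 255.0, gamma) * 255.0 + 0.5);
}

/* e.g. build_gamma_table(gamma_table_red, 2.5); */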
int qm_spi_irq_transfer(const qm_spi_t spi, const qm_spi_async_transfer_t *const xfer) {
    QM_CHECK(spi < QM_SPI_NUM, -EINVAL);
    QM_CHECK(xfer, -EINVAL);
    QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX_RX ? (xfer->tx_len == xfer->rx_len) : 1, -EINVAL);
    QM_CHECK(tmode[spi] == QM_SPI_TMOD_TX ? (xfer->rx_len == 0) : 1, -EINVAL);
    QM_CHECK(tmode[spi] == QM_SPI_TMOD_RX ? (xfer->tx_len == 0) : 1, -EINVAL);
    QM_CHECK(tmode[spi] == QM_SPI_TMOD_EEPROM_READ ? (xfer->tx_len && xfer->rx_len) : 1, -EINVAL);

    qm_spi_reg_t *const controller = QM_SPI[spi];

    /* If we are in RX only or EEPROM Read mode, the ctrlr1 reg holds how
     * many bytes the controller solicits, minus 1. We also set the same
     * into rxftlr, so the controller only triggers a RX_FIFO_FULL
     * interrupt when all frames are available at the FIFO for consumption. */
    if (xfer->rx_len) {
        controller->ctrlr1 = xfer->rx_len - 1;
        controller->rxftlr = (xfer->rx_len < SPI_FIFOS_DEPTH)
                                 ? xfer->rx_len - 1
                                 : SPI_DEFAULT_RX_THRESHOLD;
    }
    controller->txftlr = SPI_DEFAULT_TX_THRESHOLD;

    spi_async_transfer[spi] = xfer;
    tx_counter[spi] = 0;
    rx_counter[spi] = 0;

    /* Unmask interrupts */
    if (tmode[spi] == QM_SPI_TMOD_TX) {
        controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM;
    } else if (tmode[spi] == QM_SPI_TMOD_RX) {
        controller->imr = QM_SPI_IMR_RXUIM | QM_SPI_IMR_RXOIM | QM_SPI_IMR_RXFIM;
        controller->ssienr = QM_SPI_SSIENR_SSIENR;
        write_frame(spi, (uint8_t *)&tx_dummy_frame);
    } else {
        controller->imr = QM_SPI_IMR_TXEIM | QM_SPI_IMR_TXOIM | QM_SPI_IMR_RXUIM |
                          QM_SPI_IMR_RXOIM | QM_SPI_IMR_RXFIM;
    }

    controller->ssienr = QM_SPI_SSIENR_SSIENR; /** Enable SPI Device */

    return 0;
}
static int activate(AVFilterContext *ctx) {
    FPSContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    int ret;
    int again = 0;
    int64_t status_pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* No buffered status: normal operation */
    if (!s->status) {
        /* Read available input frames if we have room */
        while (s->frames_count < 2 && ff_inlink_check_available_frame(inlink)) {
            ret = read_frame(ctx, s, inlink, outlink);
            if (ret < 0)
                return ret;
        }

        /* We do not yet have enough frames to produce output */
        if (s->frames_count < 2) {
            /* Check if we've hit EOF (or otherwise that an error status is set) */
            ret = ff_inlink_acknowledge_status(inlink, &s->status, &status_pts);
            if (ret > 0)
                update_eof_pts(ctx, s, inlink, outlink, status_pts);

            if (!ret) {
                /* If someone wants us to output, we'd better ask for more input */
                FF_FILTER_FORWARD_WANTED(outlink, inlink);
                return 0;
            }
        }
    }

    /* Buffered frames are available, so generate an output frame */
    if (s->frames_count > 0) {
        ret = write_frame(ctx, s, outlink, &again);
        /* Couldn't generate a frame, so schedule us to perform another step */
        if (again)
            ff_filter_set_ready(ctx, 100);
        return ret;
    }

    /* No frames left, so forward the status */
    if (s->status && s->frames_count == 0) {
        ff_outlink_set_status(outlink, s->status, s->next_pts);
        return 0;
    }

    return FFERROR_NOT_READY;
}
static void write_frame_list (void *key, void *list, void *user) {
    WriteState *state = user;

    for (GList *node = list; node; node = node->next) {
        int size;
        if (write_frame (state->file, node->data, &size))
            state->written_size += size;
    }
}
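write_frame_list has the shape of a GLib GHFunc, so a plausible caller walks a hash table whose values are GLists of frames, as in the sketch below. The table, the helper name, and the WriteState layout beyond the two fields used above (file, written_size) are assumptions, not code from this snippet.

/* Sketch only: drive write_frame_list over a GHashTable of frame lists.
 * Assumes the caller has already filled in state->file. */
static int write_all_frames (GHashTable *frame_table, WriteState *state) {
    state->written_size = 0;
    g_hash_table_foreach (frame_table, write_frame_list, state);
    return state->written_size;
}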
/*
 * Purpose:
 * Input:
 * Return:
 */
void start_retry_write_thread(Channel *channel) {
    unsigned char msc_cmd[4];

    LOGMUX(LOG_DEBUG, "Enter");

    /* Send the MSC cmd with FC Off to the modem side */
    memcpy(msc_cmd, msc_channel_cmd, 4);
    msc_cmd[2] = (msc_cmd[2] | (channel->id << 2));
    msc_cmd[3] = (msc_cmd[3] | 0x02);

    LOGMUX(LOG_INFO, "Not all requested data are written into serial buffer at this time");
#ifndef __MUX_UT__
    write_frame(0, msc_cmd, 4, GSM0710_TYPE_UIH | GSM0710_PF);
#else
    if (channel->id <= MAX_NON_GEMINI_NON_DATA_CHNL_NUM)
        write_frame(0, msc_cmd, 4, GSM0710_TYPE_UIH | GSM0710_PF);
    else
        /* frame->channel > 5 are used as the UT Test Channel */
        set_mux_ut_rx_chnl_fc_flag(channel->id, FC_OFF_SENDING);
#endif

    /* After receiving the MSC FC OFF Rsp from the modem, the AP side should
     * keep storing the data already sent into the CCCI Driver */
    LOGMUX(LOG_INFO, "Set FC_OFF_SENDING and rx_fc_off as 1");
    pthread_mutex_lock(&channel->rx_fc_lock);
    if (channel->rx_thread)
        pthread_cond_wait(&channel->rx_fc_on_signal, &channel->rx_fc_lock);
    channel->rx_fc_off = 1;
    pthread_mutex_unlock(&channel->rx_fc_lock);

    // Start retry thread
    if (create_thread(&channel->push_thread_id, retry_write_pty_thread, (void *)channel) != 0) {
        LOGMUX(LOG_ERR, "Could not create thread retry pty write thread for channel=%d", channel->id);
        return;
    }
}
bool QVideoEncoder::encodeImage(const QImage &image, int frameIndex, QString *errorString/*=0*/) {
    if (!isOpen()) {
        if (errorString)
            *errorString = "Stream is not opened";
        return false;
    }

    // SWS conversion
    if (!convertImage_sws(image, errorString)) {
        return false;
    }

    AVPacket pkt = { 0 };
    av_init_packet(&pkt);

    // encode the image
    int got_packet = 0;
    {
        //compute correct timestamp based on the input frame index
        //int timestamp = ((m_ff->codecContext->time_base.num * 90000) / m_ff->codecContext->time_base.den) * frameIndex;
        m_ff->frame->pts = frameIndex/*timestamp*/;

        int ret = avcodec_encode_video2(m_ff->codecContext, &pkt, m_ff->frame, &got_packet);
        if (ret < 0) {
            char errorStr[AV_ERROR_MAX_STRING_SIZE] = {0};
            av_make_error_string(errorStr, AV_ERROR_MAX_STRING_SIZE, ret);
            if (errorString)
                *errorString = QString("Error encoding video frame: %1").arg(errorStr);
            return false;
        }
    }

    if (got_packet) {
        int ret = write_frame(m_ff, &pkt);
        if (ret < 0) {
            char errorStr[AV_ERROR_MAX_STRING_SIZE] = {0};
            av_make_error_string(errorStr, AV_ERROR_MAX_STRING_SIZE, ret);
            if (errorString)
                *errorString = QString("Error while writing video frame: %1").arg(errorStr);
            return false;
        }
    }

    av_packet_unref(&pkt);

    return true;
}
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost) {
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;

    c = ost->st->codec;

    frame = get_video_frame(ost);

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* a hack to avoid data copy with some raw video muxers */
        AVPacket pkt;
        av_init_packet(&pkt);

        if (!frame)
            return 1;

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = ost->st->index;
        pkt.data = (uint8_t *)frame;
        pkt.size = sizeof(AVPicture);

        pkt.pts = pkt.dts = frame->pts;
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }

        if (got_packet) {
            ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        } else {
            ret = 0;
        }
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
int llwrite(int fd, unsigned char *buffer, int length) {
    int CompleteFrames = length / (MAX_SIZE);
    int remainingBytes = length % (MAX_SIZE);
    int flag = 1;
    int i;

    linkLayer.alarm_inf = TRUE;
    linkLayer.sequenceNumber = 0;
    linkLayer.alarm_char = DATA;

    for (i = 0; i < CompleteFrames; i++)
        write_frame(fd, buffer, 0, &i, FALSE);

    if (remainingBytes > 0)
        write_frame(fd, buffer, remainingBytes, &i, TRUE);

    (void) signal(SIGALRM, Timeout);
    llclose(linkLayer.fd);

    return 0;
}
static void output_avro_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
                               Relation rel, ReorderBufferChange *change) {
    int err = 0;
    HeapTuple oldtuple = NULL, newtuple = NULL;
    plugin_state *state = ctx->output_plugin_private;
    MemoryContext oldctx = MemoryContextSwitchTo(state->memctx);
    reset_frame(state);

    switch (change->action) {
        case REORDER_BUFFER_CHANGE_INSERT:
            if (!change->data.tp.newtuple) {
                elog(ERROR, "output_avro_change: insert action without a tuple");
            }
            newtuple = &change->data.tp.newtuple->tuple;
            err = update_frame_with_insert(&state->frame_value, state->schema_cache,
                                           rel, RelationGetDescr(rel), newtuple);
            break;

        case REORDER_BUFFER_CHANGE_UPDATE:
            if (!change->data.tp.newtuple) {
                elog(ERROR, "output_avro_change: update action without a tuple");
            }
            if (change->data.tp.oldtuple) {
                oldtuple = &change->data.tp.oldtuple->tuple;
            }
            newtuple = &change->data.tp.newtuple->tuple;
            err = update_frame_with_update(&state->frame_value, state->schema_cache,
                                           rel, oldtuple, newtuple);
            break;

        case REORDER_BUFFER_CHANGE_DELETE:
            if (change->data.tp.oldtuple) {
                oldtuple = &change->data.tp.oldtuple->tuple;
            }
            err = update_frame_with_delete(&state->frame_value, state->schema_cache,
                                           rel, oldtuple);
            break;

        default:
            elog(ERROR, "output_avro_change: unknown change action %d", change->action);
    }

    if (err) {
        elog(ERROR, "output_avro_change: row conversion failed: %s", avro_strerror());
    }
    if (write_frame(ctx, state)) {
        elog(ERROR, "output_avro_change: writing Avro binary failed: %s", avro_strerror());
    }

    MemoryContextSwitchTo(oldctx);
    MemoryContextReset(state->memctx);
}
/* main: executes simulation, creates one output file for each time
 * step */
int main(int argc, char *argv[]) {
    int iter;

    MPI_Init(&argc, &argv);
    assert(argc == 2);
    init(argv[1]);
    write_frame(0);
    for (iter = 1; iter <= nsteps; iter++) {
        exchange_ghost_cells();
        update();
        if (iter % wstep == 0)
            write_frame(iter);
    }
    MPI_Finalize();
    free(u);
    free(u_new);
    if (rank == 0) {
        gdImageDestroy(previm);
        gdImageGifAnimEnd(file);
        fclose(file);
        free(buf);
        free(colors);
    }
    return 0;
}
static void output_avro_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) {
    plugin_state *state = ctx->output_plugin_private;
    MemoryContext oldctx = MemoryContextSwitchTo(state->memctx);
    reset_frame(state);

    if (update_frame_with_begin_txn(&state->frame_value, txn)) {
        elog(ERROR, "output_avro_begin_txn: Avro conversion failed: %s", avro_strerror());
    }
    if (write_frame(ctx, state)) {
        elog(ERROR, "output_avro_begin_txn: writing Avro binary failed: %s", avro_strerror());
    }

    MemoryContextSwitchTo(oldctx);
    MemoryContextReset(state->memctx);
}
bool QVideoEncoder::close() {
    if (!m_isOpen) {
        return false;
    }

    // delayed frames?
    while (true) {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        int got_packet = 0;
        int ret = avcodec_encode_video2(m_ff->codecContext, &pkt, 0, &got_packet);
        if (ret < 0 || !got_packet) {
            break;
        }

        write_frame(m_ff, &pkt);

        av_packet_unref(&pkt);
    }

    av_write_trailer(m_ff->formatContext);

    // close the codec
    avcodec_close(m_ff->videoStream->codec);

    // free the streams and other data
    freeFrame();
    for (unsigned i = 0; i < m_ff->formatContext->nb_streams; i++) {
        av_freep(&m_ff->formatContext->streams[i]->codec);
        av_freep(&m_ff->formatContext->streams[i]);
    }

    // close the file
    avio_close(m_ff->formatContext->pb);

    // free the stream
    av_free(m_ff->formatContext);

    m_isOpen = false;

    return true;
}
int main() {
    cairo_surface_t *cs = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, W, H);
    int period = SAMPLES;
    int coeffs = period / 4;
    float *square_coeffs;
    float *square_phases;
    int square_coeffs_n;
    int i, j;
    double phase = OFFSET * 2 * M_PI - (M_PI * CYCLES / period);
    float waveform[W];

    float *work = fftwf_malloc((period + 2) * sizeof(*work));
    fftwf_plan plan = fftwf_plan_dft_r2c_1d(period, work, (fftwf_complex *)work,
                                            FFTW_ESTIMATE);

    /* build one period of a square wave and take its FFT */
    for (i = 0; i < period / 2; i++) work[i] = 1.;
    for (; i < period; i++) work[i] = -1.;
    fftwf_execute(plan);

    /* keep the magnitudes and phases of the odd harmonics */
    square_coeffs_n = coeffs;
    square_coeffs = calloc(coeffs, sizeof(*square_coeffs));
    square_phases = calloc(coeffs, sizeof(*square_phases));

    for (i = 1, j = 0; j < square_coeffs_n; i += 2, j++) {
        square_coeffs[j] = hypotf(work[i << 1], work[(i << 1) + 1]) / period;
        square_phases[j] = atan2f(work[(i << 1) + 1], work[i << 1]);
    }

    /* resynthesize the waveform from the harmonics */
    for (i = 0; i < W; i++) {
        float acc = 0.;
        int k;
        for (j = 0, k = 1; j < square_coeffs_n; j++, k += 2)
            acc += square_coeffs[j] * cos(square_phases[j] + k * phase);
        waveform[i] = acc;
        phase += 2 * M_PI * CYCLES / W;
        if (phase >= 2 * M_PI) phase -= 2 * M_PI;
    }

    transparent_surface(cs);
    draw_overlay(cs, waveform);
    write_frame(cs, 0);
    cairo_surface_destroy(cs);
    return 0;
}
void run() {
    if (!setup_capture_source()) {
        std::cout << "Could not initialize capture source" << std::endl;
        return;
    }

    cv::Size output_size = get_output_size();
    size_t full_image_dimensions = output_size.width * output_size.height * 3;
    m_framework = create_message_framework(m_direction, full_image_dimensions);
    if (m_framework == NULL) {
        std::cout << "Could not initialize message framework" << std::endl;
        return;
    }

    std::cout << "Set up camera. Starting image acquisition" << std::endl;

    long last_acq_time = 0;
    while (running) {
        std::experimental::optional<std::pair<cv::Mat, long>> next_image_op = acquire_next_image();
        if (!next_image_op) {
            std::cout << "Error acquiring image! Trying again." << std::endl;
            continue;
        }

        cv::Mat next_image = next_image_op.value().first;
        long acq_time = next_image_op.value().second;

        write_frame(m_framework, next_image.data, acq_time,
                    output_size.width, output_size.height, 3);

        // Throttle acquisition to at most m_max_fps frames per second
        long curr_time = get_time();
        long time_since_last_acq = curr_time - last_acq_time;
        long min_acq_time = 1000 / m_max_fps;
        auto sleep_time = std::chrono::milliseconds(min_acq_time - time_since_last_acq);
        if (sleep_time > std::chrono::milliseconds(0)) {
            std::this_thread::sleep_for(sleep_time);
        }
        last_acq_time = acq_time;
    }

    destroy_capture_source();
    std::cout << "Cleaned up capture source" << std::endl;
    cleanup_message_framework(m_framework);
    std::cout << "Cleaned up message framework" << std::endl;
}
int main(int argc, char *argv[]) {
    int iter;

    MPI_Init(&argc, &argv);
    initdata();
    // MPI_Barrier(MPI_COMM_WORLD);
    write_frame(0);
    MPI_Barrier(MPI_COMM_WORLD);
    for (iter = 1; iter <= nsteps; iter++) {
        exchange_ghost_cells();
        //MPI_Barrier(MPI_COMM_WORLD);
        update();
        //MPI_Barrier(MPI_COMM_WORLD);
        // write_frame(iter);
        //MPI_Barrier(MPI_COMM_WORLD);
    }
    MPI_Finalize();
}
RefreshLiveviewEvent::RefreshLiveviewEvent(double refresh_interval, int min_delay,
                                           Visualization &vis)
    : vis_(&vis), refresh_interval_(refresh_interval)
{
#ifdef HAVE_BOOST
    boost::xtime_get(&last_refresh_, boost::TIME_UTC);
    last_refresh_.sec -= 1;
    boost::mutex::scoped_lock lock(*getUpdateMutex());
#endif
    min_delay_time_ = min_delay;
    // Convert to nanoseconds as needed by boost::thread:
    min_delay_time_ *= 1000000;

    // Writes current network state to texture.
    write_frame();
    // Informs the external window that texture data has changed.
    updateTexture(getTexture());
}
static int encode_audio(struct ffmpeg_t *self, AVFrame *audio_frame) {
    AVCodecContext *codec = audio_codec_ctx(self);

    // Initialise data packet
    AVPacket pkt = { 0 };
    av_init_packet(&pkt);

    // Encode the audio frame
    int got_packet;
    int err = avcodec_encode_audio2(codec, &pkt, audio_frame, &got_packet);
    if (err < 0)
        scm_misc_error("encode-audio", "Error encoding audio frame: ~a",
                       scm_list_1(get_error_text(err)));

    // Write any new audio packets
    if (got_packet)
        write_frame(self, &pkt, codec, audio_stream(self), self->audio_stream_idx);

    return got_packet;
}