/**
 * Encode a video frame by pushing it through the GStreamer pipeline.
 *
 * Lazily (re)creates the pipeline whenever it is invalid or the frame
 * dimensions changed since the last call.
 *
 * @param st     Encoder state (must be non-NULL)
 * @param update True to request a picture update (not implemented)
 * @param frame  Video frame to encode; must be YUV420P
 *
 * @return 0 if success, otherwise errorcode
 */
int gst_video1_encode(struct videnc_state *st, bool update,
		      const struct vidframe *frame)
{
	int ret;

	/* Reject missing arguments and unsupported pixel formats */
	if (!st || !frame || frame->fmt != VID_FMT_YUV420P)
		return EINVAL;

	/* Rebuild the pipeline when it is invalid or the frame size
	 * differs from the size the encoder was configured for */
	if (!st->streamer.valid ||
	    !vidsz_cmp(&st->encoder.size, &frame->size)) {

		pipeline_close(st);

		ret = pipeline_init(st, &frame->size);
		if (ret) {
			warning("gst_video: pipeline initialization failed\n");
			return ret;
		}

		st->encoder.size = frame->size;
	}

	if (update) {
		debug("gst_video: gstreamer picture update"
		      ", it's not implemented...\n");
	}

	/*
	 * Push frame into pipeline.
	 * Function call will return once frame has been processed completely.
	 */
	return pipeline_push(st, frame);
}
/**
 * Create or update the video encoder state.
 *
 * On first call (*stp == NULL) allocates the encoder resources and stores
 * the packet handler; on later calls closes the pipeline if any encode
 * parameter (bitrate, pktsize, fps) changed, so it gets re-created on the
 * next encode.
 *
 * @param stp   Pointer to encoder state pointer (must be non-NULL)
 * @param vc    Video codec (must be non-NULL)
 * @param prm   Encode parameters (must be non-NULL)
 * @param fmtp  Optional SDP format parameters
 * @param pkth  Packet handler callback (must be non-NULL)
 * @param arg   Handler argument, stored only on first allocation
 *
 * @return 0 if success, otherwise errorcode
 */
int gst_video1_encoder_set(struct videnc_state **stp,
			   const struct vidcodec *vc,
			   struct videnc_param *prm, const char *fmtp,
			   videnc_packet_h *pkth, void *arg)
{
	struct videnc_state *st;
	int err = 0;

	/* Validate arguments BEFORE dereferencing stp.
	 * (The original dereferenced *stp before this check, which is
	 * undefined behaviour when stp is NULL.) */
	if (!stp || !vc || !prm || !pkth)
		return EINVAL;

	st = *stp;

	if (!st) {
		/* First call: allocate state and remember the handler */
		err = allocate_resources(stp);
		if (err) {
			warning("gst_video: resource allocation failed\n");
			return err;
		}
		st = *stp;
		st->pkth = pkth;
		st->arg = arg;
	}
	else {
		if (!st->streamer.valid) {
			warning("gst_video codec: trying to work"
				" with invalid pipeline\n");
			return EINVAL;
		}

		/* Parameter change forces a pipeline rebuild on next encode */
		if ((st->encoder.bitrate != prm->bitrate ||
		     st->encoder.pktsize != prm->pktsize ||
		     st->encoder.fps != prm->fps)) {
			pipeline_close(st);
		}
	}

	st->encoder.bitrate = prm->bitrate;
	st->encoder.pktsize = prm->pktsize;
	st->encoder.fps = prm->fps;

	if (str_isset(fmtp)) {
		struct pl sdp_fmtp;

		pl_set_str(&sdp_fmtp, fmtp);

		/* store new parameters */
		fmt_param_apply(&sdp_fmtp, param_handler, st);
	}

	info("gst_video: video encoder %s: %d fps, %d bit/s, pktsize=%u\n",
	     vc->name, st->encoder.fps, st->encoder.bitrate,
	     st->encoder.pktsize);

	return err;
}
/**
 * Destructor for the encoder state: tears down the pipeline and
 * releases the synchronization primitives.
 *
 * @param data Pointer to a struct videnc_state
 */
static void destruct_resources(void *data)
{
	struct videnc_state *vst = data;

	/* close pipeline */
	pipeline_close(vst);

	/* destroy locks: EOS pair first, then the wait pair */
	pthread_mutex_destroy(&vst->streamer.eos.mutex);
	pthread_cond_destroy(&vst->streamer.eos.cond);

	pthread_mutex_destroy(&vst->streamer.wait.mutex);
	pthread_cond_destroy(&vst->streamer.wait.cond);
}
/*
 * Drive the receive side of every pipeline attached to this socket end:
 * flush any pending iov into the send buffer, reap closed pipelines, and
 * pull fresh data from each upstream, translating the recv result into
 * delete/failure/timeout bookkeeping.
 *
 * Returns 0 always; per-pipeline errors are handled in place.
 *
 * NOTE(review): relies on a global `now` (current time in ms, presumably)
 * and on pipeline_upstream_recv()'s return convention: positive EAGAIN,
 * -EAGAIN, -EPIPE, -EINVAL and -1 are all treated as distinct cases below
 * — confirm against the callee; the EAGAIN/-EAGAIN asymmetry looks
 * intentional but is easy to get wrong.
 */
static int socket_end_driver_pipeline_receive(struct socket_end_st *se)
{
	int i, next, ret;
	ssize_t len;
	struct pipeline_end_st *pe;

	/* Walk the intrusive list of pipeline ends; `next` is cached before
	 * any deletion so removal of pe[i] does not break iteration. */
	for (i=se->pipeline_1; i!=-1; i=next) {
		pe = se->pipeline_end[i];
		next = pe->next_id;

		/* Still connecting: nothing to receive yet */
		if (pe->connect_pending) {
			continue;
		}

		/* Retry a previously deferred write of received data into
		 * the socket end's send buffer */
		if (pe->iov_recv_pending) {
			ret = streambuf_write_nb(se->send_buffer, pe->iov_recv_pending);
			if (ret == ENOMEM) {
				/* Buffer full: stop processing further
				 * pipelines until there is room */
				mylog(L_INFO, "Write pe iov recv pending to se send buffer no memory, se[%d] pe[%d]", se->id, i);
				break;
			}
			pe->iov_recv_pending = NULL;
		}

		/* Pipeline already marked closed: tear it down */
		if (pe->closed) {
			mylog(L_INFO, "Pipeline is closed delete pipeline, se[%d] pe[%d]", se->id, i);
			socket_end_delete_pipeline(se, i);
			continue;
		}

		len = pipeline_upstream_recv(pe);

		/* Upstream hung up: notify the client side, then delete.
		 * If pipeline_close() itself hits ENOMEM the pipeline is
		 * kept so the close can be retried on a later pass. */
		if (len==-EPIPE) {
			if (likely(pipeline_close(pe) != ENOMEM)) {
				mylog(L_DEBUG, "Upstream closed send pe close to client, client %s, se[%d] pe[%d]", se->client_str, se->id, i);
				socket_end_delete_pipeline(se, i);
			}
			continue;
		}

		/* Hard receive error: report failure to the client, then
		 * delete (again deferred if the failure path hits ENOMEM) */
		if (len==-EINVAL) {
			pe->error_code = PIPELINE_FAILURE_RECV_FAILED;
			if (likely(pipeline_failure(pe) != ENOMEM)) {
				mylog(L_ERR, "Receive from upstream failed, send pe failure to client, client %s, se[%d] pe[%d]", se->client_str, se->id, i);
				socket_end_delete_pipeline(se, i);
			}
			continue;
		}

		/* return EAGAIN: receive some data, there is still more data, update recv timeout */
		if (len == EAGAIN) {
			se->pipeline_end[i]->recv_timeout_abs_ms = now + se->pipeline_end[i]->recv_timeout;
			mylog(L_DEBUG, "Receive from pe %d nonblock recv timeout is %lu, se[%d] pe[%d] ", i, se->pipeline_end[i]->recv_timeout_abs_ms, se->id, i);
		}

		/* first nonblock recv */
		/* -EAGAIN with no deadline set yet: arm the initial timeout;
		 * subsequent -EAGAINs deliberately leave the deadline alone */
		if (len == -EAGAIN && se->pipeline_end[i]->recv_timeout_abs_ms == 0) {
			se->pipeline_end[i]->recv_timeout_abs_ms = now + se->pipeline_end[i]->recv_timeout;
			mylog(L_DEBUG, "Receive from pe %d nonblock recv timeout is %lu, se[%d] pe[%d]", i, se->pipeline_end[i]->recv_timeout_abs_ms, se->id, i);
		}

		/* -1: enqueueing into the send buffer failed for lack of
		 * memory — stop processing until space frees up */
		if (len == -1) {
			mylog(L_INFO, "Pipeline receive enqueue se send buffer no memory, se[%d] pe[%d]", se->id, i);
			break;
		}

		/* return -EAGAIN do not update timeout */
	}

	return 0;
}