Example #1
static void v4l_process(MSFilter * obj){
	V4lState *s=(V4lState*)obj->data;
	uint32_t timestamp;
	int cur_frame;
	if (s->frame_count==-1){
		s->start_time=obj->ticker->time;
		s->frame_count=0;
	}
	cur_frame=(int)((obj->ticker->time-s->start_time)*s->fps/1000.0);
	if (cur_frame>=s->frame_count){
		mblk_t *om=NULL;
		ms_mutex_lock(&s->mutex);
		/*keep the most recent frame if several frames have been captured */
		if (s->fd!=-1){
			om=getq(&s->rq);
		}else{
			if (s->usemire){
				om=dupmsg(v4l_make_mire(s));
			}else {
				mblk_t *tmpm=v4l_make_nowebcam(s);
				if (tmpm) om=dupmsg(tmpm);
			}
		}
		ms_mutex_unlock(&s->mutex);
		if (om!=NULL){
			timestamp=obj->ticker->time*90;/* rtp uses a 90000 Hz clockrate for video*/
			mblk_set_timestamp_info(om,timestamp);
			mblk_set_marker_info(om,TRUE);
			ms_queue_put(obj->outputs[0],om);
			/*ms_message("picture sent");*/
			s->frame_count++;
		}
	}else flushq(&s->rq,0);
}
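
The pacing test above is worth isolating: a frame is due whenever the frame index implied by elapsed wall-clock time reaches the count of frames already emitted. A minimal, self-contained sketch (the helper name is ours, not the library's):

#include <stdint.h>
#include <stdbool.h>

/* Frame-pacing test as in v4l_process(): emit a frame when the index
 * implied by elapsed time catches up with the frames already sent. */
static bool frame_due(uint64_t now_ms, uint64_t start_ms, float fps,
                      int frames_sent)
{
	int cur_frame = (int)((now_ms - start_ms) * fps / 1000.0);
	return cur_frame >= frames_sent;
}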
Example #2
mblk_t* rtp_session_create_rtcp_sdes_packet(RtpSession *session, bool_t full) {
	mblk_t *mp = allocb(sizeof(rtcp_common_header_t), 0);
	rtcp_common_header_t *rtcp;
	mblk_t *tmp;
	mblk_t *m = mp;
	mblk_t *sdes;
	queue_t *q;
	int rc = 0;

	sdes = (full == TRUE) ? session->full_sdes : session->minimal_sdes;
	rtcp = (rtcp_common_header_t *)mp->b_wptr;
	mp->b_wptr += sizeof(rtcp_common_header_t);

	/* Concatenate all sdes chunks. */
	sdes_chunk_set_ssrc(sdes, session->snd.ssrc);
	m = concatb(m, dupmsg(sdes));
	rc++;

	if (full == TRUE) {
		q = &session->contributing_sources;
		for (tmp = qbegin(q); !qend(q, tmp); tmp = qnext(q, tmp)) {
			m = concatb(m, dupmsg(tmp));
			rc++;
		}
	}
	rtcp_common_header_init(rtcp, session, RTCP_SDES, rc, msgdsize(mp));

	return mp;
}
Example #3
static mblk_t *size_conv_alloc_mblk(SizeConvState *s){
	if (s->om!=NULL){
		int ref=s->om->b_datap->db_ref;
		if (ref==1){
			return dupmsg(s->om);
		}else{
			/*the last msg is still referenced by somebody else*/
			ms_message("size_conv_alloc_mblk: Somebody still retaining yuv buffer (ref=%i)",ref);
			freemsg(s->om);
			s->om=NULL;
		}
	}
	s->om=ms_yuv_buf_alloc(&s->outbuf,s->target_vsize.width,s->target_vsize.height);
	return dupmsg(s->om);
}
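
This example and the next are the same buffer-recycling pattern: keep handing out dupmsg() references to one cached mblk_t, and only reuse its data block once db_ref shows every other reference has been released. A hedged generic reduction (the helper name and callback shape are ours; mblk_t, dupmsg() and freemsg() are oRTP's):

#include <ortp/str_utils.h>	/* mblk_t, dupmsg(), freemsg() */

/* Hand out a fresh reference while we hold the only one (db_ref == 1);
 * otherwise drop our reference and allocate a new buffer. */
static mblk_t *reuse_or_realloc(mblk_t **cached,
                                mblk_t *(*alloc_cb)(void *ctx), void *ctx)
{
	if (*cached != NULL) {
		if ((*cached)->b_datap->db_ref == 1)
			return dupmsg(*cached);	/* consumer released it: recycle */
		freemsg(*cached);	/* still referenced downstream: let go */
		*cached = NULL;
	}
	*cached = alloc_cb(ctx);	/* e.g. a ms_yuv_buf_alloc() wrapper */
	return dupmsg(*cached);	/* keep one reference, give one away */
}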
Example #4
static mblk_t * pixconv_alloc_mblk(PixConvState *s){
	if (s->yuv_msg!=NULL){
		int ref=s->yuv_msg->b_datap->db_ref;
		if (ref==1){
			return dupmsg(s->yuv_msg);
		}else{
			/*the last msg is still referenced by somebody else*/
			ms_message("Somebody still retaining yuv buffer (ref=%i)",ref);
			freemsg(s->yuv_msg);
			s->yuv_msg=NULL;
		}
	}
	s->yuv_msg=yuv_buf_alloc(&s->outbuf,s->size.width,s->size.height);
	return dupmsg(s->yuv_msg);
}
Example #5
static void detector_process(MSFilter *f) {
    DetectorState *s=(DetectorState *)f->data;
    mblk_t *m;

    while ((m=ms_queue_get(f->inputs[0]))!=NULL) {
        ms_queue_put(f->outputs[0],m);
        if (s->tone_def->frequency!=0) {
            ms_bufferizer_put(s->buf,dupmsg(m));
        }
    }
    if (s->tone_def->frequency!=0) {
        uint8_t *buf=_alloca(s->framesize);

        while(ms_bufferizer_read(s->buf,buf,s->framesize)!=0) {
            float en=compute_energy((int16_t*)buf,s->framesize/2);
            if (en>energy_min) {
                float freq_en=goertzel_state_run(&s->tone_gs,(int16_t*)buf,s->framesize/2,en);
                if (freq_en>=s->tone_def->min_amplitude) {
                    if (s->dur==0) s->starttime=f->ticker->time;
                    s->dur+=s->frame_ms;
                    if (s->dur>s->tone_def->min_duration && !s->event_sent) {
                        MSToneDetectorEvent event;

                        strncpy(event.tone_name,s->tone_def->tone_name,sizeof(event.tone_name));
                        event.tone_start_time=s->starttime;
                        ms_filter_notify(f,MS_TONE_DETECTOR_EVENT,&event);
                        s->event_sent=TRUE;
                    }
                } else end_tone(s);
            } else end_tone(s);
        }
    }
}
Example #6
static void *v4l_thread(void *ptr){
	V4lState *s=(V4lState*)ptr;
	int err=-1;
	ms_message("v4l_thread starting");
	if (s->v4lv2){
#ifdef HAVE_LINUX_VIDEODEV2_H
		err=v4lv2_do_mmap(s);
#endif
	}else{
		err=v4l_do_mmap(s);
	}
	if (err<0){
		ms_thread_exit(NULL);
	}
	while(s->run){
		mblk_t *m;
#ifdef HAVE_LINUX_VIDEODEV2_H
		if (s->v4lv2)
			m=v4lv2_grab_image(s);
		else
#endif
			m=v4l_grab_image_mmap(s);
		if (m) {
			ms_mutex_lock(&s->mutex);
			putq(&s->rq,dupmsg(m));
			ms_mutex_unlock(&s->mutex);
		}
	}
	v4l_do_munmap(s);
	ms_message("v4l_thread exited.");
	ms_thread_exit(NULL);
	return NULL;
}
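
The thread above is the producer half of a mutex-guarded frame queue; v4l_process() in example #1 is the consumer. The handoff reduced to its two sides (function names are ours; the queue and mutex primitives are as used in the examples, assuming the usual mediastreamer2/oRTP headers):

#include <mediastreamer2/mscommon.h>	/* ms_mutex_t, ms_mutex_lock()/unlock() */
#include <ortp/str_utils.h>	/* queue_t, mblk_t, putq(), getq(), dupmsg() */

/* Producer, run from the capture thread: dupmsg() shares the frame's
 * data block, so the grabber keeps its own reference. */
static void push_frame(queue_t *rq, ms_mutex_t *mtx, mblk_t *m)
{
	ms_mutex_lock(mtx);
	putq(rq, dupmsg(m));
	ms_mutex_unlock(mtx);
}

/* Consumer, run from the ticker thread. */
static mblk_t *pop_frame(queue_t *rq, ms_mutex_t *mtx)
{
	mblk_t *m;
	ms_mutex_lock(mtx);
	m = getq(rq);
	ms_mutex_unlock(mtx);
	return m;
}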
Example #7
static void write264FlvTag(FLVStream *flv, FLVTag *tag, MSQueue *naluq, MSQueue *naluq_t)
{
	mblk_t *tm;
	int len;
	if (flvFull(flv, tag->dataSize + 15)) {
		// struct timeval tv1,tv2;
		// gettimeofday(&tv2,NULL);
		flvFlush(flv);
		// gettimeofday(&tv1,NULL);
		// ms_message("[INFO] TIME=%d ", tv1.tv_sec*1000 + tv1.tv_usec/1000 - tv2.tv_sec*1000 - tv2.tv_usec/1000);
	}

	FLVVideoTag *dataTag=&(tag->tagData);

	putChar(flv, tag->tagType);
	putUI24(flv, tag->dataSize);
	putUI24(flv, tag->timeStamp);
	putChar(flv, tag->timestampExtended);
	putUI24(flv, tag->streamID);

	putChar(flv, (dataTag->frameType<<4)|dataTag->codecId);
	putChar(flv, dataTag->pktType);
	putUI24(flv, dataTag->compositionTime);
	while((tm=ms_queue_get(naluq))!=NULL){
		len=tm->b_wptr-tm->b_rptr;
		putUI32(flv, len);
		writeData(flv,tm->b_rptr,len);

		ms_queue_put(naluq_t,dupmsg(tm));
		freemsg(tm);
	}
	putUI32(flv, tag->dataSize + 11);
}
Example #8
static mblk_t *get_as_yuvmsg(MSFilter *f, DecData *s, AVFrame *orig){
	AVCodecContext *ctx=&s->av_context;

	if (s->outbuf.w!=ctx->width || s->outbuf.h!=ctx->height){
		if (s->sws_ctx!=NULL){
			sws_freeContext(s->sws_ctx);
			s->sws_ctx=NULL;
			freemsg(s->yuv_msg);
			s->yuv_msg=NULL;
		}
		ms_message("Getting yuv picture of %ix%i",ctx->width,ctx->height);
		s->yuv_msg=ms_yuv_buf_alloc(&s->outbuf,ctx->width,ctx->height);
		s->outbuf.w=ctx->width;
		s->outbuf.h=ctx->height;
		s->sws_ctx=sws_getContext(ctx->width,ctx->height,ctx->pix_fmt,
			ctx->width,ctx->height,PIX_FMT_YUV420P,SWS_FAST_BILINEAR,
			NULL, NULL, NULL);
		ms_filter_notify_no_arg(f,MS_FILTER_OUTPUT_FMT_CHANGED);
	}
#if LIBSWSCALE_VERSION_INT >= AV_VERSION_INT(0,9,0)
	if (sws_scale(s->sws_ctx,(const uint8_t * const *)orig->data,orig->linesize, 0,
					ctx->height, s->outbuf.planes, s->outbuf.strides)<0){
#else
	if (sws_scale(s->sws_ctx,(uint8_t **)orig->data,orig->linesize, 0,
					ctx->height, s->outbuf.planes, s->outbuf.strides)<0){
#endif
		ms_error("%s: error in sws_scale().",f->desc->name);
	}
	return dupmsg(s->yuv_msg);
}

static void update_sps(DecData *d, mblk_t *sps){
	if (d->sps)
		freemsg(d->sps);
	d->sps=dupb(sps);
}

static void update_pps(DecData *d, mblk_t *pps){
	if (d->pps)
		freemsg(d->pps);
	if (pps) d->pps=dupb(pps);
	else d->pps=NULL;
}


static bool_t check_sps_change(DecData *d, mblk_t *sps){
	bool_t ret=FALSE;
	if (d->sps){
		ret=(msgdsize(sps)!=msgdsize(d->sps)) || (memcmp(d->sps->b_rptr,sps->b_rptr,msgdsize(sps))!=0);
		if (ret) {
			ms_message("SPS changed ! %i,%i",(int)msgdsize(sps),(int)msgdsize(d->sps));
			update_sps(d,sps);
			update_pps(d,NULL);
		}
	} else {
		ms_message("Receiving first SPS");
		update_sps(d,sps);
	}
	return ret;
}
Example #9
static void *msv4l2_thread(void *ptr){
	V4l2State *s=(V4l2State*)ptr;
	int try=0;
	
	ms_message("msv4l2_thread starting");
	if (s->fd==-1){
		if( msv4l2_open(s)!=0){
			ms_warning("msv4l2 could not be openned");
			goto close;
		}
	}
	
	if (!s->configured && msv4l2_configure(s)!=0){
		ms_warning("msv4l2 could not be configured");		
		goto close;
	}
	if (msv4l2_do_mmap(s)!=0)
	{
		ms_warning("msv4l2 do mmap");
		goto close;
	}
	ms_message("V4L2 video capture started.");
	while(s->thread_run)
	{
		if (s->fd!=-1){
			mblk_t *m;
			m=v4lv2_grab_image(s,50);
			if (m){
				mblk_t *om=dupmsg(m);
				mblk_set_marker_info(om,(s->pix_fmt==MS_MJPEG));
				ms_mutex_lock(&s->mutex);
				putq(&s->rq,om);
				ms_mutex_unlock(&s->mutex);
			}
		}
	}
	/*dequeue pending buffers so that we can properly unref them (avoids memleak )*/
	while(s->queued && try<10){
		v4l2_dequeue_ready_buffer(s,50);
		try++;
	}
	if (try==10) ms_warning("msv4l2: buffers not dequeued at exit !");
	msv4l2_do_munmap(s);
close:
	msv4l2_close(s);
	ms_message("msv4l2_thread exited.");
	ms_thread_exit(NULL);
	return NULL;
}

static void msv4l2_preprocess(MSFilter *f){
	V4l2State *s=(V4l2State*)f->data;
	s->thread_run=TRUE;
	ms_thread_create(&s->thread,NULL,msv4l2_thread,s);
	s->th_frame_count=-1;
	s->mean_inter_frame=0;
}
Example #10
static void notify_sent_rtcp(RtpSession *session, mblk_t *rtcp){
	if (session->eventqs!=NULL){
		OrtpEvent *ev;
		OrtpEventData *evd;
		ev=ortp_event_new(ORTP_EVENT_RTCP_PACKET_EMITTED);
		evd=ortp_event_get_data(ev);
		evd->packet=dupmsg(rtcp);
		rtp_session_dispatch_event(session,ev);
	}
}
Example #11
void SinkBase::cb_data(mblk_t *m)
{
	uint32_t st = mblk_get_timestamp_info(m);

	mblk_t *fm = dupmsg(m);
	mblk_meta_copy(m, fm);
	MSQueue *queue = post_handle(fm);	// because of the dupmsg(m) above, consuming fm will not free m
	bool first = true;

	unsigned char *obuf = 0;
	size_t olen = 0;
	int index = 0;
	size_t off = 0;
	bool key = false;

	while (mblk_t *om = ms_queue_get(queue)) {

		int dlen = size_for(index, om);
		obuf = (unsigned char*)realloc(obuf, off + dlen);
		save_for(index, om, obuf + off);

		if (index == 0)
			key = is_key(index, om);

		index++;
		off += dlen;

		// handle 32-bit timestamp wraparound
		if (first_frame_) {
			first_frame_ = false;
		}
		else {
			uint32_t delta = st - last_stamp_;

			// Out-of-order check: reordered packets are dropped outright!
			if (delta > 0x80000000) {
				fprintf(stderr, "??? timestamps may be out of order, curr=%u, last=%u\n", st, last_stamp_);
				freemsg(om);            // don't leak the frame we just dequeued
				if (obuf) free(obuf);   // nor the bytes accumulated so far
				return;
			}

			next_stamp_ += delta / payload_freq();
		}

		last_stamp_ = st;

		freemsg(om);
	}

	if (cb_data_ && obuf) {
		cb_data_(opaque_, next_stamp_, obuf, off, key);
	}

	if (obuf) free(obuf);
}
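
The reordering test above relies on unsigned 32-bit arithmetic: if st has wrapped past 2^32, st - last_stamp_ still yields the small forward delta, while a genuinely older timestamp yields a value above 2^31. The test in isolation (helper name is ours):

#include <stdint.h>
#include <stdbool.h>

/* true if st is "behind" last in modulo-2^32 order */
static bool is_reordered(uint32_t st, uint32_t last)
{
	return (uint32_t)(st - last) > 0x80000000u;
}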
Example #12
static void x264_nals_to_file(FLVStream *flv,MSQueue *naluq, MSQueue *naluq_t)
{
    mblk_t *tm;
     while((tm=ms_queue_get(naluq))!=NULL){
        writeData(flv,hd,4);	/* hd: defined elsewhere; presumably the 4-byte Annex-B start code 00 00 00 01 */
        writeData(flv,tm->b_rptr,tm->b_wptr-tm->b_rptr);

        ms_queue_put(naluq_t,dupmsg(tm));
        freemsg(tm);
     }
}
Example #13
static void tee_process(MSFilter *f){
	TeeData *d=(TeeData*)f->data;
	mblk_t *im;
	int i;
	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		for(i=0;i<f->desc->noutputs;i++){
			if (f->outputs[i]!=NULL && !d->muted[i])
				ms_queue_put(f->outputs[i],dupmsg(im));
		}
		freemsg(im);
	}
}
Example #14
static void notify_tev(RtpSession *session, telephone_event_t *event){
	OrtpEvent *ev;
	OrtpEventData *evd;
	rtp_signal_table_emit2(&session->on_telephone_event,(long)event[0].event);
	if (session->eventqs!=NULL){
		ev=ortp_event_new(ORTP_EVENT_TELEPHONE_EVENT);
		evd=ortp_event_get_data(ev);
		evd->packet=dupmsg(session->current_tev);
		evd->info.telephone_event=event[0].event;
		rtp_session_dispatch_event(session,ev);
	}
}
Example #15
mblk_t* rtp_session_create_rtcp_sdes_packet(RtpSession *session)
{
	mblk_t *mp=allocb(sizeof(rtcp_common_header_t),0);
	rtcp_common_header_t *rtcp;
	mblk_t *tmp,*m=mp;
	queue_t *q;
	int rc=0;

	rtcp = (rtcp_common_header_t*)mp->b_wptr;
	mp->b_wptr+=sizeof(rtcp_common_header_t);

	/* concatenate all sdes chunks */
	sdes_chunk_set_ssrc(session->sd,session->snd.ssrc);
	m=concatb(m,dupmsg(session->sd));
	rc++;

	q=&session->contributing_sources;
	for (tmp=qbegin(q); !qend(q,tmp); tmp=qnext(q,tmp)){
		m=concatb(m,dupmsg(tmp));
		rc++;
	}
	rtcp_common_header_init(rtcp,session,RTCP_SDES,rc,msgdsize(mp));
	return mp;
}
Example #16
/*
 * sppp_dlprsendup()
 *
 * Description:
 *    For any valid promiscuous streams (marked with SPS_PROMISC and its
 *    sps_dlstate is DL_IDLE), send data upstream. The caller is expected
 *    to hold ppa_sib_lock when calling this procedure.
 */
void
sppp_dlprsendup(spppstr_t *sps, mblk_t *mp, t_scalar_t proto, boolean_t header)
{
	sppa_t	*ppa;
	mblk_t	*dmp;

	ASSERT(sps != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ppa = sps->sps_ppa;
	ASSERT(ppa != NULL);

	/* NOTE: caller must hold ppa_sib_lock in RW_READER mode */
	ASSERT(RW_READ_HELD(&ppa->ppa_sib_lock));

	for (; sps != NULL; sps = sps->sps_nextsib) {
		/*
		 * We specifically test to ensure that the DLPI state for the
		 * promiscuous stream is IDLE (DL_IDLE), since that state tells
		 * us that the promiscuous stream has been bound to PPP_ALLSAP.
		 */
		if (IS_SPS_PROMISC(sps) && (sps->sps_dlstate == DL_IDLE) &&
		    canputnext(sps->sps_rq)) {
			if ((dmp = dupmsg(mp)) == NULL) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				continue;
			}
			if (header) {
				dmp->b_rptr += PPP_HDRLEN;
			}
			if (IS_SPS_RAWDATA(sps)) {
				/* function frees original message if fails */
				dmp = sppp_dladdether(sps, dmp, proto);
			} else {
				/* function frees original message if fails */
				dmp = sppp_dladdud(sps, dmp, proto, B_TRUE);
			}
			if (dmp != NULL) {
				putnext(sps->sps_rq, dmp);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
			}
		}
	}
}
Example #17
void static_image_process(MSFilter *f){
	SIData *d=(SIData*)f->data;
	int frame_interval=(int)(1000/d->fps);
	/*output a frame whenever needed, i.e. respect the FPS parameter */
	if ((f->ticker->time - d->lasttime>frame_interval) || d->lasttime==0){
		ms_filter_lock(f);
		if (d->pic) {
			mblk_t *o=dupmsg(d->pic);
			/*prevent mirroring at the output*/
			mblk_set_precious_flag(o,1);
			ms_queue_put(f->outputs[0],o);
		}
		ms_filter_unlock(f);
		d->lasttime=f->ticker->time;
	}
}
Example #18
static mblk_t *
vsw_dupmsgchain(mblk_t *mp)
{
	mblk_t	*nmp = NULL;
	mblk_t	**nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = dupmsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}
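
vsw_dupmsgchain() exists because STREAMS dupmsg() follows a message's b_cont fragment links but not the b_next links chaining separate packets, so a chain must be duplicated link by link, unwinding with freemsgchain() if any dupmsg() fails. A hedged usage sketch; the caller below and its tap queue are invented for illustration:

#include <sys/stream.h>

/* Fan a packet chain out to a tap queue without giving up the original. */
static void
fanout_chain(queue_t *tap, mblk_t *chain)
{
	mblk_t	*copy = vsw_dupmsgchain(chain);
	mblk_t	*next;

	for (; copy != NULL; copy = next) {
		next = copy->b_next;
		copy->b_next = NULL;	/* detach before handing downstream */
		if (canputnext(tap))
			putnext(tap, copy);	/* tap now owns this duplicate */
		else
			freemsg(copy);	/* back-pressure: drop our copy */
	}
}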
Example #19
static void *v4l_thread(void *ptr){
	V4lState *s=(V4lState*)ptr;
	int err=-1;
	ms_message("v4l_thread starting");
	if (s->v4lv2){
#ifdef HAVE_LINUX_VIDEODEV2_H
		err=v4lv2_do_mmap(s);
#endif
	}else{
		err=v4l_do_mmap(s);
	}
	if (err<0){
		ms_thread_exit(NULL);
	}
	while(s->run){
		mblk_t *m;
#ifdef HAVE_LINUX_VIDEODEV2_H
		if (s->v4lv2)
			m=v4lv2_grab_image(s);
		else
#endif
			m=v4l_grab_image_mmap(s);

		if (s->vsize.width!=s->got_vsize.width){
			if (m){
				/* mblock was allocated by crop or pad! */
				ms_mutex_lock(&s->mutex);
				putq(&s->rq,m);
				ms_mutex_unlock(&s->mutex);
			}else{
				ms_error("grabbing failed !");
			}
		} else if (m!=NULL) {
			mblk_t *dm=dupmsg(m);
			ms_mutex_lock(&s->mutex);
			putq(&s->rq,dm);
			ms_mutex_unlock(&s->mutex);
		}
	}
	v4l_do_munmap(s);
	ms_message("v4l_thread exited.");
	ms_thread_exit(NULL);
	return NULL;
}
Example #20
static void enc_process(MSFilter *f){
	mblk_t *im,*om;
	ogg_packet op;
	EncState *s=(EncState*)f->data;
	uint64_t timems=f->ticker->time;
	uint32_t timestamp=timems*90;
	uint64_t elapsed;

	
	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		/*for the first frames, send only the theora packed conf*/
		om=NULL;
		if (s->nframes==0){
			s->start_time=timems;
		}
		elapsed=timems-s->start_time;

		if (need_send_conf(s,elapsed)){
			if (s->packed_conf) {
				om=dupmsg(s->packed_conf);
				ms_message("sending theora packed conf (%i bytes)",msgdsize(om));
				packetize_and_send(f,s,om,timestamp,THEORA_PACKED_CONF);
			}else {
				ms_error("No packed conf to send.");
			}
		}else{
			enc_fill_yuv(&s->yuv,im);
			ms_debug("subtmitting yuv frame to theora encoder...");
			if (theora_encode_YUVin(&s->tstate,&s->yuv)!=0){
				ms_error("theora_encode_YUVin error.");
			}else{
				if (theora_encode_packetout(&s->tstate,0,&op)==1){
					ms_debug("Got theora coded frame");
					om=allocb(op.bytes,0);
					memcpy(om->b_wptr,op.packet,op.bytes);
					om->b_wptr+=op.bytes;
					packetize_and_send(f,s,om,timestamp,THEORA_RAW_DATA);
				}
			}
		}
		freemsg(im);
		s->nframes++;
	}
}
Example #21
static void detector_process(MSFilter *f){
	DetectorState *s=(DetectorState *)f->data;
	mblk_t *m;
	
	while ((m=ms_queue_get(f->inputs[0]))!=NULL){
		ms_queue_put(f->outputs[0],m);
		if (s->nscans>0){
			ms_bufferizer_put(s->buf,dupmsg(m));
		}
	}
	if (s->nscans>0){
		uint8_t *buf=alloca(s->framesize);

		while(ms_bufferizer_read(s->buf,buf,s->framesize)!=0){
			float en=compute_energy((int16_t*)buf,s->framesize/2);
			if (en>energy_min_threshold*(32767.0*32767.0*0.7)){
				int i;
				for(i=0;i<s->nscans;++i){
					GoertzelState *gs=&s->tone_gs[i];
					MSToneDetectorDef *tone_def=&s->tone_def[i];
					float freq_en=goertzel_state_run(gs,(int16_t*)buf,s->framesize/2,en);
					if (freq_en>=tone_def->min_amplitude){
						if (gs->dur==0) gs->starttime=f->ticker->time;
						gs->dur+=s->frame_ms;
						if (gs->dur>=tone_def->min_duration && !gs->event_sent){
							MSToneDetectorEvent event;
						
							strncpy(event.tone_name,tone_def->tone_name,sizeof(event.tone_name));
							event.tone_start_time=gs->starttime;
							ms_filter_notify(f,MS_TONE_DETECTOR_EVENT,&event);
							gs->event_sent=TRUE;
						}
					}else{
						gs->event_sent=FALSE;
						gs->dur=0;
						gs->starttime=0;
					}
				}
			}else end_all_tones(s);
		}
	}
}
Example #22
static void v4w_process(MSFilter * obj){
	V4wState *s=(V4wState*)obj->data;
	mblk_t *m;
	uint32_t timestamp;
	int cur_frame;

	if (s->frame_count==-1){
		s->start_time=obj->ticker->time;
		s->frame_count=0;
	}

	cur_frame=(int)((obj->ticker->time-s->start_time)*s->fps/1000.0);
	if (cur_frame>s->frame_count){
		mblk_t *om=NULL;
		ms_mutex_lock(&s->mutex);
		/*keep the most recent frame if several frames have been captured */
		if (s->rotregvalue!=0){
			while((m=getq(&s->rq))!=NULL){
				if (om!=NULL) freemsg(om);
				om=m;
			}
		}else {
			mblk_t *nowebcam = v4w_make_nowebcam(s);
			if (nowebcam!=NULL){
				om=dupmsg(nowebcam);
				mblk_set_precious_flag(om,1);
			}
		}
		ms_mutex_unlock(&s->mutex);
		if (om!=NULL){
			timestamp=(uint32_t)obj->ticker->time*90;/* rtp uses a 90000 Hz clockrate for video*/
			mblk_set_timestamp_info(om,timestamp);
			ms_queue_put(obj->outputs[0],om);
			/*ms_message("picture sent");*/
		}
		s->frame_count++;
	}
}
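
The locked drain above keeps only the freshest captured frame and frees the backlog; the same idiom recurs across the capture filters in these examples. Isolated (helper name is ours; queue_t/getq() per oRTP):

#include <ortp/str_utils.h>	/* queue_t, mblk_t, getq(), freemsg() */

/* Pop everything queued, freeing all but the newest; NULL if empty. */
static mblk_t *take_latest(queue_t *q)
{
	mblk_t *latest = NULL, *m;
	while ((m = getq(q)) != NULL) {
		if (latest != NULL) freemsg(latest);
		latest = m;
	}
	return latest;
}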
Example #23
static mblk_t *get_as_yuvmsg(MSFilter *f, DecData *s, AVFrame *orig){
	AVCodecContext *ctx=&s->av_context;

	if (s->outbuf.w!=ctx->width || s->outbuf.h!=ctx->height){
		if (s->sws_ctx!=NULL){
			ms_sws_freeContext(s->sws_ctx);
			s->sws_ctx=NULL;
			freemsg(s->yuv_msg);
			s->yuv_msg=NULL;
		}
		ms_message("Getting yuv picture of %ix%i",ctx->width,ctx->height);
		s->yuv_msg=yuv_buf_alloc(&s->outbuf,ctx->width,ctx->height);
		s->outbuf.w=ctx->width;
		s->outbuf.h=ctx->height;
		s->sws_ctx=ms_sws_getContext(ctx->width,ctx->height,ctx->pix_fmt,
			ctx->width,ctx->height,PIX_FMT_YUV420P,SWS_FAST_BILINEAR,
			NULL, NULL, NULL);
	}
	if (ms_sws_scale(s->sws_ctx,orig->data,orig->linesize, 0,
					ctx->height, s->outbuf.planes, s->outbuf.strides)<0){
		ms_error("%s: error in sws_scale().",f->desc->name);
	}
	return dupmsg(s->yuv_msg);
}
Example #24
static void dec_process(MSFilter *f) {
	mblk_t *im;
	DecState *s=(DecState*)f->data;

	while( (im=ms_queue_get(f->inputs[0]))!=0) {
		mblk_t *m;

		dec_unpacketize(f, s, im, &s->q);

		while((m=ms_queue_get(&s->q))!=NULL){
			vpx_codec_err_t err;
			vpx_codec_iter_t  iter = NULL;
			vpx_image_t *img;

			err = vpx_codec_decode(&s->codec, m->b_rptr, m->b_wptr - m->b_rptr, NULL, 0);
			if (err) {
				ms_warning("vpx_codec_decode failed : %d %s (%s)\n", err, vpx_codec_err_to_string(err), vpx_codec_error_detail(&s->codec));

				if ((f->ticker->time - s->last_error_reported_time)>5000 || s->last_error_reported_time==0) {
					s->last_error_reported_time=f->ticker->time;
					ms_filter_notify_no_arg(f,MS_VIDEO_DECODER_DECODING_ERRORS);
				}
				if (s->first_image_decoded == FALSE) {
					/* if no frames have been decoded yet, do not try to browse decoded frames */
					freemsg(m);
					continue;
				}
			}


			/* browse decoded frames */
			while((img = vpx_codec_get_frame(&s->codec, &iter))) {
				int i,j;

				if (s->yuv_width != img->d_w || s->yuv_height != img->d_h) {
					if (s->yuv_msg)
						freemsg(s->yuv_msg);
					s->yuv_msg = ms_yuv_buf_alloc(&s->outbuf, img->d_w, img->d_h);
					s->yuv_width = img->d_w;
					s->yuv_height = img->d_h;
				}

				/* scale/copy frame to destination mblk_t */
				for(i=0; i<3; i++) {
					uint8_t* dest = s->outbuf.planes[i];
					uint8_t* src = img->planes[i];
					int h = img->d_h >> ((i>0)?1:0);

					for(j=0; j<h; j++) {
						memcpy(dest, src, s->outbuf.strides[i]);

						dest += s->outbuf.strides[i];
						src += img->stride[i];
					}
				}
				ms_queue_put(f->outputs[0], dupmsg(s->yuv_msg));

				if (ms_video_update_average_fps(&s->fps, f->ticker->time)) {
					ms_message("VP8 decoder: Frame size: %dx%d", s->yuv_width, s->yuv_height);
				}
				if (!s->first_image_decoded) {
					s->first_image_decoded = TRUE;
					ms_filter_notify_no_arg(f,MS_VIDEO_DECODER_FIRST_IMAGE_DECODED);
				}
			}
			freemsg(m);
		}
	}
}
Example #25
/*
 * Send a gratuitous RARP packet to notify the physical switch to update its
 * Layer2 forwarding table for the given mac address. This is done to allow the
 * switch to quickly learn the macaddr-port association when a guest is live
 * migrated or when vsw's physical device is changed dynamically. Any protocol
 * packet would serve this purpose, but we choose RARP, as it allows us to
 * accomplish this within L2 (ie, no need to specify IP addr etc in the packet)
 * The macaddr of vnet is retained across migration. Hence, we don't need to
 * update the arp cache of other hosts within the broadcast domain. Note that
 * it is harmless to send these RARP packets during normal port attach of a
 * client vnet. This can be turned off if needed, by setting
 * vsw_publish_macaddr_count to zero in /etc/system.
 */
void
vsw_publish_macaddr(vsw_t *vswp, vsw_port_t *portp)
{
	mblk_t			*mp;
	mblk_t			*bp;
	struct arphdr		*arh;
	struct	ether_header 	*ehp;
	int			count = 0;
	int			plen = 4;
	uint8_t			*cp;

	mp = allocb(ETHERMIN, BPRI_MED);
	if (mp == NULL) {
		return;
	}

	/* Initialize eth header */
	ehp = (struct  ether_header *)mp->b_rptr;
	bcopy(&etherbroadcastaddr, &ehp->ether_dhost, ETHERADDRL);
	bcopy(&portp->p_macaddr, &ehp->ether_shost, ETHERADDRL);
	ehp->ether_type = htons(ETHERTYPE_REVARP);

	/* Initialize arp packet */
	arh = (struct arphdr *)(mp->b_rptr + sizeof (struct ether_header));
	cp = (uint8_t *)arh;

	arh->ar_hrd = htons(ARPHRD_ETHER);	/* Hardware type:  ethernet */
	arh->ar_pro = htons(ETHERTYPE_IP);	/* Protocol type:  IP */
	arh->ar_hln = ETHERADDRL;	/* Length of hardware address:  6 */
	arh->ar_pln = plen;		/* Length of protocol address:  4 */
	arh->ar_op = htons(REVARP_REQUEST);	/* Opcode: REVARP Request */

	cp += ARH_FIXED_LEN;

	/* Sender's hardware address and protocol address */
	bcopy(&portp->p_macaddr, cp, ETHERADDRL);
	cp += ETHERADDRL;
	bzero(cp, plen);	/* INADDR_ANY */
	cp += plen;

	/* Target hardware address and protocol address */
	bcopy(&portp->p_macaddr, cp, ETHERADDRL);
	cp += ETHERADDRL;
	bzero(cp, plen);	/* INADDR_ANY */
	cp += plen;

	mp->b_wptr += ETHERMIN;	/* total size is 42; round up to ETHERMIN */

	for (count = 0; count < vsw_publish_macaddr_count; count++) {

		bp = dupmsg(mp);
		if (bp == NULL) {
			continue;
		}

		/* transmit the packet */
		bp = vsw_tx_msg(vswp, bp, VSW_VNETPORT, portp);
		if (bp != NULL) {
			freemsg(bp);
		}
	}

	freemsg(mp);
}
Example #26
/*	inputs[0]= reference signal from far end (sent to soundcard)
 *	inputs[1]= near speech & echo signal	(read from soundcard)
 *	outputs[0]=  a copy of inputs[0], to be sent to the soundcard
 *	outputs[1]=  near end speech, echo removed - towards far end
*/
static void speex_ec_process(MSFilter *f){
	SpeexECState *s=(SpeexECState*)f->data;
	int nbytes=s->framesize*2;
	mblk_t *refm;
	uint8_t *ref,*echo;
	
	if (s->bypass_mode) {
		while((refm=ms_queue_get(f->inputs[0]))!=NULL){
			ms_queue_put(f->outputs[0],refm);
		}
		while((refm=ms_queue_get(f->inputs[1]))!=NULL){
			ms_queue_put(f->outputs[1],refm);
		}
		return;
	}
	
	if (f->inputs[0]!=NULL){
		if (s->echostarted){
			while((refm=ms_queue_get(f->inputs[0]))!=NULL){
				refm=audio_flow_controller_process(&s->afc,refm);
				if (refm){
					mblk_t *cp=dupmsg(refm);
					ms_bufferizer_put(&s->delayed_ref,cp);
					ms_bufferizer_put(&s->ref,refm);
				}
			}
		}else{
			ms_warning("Getting reference signal but no echo to synchronize on.");
			ms_queue_flush(f->inputs[0]);
		}
	}

	ms_bufferizer_put_from_queue(&s->echo,f->inputs[1]);
	
	ref=(uint8_t*)alloca(nbytes);
	echo=(uint8_t*)alloca(nbytes);
	while (ms_bufferizer_read(&s->echo,echo,nbytes)==nbytes){
		mblk_t *oecho=allocb(nbytes,0);
		int avail;
		int avail_samples;

		if (!s->echostarted) s->echostarted=TRUE;
		if ((avail=ms_bufferizer_get_avail(&s->delayed_ref))<((s->nominal_ref_samples*2)+nbytes)){
			/*we don't have enough to read in a reference signal buffer, inject silence instead*/
			avail=nbytes;
			refm=allocb(nbytes,0);
			memset(refm->b_wptr,0,nbytes);
			refm->b_wptr+=nbytes;
			ms_bufferizer_put(&s->delayed_ref,refm);
			ms_queue_put(f->outputs[0],dupmsg(refm));
			if (!s->using_zeroes){
				ms_warning("Not enough ref samples, using zeroes");
				s->using_zeroes=TRUE;
			}
		}else{
			if (s->using_zeroes){
				ms_message("Samples are back.");
				s->using_zeroes=FALSE;
			}
			/* read from our no-delay buffer and output */
			refm=allocb(nbytes,0);
			if (ms_bufferizer_read(&s->ref,refm->b_wptr,nbytes)==0){
				ms_fatal("Should never happen");
			}
			refm->b_wptr+=nbytes;
			ms_queue_put(f->outputs[0],refm);
		}

		/*now read a valid buffer of delayed ref samples*/
		if (ms_bufferizer_read(&s->delayed_ref,ref,nbytes)==0){
			ms_fatal("Should never happen");
		}
		avail-=nbytes;
		avail_samples=avail/2;
		/*ms_message("avail=%i",avail_samples);*/
		if (avail_samples<s->min_ref_samples || s->min_ref_samples==-1){
			s->min_ref_samples=avail_samples;
		}
		
#ifdef EC_DUMP
		if (s->reffile)
			fwrite(ref,nbytes,1,s->reffile);
		if (s->echofile)
			fwrite(echo,nbytes,1,s->echofile);
#endif
		speex_echo_cancellation(s->ecstate,(short*)echo,(short*)ref,(short*)oecho->b_wptr);
		speex_preprocess_run(s->den, (short*)oecho->b_wptr);
#ifdef EC_DUMP
		if (s->cleanfile)
			fwrite(oecho->b_wptr,nbytes,1,s->cleanfile);
#endif
		oecho->b_wptr+=nbytes;
		ms_queue_put(f->outputs[1],oecho);
	}
	
	/*verify our ref buffer does not become too big, meaning that we are receiving more samples than we are sending*/
	if ((((uint32_t)(f->ticker->time - s->flow_control_time)) >= flow_control_interval_ms) && (s->min_ref_samples != -1)) {
		int diff=s->min_ref_samples-s->nominal_ref_samples;
		if (diff>(nbytes/2)){
			int purge=diff-(nbytes/2);
			ms_warning("echo canceller: we are accumulating too much reference signal, need to throw out %i samples",purge);
			audio_flow_controller_set_target(&s->afc,purge,(flow_control_interval_ms*s->samplerate)/1000);
		}
		s->min_ref_samples=-1;
		s->flow_control_time = f->ticker->time;
	}
}
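
To make the closing flow-control arithmetic concrete, a worked example with illustrative numbers: with framesize = 160 samples (so nbytes = 320), suppose min_ref_samples settled at 400 over the last window while nominal_ref_samples is 160. Then diff = 400 - 160 = 240 samples; since 240 > nbytes/2 = 160, the canceller schedules purge = 240 - 160 = 80 samples to be swallowed by the flow controller over the next flow_control_interval_ms of reference signal.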
Example #27
/* for high level telephony event callback */
void rtp_session_check_telephone_events(RtpSession *session, mblk_t *m0)
{
    telephone_event_t *events,*evbuf;
    int num;
    int i;
    mblk_t *mp;
    rtp_header_t *hdr;
    mblk_t *cur_tev;

    hdr=(rtp_header_t*)m0->b_rptr;
    mp=m0->b_cont;

    num=((int)(mp->b_wptr-mp->b_rptr))/sizeof(telephone_event_t);
    events=(telephone_event_t*)mp->b_rptr;


    if (hdr->markbit==1)
    {
        /* this is a start of new events. Store the event buffer for later use*/
        if (session->current_tev!=NULL) {
            freemsg(session->current_tev);
            session->current_tev=NULL;
        }
        session->current_tev=copymsg(m0);
        /* handle the case where the events are short enough to end within the packet that has the marker bit*/
        notify_events_ended(session,events,num);
    }
    /* whether or not the marker bit is set, we parse the packet and compare it to the previously received one */
    cur_tev=session->current_tev;
    if (cur_tev!=NULL)
    {
        /* first compare timestamp, they must be identical */
        if (((rtp_header_t*)cur_tev->b_rptr)->timestamp==
                ((rtp_header_t*)m0->b_rptr)->timestamp)
        {
            evbuf=(telephone_event_t*)cur_tev->b_cont->b_rptr;
            for (i=0; i<num; i++)
            {
                if (events[i].E==1)
                {
                    /* update events that have ended */
                    if (evbuf[i].E==0) {
                        evbuf[i].E=1;
                        /* this is a end of event, report it */
                        notify_tev(session,&events[i]);
                    }
                }
            }
        }
        else
        {
            /* timestamps are not identical: these are not the same events */
            if (session->current_tev!=NULL) {
                freemsg(session->current_tev);
                session->current_tev=NULL;
            }
            session->current_tev=dupmsg(m0);
        }
    }
    else
    {
        /* there are no pending events, but we did not receive a packet with the
        marker bit set: either the sending implementation is not compliant, or that
        packet has been lost; we must deal with it anyway. */
        session->current_tev=copymsg(m0);
        /* inform the application if there are tone ends */
        notify_events_ended(session,events,num);
    }
}
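
The function above leans on the copymsg()/dupmsg() distinction: copymsg() deep-copies the data blocks, so the stored event buffer's E flags can be flipped in place without touching the caller's message, while dupmsg() only allocates a new mblk_t over the same reference-counted data. A two-line reminder of the semantics as we read them (oRTP str_utils):

#include <ortp/str_utils.h>

static void copy_vs_dup(mblk_t *m)
{
	mblk_t *deep = copymsg(m);	/* new data blocks: safe to modify independently */
	mblk_t *shallow = dupmsg(m);	/* shared data blocks (db_ref bumped):
					   writes are visible through both */
	freemsg(deep);
	freemsg(shallow);
}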
Example #28
static void dec_process(MSFilter *f) {
	DecState *s = (DecState *)f->data;
	mblk_t *im;
	vpx_codec_err_t err;
	vpx_image_t *img;
	vpx_codec_iter_t iter = NULL;
	MSQueue frame;
	MSQueue mtofree_queue;
	Vp8RtpFmtFrameInfo frame_info;
	
	if (!s->ready){
		ms_queue_flush(f->inputs[0]);
		return;
	}
	
	ms_filter_lock(f);

	ms_queue_init(&frame);
	ms_queue_init(&mtofree_queue);

	/* Unpack RTP payload format for VP8. */
	vp8rtpfmt_unpacker_feed(&s->unpacker, f->inputs[0]);

	/* Decode unpacked VP8 frames. */
	while (vp8rtpfmt_unpacker_get_frame(&s->unpacker, &frame, &frame_info) == 0) {
		while ((im = ms_queue_get(&frame)) != NULL) {
			err = vpx_codec_decode(&s->codec, im->b_rptr, (unsigned int)(im->b_wptr - im->b_rptr), NULL, 0);
			if ((s->flags & VPX_CODEC_USE_INPUT_FRAGMENTS) && mblk_get_marker_info(im)) {
				err = vpx_codec_decode(&s->codec, NULL, 0, NULL, 0);
			}
			if (err) {
				ms_warning("vp8 decode failed : %d %s (%s)\n", err, vpx_codec_err_to_string(err), vpx_codec_error_detail(&s->codec)?vpx_codec_error_detail(&s->codec):"no details");
			}
			ms_queue_put(&mtofree_queue, im);
		}

		/* Get decoded frame */
		if ((img = vpx_codec_get_frame(&s->codec, &iter))) {
			int i, j;
			int reference_updates = 0;

			if (vpx_codec_control(&s->codec, VP8D_GET_LAST_REF_UPDATES, &reference_updates) == 0) {
				if (frame_info.pictureid_present && ((reference_updates & VP8_GOLD_FRAME) || (reference_updates & VP8_ALTR_FRAME))) {
					vp8rtpfmt_send_rpsi(&s->unpacker, frame_info.pictureid);
				}
			}

			if (s->yuv_width != img->d_w || s->yuv_height != img->d_h) {
				if (s->yuv_msg) freemsg(s->yuv_msg);
				s->yuv_msg = ms_yuv_buf_alloc(&s->outbuf, img->d_w, img->d_h);
				ms_message("MSVp8Dec: video is %ix%i", img->d_w, img->d_h);
				s->yuv_width = img->d_w;
				s->yuv_height = img->d_h;
				ms_filter_notify_no_arg(f, MS_FILTER_OUTPUT_FMT_CHANGED);
			}

			/* scale/copy frame to destination mblk_t */
			for (i = 0; i < 3; i++) {
				uint8_t *dest = s->outbuf.planes[i];
				uint8_t *src = img->planes[i];
				int h = img->d_h >> ((i > 0) ? 1 : 0);

				for (j = 0; j < h; j++) {
					memcpy(dest, src, s->outbuf.strides[i]);
					dest += s->outbuf.strides[i];
					src += img->stride[i];
				}
			}
			ms_queue_put(f->outputs[0], dupmsg(s->yuv_msg));

			ms_average_fps_update(&s->fps, (uint32_t)f->ticker->time);
			if (!s->first_image_decoded) {
				s->first_image_decoded = TRUE;
				ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_FIRST_IMAGE_DECODED);
			}
		}

		while ((im = ms_queue_get(&mtofree_queue)) != NULL) {
			freemsg(im);
		}
	}
	ms_filter_unlock(f);
}
Example #29
void MSOpenH264Decoder::feed()
{
    if (!isInitialized()) {
        ms_error("MSOpenH264Decoder::feed(): not initialized");
        ms_queue_flush(mFilter->inputs[0]);
        return;
    }

    MSQueue nalus;
    ms_queue_init(&nalus);

    mblk_t *im;
    while ((im = ms_queue_get(mFilter->inputs[0])) != NULL) {
        if ((getIDRPicId() == 0) && (mSPS != 0) && (mPPS != 0)) {
            // Push the sps/pps given in sprop-parameter-sets if any
            mblk_set_timestamp_info(mSPS, mblk_get_timestamp_info(im));
            mblk_set_timestamp_info(mPPS, mblk_get_timestamp_info(im));
            rfc3984_unpack(mUnpacker, mSPS, &nalus);
            rfc3984_unpack(mUnpacker, mPPS, &nalus);
            mSPS = 0;
            mPPS = 0;
        }
        rfc3984_unpack(mUnpacker, im, &nalus);
        if (!ms_queue_empty(&nalus)) {
            void * pData[3] = { 0 };
            SBufferInfo sDstBufInfo = { 0 };
            int len = nalusToFrame(&nalus);

            DECODING_STATE state = mDecoder->DecodeFrame2(mBitstream, len, (uint8_t**)pData, &sDstBufInfo);
            if (state != dsErrorFree) {
                ms_error("OpenH264 decoder: DecodeFrame2 failed: 0x%x", state);
                if (((mFilter->ticker->time - mLastErrorReportTime) > 5000) || (mLastErrorReportTime == 0)) {
                    mLastErrorReportTime = mFilter->ticker->time;
                    ms_filter_notify_no_arg(mFilter, MS_VIDEO_DECODER_DECODING_ERRORS);
                }
            }
            if (sDstBufInfo.iBufferStatus == 1) {
                uint8_t * pDst[3] = { 0 };
                pDst[0] = (uint8_t *)pData[0];
                pDst[1] = (uint8_t *)pData[1];
                pDst[2] = (uint8_t *)pData[2];

                // Update video size and (re)allocate YUV buffer if needed
                if ((mWidth != sDstBufInfo.UsrData.sSystemBuffer.iWidth)
                        || (mHeight != sDstBufInfo.UsrData.sSystemBuffer.iHeight)) {
                    if (mYUVMsg) {
                        freemsg(mYUVMsg);
                    }
                    mWidth = sDstBufInfo.UsrData.sSystemBuffer.iWidth;
                    mHeight = sDstBufInfo.UsrData.sSystemBuffer.iHeight;
                    mYUVMsg = ms_yuv_buf_alloc(&mOutbuf, mWidth, mHeight);
                    ms_filter_notify_no_arg(mFilter,MS_FILTER_OUTPUT_FMT_CHANGED);
                }

                // Scale/copy frame to destination mblk_t
                for (int i = 0; i < 3; i++) {
                    uint8_t *dst = mOutbuf.planes[i];
                    uint8_t *src = pDst[i];
                    int h = mHeight >> (( i > 0) ? 1 : 0);

                    for(int j = 0; j < h; j++) {
                        memcpy(dst, src, mOutbuf.strides[i]);
                        dst += mOutbuf.strides[i];
                        src += sDstBufInfo.UsrData.sSystemBuffer.iStride[(i == 0) ? 0 : 1];
                    }
                }
                ms_queue_put(mFilter->outputs[0], dupmsg(mYUVMsg));

                // Update average FPS
                if (ms_average_fps_update(&mFPS, mFilter->ticker->time)) {
                    ms_message("OpenH264 decoder: Frame size: %dx%d", mWidth, mHeight);
                }

                // Notify first decoded image
                if (!mFirstImageDecoded) {
                    mFirstImageDecoded = true;
                    ms_filter_notify_no_arg(mFilter, MS_VIDEO_DECODER_FIRST_IMAGE_DECODED);
                }

#if MSOPENH264_DEBUG
                ms_message("OpenH264 decoder: IDR pic id: %d, Frame num: %d, Temporal id: %d, VCL NAL: %d", getIDRPicId(), getFrameNum(), getTemporalId(), getVCLNal());
#endif
            }
        }
    }
}