コード例 #1
0
ファイル: ff-save.c プロジェクト: OpenCL/GEGL-OpenCL-old
/* Encode and mux pending audio for stream `st` of output context `oc`.
 *
 * Incoming samples from o->audio are first copied into a new
 * GeglAudioFragment appended to p->audio_track; then as many codec-sized
 * frames as are buffered (between p->audio_read_pos and p->audio_pos)
 * are encoded and interleaved into the output file.
 *
 * Fixes vs. previous revision:
 *  - `left * (1<<31)` invoked undefined behavior (signed shift into the
 *    sign bit); scale by INT32_MAX as a double instead.
 *  - av_packet_rescale_ts() was applied even when got_packet == 0,
 *    rescaling an uninitialized/stale packet; it now runs only on valid
 *    packets.
 *  - av_frame_make_writable() is called before the frame is filled,
 *    not after.
 */
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;     /* used only by variable-frame-size codecs */
  static AVPacket  pkt = { 0 };

  if (pkt.size == 0)
  {
    av_init_packet (&pkt);
  }

  /* first we add incoming frames audio samples */
  {
    int i;
    int incoming_samples = gegl_audio_fragment_get_sample_count (o->audio);
    GeglAudioFragment *af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                                     gegl_audio_fragment_get_channels (o->audio),
                                                     gegl_audio_fragment_get_channel_layout (o->audio),
                                                     incoming_samples);
    gegl_audio_fragment_set_sample_count (af, incoming_samples);
    for (i = 0; i < incoming_samples; i++)
      {
        af->data[0][i] = o->audio->data[0][i];
        af->data[1][i] = o->audio->data[1][i];
      }
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    p->audio_pos += incoming_samples;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    sample_count = c->frame_size;

  /* then we encode as much as we can in a loop using the codec frame size */

  while (p->audio_pos - p->audio_read_pos > sample_count)
  {
    long i;
    int ret;
    int got_packet = 0;
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, sample_count);

    /* ensure the buffers are ours before writing sample data into them */
    av_frame_make_writable (frame);

    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          /* (1<<31) is UB on 32-bit int; scale by INT32_MAX instead */
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * 2147483647.0;
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * 2147483647.0;
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * 2147483647.0;
          ((int32_t*)frame->data[1])[i] = right * 2147483647.0;
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += sample_count;

    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);
    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }

    if (got_packet)
    {
      /* convert pts/dts from codec to stream time base for valid packets only */
      av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_free_packet (&pkt);
    }

    av_frame_free (&frame);
    p->audio_read_pos += sample_count;
  }
}
コード例 #2
0
ファイル: printsample.c プロジェクト: mazerj/pype2
/* Print one EDF sample (FSAMPLE) as a single text row.
 *
 * combfile - open EDF file handle (used for revision / raw-data queries)
 * rec      - current recording state (recording_mode gates CR flags)
 * s        - the sample to print
 *
 * Returns 1 when sample output is globally disabled, otherwise 0
 * (or the result of write_raw_sample() for uncalibrated samples).
 *
 * NOTE(review): 'NaN' throughout this file appears to be the EDF
 * missing-data sentinel macro, not an IEEE NaN — the ==/!= comparisons
 * against it only work under that assumption; confirm in the EDF headers.
 */
int write_sample(EDFFILE * combfile, RECORDINGS * rec, FSAMPLE *s)
{
  int hasleft = (s->flags & SAMPLE_LEFT)!=0;
  int hasright = (s->flags & SAMPLE_RIGHT)!=0;
  float xracc = 0;
  float yracc = 0;



  /* sample printing globally disabled */
  if(!options.samples_enabled) return 1;

  /* negative pupil area => uncalibrated sample: emit raw values instead */
  if((hasleft && s->pa[0]<0) ||(hasright && s->pa[1]<0))
  {
	return write_raw_sample(combfile, rec, s);
  }
  /* refresh the running pixels-per-degree accumulators from this sample */
  updatePPD(s);
  /* timestamp column (optionally fractional when the sample carries a
     half-millisecond offset flag) */
  if(options.out_float_time)
	  print("%-8.1f", (float)(s->time+((s->flags & SAMPLE_ADD_OFFSET)?0.5:0)));
  else
	print("%lu", s->time);


  /* per-eye position (x, y) and pupil size (a) columns */
  if(options.output_left_eye && options.out_sample_left && hasleft)
    {
      float x, y, a;
      get_sample_data(s,&x, &y, &a, 0);
      print_value(x,1);
      print_value(y,1);
      print_value(a,1);
    }
  if(options.output_right_eye && options.out_sample_right && hasright)
    {
      float x, y, a;
      get_sample_data(s,&x, &y, &a, 1);
      print_value(x,1);
      print_value(y,1);
      print_value(a,1);
    }


  /* per-eye velocity columns: pick fast or standard velocity fields,
     then select the field set matching the output coordinate type */
  if(options.output_sample_velocity)
  {
	float xlvel=NaN, xrvel=NaN, ylvel=NaN, yrvel=NaN;
    if(options.fast_velocity)
    {
        switch(options.output_sample_type)
	    {
	    case OUTPUT_GAZE:
		    xlvel = s->fgxvel[0];
		    xrvel = s->fgxvel[1];
		    ylvel = s->fgyvel[0];
		    yrvel = s->fgyvel[1];
		    break;
	    case OUTPUT_HREF:
		    xlvel = s->fhxvel[0];
		    xrvel = s->fhxvel[1];
		    ylvel = s->fhyvel[0];
		    yrvel = s->fhyvel[1];
            break;
	    case OUTPUT_PUPIL:
		    xlvel = s->frxvel[0];
		    xrvel = s->frxvel[1];
		    ylvel = s->fryvel[0];
		    yrvel = s->fryvel[1];
		    break;
	    default:
		    break;
	    }
    }
    else
    {
	    switch(options.output_sample_type)
	    {
	    case OUTPUT_GAZE:
		    xlvel = s->gxvel[0];
		    xrvel = s->gxvel[1];
		    ylvel = s->gyvel[0];
		    yrvel = s->gyvel[1];
		    break;
	    case OUTPUT_HREF:
		    xlvel = s->hxvel[0];
		    xrvel = s->hxvel[1];
		    ylvel = s->hyvel[0];
		    yrvel = s->hyvel[1];
            break;
	    case OUTPUT_PUPIL:
		    xlvel = s->rxvel[0];
		    xrvel = s->rxvel[1];
		    ylvel = s->ryvel[0];
		    yrvel = s->ryvel[1];
		    break;
	    default:
		    break;
	    }
    }
	if(options.output_left_eye &&  options.out_sample_left && hasleft)
	{
	  print_value(xlvel, 1);
	  print_value(ylvel, 1);
	}

	if(options.output_right_eye &&  options.out_sample_right && hasright)
	{
		print_value(xrvel, 1);
		print_value(yrvel, 1);
	}
  }




  /* resolution columns: use the sample's resolution when it is in a
     plausible range, otherwise fall back to the configured defaults */
  if(options.output_resolution || options.output_sample_velocity) /* Need resolution? */
    {    					  /* already computed in output_sample */
      if(s->rx>0.01 && s->rx<10000 &&s->ry>0.01 && s->ry<10000 )
		{
			xracc = s->rx;
			yracc = s->ry;
		}
      else
		{
			xracc = options.default_resolution_x;
			yracc = options.default_resolution_y;
		}
    }
  else
    {
      xracc = options.default_resolution_x;
      yracc = options.default_resolution_y;
    }

    if(options.output_resolution)
    {
      print_value(xracc,2);
      print_value(yracc,2);
    }
    /* head-marker fields, style 1: print all 8 markers or none */
    if(options.out_marker_fields==1)
      {
          if(s->flags & SAMPLE_HEADPOS)
          {

              int j =0;
              int i =s->htype;
              i &= 15;
              if(i>8) i = 8;
              if(i!=8) i =0;  /* if there is less than 8 markers, we put no values at the moment. */

              for(j =0; j <i; j++)
              {
                  double v = (double)(s->hdata[j]);
                  print_value(v,0);
              }
              for(j =i; j <8; j++)
              {
                  print_value(NaN,0);
              }
          }
		  
      }

  /* input-port value column */
  if(options.output_input_values)
  {
      print_value((double)(s->input),1) ;
  }
		 /* NEW EYELINK II flags (interp/CR state)*/
  if(options.out_sample_flags && edf_get_revision(combfile)>0 && (s->flags&SAMPLE_STATUS))
    {
      print("\t");
      if(s->errors & INTERP_SAMPLE_WARNING)
		print("I"); else print(".");

      if(options.output_left_eye && options.out_sample_left && rec->recording_mode && hasleft)
		{
			print((s->errors&CR_LOST_LEFT_WARNING)  ? "C":".");
			print((s->errors&CR_RECOV_LEFT_WARNING) ? "R":".");
		}
      if(options.output_right_eye && options.out_sample_right && rec->recording_mode && hasright)
		{
			print((s->errors&CR_LOST_RIGHT_WARNING)  ? "C":".");
			print((s->errors&CR_RECOV_RIGHT_WARNING) ? "R":".");
		}
    }

  /* binocular average gaze columns (only when both eyes are present) */
  if(options.output_sample_type == OUTPUT_GAZE && hasleft && hasright && options.out_averages)
  {
      float xl =NaN,
            yl =NaN;
      float xr=NaN,
            yr=NaN;
      get_sample_data(s,&xl, &yl, NULL, 0);
      get_sample_data(s,&xr, &yr,NULL, 1);
      /* NOTE(review): with an IEEE NaN these == comparisons are always
         false; only valid if NaN is a sentinel macro — confirm */
      if(xl == NaN  ||yl == NaN  ||xr == NaN  ||yr == NaN)
      {
        print_value(NaN,1);
        print_value(NaN,1);
      }
      else
      {
        print_value((xl+xr)/2.0,1);
        print_value((yl+yr)/2.0,1);
      }
  }

  /* EyeLink CL raw tracker data, one group per available eye */
  if(options.output_elcl)
  {
	  int eye =hasleft?0:1;
	  for(; eye<(hasright?2:1); eye++)
	{
		float raw_pupil[2];
		float raw_cr[2];
		unsigned int pupil_area;
		unsigned int cr_area;
		UINT32 pupil_dimension[2];
		UINT32 cr_dimension[2];
		UINT32 window_position[2];
		float pupil_cr[2];
		

		edf_get_uncorrected_raw_pupil(combfile,s, eye,raw_pupil);
		edf_get_uncorrected_raw_cr(combfile,s, eye, raw_cr);
		pupil_area = edf_get_uncorrected_pupil_area(combfile,s, eye);
		cr_area = edf_get_uncorrected_cr_area(combfile,s, eye);
		edf_get_pupil_dimension(combfile,s, eye,pupil_dimension);
		edf_get_cr_dimension(combfile,s, cr_dimension);
		edf_get_window_position(combfile,s, window_position);
		edf_get_pupil_cr(combfile,s,eye, pupil_cr);

		/* NOTE(review): %lu expects unsigned long but pupil_area/cr_area
		   are unsigned int; harmless on LP32 but a mismatch on LP64 —
		   consider %u (and %d vs UINT32 below) */
		print("\t%8.3f\t%8.3f\t%8.3f\t%8.3f\t%lu\t%lu\t%d\t%d\t%d\t%d\t%d\t%d\t%8.3f\t%8.3f",
				raw_pupil[0],
				raw_pupil[1],
				raw_cr[0],
				raw_cr[1],
				pupil_area,
				cr_area,
				pupil_dimension[0], pupil_dimension[1],
				cr_dimension[0],    cr_dimension[1],
				window_position[0], window_position[1],
				pupil_cr[0],
				pupil_cr[1]);

	  }
  }

  /* head-target (remote mode) position/distance and status flags;
     0x8000 marks a missing INT16 value */
  if(options.enable_htarget  && options.out_marker_fields==2&&s->flags & SAMPLE_HEADPOS)
  {
	INT16 v = s->hdata[3];

	print(" ");
	print_value((s->hdata[0]==(INT16)0x8000)?NaN:s->hdata[0],1); //x
	print_value((s->hdata[1]==(INT16)0x8000)?NaN:s->hdata[1],1); //y
	print_value((s->hdata[2]==(INT16)0x8000)?NaN:(((double)s->hdata[2])/10.0),1); //distance

	if(options.out_sample_flags)
	{
		print(" ");
		print("%s",(v & TFLAG_MISSING) ? "M":".");  
		print("%s",(v & TFLAG_ANGLE)   ? "A":".");
		print("%s",(v & TFLAG_NEAREYE) ? "N":".");
		print("%s",(v & TFLAG_CLOSE)   ? "C":".");
		print("%s",(v & TFLAG_FAR)     ? "F":".");
		print("%s",(v & TFLAG_T_TSIDE) ? "T":".");
		print("%s",(v & TFLAG_T_BSIDE) ? "B":".");
		print("%s",(v & TFLAG_T_LSIDE) ? "L":".");
		print("%s",(v & TFLAG_T_RSIDE) ? "R":".");
		print("%s",(v & TFLAG_E_TSIDE) ? "T":".");
		print("%s",(v & TFLAG_E_BSIDE) ? "B":".");
		print("%s",(v & TFLAG_E_LSIDE) ? "L":".");
		print("%s",(v & TFLAG_E_RSIDE) ? "R":".");
	}
	
  }
  print("\n");
  return 0;
}
コード例 #3
0
ファイル: hrt.c プロジェクト: FrozenCow/FIRE-ICE
/* Collect one profiling sample on the current CPU.
 *
 * Reads all active event sources (PMU, PL310), optionally captures a
 * user-space callchain, and emits the assembled record via
 * quadd_put_sample() as a scatter list of up to 5 iovecs:
 * extra_data, [callchain IPs, callchain types], event values, [task state].
 *
 * Bails out early when there are no registers, no active counters on
 * this CPU, the task's namespace proxy is gone, no events fired, or
 * sample metadata cannot be filled in.
 */
static void
read_all_sources(struct pt_regs *regs, struct task_struct *task)
{
	u32 state, extra_data = 0;
	int i, vec_idx = 0, bt_size = 0;
	int nr_events = 0, nr_positive_events = 0;
	struct pt_regs *user_regs;
	struct quadd_iovec vec[5];
	struct hrt_event_value events[QUADD_MAX_COUNTERS];
	u32 events_extra[QUADD_MAX_COUNTERS];

	struct quadd_record_data record_data;
	struct quadd_sample_data *s = &record_data.sample;

	struct quadd_ctx *ctx = hrt.quadd_ctx;
	struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
	struct quadd_callchain *cc = &cpu_ctx->cc;

	if (!regs)
		return;

	if (atomic_read(&cpu_ctx->nr_active) == 0)
		return;

	if (!task)
		task = current;

	/* skip tasks whose namespace proxy is already gone (exiting task) */
	rcu_read_lock();
	if (!task_nsproxy(task)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* gather counter values from each active source into events[] */
	if (ctx->pmu && ctx->pmu_info.active)
		nr_events += read_source(ctx->pmu, regs,
					 events, QUADD_MAX_COUNTERS);

	if (ctx->pl310 && ctx->pl310_info.active)
		nr_events += read_source(ctx->pl310, regs,
					 events + nr_events,
					 QUADD_MAX_COUNTERS - nr_events);

	if (!nr_events)
		return;

	/* unwinding needs user-mode registers even for kernel-mode samples */
	if (user_mode(regs))
		user_regs = regs;
	else
		user_regs = current_pt_regs();

	if (get_sample_data(s, regs, task))
		return;

	/* iovec 0: extra_data word (flags filled in below) */
	vec[vec_idx].base = &extra_data;
	vec[vec_idx].len = sizeof(extra_data);
	vec_idx++;

	s->reserved = 0;

	if (ctx->param.backtrace) {
		cc->unw_method = hrt.unw_method;
		bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);

		/* no user callchain from a kernel-mode sample: store at least
		 * the interrupted user PC as a single kernel-context entry */
		if (!bt_size && !user_mode(regs)) {
			unsigned long pc = instruction_pointer(user_regs);

			cc->nr = 0;
#ifdef CONFIG_ARM64
			cc->cs_64 = compat_user_mode(user_regs) ? 0 : 1;
#else
			cc->cs_64 = 0;
#endif
			bt_size += quadd_callchain_store(cc, pc,
							 QUADD_UNW_TYPE_KCTX);
		}

		if (bt_size > 0) {
			int ip_size = cc->cs_64 ? sizeof(u64) : sizeof(u32);
			/* unwind-type info is packed 8 entries per byte-group */
			int nr_types = DIV_ROUND_UP(bt_size, 8);

			vec[vec_idx].base = cc->cs_64 ?
				(void *)cc->ip_64 : (void *)cc->ip_32;
			vec[vec_idx].len = bt_size * ip_size;
			vec_idx++;

			vec[vec_idx].base = cc->types;
			vec[vec_idx].len = nr_types * sizeof(cc->types[0]);
			vec_idx++;

			if (cc->cs_64)
				extra_data |= QUADD_SED_IP64;
		}

		extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
		s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
	}
	s->callchain_nr = bt_size;

	record_data.record_type = QUADD_RECORD_TYPE_SAMPLE;

	/* compress event values: flag bit per event, values for non-zero only */
	s->events_flags = 0;
	for (i = 0; i < nr_events; i++) {
		u32 value = events[i].value;
		if (value > 0) {
			s->events_flags |= 1 << i;
			events_extra[nr_positive_events++] = value;
		}
	}

	if (nr_positive_events == 0)
		return;

	vec[vec_idx].base = events_extra;
	vec[vec_idx].len = nr_positive_events * sizeof(events_extra[0]);
	vec_idx++;

	/* append the raw task state only when the task is not running */
	state = task->state;
	if (state) {
		s->state = 1;
		vec[vec_idx].base = &state;
		vec[vec_idx].len = sizeof(state);
		vec_idx++;
	} else {
		s->state = 0;
	}

	quadd_put_sample(&record_data, vec, vec_idx);
}
コード例 #4
0
ファイル: printsample.c プロジェクト: mazerj/pype2
/***********************************************************************
 *Function Name: updatePPD
 *Input: sample
 *Output: none
 *Purpose: updates the ppd namely the variables ppd_x, ppd_y, and
 *		   ppd_count
 ***********************************************************************/
/* Update the running pixels-per-degree accumulators (ppd_x, ppd_y,
 * ppd_count) from one sample, filling in s->rx / s->ry first when the
 * selected output type requires it:
 *  - HREF:  resolution is derived from the averaged per-eye coordinates
 *  - GAZE:  defaults are used only when the sample lacks SAMPLE_GAZERES
 *  - PUPIL: defaults are always used
 * Only plausible resolutions (0.01 .. 10000) are folded into the average.
 */
void updatePPD(FSAMPLE * s)
{
  float pupil;
  int left_present = (s->flags & SAMPLE_LEFT) != 0;
  int right_present = (s->flags & SAMPLE_RIGHT) != 0;

  if (options.output_sample_type == OUTPUT_HREF)
  {
    float hx, hy;
    float sum_x = 0, sum_y = 0;
    float cnt_x = 0, cnt_y = 0;

    if (options.out_sample_left && left_present)
    {
      get_sample_data (s, &hx, &hy, &pupil, 0);
      /* NOTE(review): ==/!= against NaN only works if NaN is a finite
         sentinel macro, not an IEEE NaN — confirm in the EDF headers */
      if (hx != NaN)
      {
        sum_x += hx;
        cnt_x++;
      }
      if (hy != NaN)
      {
        sum_y += hy;
        cnt_y++;
      }
    }
    if (options.out_sample_right && right_present)
    {
      get_sample_data (s, &hx, &hy, &pupil, 1);
      if (hx != NaN)
      {
        sum_x += hx;
        cnt_x++;
      }
      if (hy != NaN)
      {
        sum_y += hy;
        cnt_y++;
      }
    }
    if (cnt_x && cnt_y)
    {
      float mx = sum_x / cnt_x;
      float my = sum_y / cnt_y;
      /* HREF plane at 15000 units; 57.2958 converts radians to degrees */
      s->rx = (float)((15000.0*15000.0 + mx*mx + my*my) /
                      sqrt(15000.0*15000.0 + my*my) / 57.2958);
      s->ry = (float)((15000.0*15000.0 + mx*mx + my*my) /
                      sqrt(15000.0*15000.0 + mx*mx) / 57.2958);
    }
  }
  else if (options.output_sample_type == OUTPUT_GAZE)
  {
    if (!(s->flags & SAMPLE_GAZERES))
    {
      s->rx = options.default_resolution_x;
      s->ry = options.default_resolution_y;
    }
  }
  else if (options.output_sample_type == OUTPUT_PUPIL)
  {
    s->rx = options.default_resolution_x;
    s->ry = options.default_resolution_y;
  }

  if (s->rx > 0.01 && s->rx < 10000 && s->ry > 0.01 && s->ry < 10000)
  {
    ppd_x += s->rx;
    ppd_y += s->ry;
    ppd_count++;
  }
}
コード例 #5
0
ファイル: ff-load.c プロジェクト: mhorga/GEGL-OpenCL
/* GEGL operation entry point: decode the requested video frame (and its
 * matching audio window) into the output buffer.
 *
 * operation - this GEGL operation (holds GeglProperties)
 * output    - destination buffer receiving R'G'B' u8 pixels
 * result    - requested region (unused; full extent is always written)
 * level     - mipmap level (unused)
 *
 * Always returns TRUE; decode failures simply leave the output untouched.
 */
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *output,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv       *p = (Priv*)o->user_data;

  {
    /* decode_frame() returning 0 means the frame is available */
    if (p->video_fcontext && !decode_frame (operation, o->frame))
      {
        long sample_start = 0;

	if (p->audio_stream) 
        {
          int sample_count;
          gegl_audio_fragment_set_sample_rate (o->audio, p->audio_stream->codec->sample_rate);
          gegl_audio_fragment_set_channels    (o->audio, 2);
          gegl_audio_fragment_set_channel_layout    (o->audio, GEGL_CH_LAYOUT_STEREO);

          /* map this video frame to its audio sample range */
          sample_count = samples_per_frame (o->frame,
               o->frame_rate, p->audio_stream->codec->sample_rate,
               &sample_start);
          gegl_audio_fragment_set_sample_count (o->audio, sample_count);

	  /* decode ahead up to 5 seconds so the window is buffered */
	  decode_audio (operation, p->prevpts, p->prevpts + 5.0);
          {
            int i;
            for (i = 0; i < sample_count; i++)
            {
              get_sample_data (p, sample_start + i, &o->audio->data[0][i],
                                  &o->audio->data[1][i]);
            }
          }
        }
	
        /* RGB24 frames can be copied straight into the buffer ... */
        if (p->video_stream->codec->pix_fmt == AV_PIX_FMT_RGB24)
        {
          GeglRectangle extent = {0,0,p->width,p->height};
          gegl_buffer_set (output, &extent, 0, babl_format("R'G'B' u8"), p->lavc_frame->data[0], GEGL_AUTO_ROWSTRIDE);
        }
        else
        {
          /* ... anything else goes through a swscale conversion first.
             NOTE(review): the SwsContext is created and freed per frame;
             caching it in Priv would avoid repeated setup cost */
          struct SwsContext *img_convert_ctx;
          GeglRectangle extent = {0,0,p->width,p->height};

          img_convert_ctx = sws_getContext(p->width, p->height, p->video_stream->codec->pix_fmt,
                                           p->width, p->height, AV_PIX_FMT_RGB24,
                                           SWS_BICUBIC, NULL, NULL, NULL);
          if (!p->rgb_frame)
            p->rgb_frame = alloc_picture (AV_PIX_FMT_RGB24, p->width, p->height);
          sws_scale (img_convert_ctx, (void*)p->lavc_frame->data,
                     p->lavc_frame->linesize, 0, p->height, p->rgb_frame->data, p->rgb_frame->linesize);
          gegl_buffer_set (output, &extent, 0, babl_format("R'G'B' u8"), p->rgb_frame->data[0], GEGL_AUTO_ROWSTRIDE);
          sws_freeContext (img_convert_ctx);
        }
      }
  }
  return  TRUE;
}