Example #1
0
/*
   ** _tuiUpdateLocation_command().
   **        Command to update the display with the current execution point
 */
static void
_tuiUpdateLocation_command (char *arg, int fromTTY)
{
#ifndef TRY
  /* Delegate to the regular "frame 0" command to refresh the display.  */
  extern void frame_command (char *, int);
  frame_command ("0", FALSE);
#else
  struct frame_info *curFrame;

  /* Obtain the current execution point */
  if ((curFrame = get_current_frame ()) != (struct frame_info *) NULL)
    {
      struct frame_info *frame;
      int curLevel = 0;

      /* Walk outward from the innermost frame, counting levels, until we
         find the current frame (or run out of frames).  */
      for (frame = get_prev_frame (curLevel);
	   (frame != (struct frame_info *) NULL && (frame != curFrame));
	   frame = get_prev_frame (frame))
	curLevel++;

      /* Guard on FRAME, not curFrame: the loop above can terminate with
         frame == NULL when curFrame was never matched, and the old test
         of curFrame (already known non-NULL here) let a NULL frame reach
         print_frame_info.  */
      if (frame != (struct frame_info *) NULL)
	print_frame_info (frame, curLevel, 0, 1);
    }
#endif

  return;
}				/* _tuiUpdateLocation_command */
Example #2
0
/* Print a list of the stack frames. Args can be none, in which case
   we want to print the whole backtrace, or a pair of numbers
   specifying the frame numbers at which to start and stop the
   display. If the two numbers are equal, a single frame will be
   displayed. */
enum mi_cmd_result
mi_cmd_stack_list_frames (char *command, char **argv, int argc)
{
  int frame_low;
  int frame_high;
  int level;
  struct cleanup *cleanup_stack;
  struct frame_info *fi;

  if (!target_has_stack)
    error ("mi_cmd_stack_list_frames: No stack.");

  /* Accept either no arguments or exactly two.  */
  if (argc != 0 && argc != 2)
    error ("mi_cmd_stack_list_frames: Usage: [FRAME_LOW FRAME_HIGH]");

  if (argc == 2)
    {
      /* An explicit [low, high] range was supplied.  */
      frame_low = atoi (argv[0]);
      frame_high = atoi (argv[1]);
    }
  else
    {
      /* Called with no arguments, it means we want the whole
         backtrace. */
      frame_low = -1;
      frame_high = -1;
    }

  /* Walk from the innermost frame outward until we reach the first
     frame to display.  When frame_low is -1 (or 0) we stay on the
     innermost frame.  */
  fi = get_current_frame ();
  level = 0;
  while (fi != NULL && level < frame_low)
    {
      fi = get_prev_frame (fi);
      level++;
    }

  if (fi == NULL)
    error ("mi_cmd_stack_list_frames: Not enough frames in stack.");

  cleanup_stack = make_cleanup_ui_out_list_begin_end (uiout, "stack");

  /* Emit frames until frame_high is passed, or until the stack runs
     out when no upper bound was requested.  */
  while (fi != NULL && (frame_high == -1 || level <= frame_high))
    {
      QUIT;
      /* Print the location and the address always, even for level 0.
         args == 0: don't print the arguments. */
      print_frame_info (fi, 1, LOC_AND_ADDRESS, 0 /* args */ );
      fi = get_prev_frame (fi);
      level++;
    }

  do_cleanups (cleanup_stack);
  if (level < frame_high)
    error ("mi_cmd_stack_list_frames: Not enough frames in stack.");

  return MI_CMD_DONE;
}
/*
 * Pull one decoded picture (or field) out of the CrystalHD driver.
 *
 * On success the picture is copied into the caller's buffer via
 * copy_frame(), which also sets *data_size.  Returns a CopyRet value:
 * RET_COPY_AGAIN when the caller should call again (format change,
 * busy driver, or a duplicate packed frame was discarded), RET_ERROR
 * on driver failure, otherwise the result of copy_frame() / RET_OK.
 */
static inline CopyRet receive_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    uint8_t second_field)
{
	BC_STATUS ret;
	BC_DTS_PROC_OUT output =
	{
		.PicInfo.width  = avctx->width,
		.PicInfo.height = avctx->height,
	};
	CHDContext *priv = avctx->priv_data;
	HANDLE dev       = priv->dev;

	/* Nothing copied yet; stays 0 unless copy_frame() produces output. */
	*data_size = 0;

	// Request decoded data from the driver
	ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output);
	if (ret == BC_STS_FMT_CHANGE)
	{
		/* The driver reported new stream dimensions; adopt them and ask
		 * the caller to retry for the actual frame data. */
		av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n");
		avctx->width  = output.PicInfo.width;
		avctx->height = output.PicInfo.height;
		return RET_COPY_AGAIN;
	}
	else if (ret == BC_STS_SUCCESS)
	{
		int copy_ret = -1;
		if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID)
		{
			if (priv->last_picture == -1)
			{
				/*
				 * Init to one less, so that the incrementing code doesn't
				 * need to be special-cased.
				 */
				priv->last_picture = output.PicInfo.picture_number - 1;
			}

			/* MPEG-4 packed b-frames: the hardware returns the packed
			 * p-frame twice; a zero timestamp marks the duplicate copy,
			 * which we drop (the second copy carries the valid pts). */
			if (avctx->codec->id == CODEC_ID_MPEG4 &&
			        output.PicInfo.timeStamp == 0)
			{
				av_log(avctx, AV_LOG_VERBOSE,
				       "CrystalHD: Not returning packed frame twice.\n");
				priv->last_picture++;
				DtsReleaseOutputBuffs(dev, NULL, FALSE);
				return RET_COPY_AGAIN;
			}

			print_frame_info(priv, &output);

			if (priv->last_picture + 1 < output.PicInfo.picture_number)
			{
				av_log(avctx, AV_LOG_WARNING,
				       "CrystalHD: Picture Number discontinuity\n");
				/*
				 * Have we lost frames? If so, we need to shrink the
				 * pipeline length appropriately.
				 *
				 * XXX: I have no idea what the semantics of this situation
				 * are so I don't even know if we've lost frames or which
				 * ones.
				 *
				 * In any case, only warn the first time.
				 */
				priv->last_picture = output.PicInfo.picture_number - 1;
			}

			copy_ret = copy_frame(avctx, &output, data, data_size, second_field);
			if (*data_size > 0)
			{
				/* NOTE(review): has_b_frames appears to be used as a
				 * pipeline-depth counter here (incremented on input in
				 * decode(), decremented when a frame is emitted) — confirm
				 * against the rest of the decoder. */
				avctx->has_b_frames--;
				priv->last_picture++;
				av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n",
				       avctx->has_b_frames);
			}
		}
		else
		{
			/*
			 * An invalid frame has been consumed.
			 */
			av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
			       "invalid PIB\n");
			avctx->has_b_frames--;
			copy_ret = RET_OK;
		}
		/* Always hand the output buffer back to the driver on success. */
		DtsReleaseOutputBuffs(dev, NULL, FALSE);

		return copy_ret;
	}
	else if (ret == BC_STS_BUSY)
	{
		/* Driver not ready yet; caller should retry. */
		return RET_COPY_AGAIN;
	}
	else
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
		return RET_ERROR;
	}
}


/*
 * Decoder entry point: push the incoming packet (if any) into the
 * CrystalHD hardware, then attempt to pull one decoded frame back out
 * via receive_frame().
 *
 * Returns the number of input bytes consumed (len — possibly 0 when
 * the hardware input buffer was full) or a negative error code.
 * Decoded output, when available, is delivered through data/data_size.
 */
static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
	BC_STATUS ret;
	BC_DTS_STATUS decoder_status;
	CopyRet rec_ret;
	CHDContext *priv   = avctx->priv_data;
	HANDLE dev         = priv->dev;
	int len            = avpkt->size;

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");

	if (len)
	{
		int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
		/* Only submit when the hardware input FIFO has headroom beyond a
		 * 1024-byte safety margin. */
		if (len < tx_free - 1024)
		{
			/*
			 * Despite being notionally opaque, either libcrystalhd or
			 * the hardware itself will mangle pts values that are too
			 * small or too large. The docs claim it should be in units
			 * of 100ns. Given that we're nominally dealing with a black
			 * box on both sides, any transform we do has no guarantee of
			 * avoiding mangling so we need to build a mapping to values
			 * we know will not be mangled.
			 */
			uint64_t pts = opaque_list_push(priv, avctx->pkt->pts);
			if (!pts)
			{
				return AVERROR(ENOMEM);
			}
			av_log(priv->avctx, AV_LOG_VERBOSE,
			       "input \"pts\": %"PRIu64"\n", pts);
			ret = DtsProcInput(dev, avpkt->data, len, pts, 0);
			if (ret == BC_STS_BUSY)
			{
				av_log(avctx, AV_LOG_WARNING,
				       "CrystalHD: ProcInput returned busy\n");
				usleep(BASE_WAIT);
				return AVERROR(EBUSY);
			}
			else if (ret != BC_STS_SUCCESS)
			{
				av_log(avctx, AV_LOG_ERROR,
				       "CrystalHD: ProcInput failed: %u\n", ret);
				return -1;
			}
			/* Track pipeline depth: one more packet is now in flight. */
			avctx->has_b_frames++;
		}
		else
		{
			av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
			len = 0; // We didn't consume any bytes.
		}
	}
	else
	{
		av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
	}

	/* Flag set on a previous call (RET_SKIP_NEXT_COPY below): two input
	 * packets became one field pair, so suppress output this round. */
	if (priv->skip_next_output)
	{
		av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
		priv->skip_next_output = 0;
		avctx->has_b_frames--;
		return len;
	}

	ret = DtsGetDriverStatus(dev, &decoder_status);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
		return -1;
	}

	/*
	 * No frames ready. Don't try to extract.
	 *
	 * Empirical testing shows that ReadyListCount can be a damn lie,
	 * and ProcOut still fails when count > 0. The same testing showed
	 * that two more iterations were needed before ProcOutput would
	 * succeed.
	 */
	if (priv->output_ready < 2)
	{
		if (decoder_status.ReadyListCount != 0)
			priv->output_ready++;
		usleep(BASE_WAIT);
		av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
		return len;
	}
	else if (decoder_status.ReadyListCount == 0)
	{
		/*
		 * After the pipeline is established, if we encounter a lack of frames
		 * that probably means we're not giving the hardware enough time to
		 * decode them, so start increasing the wait time at the end of a
		 * decode call.
		 */
		usleep(BASE_WAIT);
		priv->decode_wait += WAIT_UNIT;
		av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
		return len;
	}

	/* Retry extraction while receive_frame() asks us to go around again
	 * (format change handled, driver busy, or duplicate packed frame). */
	do
	{
		rec_ret = receive_frame(avctx, data, data_size, 0);
		if (rec_ret == 0 && *data_size == 0)
		{
			if (avctx->codec->id == CODEC_ID_H264)
			{
				/*
				 * This case is for when the encoded fields are stored
				 * separately and we get a separate avpkt for each one. To keep
				 * the pipeline stable, we should return nothing and wait for
				 * the next time round to grab the second field.
				 * H.264 PAFF is an example of this.
				 */
				av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
				avctx->has_b_frames--;
			}
			else
			{
				/*
				 * This case is for when the encoded fields are stored in a
				 * single avpkt but the hardware returns then separately. Unless
				 * we grab the second field before returning, we'll slip another
				 * frame in the pipeline and if that happens a lot, we're sunk.
				 * So we have to get that second field now.
				 * Interlaced mpeg2 and vc1 are examples of this.
				 */
				av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
				/* NOTE(review): this loop has no timeout or abort path — it
				 * spins until the driver reports a ready frame and the copy
				 * succeeds (or errors). Confirm the hardware always delivers
				 * the second field, else this can hang. */
				while (1)
				{
					usleep(priv->decode_wait);
					ret = DtsGetDriverStatus(dev, &decoder_status);
					if (ret == BC_STS_SUCCESS &&
					        decoder_status.ReadyListCount > 0)
					{
						rec_ret = receive_frame(avctx, data, data_size, 1);
						if ((rec_ret == 0 && *data_size > 0) ||
						        rec_ret == RET_ERROR)
							break;
					}
				}
				av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
			}
		}
		else if (rec_ret == RET_SKIP_NEXT_COPY)
		{
			/*
			 * Two input packets got turned into a field pair. Gawd.
			 */
			av_log(avctx, AV_LOG_VERBOSE,
			       "Don't output on next decode call.\n");
			priv->skip_next_output = 1;
		}
		/*
		 * If rec_ret == RET_COPY_AGAIN, that means that either we just handled
		 * a FMT_CHANGE event and need to go around again for the actual frame,
		 * we got a busy status and need to try again, or we're dealing with
		 * packed b-frames, where the hardware strangely returns the packed
		 * p-frame twice. We choose to keep the second copy as it carries the
		 * valid pts.
		 */
	}
	while (rec_ret == RET_COPY_AGAIN);
	/* Give the hardware its (adaptively tuned) breathing room. */
	usleep(priv->decode_wait);
	return len;
}
Example #4
0
/* Implement the -stack-list-frames MI command: print the stack frames,
   either the whole backtrace or the [FRAME_LOW, FRAME_HIGH] range given
   as arguments.  Frame filters (Python/extension-language) are applied
   unless --no-frame-filters is passed or none are registered, in which
   case the built-in backtrace printer below is used.  */
void
mi_cmd_stack_list_frames (char *command, char **argv, int argc)
{
  int frame_low;
  int frame_high;
  int i;
  struct cleanup *cleanup_stack;
  struct frame_info *fi;
  /* ERROR by default so the built-in path below runs only when the
     filter machinery was never invoked (or explicitly declined).  */
  enum ext_lang_bt_status result = EXT_LANG_BT_ERROR;
  int raw_arg = 0;
  int oind = 0;
  enum opt
    {
      NO_FRAME_FILTERS
    };
  static const struct mi_opt opts[] =
    {
      {"-no-frame-filters", NO_FRAME_FILTERS, 0},
      { 0, 0, 0 }
    };

  /* Parse arguments.  In this instance we are just looking for
     --no-frame-filters.  */
  while (1)
    {
      char *oarg;
      int opt = mi_getopt ("-stack-list-frames", argc, argv,
			   opts, &oind, &oarg);
      if (opt < 0)
	break;
      switch ((enum opt) opt)
	{
	case NO_FRAME_FILTERS:
	  /* Non-zero marks the flag as seen; the value itself is the
	     option's index and is only tested for truth below.  */
	  raw_arg = oind;
	  break;
	}
    }

  /* After the last option is parsed, there should either be low -
     high range, or no further arguments.  */
  if ((argc - oind != 0) && (argc - oind != 2))
    error (_("-stack-list-frames: Usage: [--no-frame-filters] [FRAME_LOW FRAME_HIGH]"));

  /* If there is a range, set it.  */
  if (argc - oind == 2)
    {
      frame_low = atoi (argv[0 + oind]);
      frame_high = atoi (argv[1 + oind]);
    }
  else
    {
      /* Called with no arguments, it means we want the whole
         backtrace.  */
      frame_low = -1;
      frame_high = -1;
    }

  /* Let's position fi on the frame at which to start the
     display. Could be the innermost frame if the whole stack needs
     displaying, or if frame_low is 0.  */
  for (i = 0, fi = get_current_frame ();
       fi && i < frame_low;
       i++, fi = get_prev_frame (fi));

  if (fi == NULL)
    error (_("-stack-list-frames: Not enough frames in stack."));

  cleanup_stack = make_cleanup_ui_out_list_begin_end (current_uiout, "stack");

  if (! raw_arg && frame_filters)
    {
      int flags = PRINT_LEVEL | PRINT_FRAME_INFO;
      int py_frame_low = frame_low;

      /* We cannot pass -1 to frame_low, as that would signify a
      relative backtrace from the tail of the stack.  So, in the case
      of frame_low == -1, assign and increment it.  */
      if (py_frame_low == -1)
	py_frame_low++;

      result = apply_ext_lang_frame_filter (get_current_frame (), flags,
					    NO_VALUES,  current_uiout,
					    py_frame_low, frame_high);
    }

  /* Run the inbuilt backtrace if there are no filters registered, or
     if "--no-frame-filters" has been specified from the command.  */
  if (! frame_filters || raw_arg  || result == EXT_LANG_BT_NO_FILTERS)
    {
      /* Now let's print the frames up to frame_high, or until there are
	 frames in the stack.  */
      for (;
	   fi && (i <= frame_high || frame_high == -1);
	   i++, fi = get_prev_frame (fi))
	{
	  QUIT;
	  /* Print the location and the address always, even for level 0.
	     If args is 0, don't print the arguments.  */
	  print_frame_info (fi, 1, LOC_AND_ADDRESS, 0 /* args */, 0);
	}
    }

  do_cleanups (cleanup_stack);
}
Example #5
0
int main(int argc, const char * argv[]) {

    dc1394_t *d;
    dc1394camera_list_t *list;
    dc1394error_t err;
    dc1394camera_t *camera;
    dc1394format7modeset_t modeset;
    dc1394video_frame_t *frame;
    FILE* imagefile;
    char filename[256];
    int i = 0;

    d = dc1394_new();
    if (!d) {
        return 1;
    }

    err = dc1394_camera_enumerate(d, &list);
    DC1394_ERR_RTN(err, "Failed to enumerate cameras");
    if (list->num == 0) {
        dc1394_log_error("No cameras found");
        dc1394_free(d);
        return 1;
    }
    printf("Detected %d cameras\n", list->num);

    // Assume that Ladybug 5 is detected as camera #0
    camera = dc1394_camera_new(d, list->ids[0].guid);
    if (!camera) {
        dc1394_log_error("Failed to initialize camera with guid %llx", list->ids[0].guid);
        dc1394_free(d);
    }
    dc1394_camera_free_list(list);
    printf("Using camera %s %s\n", camera->vendor, camera->model);

    // Report camera info
    err = dc1394_camera_print_info(camera, stdout);
    DC1394_ERR_RTN(err, "Could not print camera info");


    // Setup video mode, etc...
    err = dc1394_video_set_operation_mode(camera, DC1394_OPERATION_MODE_1394B);
    DC1394_ERR_RTN(err, "Could not set B mode");
    err = dc1394_video_set_iso_speed(camera, DC1394_ISO_SPEED_MAX);
    DC1394_ERR_RTN(err, "Could not set max speed");
    err = dc1394_video_set_mode(camera, DC1394_VIDEO_MODE_FORMAT7_0);
    DC1394_ERR_RTN(err, "Could not set DC1394_VIDEO_MODE_FORMAT7_0");

    // Get format7 mode info
    err = dc1394_format7_get_modeset(camera, &modeset);
    DC1394_ERR_RTN(err, "Could not get format 7 mode info\n");
    print_format7_info(&modeset);


    // Set format 7 roi
    err = dc1394_format7_set_roi(camera,
                                 DC1394_VIDEO_MODE_FORMAT7_0,
                                 modeset.mode[0].color_coding,
                                 modeset.mode[0].max_packet_size,
                                 modeset.mode[0].pos_x,
                                 modeset.mode[0].pos_y,
                                 modeset.mode[0].max_size_x,
                                 modeset.mode[0].max_size_y);
    DC1394_ERR_RTN(err, "Could not set max ROI");

    // Set capture
    err = dc1394_capture_setup(camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT);
    DC1394_ERR_RTN(err, "Could not setup capture");
    err = dc1394_video_set_transmission(camera, DC1394_ON);
    DC1394_ERR_RTN(err, "Could not start transmission");

    while (i < NFRAMES) {
        // Capture image
        printf("Capturing image %d\n", i);
        err = dc1394_capture_dequeue(camera, DC1394_CAPTURE_POLICY_WAIT, &frame);
        DC1394_ERR_RTN(err, "Could not dequeue a frame");
        
        // Do something with the image
        print_frame_info(frame);

        // Save the image
        sprintf(filename, "%05d.pgm",i);
        imagefile = fopen(filename, "wb");
        if ( imagefile == NULL ) {
            printf("Could not save image\n");
            continue;
        }
        fprintf(imagefile, "P5\n%u %u 255\n", frame->size[0], frame->size[1]);
        fwrite(frame->image, 1, frame->image_bytes, imagefile);
        fclose(imagefile);
        printf("Saved image %s\n", filename);

        err = dc1394_capture_enqueue(camera, frame);
        DC1394_ERR_RTN(err, "Could enqueue a frame");

        i++;
    }

    dc1394_camera_free(camera);
    dc1394_free(d);
    return 0;
}