Code example #1
File: mpegtsbase.c Project: Haifen/gst-plugins-bad
static GstFlowReturn
mpegts_base_scan (MpegTSBase * base)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *buf = NULL;
  guint i;
  gboolean done = FALSE;
  MpegTSPacketizerPacketReturn pret;
  gint64 tmpval;
  gint64 upstream_size, seek_pos, reverse_limit;
  GstFormat format;
  guint initial_pcr_seen;

  GST_DEBUG ("Scanning for initial sync point");

  /* Find initial sync point and at least 5 PCR values */
  for (i = 0; i < 20 && !done; i++) {
    GST_DEBUG ("Grabbing %d => %d", i * 65536, (i + 1) * 65536);

    ret = gst_pad_pull_range (base->sinkpad, i * 65536, 65536, &buf);
    if (G_UNLIKELY (ret == GST_FLOW_EOS))
      break;
    if (G_UNLIKELY (ret != GST_FLOW_OK))
      goto beach;

    /* Push to packetizer */
    mpegts_packetizer_push (base->packetizer, buf);
    buf = NULL;

    if (mpegts_packetizer_has_packets (base->packetizer)) {
      if (base->seek_offset == -1) {
        /* Mark the initial sync point and remember the packetsize */
        base->seek_offset = base->packetizer->offset;
        GST_DEBUG ("Sync point is now %" G_GUINT64_FORMAT, base->seek_offset);
        base->packetsize = base->packetizer->packet_size;
      }
      while (1) {
        /* Eat up all packets */
        pret = mpegts_packetizer_process_next_packet (base->packetizer);
        if (pret == PACKET_NEED_MORE)
          break;
        if (pret != PACKET_BAD && base->packetizer->nb_seen_offsets >= 5) {
          GST_DEBUG ("Got enough initial PCR");
          done = TRUE;
          break;
        }
      }
    }
  }

  initial_pcr_seen = base->packetizer->nb_seen_offsets;
  if (G_UNLIKELY (initial_pcr_seen == 0))
    goto no_initial_pcr;
  GST_DEBUG ("Seen %d initial PCR", initial_pcr_seen);

  /* Now send data from the end */

  /* Get the size of upstream */
  format = GST_FORMAT_BYTES;
  if (!gst_pad_peer_query_duration (base->sinkpad, format, &tmpval))
    goto beach;
  upstream_size = tmpval;

  /* The scanning takes place on the last 2048kB. Considering PCR should
   * be present at least every 100ms, this should cope with streams
   * up to 160Mbit/s */
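  /* (Arithmetic check of that figure: 2097152 bytes per 100 ms is about
   * 20.97 MB/s, i.e. roughly 168 Mbit/s.) */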
  reverse_limit = MAX (0, upstream_size - 2097152);

  /* Find last PCR value, searching backwards by chunks of 300 MPEG-ts packets */
  for (seek_pos = MAX (0, upstream_size - 56400);
      seek_pos >= reverse_limit; seek_pos -= 56400) {
    mpegts_packetizer_clear (base->packetizer);
    GST_DEBUG ("Grabbing %" G_GUINT64_FORMAT " => %" G_GUINT64_FORMAT, seek_pos,
        seek_pos + 56400);

    ret = gst_pad_pull_range (base->sinkpad, seek_pos, 56400, &buf);
    if (G_UNLIKELY (ret == GST_FLOW_EOS))
      break;
    if (G_UNLIKELY (ret != GST_FLOW_OK))
      goto beach;

    /* Push to packetizer */
    mpegts_packetizer_push (base->packetizer, buf);
    buf = NULL;

    if (mpegts_packetizer_has_packets (base->packetizer)) {
      pret = PACKET_OK;
      /* Eat up all packets, really try to get last PCR(s) */
      while (pret != PACKET_NEED_MORE)
        pret = mpegts_packetizer_process_next_packet (base->packetizer);

      if (base->packetizer->nb_seen_offsets > initial_pcr_seen) {
        GST_DEBUG ("Got last PCR(s) (total seen:%d)",
            base->packetizer->nb_seen_offsets);
        break;
      }
    }
  }

beach:
  mpegts_packetizer_clear (base->packetizer);
  return ret;

no_initial_pcr:
  mpegts_packetizer_clear (base->packetizer);
  GST_WARNING_OBJECT (base, "Couldn't find any PCR within the first %d bytes",
      20 * 65536);
  return GST_FLOW_ERROR;
}
Code example #2
File: psdf_logphi.c Project: cbuehler/gwyddion
static void
psdflp_do(const PSDFLPArgs *args, GwyDataField *dfield, GwyDataField *lpsdf)
{
    enum { N = 4 };

    GwyDataField *reout, *imout;
    gint pxres, pyres, fxres, fyres;
    gint i, j, fi, pi;
    gdouble *ldata, *redata, *imdata;
    gdouble *cosphi, *sinphi;
    gdouble xreal, yreal, f0, f_max, b, p;

    reout = gwy_data_field_new_alike(dfield, FALSE);
    imout = gwy_data_field_new_alike(dfield, FALSE);
    gwy_data_field_2dfft(dfield, NULL, reout, imout,
                         args->window, GWY_TRANSFORM_DIRECTION_FORWARD,
                         GWY_INTERPOLATION_ROUND, /* Ignored */
                         TRUE, 1);

    pxres = reout->xres;
    pyres = reout->yres;
    redata = gwy_data_field_get_data(reout);
    imdata = gwy_data_field_get_data(imout);
    for (i = 0; i < pxres*pyres; i++)
        redata[i] = redata[i]*redata[i] + imdata[i]*imdata[i];
    gwy_data_field_2dfft_humanize(reout);
    gwy_data_field_filter_gaussian(reout, args->sigma);
    for (i = 0; i < pxres*pyres; i++)
        redata[i] = sqrt(redata[i]);

    fxres = pxres/2;
    fyres = pyres/2;
    gwy_data_field_resample(lpsdf, fxres, fyres, GWY_INTERPOLATION_NONE);
    ldata = gwy_data_field_get_data(lpsdf);

    xreal = dfield->xreal;
    yreal = dfield->yreal;
    f0 = 2.0/MIN(xreal, yreal);
    f_max = 0.5*MIN(pxres/xreal, pyres/yreal);
    if (f_max <= f0) {
        g_warning("Minimum frequency is not smaller than maximum frequency.");
    }
    b = log(f_max/f0)/fyres;
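    /* With this b, output row i covers the frequency band
     * [f0*exp(b*i), f0*exp(b*(i+1))], i.e. fyres logarithmically spaced
     * bands running from f0 up to f_max (see f_from/f_to below). */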

    /* Incorporate some prefactors to sinphi[] and cosphi[], knowing that
     * cosine is only ever used for x and sine for y frequencies. */
    cosphi = g_new(gdouble, (N+1)*fxres);
    sinphi = g_new(gdouble, (N+1)*fxres);
    for (j = 0; j < fxres; j++) {
        gdouble phi_from = 2.0*G_PI*j/fxres;
        gdouble phi_to = 2.0*G_PI*(j + 1.0)/fxres;

        for (pi = 0; pi <= N; pi++) {
            gdouble phi = ((pi + 0.5)*phi_from + (N - 0.5 - pi)*phi_to)/N;
            cosphi[j*(N+1) + pi] = cos(phi)*xreal;
            sinphi[j*(N+1) + pi] = sin(phi)*yreal;
        }
    }

    for (i = 0; i < fyres; i++) {
        gdouble f_from = f0*exp(b*i);
        gdouble f_to = f0*exp(b*(i + 1.0));

        for (j = 0; j < fxres; j++) {
            const gdouble *cosphi_j = cosphi + j*(N+1);
            const gdouble *sinphi_j = sinphi + j*(N+1);
            guint n = 0;
            gdouble s = 0.0;

            for (fi = 0; fi <= N; fi++) {
                gdouble f = ((fi + 0.5)*f_from + (N - 0.5 - fi)*f_to)/N;
                for (pi = 0; pi <= N; pi++) {
                    gdouble x = f*cosphi_j[pi] + pxres/2.0,
                            y = f*sinphi_j[pi] + pyres/2.0;

                    if (G_UNLIKELY(x < 0.5
                                   || y < 0.5
                                   || x > pxres - 1.5
                                   || y > pyres - 1.5))
                        continue;

                    p = gwy_data_field_get_dval(reout, x, y,
                                                GWY_INTERPOLATION_SCHAUM);
                    s += p;
                    n++;
                }
            }

            if (!n)
                n = 1;

            ldata[i*fxres + j] = 2.0*G_PI/fxres * s/n*(f_to - f_from);
        }
    }

    g_object_unref(imout);
    g_object_unref(reout);

    gwy_data_field_set_xreal(lpsdf, 2.0*G_PI);
    gwy_data_field_set_xoffset(lpsdf, 0.0);
    gwy_data_field_set_yreal(lpsdf, log(f_max/f0));
    gwy_data_field_set_yoffset(lpsdf, log(f0));
    gwy_si_unit_set_from_string(gwy_data_field_get_si_unit_xy(lpsdf), "");
    gwy_si_unit_set_from_string(gwy_data_field_get_si_unit_z(lpsdf), "");
    gwy_data_field_normalize(lpsdf);
}
Code example #3
File: sgen-major-copying.c Project: Alkarex/mono
static void
major_copy_or_mark_object (void **obj_slot, SgenGrayQueue *queue)
{
	char *forwarded;
	char *obj = *obj_slot;
	mword objsize;

	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, fprintf (gc_debug_file, "Precise copy of %p from %p", obj, obj_slot));

	/*
	 * obj must belong to one of:
	 *
	 * 1. the nursery
	 * 2. the LOS
	 * 3. a pinned chunk
	 * 4. a non-to-space section of the major heap
	 * 5. a to-space section of the major heap
	 *
	 * In addition, objects in 1, 2 and 4 might also be pinned.
	 * Objects in 1 and 4 might be forwarded.
	 *
	 * Before we can copy the object we must make sure that we are
	 * allowed to, i.e. that the object is not pinned, not already
	 * forwarded, not in the nursery To Space and doesn't belong
	 * to the LOS, a pinned chunk, or a to-space section.
	 *
	 * We are usually called for to-space objects (5) when we have
	 * two remset entries for the same reference.  The first entry
	 * copies the object and updates the reference and the second
	 * calls us with the updated reference that points into
	 * to-space.  There might also be other circumstances where we
	 * get to-space objects.
	 */

	if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
		DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (already forwarded to %p)\n", forwarded));
		HEAVY_STAT (++stat_major_copy_object_failed_forwarded);
		*obj_slot = forwarded;
		return;
	}
	if (SGEN_OBJECT_IS_PINNED (obj)) {
		DEBUG (9, g_assert (((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr));
		DEBUG (9, fprintf (gc_debug_file, " (pinned, no change)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_pinned);
		return;
	}

	if (ptr_in_nursery (obj)) {
		/* A To Space object is already on its final destination for the current collection. */
		if (sgen_nursery_is_to_space (obj))
			return;
		goto copy;
	}

	/*
	 * At this point we know obj is not pinned, not forwarded and
	 * belongs to 2, 3, 4, or 5.
	 *
	 * LOS objects (2) are simple, at least as long as we always follow
	 * the rule: if objsize > SGEN_MAX_SMALL_OBJ_SIZE, pin the
	 * object and return it.  At the end of major collections, we
	 * walk the los list and if the object is pinned, it is
	 * marked, otherwise it can be freed.
	 *
	 * Pinned chunks (3) and major heap sections (4, 5) both
	 * reside in blocks, which are always aligned, so once we've
	 * eliminated LOS objects, we can just access the block and
	 * see whether it's a pinned chunk or a major heap section.
	 */

	objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));

	if (G_UNLIKELY (objsize > SGEN_MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;
		DEBUG (9, fprintf (gc_debug_file, " (marked LOS/Pinned %p (%s), size: %td)\n", obj, sgen_safe_name (obj), objsize));
		binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
		SGEN_PIN_OBJECT (obj);
		GRAY_OBJECT_ENQUEUE (queue, obj);
		HEAVY_STAT (++stat_major_copy_object_failed_large_pinned);
		return;
	}

	/*
	 * Now we know the object is in a major heap section.  All we
	 * need to do is check whether it's already in to-space (5) or
	 * not (4).
	 */
	if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
		DEBUG (9, g_assert (objsize <= SGEN_MAX_SMALL_OBJ_SIZE));
		DEBUG (9, fprintf (gc_debug_file, " (already copied)\n"));
		HEAVY_STAT (++stat_major_copy_object_failed_to_space);
		return;
	}

 copy:
	HEAVY_STAT (++stat_objects_copied_major);

	*obj_slot = copy_object_no_checks (obj, queue);
}
Code example #4
File: boundary.c Project: jdburton/gimp-osx
/**
 * boundary_sort:
 * @segs:       unsorted input segs.
 * @num_segs:   number of input segs
 * @num_groups: number of groups in the sorted segs
 *
 * This function takes an array of #BoundSeg's as returned by
 * boundary_find() and sorts it by contiguous groups. The returned
 * array contains markers consisting of -1 coordinates and is
 * @num_groups elements longer than @segs.
 *
 * Return value: the sorted segs
 **/
BoundSeg *
boundary_sort (const BoundSeg *segs,
               gint            num_segs,
               gint           *num_groups)
{
  Boundary        *boundary;
  const BoundSeg **segs_ptrs_by_xy1;
  const BoundSeg **segs_ptrs_by_xy2;
  gint             index;
  gint             x, y;
  gint             startx, starty;

  g_return_val_if_fail ((segs == NULL && num_segs == 0) ||
                        (segs != NULL && num_segs >  0), NULL);
  g_return_val_if_fail (num_groups != NULL, NULL);

  *num_groups = 0;

  if (num_segs == 0)
    return NULL;

  /* prepare arrays with BoundSeg pointers sorted by xy1 and xy2 accordingly */
  segs_ptrs_by_xy1 = g_new (const BoundSeg *, num_segs);
  segs_ptrs_by_xy2 = g_new (const BoundSeg *, num_segs);

  for (index = 0; index < num_segs; index++)
    {
      segs_ptrs_by_xy1[index] = segs + index;
      segs_ptrs_by_xy2[index] = segs + index;
    }

  qsort (segs_ptrs_by_xy1, num_segs, sizeof (BoundSeg *),
         (GCompareFunc) cmp_segptr_xy1_addr);
  qsort (segs_ptrs_by_xy2, num_segs, sizeof (BoundSeg *),
         (GCompareFunc) cmp_segptr_xy2_addr);

  for (index = 0; index < num_segs; index++)
    ((BoundSeg *) segs)[index].visited = FALSE;

  boundary = boundary_new (NULL);

  for (index = 0; index < num_segs; index++)
    {
      const BoundSeg *cur_seg;

      if (segs[index].visited)
        continue;

      boundary_add_seg (boundary,
                        segs[index].x1, segs[index].y1,
                        segs[index].x2, segs[index].y2,
                        segs[index].open);

      ((BoundSeg *) segs)[index].visited = TRUE;

      startx = segs[index].x1;
      starty = segs[index].y1;
      x = segs[index].x2;
      y = segs[index].y2;

      while ((cur_seg = find_segment (segs_ptrs_by_xy1, segs_ptrs_by_xy2,
                                      num_segs, x, y)) != NULL)
        {
          /*  make sure ordering is correct  */
          if (x == cur_seg->x1 && y == cur_seg->y1)
            {
              boundary_add_seg (boundary,
                                cur_seg->x1, cur_seg->y1,
                                cur_seg->x2, cur_seg->y2,
                                cur_seg->open);
              x = cur_seg->x2;
              y = cur_seg->y2;
            }
          else
            {
              boundary_add_seg (boundary,
                                cur_seg->x2, cur_seg->y2,
                                cur_seg->x1, cur_seg->y1,
                                cur_seg->open);
              x = cur_seg->x1;
              y = cur_seg->y1;
            }

          ((BoundSeg *) cur_seg)->visited = TRUE;
        }

      if (G_UNLIKELY (x != startx || y != starty))
        g_warning ("boundary_sort(): Unconnected boundary group!");

      /*  Mark the end of a group  */
      *num_groups = *num_groups + 1;
      boundary_add_seg (boundary, -1, -1, -1, -1, 0);
    }

  g_free (segs_ptrs_by_xy1);
  g_free (segs_ptrs_by_xy2);

  return boundary_free (boundary, FALSE);
}
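
A minimal, hypothetical consumption sketch for the contract documented above (it is not part of boundary.c; it assumes a segs/num_segs pair previously obtained from boundary_find() and a caller-defined handle_seg() helper): the sorted array is num_segs + num_groups elements long, and each group ends with a marker seg whose four coordinates are -1.

  gint      num_groups = 0;
  gint      i, group = 0;
  BoundSeg *sorted = boundary_sort (segs, num_segs, &num_groups);

  for (i = 0; i < num_segs + num_groups; i++)
    {
      if (sorted[i].x1 == -1 && sorted[i].y1 == -1 &&
          sorted[i].x2 == -1 && sorted[i].y2 == -1)
        group++;                          /* end-of-group marker */
      else
        handle_seg (group, &sorted[i]);   /* hypothetical per-seg callback */
    }

  g_free (sorted);  /* assumption: the caller owns the returned array */
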
Code example #5
static void
major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
{
	void *obj = *ptr;
	mword vtable_word = *(mword*)obj;
	MonoVTable *vt = (MonoVTable*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
	mword objsize;
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, g_assert (obj));
	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	if (vtable_word & SGEN_FORWARDED_BIT) {
		*ptr = (void*)vt;
		return;
	}

	if (ptr_in_nursery (obj)) {
		int word, bit;
		gboolean has_references;
		void *destination;

		if (vtable_word & SGEN_PINNED_BIT)
			return;

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		objsize = SGEN_ALIGN_UP (mono_sgen_par_object_get_size (vt, (MonoObject*)obj));
		has_references = SGEN_VTABLE_HAS_REFERENCES (vt);

		destination = major_alloc_object (objsize, has_references);
		if (G_UNLIKELY (!destination)) {
			if (!ptr_in_nursery (obj)) {
				int size_index;
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
			}

			do {
				if (SGEN_CAS_PTR (obj, (void*)((mword)vt | SGEN_PINNED_BIT), vt) == vt) {
					mono_sgen_pin_object (obj, queue);
					break;
				}

				vtable_word = *(mword*)obj;
				/*someone else forwarded it, update the pointer and bail out*/
				if (vtable_word & SGEN_FORWARDED_BIT) {
					*ptr = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);
					break;
				}

				/*someone pinned it, nothing to do.*/
				if (vtable_word & SGEN_PINNED_BIT)
					break;
			} while (TRUE);
			return;
		}

		/* Try to atomically install the forwarding pointer over the vtable word;
		 * only the thread whose CAS succeeds copies the object, a loser returns
		 * its allocation and follows the winner's pointer (else branch below). */
		if (SGEN_CAS_PTR (obj, (void*)((mword)destination | SGEN_FORWARDED_BIT), vt) == vt) {
			gboolean was_marked;

			par_copy_object_no_checks (destination, vt, obj, objsize, has_references ? queue : NULL);
			obj = destination;
			*ptr = obj;

			/*
			 * FIXME: If we make major_alloc_object() give
			 * us the block info, too, we won't have to
			 * re-fetch it here.
			 */
			block = MS_BLOCK_FOR_OBJ (obj);
			MS_CALC_MARK_BIT (word, bit, obj);
			DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
			MS_PAR_SET_MARK_BIT (was_marked, block, word, bit);
		} else {
			/*
			 * FIXME: We have allocated destination, but
			 * we cannot use it.  Give it back to the
			 * allocator.
			 */
			*(void**)destination = NULL;

			vtable_word = *(mword*)obj;
			g_assert (vtable_word & SGEN_FORWARDED_BIT);

			obj = (void*)(vtable_word & ~SGEN_VTABLE_BITS_MASK);

			*ptr = obj;
		}
	} else {
#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		objsize = SGEN_ALIGN_UP (mono_sgen_par_object_get_size (vt, (MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			int size_index;

			block = MS_BLOCK_FOR_OBJ (obj);
			size_index = block->obj_size_index;

			if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
				if (block->is_to_space)
					return;
				HEAVY_STAT (++stat_major_objects_evacuated);
				goto do_copy_object;
			} else {
				MS_PAR_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
		} else {
			if (vtable_word & SGEN_PINNED_BIT)
				return;
			binary_protocol_pin (obj, vt, mono_sgen_safe_object_get_size ((MonoObject*)obj));
			if (SGEN_CAS_PTR (obj, (void*)(vtable_word | SGEN_PINNED_BIT), (void*)vtable_word) == (void*)vtable_word) {
				if (SGEN_VTABLE_HAS_REFERENCES (vt))
					GRAY_OBJECT_ENQUEUE (queue, obj);
			} else {
				g_assert (SGEN_OBJECT_IS_PINNED (obj));
			}
		}
	}
}
Code example #6
static void
gst_pvrvideosink_blit (GstPVRVideoSink * pvrvideosink, GstBuffer * buffer)
{
  PVR2DERROR pvr_error;
  GstDrawContext *dcontext = pvrvideosink->dcontext;
  gint video_width;
  gint video_height;
  gboolean draw_border = FALSE;
  PPVR2D_3DBLT_EXT p_blt_3d;
  PVR2DMEMINFO *src_mem;
  PVR2DFORMAT pvr_format;
  GstVideoRectangle result;
  GstPVRMeta *meta;
  GstVideoCropMeta *cropmeta;

  GST_DEBUG_OBJECT (pvrvideosink, "buffer %p", buffer);

  pvr_format =
      GST_VIDEO_INFO_FORMAT (&pvrvideosink->info) ==
      GST_VIDEO_FORMAT_NV12 ? PVR2D_YUV420_2PLANE : PVR2D_ARGB8888;

  g_mutex_lock (pvrvideosink->flow_lock);
  if (buffer == NULL)
    buffer = pvrvideosink->current_buffer;

  if (buffer == NULL)
    goto done;

  meta = gst_buffer_get_pvr_meta (buffer);
  if (G_UNLIKELY (meta == NULL))
    goto no_pvr_meta;

  src_mem = meta->src_mem;
  p_blt_3d = dcontext->p_blt_info;

  video_width = GST_VIDEO_SINK_WIDTH (pvrvideosink);
  video_height = GST_VIDEO_SINK_HEIGHT (pvrvideosink);

  g_mutex_lock (pvrvideosink->dcontext->x_lock);

  /* Draw borders when displaying the first frame. After this
     draw borders only on expose event or after a size change. */
  if (!(pvrvideosink->current_buffer) || pvrvideosink->redraw_borders) {
    draw_border = TRUE;
  }

  /* Store a reference to the last image we put, lose the previous one */
  if (buffer && pvrvideosink->current_buffer != buffer) {
    if (pvrvideosink->current_buffer) {
      GST_LOG_OBJECT (pvrvideosink, "unreffing %p",
          pvrvideosink->current_buffer);
      gst_buffer_unref (GST_BUFFER_CAST (pvrvideosink->current_buffer));
    }
    GST_LOG_OBJECT (pvrvideosink, "reffing %p as our current buffer", buffer);
    pvrvideosink->current_buffer = gst_buffer_ref (buffer);
  }

  if (pvrvideosink->keep_aspect) {
    GstVideoRectangle src = { 0, };
    GstVideoRectangle dst = { 0, };

    src.w = GST_VIDEO_SINK_WIDTH (pvrvideosink);
    src.h = GST_VIDEO_SINK_HEIGHT (pvrvideosink);
    dst.w = pvrvideosink->render_rect.w;
    dst.h = pvrvideosink->render_rect.h;
    gst_video_sink_center_rect (src, dst, &result, TRUE);
    result.x += pvrvideosink->render_rect.x;
    result.y += pvrvideosink->render_rect.y;
  } else {
    memcpy (&result, &pvrvideosink->render_rect, sizeof (GstVideoRectangle));
  }

  p_blt_3d->sDst.pSurfMemInfo = &dcontext->dst_mem;
  p_blt_3d->sDst.SurfOffset = 0;
  p_blt_3d->sDst.Stride = 4 * pvrvideosink->render_params.ui32Stride;
  p_blt_3d->sDst.Format = PVR2D_ARGB8888;
  p_blt_3d->sDst.SurfWidth = pvrvideosink->xwindow->width;
  p_blt_3d->sDst.SurfHeight = pvrvideosink->xwindow->height;

  p_blt_3d->rcDest.left = result.x;
  p_blt_3d->rcDest.top = result.y;
  p_blt_3d->rcDest.right = result.w + result.x;
  p_blt_3d->rcDest.bottom = result.h + result.y;

  p_blt_3d->sSrc.pSurfMemInfo = src_mem;
  p_blt_3d->sSrc.SurfOffset = 0;
  p_blt_3d->sSrc.Stride = GST_VIDEO_INFO_COMP_STRIDE (&pvrvideosink->info, 0);
  p_blt_3d->sSrc.Format = pvr_format;
  p_blt_3d->sSrc.SurfWidth = video_width;
  p_blt_3d->sSrc.SurfHeight = video_height;

  /* If buffer has crop information, use that */
  if ((cropmeta = gst_buffer_get_video_crop_meta (buffer))) {
    p_blt_3d->rcSource.left = cropmeta->x;
    p_blt_3d->rcSource.top = cropmeta->y;
    p_blt_3d->rcSource.right = cropmeta->x + cropmeta->width;
    p_blt_3d->rcSource.bottom = cropmeta->y + cropmeta->height;
  } else {
    p_blt_3d->rcSource.left = 0;
    p_blt_3d->rcSource.top = 0;
    p_blt_3d->rcSource.right = video_width;
    p_blt_3d->rcSource.bottom = video_height;
  }

  p_blt_3d->hUseCode = NULL;

  if (GST_VIDEO_INFO_FORMAT (&pvrvideosink->info) == GST_VIDEO_FORMAT_NV12)
    p_blt_3d->bDisableDestInput = TRUE;
  else
    /* blit fails for RGB without this... not sure why yet... */
    p_blt_3d->bDisableDestInput = FALSE;

  GST_DEBUG_OBJECT (pvrvideosink, "about to blit");

  pvr_error = PVR2DBlt3DExt (pvrvideosink->dcontext->pvr_context,
      dcontext->p_blt_info);

  if (pvr_error != PVR2D_OK) {
    GST_ERROR_OBJECT (pvrvideosink, "Failed to blit. Error : %s",
        gst_pvr2d_error_get_string (pvr_error));
    goto done;
  }
  dcontext->wsegl_table->pfnWSEGL_SwapDrawable (dcontext->drawable_handle, 1);

  if (draw_border) {
    gst_pvrvideosink_xwindow_draw_borders (pvrvideosink, pvrvideosink->xwindow,
        result);
    pvrvideosink->redraw_borders = FALSE;
  }
  g_mutex_unlock (pvrvideosink->dcontext->x_lock);

done:
  GST_DEBUG_OBJECT (pvrvideosink, "end");
  g_mutex_unlock (pvrvideosink->flow_lock);
  return;

  /* Error cases */

no_pvr_meta:
  {
    g_mutex_unlock (pvrvideosink->flow_lock);
    GST_ERROR_OBJECT (pvrvideosink, "Got a buffer without GstPVRMeta");
    return;
  }
}
Code example #7
static GstFlowReturn
theora_handle_data_packet (GstTheoraDec * dec, ogg_packet * packet,
    GstClockTime outtime, GstClockTime outdur)
{
  /* normal data packet */
  th_ycbcr_buffer buf;
  GstBuffer *out;
  gboolean keyframe;
  GstFlowReturn result;
  ogg_int64_t gp;

  if (G_UNLIKELY (!dec->have_header))
    goto not_initialized;

  /* get timestamp and durations */
  if (outtime == -1)
    outtime = dec->last_timestamp;
  if (outdur == -1)
    outdur = gst_util_uint64_scale_int (GST_SECOND, dec->info.fps_denominator,
        dec->info.fps_numerator);

  /* calculate expected next timestamp */
  if (outtime != -1 && outdur != -1)
    dec->last_timestamp = outtime + outdur;

  /* the second most significant bit of the first data byte is cleared 
   * for keyframes. We can only check it if it's not a zero-length packet. */
  keyframe = packet->bytes && ((packet->packet[0] & 0x40) == 0);
  if (G_UNLIKELY (keyframe)) {
    GST_DEBUG_OBJECT (dec, "we have a keyframe");
    dec->need_keyframe = FALSE;
  } else if (G_UNLIKELY (dec->need_keyframe)) {
    goto dropping;
  }

  GST_DEBUG_OBJECT (dec, "parsing data packet");

  /* this does the decoding */
  if (G_UNLIKELY (th_decode_packetin (dec->decoder, packet, &gp) < 0))
    goto decode_error;

  if (outtime != -1) {
    gboolean need_skip;
    GstClockTime running_time;
    GstClockTime earliest_time;
    gdouble proportion;

    /* qos needs to be done on running time */
    running_time = gst_segment_to_running_time (&dec->segment, GST_FORMAT_TIME,
        outtime);

    GST_OBJECT_LOCK (dec);
    proportion = dec->proportion;
    earliest_time = dec->earliest_time;
    /* check for QoS, don't perform the last steps of getting and
     * pushing the buffers that are known to be late. */
    need_skip = earliest_time != -1 && running_time <= earliest_time;
    GST_OBJECT_UNLOCK (dec);

    if (need_skip) {
      GstMessage *qos_msg;
      guint64 stream_time;
      gint64 jitter;

      GST_DEBUG_OBJECT (dec, "skipping decoding: qostime %"
          GST_TIME_FORMAT " <= %" GST_TIME_FORMAT,
          GST_TIME_ARGS (running_time), GST_TIME_ARGS (earliest_time));

      dec->dropped++;

      stream_time =
          gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, outtime);
      jitter = GST_CLOCK_DIFF (running_time, earliest_time);

      qos_msg =
          gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, running_time,
          stream_time, outtime, outdur);
      gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
      gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
          dec->processed, dec->dropped);
      gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);

      goto dropping_qos;
    }
  }

  /* this does postprocessing and set up the decoded frame
   * pointers in our yuv variable */
  if (G_UNLIKELY (th_decode_ycbcr_out (dec->decoder, buf) < 0))
    goto no_yuv;

  if (G_UNLIKELY ((buf[0].width != dec->info.frame_width)
          || (buf[0].height != dec->info.frame_height)))
    goto wrong_dimensions;

  result = theora_handle_image (dec, buf, &out);
  if (result != GST_FLOW_OK)
    return result;

  GST_BUFFER_OFFSET (out) = dec->frame_nr;
  if (dec->frame_nr != -1)
    dec->frame_nr++;
  GST_BUFFER_OFFSET_END (out) = dec->frame_nr;

  GST_BUFFER_TIMESTAMP (out) = outtime;
  GST_BUFFER_DURATION (out) = outdur;

  dec->processed++;

  if (dec->segment.rate >= 0.0)
    result = theora_dec_push_forward (dec, out);
  else
    result = theora_dec_push_reverse (dec, out);

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_ERROR;
  }
dropping:
  {
    GST_WARNING_OBJECT (dec, "dropping frame because we need a keyframe");
    dec->discont = TRUE;
    return GST_FLOW_OK;
  }
dropping_qos:
  {
    if (dec->frame_nr != -1)
      dec->frame_nr++;
    dec->discont = TRUE;
    GST_WARNING_OBJECT (dec, "dropping frame because of QoS");
    return GST_FLOW_OK;
  }
decode_error:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("theora decoder did not decode data packet"));
    return GST_FLOW_ERROR;
  }
no_yuv:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("couldn't read out YUV image"));
    return GST_FLOW_ERROR;
  }
wrong_dimensions:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, FORMAT,
        (NULL), ("dimensions of image do not match header"));
    return GST_FLOW_ERROR;
  }
}
Code example #8
void
gst_audio_fx_base_fir_filter_push_residue (GstAudioFXBaseFIRFilter * self)
{
  GstBuffer *outbuf;
  GstFlowReturn res;
  gint rate = GST_AUDIO_FILTER_CAST (self)->format.rate;
  gint channels = GST_AUDIO_FILTER_CAST (self)->format.channels;
  gint width = GST_AUDIO_FILTER_CAST (self)->format.width / 8;
  gint outsize, outsamples;
  guint8 *in, *out;

  if (channels == 0 || rate == 0 || self->nsamples_in == 0) {
    self->buffer_fill = 0;
    g_free (self->buffer);
    self->buffer = NULL;
    return;
  }

  /* Calculate the number of samples and their memory size that
   * should be pushed from the residue */
  outsamples = self->nsamples_in - (self->nsamples_out - self->latency);
  if (outsamples <= 0) {
    self->buffer_fill = 0;
    g_free (self->buffer);
    self->buffer = NULL;
    return;
  }
  outsize = outsamples * channels * width;

  if (!self->fft || self->low_latency) {
    gint64 diffsize, diffsamples;

    /* Process the difference between latency and residue length samples
     * to start at the actual data instead of starting at the zeros before
     * when we only got one buffer smaller than latency */
    diffsamples =
        ((gint64) self->latency) - ((gint64) self->buffer_fill) / channels;
    if (diffsamples > 0) {
      diffsize = diffsamples * channels * width;
      in = g_new0 (guint8, diffsize);
      out = g_new0 (guint8, diffsize);
      self->nsamples_out += self->process (self, in, out, diffsamples);
      g_free (in);
      g_free (out);
    }

    res = gst_pad_alloc_buffer (GST_BASE_TRANSFORM_CAST (self)->srcpad,
        GST_BUFFER_OFFSET_NONE, outsize,
        GST_PAD_CAPS (GST_BASE_TRANSFORM_CAST (self)->srcpad), &outbuf);

    if (G_UNLIKELY (res != GST_FLOW_OK)) {
      GST_WARNING_OBJECT (self, "failed allocating buffer of %d bytes",
          outsize);
      self->buffer_fill = 0;
      return;
    }

    /* Convolve the residue with zeros to get the actual remaining data */
    in = g_new0 (guint8, outsize);
    self->nsamples_out +=
        self->process (self, in, GST_BUFFER_DATA (outbuf), outsamples);
    g_free (in);
  } else {
    guint gensamples = 0;
    guint8 *data;

    outbuf = gst_buffer_new_and_alloc (outsize);
    data = GST_BUFFER_DATA (outbuf);

    while (gensamples < outsamples) {
      guint step_insamples = self->block_length - self->buffer_fill;
      guint8 *zeroes = g_new0 (guint8, step_insamples * channels * width);
      guint8 *out = g_new (guint8, self->block_length * channels * width);
      guint step_gensamples;

      step_gensamples = self->process (self, zeroes, out, step_insamples);
      g_free (zeroes);

      memcpy (data + gensamples * width, out, MIN (step_gensamples,
              outsamples - gensamples) * width);
      gensamples += MIN (step_gensamples, outsamples - gensamples);

      g_free (out);
    }
    self->nsamples_out += gensamples;
  }

  /* Set timestamp, offset, etc from the values we
   * saved when processing the regular buffers */
  if (GST_CLOCK_TIME_IS_VALID (self->start_ts))
    GST_BUFFER_TIMESTAMP (outbuf) = self->start_ts;
  else
    GST_BUFFER_TIMESTAMP (outbuf) = 0;
  GST_BUFFER_TIMESTAMP (outbuf) +=
      gst_util_uint64_scale_int (self->nsamples_out - outsamples -
      self->latency, GST_SECOND, rate);

  GST_BUFFER_DURATION (outbuf) =
      gst_util_uint64_scale_int (outsamples, GST_SECOND, rate);

  if (self->start_off != GST_BUFFER_OFFSET_NONE) {
    GST_BUFFER_OFFSET (outbuf) =
        self->start_off + self->nsamples_out - outsamples - self->latency;
    GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET (outbuf) + outsamples;
  }

  GST_DEBUG_OBJECT (self, "Pushing residue buffer of size %d with timestamp: %"
      GST_TIME_FORMAT ", duration: %" GST_TIME_FORMAT ", offset: %"
      G_GUINT64_FORMAT ", offset_end: %" G_GUINT64_FORMAT ", nsamples_out: %d",
      GST_BUFFER_SIZE (outbuf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)), GST_BUFFER_OFFSET (outbuf),
      GST_BUFFER_OFFSET_END (outbuf), outsamples);

  res = gst_pad_push (GST_BASE_TRANSFORM_CAST (self)->srcpad, outbuf);

  if (G_UNLIKELY (res != GST_FLOW_OK)) {
    GST_WARNING_OBJECT (self, "failed to push residue");
  }

  self->buffer_fill = 0;
}
Code example #9
File: nmp_mod_mss.c Project: dulton/nampu
gpointer
nmp_mod_mss_sync_req_2(NmpModMss *self, NmpMsgID msg_id,
       gpointer req, gint req_size, gint *res_size)
{
	gint err = 0;
	NmpMsgErrCode *res_info;
	gpointer res;
	NmpSysMsg *msg;
	G_ASSERT(self != NULL);

	msg = nmp_sysmsg_new_2(msg_id, req, req_size, ++msg_seq_generator);
	if (G_UNLIKELY(!msg))
		return NULL;

	MSG_SET_DSTPOS(msg, BUSSLOT_POS_DBS);
	err = nmp_app_mod_sync_request((NmpAppMod*)self, &msg);
	if (G_UNLIKELY(err))	/* send failed */
	{
		nmp_warning(
			"<NmpModMss> request cmd %d failed!", msg_id
		);

		nmp_sysmsg_destroy(msg);
		res_info = nmp_mem_kalloc(sizeof(NmpMsgErrCode));
		if (res_info)
		{
			SET_CODE(res_info, err);
			*res_size = sizeof(NmpMsgErrCode);
		}

		return res_info;
	}

	if (G_UNLIKELY(!msg))	/* sent, but no response */
	{
		nmp_warning(
			"<NmpModMss> request cmd %d timeout!", msg_id
		);
		res_info = nmp_mem_kalloc(sizeof(NmpMsgErrCode));
		err = -E_TIMEOUT;
		if (res_info)
		{
			SET_CODE(res_info, err);
			*res_size = sizeof(NmpMsgErrCode);
		}

		return res_info;
	}

	res = MSG_GET_DATA(msg);
	if (!res)
	{
		nmp_sysmsg_destroy(msg);
		return NULL;
	}

	res_info = nmp_mem_kalloc(MSG_DATA_SIZE(msg));
	if (G_UNLIKELY(!res_info))
	{
		nmp_sysmsg_destroy(msg);
		return NULL;
	}
	*res_size = MSG_DATA_SIZE(msg);
	memcpy(res_info, res, *res_size);
	nmp_sysmsg_destroy(msg);

	return res_info;
}
Code example #10
File: mpegtsbase.c Project: Haifen/gst-plugins-bad
static gboolean
mpegts_base_apply_pat (MpegTSBase * base, GstMpegtsSection * section)
{
  GPtrArray *pat = gst_mpegts_section_get_pat (section);
  GPtrArray *old_pat;
  MpegTSBaseProgram *program;
  gint i;

  if (G_UNLIKELY (pat == NULL))
    return FALSE;

  GST_INFO_OBJECT (base, "PAT");

  /* Applying a new PAT does two things:
   * * It adds the new programs to the list of programs this element handles
   *   and at the same time increments each program's reference count.
   *
   * * If there was a previously active PAT, it decrements the reference count
   *   of all programs it used. If a program is no longer needed, it is removed.
   */

  old_pat = base->pat;
  base->pat = pat;

  GST_LOG ("Activating new Program Association Table");
  /* activate the new table */
  for (i = 0; i < pat->len; ++i) {
    GstMpegtsPatProgram *patp = g_ptr_array_index (pat, i);

    program = mpegts_base_get_program (base, patp->program_number);
    if (program) {
      /* IF the program already existed, just check if the PMT PID changed */
      if (program->pmt_pid != patp->network_or_program_map_PID) {
        if (program->pmt_pid != G_MAXUINT16) {
          /* pmt pid changed */
          /* FIXME: when this happens it may still be pmt pid of another
           * program, so setting to False may make it go through expensive
           * path in is_psi unnecessarily */
          MPEGTS_BIT_UNSET (base->known_psi, program->pmt_pid);
        }

        program->pmt_pid = patp->network_or_program_map_PID;
        if (G_UNLIKELY (MPEGTS_BIT_IS_SET (base->known_psi, program->pmt_pid)))
          GST_FIXME
              ("Refcounting issue. Setting twice a PMT PID (0x%04x) as known PSI",
              program->pmt_pid);
        MPEGTS_BIT_SET (base->known_psi, patp->network_or_program_map_PID);
      }
    } else {
      /* Create a new program */
      program =
          mpegts_base_add_program (base, patp->program_number,
          patp->network_or_program_map_PID);
    }
    /* We mark this program as being referenced by one PAT */
    program->patcount += 1;
  }

  if (old_pat) {
    MpegTSBaseClass *klass = GST_MPEGTS_BASE_GET_CLASS (base);
    /* deactivate the old table */
    GST_LOG ("Deactivating old Program Association Table");

    for (i = 0; i < old_pat->len; ++i) {
      GstMpegtsPatProgram *patp = g_ptr_array_index (old_pat, i);

      program = mpegts_base_get_program (base, patp->program_number);
      if (G_UNLIKELY (program == NULL)) {
        GST_DEBUG_OBJECT (base, "broken PAT, duplicated entry for program %d",
            patp->program_number);
        continue;
      }

      if (--program->patcount > 0)
        /* the program has been referenced by the new pat, keep it */
        continue;

      GST_INFO_OBJECT (base, "PAT removing program 0x%04x 0x%04x",
          patp->program_number, patp->network_or_program_map_PID);

      if (klass->can_remove_program (base, program)) {
        mpegts_base_deactivate_program (base, program);
        mpegts_base_remove_program (base, patp->program_number);
      } else {
        /* sub-class now owns the program and must call
         * mpegts_base_deactivate_and_free_program later */
        g_hash_table_steal (base->programs,
            GINT_TO_POINTER ((gint) patp->program_number));
      }
      /* FIXME: when this happens it may still be pmt pid of another
       * program, so setting to False may make it go through expensive
       * path in is_psi unnecessarily */
      if (G_UNLIKELY (MPEGTS_BIT_IS_SET (base->known_psi,
                  patp->network_or_program_map_PID))) {
        GST_FIXME
            ("Program refcounting : Setting twice a pid (0x%04x) as known PSI",
            patp->network_or_program_map_PID);
      }
      MPEGTS_BIT_SET (base->known_psi, patp->network_or_program_map_PID);
      mpegts_packetizer_remove_stream (base->packetizer,
          patp->network_or_program_map_PID);
    }

    g_ptr_array_unref (old_pat);
  }

  return TRUE;
}
Code example #11
File: mpegtsbase.c Project: Haifen/gst-plugins-bad
static gboolean
mpegts_base_apply_pmt (MpegTSBase * base, GstMpegtsSection * section)
{
  const GstMpegtsPMT *pmt;
  MpegTSBaseProgram *program, *old_program;
  guint program_number;
  gboolean initial_program = TRUE;

  pmt = gst_mpegts_section_get_pmt (section);
  if (G_UNLIKELY (pmt == NULL)) {
    GST_ERROR ("Could not get PMT (corrupted ?)");
    return FALSE;
  }

  /* FIXME : not so sure this is valid anymore */
  if (G_UNLIKELY (base->seen_pat == FALSE)) {
    GST_WARNING ("Got pmt without pat first. Returning");
    /* remove the stream since we won't get another PMT otherwise */
    mpegts_packetizer_remove_stream (base->packetizer, section->pid);
    return TRUE;
  }

  program_number = section->subtable_extension;
  GST_DEBUG ("Applying PMT (program_number:%d, pid:0x%04x)",
      program_number, section->pid);

  /* In order for stream switching to happen properly in decodebin(2),
   * we need to first add the new pads (i.e. activate the new program)
   * before removing the old ones (i.e. deactivating the old program)
   */

  old_program = mpegts_base_get_program (base, program_number);
  if (G_UNLIKELY (old_program == NULL))
    goto no_program;

  if (G_UNLIKELY (mpegts_base_is_same_program (base, old_program, section->pid,
              pmt)))
    goto same_program;

  /* If the current program is active, this means we have a new program */
  if (old_program->active) {
    MpegTSBaseClass *klass = GST_MPEGTS_BASE_GET_CLASS (base);
    old_program = mpegts_base_steal_program (base, program_number);
    program = mpegts_base_new_program (base, program_number, section->pid);
    program->patcount = old_program->patcount;
    g_hash_table_insert (base->programs,
        GINT_TO_POINTER (program_number), program);

    /* Deactivate the old program */
    /* FIXME : THIS IS BREAKING THE STREAM SWITCHING LOGIC !
     *  */
    if (klass->can_remove_program (base, old_program)) {
      mpegts_base_deactivate_program (base, old_program);
      mpegts_base_free_program (old_program);
    } else {
      /* sub-class now owns the program and must call
       * mpegts_base_deactivate_and_free_program later */
      g_hash_table_steal (base->programs,
          GINT_TO_POINTER ((gint) old_program->program_number));
    }
    initial_program = FALSE;
  } else
    program = old_program;

  /* activate program */
  /* Ownership of pmt_info is given to the program */
  mpegts_base_activate_program (base, program, section->pid, section, pmt,
      initial_program);

  return TRUE;

no_program:
  {
    GST_ERROR ("Attempted to apply a PMT on a program that wasn't created");
    return TRUE;
  }

same_program:
  {
    GST_DEBUG ("Not applying identical program");
    return TRUE;
  }
}
Code example #12
File: mpegtsbase.c Project: Haifen/gst-plugins-bad
static void
mpegts_base_activate_program (MpegTSBase * base, MpegTSBaseProgram * program,
    guint16 pmt_pid, GstMpegtsSection * section, const GstMpegtsPMT * pmt,
    gboolean initial_program)
{
  guint i;
  MpegTSBaseClass *klass;

  if (G_UNLIKELY (program->active))
    return;

  GST_DEBUG ("Activating program %d", program->program_number);

  /* activate new pmt */
  if (program->section)
    gst_mpegts_section_unref (program->section);
  program->section = gst_mpegts_section_ref (section);

  program->pmt = pmt;
  program->pmt_pid = pmt_pid;
  program->pcr_pid = pmt->pcr_pid;

  /* extract top-level registration_id if present */
  program->registration_id =
      get_registration_from_descriptors (pmt->descriptors);
  GST_DEBUG ("program 0x%04x, registration_id %" SAFE_FOURCC_FORMAT,
      program->program_number, SAFE_FOURCC_ARGS (program->registration_id));

  for (i = 0; i < pmt->streams->len; ++i) {
    GstMpegtsPMTStream *stream = g_ptr_array_index (pmt->streams, i);

    switch (stream->stream_type) {
      case GST_MPEGTS_STREAM_TYPE_SCTE_DSMCC_DCB:
      case GST_MPEGTS_STREAM_TYPE_SCTE_SIGNALING:
      {
        guint32 registration_id =
            get_registration_from_descriptors (stream->descriptors);
        /* Not a private section stream */
        if (registration_id != DRF_ID_CUEI && registration_id != DRF_ID_ETV1)
          break;
        /* Fall through on purpose - remove this PID from known_psi */
      }
      case GST_MPEGTS_STREAM_TYPE_PRIVATE_SECTIONS:
      case GST_MPEGTS_STREAM_TYPE_MHEG:
      case GST_MPEGTS_STREAM_TYPE_DSM_CC:
      case GST_MPEGTS_STREAM_TYPE_DSMCC_A:
      case GST_MPEGTS_STREAM_TYPE_DSMCC_B:
      case GST_MPEGTS_STREAM_TYPE_DSMCC_C:
      case GST_MPEGTS_STREAM_TYPE_DSMCC_D:
      case GST_MPEGTS_STREAM_TYPE_SL_FLEXMUX_SECTIONS:
      case GST_MPEGTS_STREAM_TYPE_METADATA_SECTIONS:
        /* Set known PSI streams */
        if (base->parse_private_sections)
          MPEGTS_BIT_SET (base->known_psi, stream->pid);
        break;
      default:
        if (G_UNLIKELY (MPEGTS_BIT_IS_SET (base->is_pes, stream->pid)))
          GST_FIXME
              ("Refcounting issue. Setting twice a PID (0x%04x) as known PES",
              stream->pid);
        if (G_UNLIKELY (MPEGTS_BIT_IS_SET (base->known_psi, stream->pid))) {
          GST_FIXME
              ("Refcounting issue. Setting a known PSI PID (0x%04x) as known PES",
              stream->pid);
          MPEGTS_BIT_UNSET (base->known_psi, stream->pid);
        }

        MPEGTS_BIT_SET (base->is_pes, stream->pid);
        break;
    }
    mpegts_base_program_add_stream (base, program,
        stream->pid, stream->stream_type, stream);

  }
  /* We add the PCR pid last. If that PID is already used by one of the media
   * streams above, no new stream will be created */
  mpegts_base_program_add_stream (base, program, pmt->pcr_pid, -1, NULL);
  MPEGTS_BIT_SET (base->is_pes, pmt->pcr_pid);

  program->active = TRUE;
  program->initial_program = initial_program;

  klass = GST_MPEGTS_BASE_GET_CLASS (base);
  if (klass->program_started != NULL)
    klass->program_started (base, program);

  GST_DEBUG_OBJECT (base, "new pmt activated");
}
Code example #13
File: mpegtsbase.c Project: Haifen/gst-plugins-bad
static void
mpegts_base_deactivate_program (MpegTSBase * base, MpegTSBaseProgram * program)
{
  gint i;
  MpegTSBaseClass *klass = GST_MPEGTS_BASE_GET_CLASS (base);

  if (G_UNLIKELY (program->active == FALSE))
    return;

  GST_DEBUG_OBJECT (base, "Deactivating PMT");

  program->active = FALSE;

  if (program->pmt) {
    for (i = 0; i < program->pmt->streams->len; ++i) {
      GstMpegtsPMTStream *stream = g_ptr_array_index (program->pmt->streams, i);

      mpegts_base_program_remove_stream (base, program, stream->pid);

      /* Only unset the is_pes/known_psi bit if the PID isn't used in any other active
       * program */
      if (!mpegts_pid_in_active_programs (base, stream->pid)) {
        switch (stream->stream_type) {
          case GST_MPEGTS_STREAM_TYPE_SCTE_DSMCC_DCB:
          case GST_MPEGTS_STREAM_TYPE_SCTE_SIGNALING:
          {
            guint32 registration_id =
                get_registration_from_descriptors (stream->descriptors);

            /* Not a private section stream */
            if (registration_id != DRF_ID_CUEI
                && registration_id != DRF_ID_ETV1)
              break;
            /* Fall through on purpose - remove this PID from known_psi */
          }
          case GST_MPEGTS_STREAM_TYPE_PRIVATE_SECTIONS:
          case GST_MPEGTS_STREAM_TYPE_MHEG:
          case GST_MPEGTS_STREAM_TYPE_DSM_CC:
          case GST_MPEGTS_STREAM_TYPE_DSMCC_A:
          case GST_MPEGTS_STREAM_TYPE_DSMCC_B:
          case GST_MPEGTS_STREAM_TYPE_DSMCC_C:
          case GST_MPEGTS_STREAM_TYPE_DSMCC_D:
          case GST_MPEGTS_STREAM_TYPE_SL_FLEXMUX_SECTIONS:
          case GST_MPEGTS_STREAM_TYPE_METADATA_SECTIONS:
            /* Unset known PSI streams */
            if (base->parse_private_sections)
              MPEGTS_BIT_UNSET (base->known_psi, stream->pid);
            break;
          default:
            MPEGTS_BIT_UNSET (base->is_pes, stream->pid);
            break;
        }
      }
    }

    /* remove pcr stream */
    /* FIXME : This might actually be shared with another stream ? */
    mpegts_base_program_remove_stream (base, program, program->pcr_pid);
    if (!mpegts_pid_in_active_programs (base, program->pcr_pid))
      MPEGTS_BIT_UNSET (base->is_pes, program->pcr_pid);

    GST_DEBUG ("program stream_list is now %p", program->stream_list);
  }

  /* Inform subclasses we're deactivating this program */
  if (klass->program_stopped)
    klass->program_stopped (base, program);
}
Code example #14
File: mpegtsbase.c Project: Haifen/gst-plugins-bad
gboolean
mpegts_base_handle_seek_event (MpegTSBase * base, GstPad * pad,
    GstEvent * event)
{
  MpegTSBaseClass *klass = GST_MPEGTS_BASE_GET_CLASS (base);
  GstFlowReturn ret = GST_FLOW_ERROR;
  gdouble rate;
  gboolean flush;
  GstFormat format;
  GstSeekFlags flags;
  GstSeekType start_type, stop_type;
  gint64 start, stop;
  GstEvent *flush_event = NULL;

  gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
      &stop_type, &stop);

  if (format != GST_FORMAT_TIME)
    return FALSE;

  if (GST_EVENT_SEQNUM (event) == base->last_seek_seqnum) {
    GST_DEBUG_OBJECT (base, "Skipping already handled seek");
    return TRUE;
  }

  if (base->mode == BASE_MODE_PUSHING) {
    /* First try if upstream supports seeking in TIME format */
    if (gst_pad_push_event (base->sinkpad, gst_event_ref (event))) {
      GST_DEBUG ("upstream handled SEEK event");
      return TRUE;
    }

    /* If the subclass can seek, do that */
    if (klass->seek) {
      ret = klass->seek (base, event);
      if (G_UNLIKELY (ret != GST_FLOW_OK))
        GST_WARNING ("seeking failed %s", gst_flow_get_name (ret));
      else {
        GstEvent *new_seek;

        if (GST_CLOCK_TIME_IS_VALID (base->seek_offset)) {
          base->mode = BASE_MODE_SEEKING;
          new_seek = gst_event_new_seek (rate, GST_FORMAT_BYTES, flags,
              GST_SEEK_TYPE_SET, base->seek_offset, GST_SEEK_TYPE_NONE, -1);
          gst_event_set_seqnum (new_seek, GST_EVENT_SEQNUM (event));
          if (!gst_pad_push_event (base->sinkpad, new_seek))
            ret = GST_FLOW_ERROR;
          else
            base->last_seek_seqnum = GST_EVENT_SEQNUM (event);
        }
        base->mode = BASE_MODE_PUSHING;
      }
    } else {
      GST_WARNING ("subclass has no seek implementation");
    }

    return ret == GST_FLOW_OK;
  }

  if (!klass->seek) {
    GST_WARNING ("subclass has no seek implementation");
    return FALSE;
  }

  if (rate <= 0.0) {
    GST_WARNING ("Negative rate not supported");
    return FALSE;
  }

  GST_DEBUG ("seek event, rate: %f start: %" GST_TIME_FORMAT
      " stop: %" GST_TIME_FORMAT, rate, GST_TIME_ARGS (start),
      GST_TIME_ARGS (stop));

  flush = flags & GST_SEEK_FLAG_FLUSH;

  /* stop streaming, either by flushing or by pausing the task */
  base->mode = BASE_MODE_SEEKING;
  if (flush) {
    GST_DEBUG_OBJECT (base, "sending flush start");
    flush_event = gst_event_new_flush_start ();
    gst_event_set_seqnum (flush_event, GST_EVENT_SEQNUM (event));
    gst_pad_push_event (base->sinkpad, gst_event_ref (flush_event));
    GST_MPEGTS_BASE_GET_CLASS (base)->push_event (base, flush_event);
  } else
    gst_pad_pause_task (base->sinkpad);

  /* wait for streaming to finish */
  GST_PAD_STREAM_LOCK (base->sinkpad);

  if (flush) {
    /* send a FLUSH_STOP for the sinkpad, since we need data for seeking */
    GST_DEBUG_OBJECT (base, "sending flush stop");
    flush_event = gst_event_new_flush_stop (TRUE);
    gst_event_set_seqnum (flush_event, GST_EVENT_SEQNUM (event));

    /* ref for it to be reused later */
    gst_pad_push_event (base->sinkpad, gst_event_ref (flush_event));
    /* And actually flush our pending data but allow to preserve some info
     * to perform the seek */
    mpegts_base_flush (base, FALSE);
    mpegts_packetizer_flush (base->packetizer, FALSE);
  }

  if (flags & (GST_SEEK_FLAG_SEGMENT)) {
    GST_WARNING ("seek flags 0x%x are not supported", (int) flags);
    goto done;
  }


  /* If the subclass can seek, do that */
  ret = klass->seek (base, event);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    GST_WARNING ("seeking failed %s", gst_flow_get_name (ret));
  else
    base->last_seek_seqnum = GST_EVENT_SEQNUM (event);

  if (flush_event) {
    /* if we sent a FLUSH_START, we now send a FLUSH_STOP */
    GST_DEBUG_OBJECT (base, "sending flush stop");
    GST_MPEGTS_BASE_GET_CLASS (base)->push_event (base, flush_event);
    flush_event = NULL;
  }
done:
  if (flush_event)
    gst_event_unref (flush_event);
  gst_pad_start_task (base->sinkpad, (GstTaskFunction) mpegts_base_loop, base,
      NULL);

  GST_PAD_STREAM_UNLOCK (base->sinkpad);
  return ret == GST_FLOW_OK;
}
Code example #15
File: radix.c Project: balabit/syslog-ng
gboolean
r_parser_ipv6(gchar *str, gint *len, const gchar *param, gpointer state, RParserMatch *match)
{
  gint colons = 0;
  gint dots = 0;
  gint octet = 0;
  gint digit = 16;
  gboolean shortened = FALSE;

  *len = 0;

  while (1)
    {
      if (str[*len] == ':')
        {
          if (G_UNLIKELY(octet > 0xffff || (octet == -1 && shortened)))
            return FALSE;

          if (G_UNLIKELY(colons == 7 || dots == 3))
            break;

          if (G_UNLIKELY(digit == 10))
            return FALSE;

          if (octet == -1)
            shortened = TRUE;

          colons++;
          octet = -1;
        }
      else if (g_ascii_isxdigit(str[*len]))
        {
          if (octet == -1)
            octet = 0;
          else
            octet *= digit;

          octet += g_ascii_xdigit_value(str[*len]);
        }
      else if (str[*len] == '.')
        {
          if (G_UNLIKELY((digit == 10 && octet > 255)))
            return FALSE;

          /* 597 == 0x255: a group read as hex that exceeds this can no longer be
           * re-interpreted as a decimal IPv4 octet (<= 255), so a '.' here cannot
           * start an embedded IPv4 part and the match ends at this point. */
          if (G_UNLIKELY((digit == 16 && octet > 597) || octet == -1 || colons == 7 || dots == 3))
            break;

          dots++;
          octet = -1;
          digit = 10;
        }
      else
        break;

      (*len)++;
    }

  if (G_UNLIKELY(*len > 0 && str[*len-1] == '.'))
    {
      (*len)--;
      dots--;
    }
  else if (G_UNLIKELY(*len > 1 && str[*len-1] == ':' && str[*len - 2] != ':'))
    {
      (*len)--;
      colons--;
    }

  if (colons < 2 || colons > 7 || (digit == 10 && octet > 255) || (digit == 16 && octet > 0xffff) ||
      !(dots == 0 || dots == 3) || (!shortened && colons < 7 && dots == 0))
    return FALSE;

  return TRUE;
}
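
A small, hypothetical call sketch based only on the signature and behaviour shown above (param, state and match are never dereferenced by this parser, so NULL is passed; this is not how syslog-ng wires the parser internally):

  gchar buf[] = "2001:db8::1 rest of message";
  gint len = 0;

  if (r_parser_ipv6 (buf, &len, NULL, NULL, NULL))
    g_print ("matched %d bytes: %.*s\n", len, len, buf);  /* "2001:db8::1" */
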
Code example #16
File: gstkateenc.c Project: jonasl/gst-svtplayer
static gboolean
gst_kate_enc_sink_event (GstPad * pad, GstEvent * event)
{
  GstKateEnc *ke = GST_KATE_ENC (gst_pad_get_parent (pad));
  GstStructure *structure;
  gboolean ret;

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_NEWSEGMENT:
      GST_LOG_OBJECT (ke, "Got newsegment event");
      if (ke->initialized) {
        GST_LOG_OBJECT (ke, "ensuring all headers are in");
        if (gst_kate_enc_flush_headers (ke) != GST_FLOW_OK) {
          GST_WARNING_OBJECT (ke, "Failed to flush headers");
        } else {
          GstFormat format;
          gint64 timestamp;

          gst_event_parse_new_segment (event, NULL, NULL, &format, &timestamp,
              NULL, NULL);
          if (format != GST_FORMAT_TIME || !GST_CLOCK_TIME_IS_VALID (timestamp)) {
            GST_WARNING_OBJECT (ke,
                "No time in newsegment event %p, format %d, timestamp %"
                G_GINT64_FORMAT, event, (int) format, timestamp);
            /* to be safe, we'd need to generate a keepalive anyway, but we'd have to guess at the timestamp to use; a
               good guess would be the last known timestamp plus the keepalive time, but if we then get a packet with a
               timestamp less than this, it would fail to encode, which would be Bad. If we don't encode a keepalive, we
               run the risk of stalling the pipeline and hanging, which is Very Bad. Oh dear. We can't exit(-1), can we ? */
          } else {
            float t = timestamp / (double) GST_SECOND;

            if (ke->delayed_spu
                && t - ke->delayed_start / (double) GST_SECOND >=
                ke->default_spu_duration) {
              if (G_UNLIKELY (gst_kate_enc_flush_waiting (ke,
                          timestamp) != GST_FLOW_OK)) {
                GST_WARNING_OBJECT (ke, "Failed to encode delayed packet");
                /* continue with new segment handling anyway */
              }
            }

            GST_LOG_OBJECT (ke, "ts %f, last %f (min %f)", t,
                ke->last_timestamp / (double) GST_SECOND,
                ke->keepalive_min_time);
            if (ke->keepalive_min_time > 0.0f
                && t - ke->last_timestamp / (double) GST_SECOND >=
                ke->keepalive_min_time) {
              /* we only generate a keepalive if there is no SPU waiting, as it would
                 mean out of sequence start times - and granulepos */
              if (!ke->delayed_spu) {
                gst_kate_enc_generate_keepalive (ke, timestamp);
              }
            }
          }
        }
      }
      ret = gst_pad_push_event (ke->srcpad, event);
      break;

    case GST_EVENT_CUSTOM_DOWNSTREAM:
      GST_LOG_OBJECT (ke, "Got custom downstream event");
      /* adapted from the dvdsubdec element */
      structure = event->structure;
      if (structure != NULL
          && gst_structure_has_name (structure, "application/x-gst-dvd")) {
        if (ke->initialized) {
          GST_LOG_OBJECT (ke, "ensuring all headers are in");
          if (gst_kate_enc_flush_headers (ke) != GST_FLOW_OK) {
            GST_WARNING_OBJECT (ke, "Failed to flush headers");
          } else {
            const gchar *event_name =
                gst_structure_get_string (structure, "event");
            if (event_name) {
              if (!strcmp (event_name, "dvd-spu-clut-change")) {
                gchar name[16];
                int idx;
                gboolean found;
                gint value;
                GST_INFO_OBJECT (ke, "New CLUT received");
                for (idx = 0; idx < 16; ++idx) {
                  g_snprintf (name, sizeof (name), "clut%02d", idx);
                  found = gst_structure_get_int (structure, name, &value);
                  if (found) {
                    ke->spu_clut[idx] = value;
                  } else {
                    GST_WARNING_OBJECT (ke,
                        "DVD CLUT event did not contain %s field", name);
                  }
                }
              } else if (!strcmp (event_name, "dvd-lang-codes")) {
                /* we can't know which stream corresponds to us */
              }
            } else {
              GST_WARNING_OBJECT (ke, "custom downstream event with no name");
            }
          }
        }
      }
      ret = gst_pad_push_event (ke->srcpad, event);
      break;

    case GST_EVENT_TAG:
      GST_LOG_OBJECT (ke, "Got tag event");
      if (ke->tags) {
        GstTagList *list;

        gst_event_parse_tag (event, &list);
        gst_tag_list_insert (ke->tags, list,
            gst_tag_setter_get_tag_merge_mode (GST_TAG_SETTER (ke)));
      } else {
        g_assert_not_reached ();
      }
      ret = gst_pad_event_default (pad, event);
      break;

    case GST_EVENT_EOS:
      GST_INFO_OBJECT (ke, "Got EOS event");
      if (ke->initialized) {
        GST_LOG_OBJECT (ke, "ensuring all headers are in");
        if (gst_kate_enc_flush_headers (ke) != GST_FLOW_OK) {
          GST_WARNING_OBJECT (ke, "Failed to flush headers");
        } else {
          kate_packet kp;
          int ret;
          GstClockTime delayed_end =
              ke->delayed_start + ke->default_spu_duration * GST_SECOND;

          if (G_UNLIKELY (gst_kate_enc_flush_waiting (ke,
                      delayed_end) != GST_FLOW_OK)) {
            GST_WARNING_OBJECT (ke, "Failed to encode delayed packet");
            /* continue with EOS handling anyway */
          }

          ret = kate_encode_finish (&ke->k, -1, &kp);
          if (ret < 0) {
            GST_WARNING_OBJECT (ke, "Failed to encode EOS packet: %d", ret);
          } else {
            kate_int64_t granpos = kate_encode_get_granule (&ke->k);
            GST_LOG_OBJECT (ke, "EOS packet encoded");
            if (gst_kate_enc_push_and_free_kate_packet (ke, &kp, granpos,
                    ke->latest_end_time, 0, FALSE)) {
              GST_WARNING_OBJECT (ke, "Failed to push EOS packet");
            }
          }
        }
      }
      ret = gst_pad_event_default (pad, event);
      break;

    default:
      GST_LOG_OBJECT (ke, "Got unhandled event");
      ret = gst_pad_event_default (pad, event);
      break;
  }

  gst_object_unref (ke);
  return ret;
}
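
A note on the keepalive branch above: it reduces to a simple threshold test on the time elapsed since the last emitted packet. A minimal, self-contained sketch of that decision (hypothetical helper, not part of the element):

#include <gst/gst.h>

/* Sketch: emit a keepalive only when the gap since the last emitted packet
 * (assumed here to be <= now) reaches the configured minimum, and only when
 * no delayed SPU is pending, since a pending SPU would otherwise produce
 * out-of-sequence start times and granulepos. */
static gboolean
keepalive_needed (GstClockTime now, GstClockTime last_timestamp,
    gdouble keepalive_min_time, gboolean delayed_spu)
{
  gdouble gap = (now - last_timestamp) / (gdouble) GST_SECOND;

  return keepalive_min_time > 0.0 && gap >= keepalive_min_time && !delayed_spu;
}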
Code example #17
static gboolean
_cogl_pipeline_fragend_fixed_add_layer (CoglPipeline *pipeline,
                                        CoglPipelineLayer *layer,
                                        unsigned long layers_difference)
{
  CoglTextureUnit *unit =
    _cogl_get_texture_unit (_cogl_pipeline_layer_get_unit_index (layer));
  int unit_index = unit->index;
  int n_rgb_func_args;
  int n_alpha_func_args;

  _COGL_GET_CONTEXT (ctx, FALSE);

  /* XXX: Beware that since we are changing the active texture unit we
   * must make sure we don't call into other Cogl components that may
   * temporarily bind texture objects to query/modify parameters since
   * they will end up binding texture unit 1. See
   * _cogl_bind_gl_texture_transient for more details.
   */
  _cogl_set_active_texture_unit (unit_index);

  if (G_UNLIKELY (unit_index >= get_max_texture_units ()))
    {
      _cogl_disable_texture_unit (unit_index);
      /* TODO: although this isn't considered an error that
       * warrants falling back to a different backend we
       * should print a warning here. */
      return TRUE;
    }

  /* Handle enabling or disabling the right texture target */
  if (layers_difference & COGL_PIPELINE_LAYER_STATE_TEXTURE_TARGET)
    {
      CoglPipelineLayer *tex_authority =
        _cogl_pipeline_layer_get_authority (layer,
                                            COGL_PIPELINE_LAYER_STATE_TEXTURE_DATA);
      CoglPipelineLayer *target_authority =
        _cogl_pipeline_layer_get_authority (layer,
                                            COGL_PIPELINE_LAYER_STATE_TEXTURE_TARGET);
      /* XXX: currently layers with no associated texture fallback to
       * using ctx->default_gl_texture_2d_tex so they have a texture
       * target of GL_TEXTURE_2D */
      GLenum gl_target =
        tex_authority->texture ? target_authority->target : GL_TEXTURE_2D;

      _cogl_set_active_texture_unit (unit_index);

      /* The common GL code handles binding the right texture so we
         just need to handle enabling and disabling it */

      if (unit->enabled_gl_target != gl_target)
        {
          /* Disable the previous target if it's still enabled */
          if (unit->enabled_gl_target)
            GE (ctx, glDisable (unit->enabled_gl_target));

          /* Enable the new target */
          if (!G_UNLIKELY (COGL_DEBUG_ENABLED (COGL_DEBUG_DISABLE_TEXTURING)))
            {
              GE (ctx, glEnable (gl_target));
              unit->enabled_gl_target = gl_target;
            }
        }
    }
  else
    {
      /* Even though there may be no difference between the last flushed
       * texture state and the current layers texture state it may be that the
       * texture unit has been disabled for some time so we need to assert that
       * it's enabled now.
       */
      if (!G_UNLIKELY (COGL_DEBUG_ENABLED (COGL_DEBUG_DISABLE_TEXTURING)) &&
          unit->enabled_gl_target == 0)
        {
          _cogl_set_active_texture_unit (unit_index);
          GE (ctx, glEnable (unit->gl_target));
          unit->enabled_gl_target = unit->gl_target;
        }
    }

  if (layers_difference & COGL_PIPELINE_LAYER_STATE_COMBINE)
    {
      CoglPipelineLayer *authority =
        _cogl_pipeline_layer_get_authority (layer,
                                            COGL_PIPELINE_LAYER_STATE_COMBINE);
      CoglPipelineLayerBigState *big_state = authority->big_state;

      GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE));

      /* Set the combiner functions... */
      GE (ctx, glTexEnvi (GL_TEXTURE_ENV,
                          GL_COMBINE_RGB,
                          big_state->texture_combine_rgb_func));
      GE (ctx, glTexEnvi (GL_TEXTURE_ENV,
                          GL_COMBINE_ALPHA,
                          big_state->texture_combine_alpha_func));

      /*
       * Setup the function arguments...
       */

      /* For the RGB components... */
      n_rgb_func_args =
        _cogl_get_n_args_for_combine_func (big_state->texture_combine_rgb_func);

      GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_SRC0_RGB,
                          big_state->texture_combine_rgb_src[0]));
      GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_OPERAND0_RGB,
                          big_state->texture_combine_rgb_op[0]));
      if (n_rgb_func_args > 1)
        {
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_SRC1_RGB,
                              big_state->texture_combine_rgb_src[1]));
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_OPERAND1_RGB,
                              big_state->texture_combine_rgb_op[1]));
        }
      if (n_rgb_func_args > 2)
        {
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_SRC2_RGB,
                              big_state->texture_combine_rgb_src[2]));
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_OPERAND2_RGB,
                              big_state->texture_combine_rgb_op[2]));
        }

      /* For the Alpha component */
      n_alpha_func_args =
        _cogl_get_n_args_for_combine_func (big_state->texture_combine_alpha_func);

      GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_SRC0_ALPHA,
                          big_state->texture_combine_alpha_src[0]));
      GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_OPERAND0_ALPHA,
                          big_state->texture_combine_alpha_op[0]));
      if (n_alpha_func_args > 1)
        {
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_SRC1_ALPHA,
                              big_state->texture_combine_alpha_src[1]));
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_OPERAND1_ALPHA,
                              big_state->texture_combine_alpha_op[1]));
        }
      if (n_alpha_func_args > 2)
        {
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_SRC2_ALPHA,
                              big_state->texture_combine_alpha_src[2]));
          GE (ctx, glTexEnvi (GL_TEXTURE_ENV, GL_OPERAND2_ALPHA,
                              big_state->texture_combine_alpha_op[2]));
        }
    }

  if (layers_difference & COGL_PIPELINE_LAYER_STATE_COMBINE_CONSTANT)
    {
      CoglPipelineLayer *authority =
        _cogl_pipeline_layer_get_authority
        (layer, COGL_PIPELINE_LAYER_STATE_COMBINE_CONSTANT);
      CoglPipelineLayerBigState *big_state = authority->big_state;

      GE (ctx, glTexEnvfv (GL_TEXTURE_ENV, GL_TEXTURE_ENV_COLOR,
                           big_state->texture_combine_constant));
    }

  return TRUE;
}
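
_cogl_get_n_args_for_combine_func above follows the fixed-function texture combiner rules from the GL spec. As an illustration only (standard GL enums; this is not the actual Cogl helper), the mapping looks like this:

#include <GL/gl.h>

/* Illustration: GL_REPLACE consumes one source, the two-operand combiners
 * consume two, and GL_INTERPOLATE consumes three. */
static int
example_n_args_for_combine_func (GLenum func)
{
  switch (func)
    {
    case GL_REPLACE:
      return 1;
    case GL_MODULATE:
    case GL_ADD:
    case GL_ADD_SIGNED:
    case GL_SUBTRACT:
    case GL_DOT3_RGB:
    case GL_DOT3_RGBA:
      return 2;
    case GL_INTERPOLATE:
      return 3;
    default:
      return 0;
    }
}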
Code example #18
File: gstkateenc.c Project: jonasl/gst-svtplayer
static GstFlowReturn
gst_kate_enc_send_headers (GstKateEnc * ke)
{
  GstFlowReturn rflow = GST_FLOW_OK;
  GstCaps *caps;
  GList *headers = NULL, *item;

  if (G_UNLIKELY (ke->category == NULL || *ke->category == '\0')) {
    /* The error code is a bit of a lie, but seems most appropriate. */
    GST_ELEMENT_ERROR (ke, LIBRARY, SETTINGS, (NULL),
        ("The 'category' property must be set. For subtitles, set it to "
            "either 'SUB' (text subtitles) or 'K-SPU' (dvd-style subtitles)"));
    return GST_FLOW_ERROR;
  }

  gst_kate_enc_set_metadata (ke);

  /* encode headers and store them in a list */
  while (1) {
    kate_packet kp;
    int ret = kate_encode_headers (&ke->k, &ke->kc, &kp);
    if (ret == 0) {
      GstBuffer *buffer;

      buffer = gst_kate_enc_create_buffer (ke, &kp, 0, 0, 0, TRUE);
      if (!buffer) {
        GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
            ("Failed to create buffer, %u bytes", (guint) kp.nbytes));
        rflow = GST_FLOW_ERROR;
        break;
      }
      kate_packet_clear (&kp);

      headers = g_list_append (headers, buffer);
    } else if (ret > 0) {
      GST_LOG_OBJECT (ke, "Last header encoded");
      break;
    } else {
      GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
          ("kate_encode_headers: %d", ret));
      rflow = GST_FLOW_ERROR;
      break;
    }
  }

  if (rflow == GST_FLOW_OK) {
    if (gst_kate_enc_is_simple_subtitle_category (ke, ke->category)) {
      caps = gst_kate_util_set_header_on_caps (&ke->element,
          gst_caps_from_string ("subtitle/x-kate"), headers);
    } else {
      caps = gst_kate_util_set_header_on_caps (&ke->element,
          gst_caps_from_string ("application/x-kate"), headers);
    }
    if (caps) {
      GST_DEBUG_OBJECT (ke, "here are the caps: %" GST_PTR_FORMAT, caps);
      gst_pad_set_caps (ke->srcpad, caps);

      GST_LOG_OBJECT (ke, "setting caps on headers");
      item = headers;
      while (item) {
        GstBuffer *buffer = item->data;
        GST_LOG_OBJECT (ke, "settings caps on header %p", buffer);
        gst_buffer_set_caps (buffer, caps);
        item = item->next;
      }

      gst_caps_unref (caps);

      GST_LOG_OBJECT (ke, "pushing headers");
      item = headers;
      while (item) {
        GstBuffer *buffer = item->data;
        GST_LOG_OBJECT (ke, "pushing header %p", buffer);
        gst_kate_enc_push_buffer (ke, buffer);
        item = item->next;
      }
    } else {
      GST_ERROR_OBJECT (ke, "Failed to set headers on caps");
    }
  }

  g_list_free (headers);

  return rflow;
}
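
gst_kate_util_set_header_on_caps is not shown here. As an assumption, it most likely follows the conventional GStreamer 0.10 pattern (as used by the vorbis/theora encoders) of storing the header buffers in a "streamheader" array on the caps; a hedged sketch of that pattern:

#include <gst/gst.h>

/* Sketch (GStreamer 0.10 API, matching the code above): mark each header
 * buffer as in-caps and store the list in the caps' "streamheader" field. */
static GstCaps *
example_set_header_on_caps (GstCaps * caps, GList * headers)
{
  GstStructure *structure;
  GValue array = { 0 };
  GList *item;

  caps = gst_caps_make_writable (caps);
  structure = gst_caps_get_structure (caps, 0);

  g_value_init (&array, GST_TYPE_ARRAY);

  for (item = headers; item; item = item->next) {
    GstBuffer *buffer = item->data;
    GValue value = { 0 };

    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_IN_CAPS);
    g_value_init (&value, GST_TYPE_BUFFER);
    gst_value_set_buffer (&value, buffer);
    gst_value_array_append_value (&array, &value);
    g_value_unset (&value);
  }

  gst_structure_set_value (structure, "streamheader", &array);
  g_value_unset (&array);

  return caps;
}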
Code example #19
static void
gst_pvrvideosink_xwindow_update_geometry (GstPVRVideoSink * pvrvideosink)
{
  XWindowAttributes attr;
  WSEGLError glerror;
  WSEGLDrawableParams source_params;
  PVRSRV_CLIENT_MEM_INFO *client_mem_info;

  /* Update the window geometry */
  g_mutex_lock (pvrvideosink->dcontext->x_lock);
  if (G_UNLIKELY (pvrvideosink->xwindow == NULL)) {
    g_mutex_unlock (pvrvideosink->dcontext->x_lock);
    return;
  }
  pvrvideosink->redraw_borders = TRUE;

  XGetWindowAttributes (pvrvideosink->dcontext->x_display,
      pvrvideosink->xwindow->window, &attr);

  pvrvideosink->xwindow->width = attr.width;
  pvrvideosink->xwindow->height = attr.height;

  if (!pvrvideosink->have_render_rect) {
    pvrvideosink->render_rect.x = pvrvideosink->render_rect.y = 0;
    pvrvideosink->render_rect.w = attr.width;
    pvrvideosink->render_rect.h = attr.height;
  }
  if (pvrvideosink->dcontext != NULL) {
    glerror =
        pvrvideosink->dcontext->wsegl_table->
        pfnWSEGL_DeleteDrawable (pvrvideosink->dcontext->drawable_handle);
    if (glerror != WSEGL_SUCCESS) {
      GST_ERROR_OBJECT (pvrvideosink, "Error destroying drawable");
      g_mutex_unlock (pvrvideosink->dcontext->x_lock);
      return;
    }
    glerror =
        pvrvideosink->dcontext->wsegl_table->
        pfnWSEGL_CreateWindowDrawable (pvrvideosink->dcontext->display_handle,
        pvrvideosink->dcontext->glconfig,
        &pvrvideosink->dcontext->drawable_handle,
        (NativeWindowType) pvrvideosink->xwindow->window,
        &pvrvideosink->dcontext->rotation);
    if (glerror != WSEGL_SUCCESS) {
      GST_ERROR_OBJECT (pvrvideosink, "Error creating drawable");
      g_mutex_unlock (pvrvideosink->dcontext->x_lock);
      return;
    }
    glerror =
        pvrvideosink->dcontext->wsegl_table->
        pfnWSEGL_GetDrawableParameters (pvrvideosink->dcontext->drawable_handle,
        &source_params, &pvrvideosink->render_params);
    if (glerror != WSEGL_SUCCESS) {
      GST_ERROR_OBJECT (pvrvideosink, "Error getting Drawable params");
      g_mutex_unlock (pvrvideosink->dcontext->x_lock);
      return;
    }

    client_mem_info =
        (PVRSRV_CLIENT_MEM_INFO *) pvrvideosink->render_params.hPrivateData;
    PVR2DMEMINFO_INITIALISE (&pvrvideosink->dcontext->dst_mem, client_mem_info);
  }

  g_mutex_unlock (pvrvideosink->dcontext->x_lock);
}
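
The geometry update above boils down to re-reading the window size with XGetWindowAttributes and, when no explicit render rectangle was configured, falling back to the full window before recreating the WSEGL drawable. A self-contained sketch of the X11 part (hypothetical types):

#include <X11/Xlib.h>

typedef struct {
  int x, y, w, h;
} ExampleRect;

/* Sketch: query the current window size and derive the default render
 * rectangle covering the whole window. */
static ExampleRect
example_default_render_rect (Display * display, Window window)
{
  XWindowAttributes attr;
  ExampleRect rect = { 0, 0, 0, 0 };

  if (XGetWindowAttributes (display, window, &attr)) {
    rect.w = attr.width;
    rect.h = attr.height;
  }
  return rect;
}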
Code example #20
File: gstkateenc.c Project: jonasl/gst-svtplayer
static GstFlowReturn
gst_kate_enc_chain_spu (GstKateEnc * ke, GstBuffer * buf)
{
  kate_packet kp;
  kate_region *kregion;
  kate_bitmap *kbitmap;
  kate_palette *kpalette;
  GstFlowReturn rflow;
  int ret = 0;

  /* allocate region, bitmap, and palette, in case we have to delay encoding them */
  kregion = (kate_region *) g_malloc (sizeof (kate_region));
  kbitmap = (kate_bitmap *) g_malloc (sizeof (kate_bitmap));
  kpalette = (kate_palette *) g_malloc (sizeof (kate_palette));
  if (!kregion || !kpalette || !kbitmap) {
    if (kregion)
      g_free (kregion);
    if (kbitmap)
      g_free (kbitmap);
    if (kpalette)
      g_free (kpalette);
    GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL), ("Out of memory"));
    return GST_FLOW_ERROR;
  }

  rflow = gst_kate_spu_decode_spu (ke, buf, kregion, kbitmap, kpalette);
  if (G_UNLIKELY (rflow != GST_FLOW_OK)) {
    GST_ERROR_OBJECT (ke, "Failed to decode incoming SPU");
#if 0
    {
      static int spu_count = 0;
      FILE *f;
      char name[32];
      snprintf (name, sizeof (name), "/tmp/bad_spu_%04d", spu_count++);
      name[sizeof (name) - 1] = 0;
      f = fopen (name, "w");
      if (f) {
        fwrite (GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf), 1, f);
        fclose (f);
      }
    }
#endif
  } else if (G_UNLIKELY (kbitmap->width == 0 || kbitmap->height == 0)) {
    /* there are some DVDs (well, at least one) where some dimwits put in a wholly transparent full screen 720x576 SPU !!!!?! */
    GST_WARNING_OBJECT (ke, "SPU is totally invisible - dimwits");
    rflow = GST_FLOW_OK;
  } else {
    /* timestamp offsets are hidden in the SPU packets */
    GstClockTime start =
        GST_BUFFER_TIMESTAMP (buf) + GST_KATE_STM_TO_GST (ke->show_time);
    GstClockTime stop =
        GST_BUFFER_TIMESTAMP (buf) + GST_KATE_STM_TO_GST (ke->hide_time);
    kate_float t0 = start / (double) GST_SECOND;
    kate_float t1 = stop / (double) GST_SECOND;
    GST_DEBUG_OBJECT (ke, "buf ts %f, start/show %hu/%hu",
        GST_BUFFER_TIMESTAMP (buf) / (double) GST_SECOND, ke->show_time,
        ke->hide_time);

#if 0
    {
      static int spu_count = 0;
      FILE *f;
      char name[32];
      snprintf (name, sizeof (name), "/tmp/spu_%04d", spu_count++);
      name[sizeof (name) - 1] = 0;
      f = fopen (name, "w");
      if (f) {
        fwrite (GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf), 1, f);
        fclose (f);
      }
    }
#endif
    GST_DEBUG_OBJECT (ke, "Encoding %ux%u SPU: (%u bytes) from %f to %f",
        (guint) kbitmap->width, (guint) kbitmap->height,
        GST_BUFFER_SIZE (buf), t0, t1);

    ret = kate_encode_set_region (&ke->k, kregion);
    if (G_UNLIKELY (ret < 0)) {
      GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
          ("kate_encode_set_region: %d", ret));
      rflow = GST_FLOW_ERROR;
    } else {
      ret = kate_encode_set_palette (&ke->k, kpalette);
      if (G_UNLIKELY (ret < 0)) {
        GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
            ("kate_encode_set_palette: %d", ret));
        rflow = GST_FLOW_ERROR;
      } else {
        ret = kate_encode_set_bitmap (&ke->k, kbitmap);
        if (G_UNLIKELY (ret < 0)) {
          GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
              ("kate_encode_set_bitmap: %d", ret));
          rflow = GST_FLOW_ERROR;
        } else {
          /* Some SPUs have no hide time - so I'm going to delay the encoding of the packet
             till either a suitable event happens, and the time of this event will be used
             as the end time of this SPU, which will then be encoded and sent off. Suitable
             events are the arrival of a subsequent SPU (eg, this SPU will replace the one
             with no end), EOS, a new segment event, or a time threshold being reached */
          if (ke->hide_time <= ke->show_time) {
            GST_INFO_OBJECT (ke,
                "Cannot encode SPU packet now, hide time is now known (starting at %f) - delaying",
                t0);
            ke->delayed_spu = TRUE;
            ke->delayed_start = start;
            ke->delayed_bitmap = kbitmap;
            ke->delayed_palette = kpalette;
            ke->delayed_region = kregion;
            rflow = GST_FLOW_OK;
          } else {
            ret = kate_encode_text (&ke->k, t0, t1, "", 0, &kp);
            if (G_UNLIKELY (ret < 0)) {
              GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
                  ("Failed to encode empty text for SPU buffer: %d", ret));
              rflow = GST_FLOW_ERROR;
            } else {
              rflow =
                  gst_kate_enc_chain_push_packet (ke, &kp, start,
                  stop - start + 1);
            }
          }
        }
      }
    }

    if (!ke->delayed_spu) {
      g_free (kpalette->colors);
      g_free (kpalette);
      g_free (kbitmap->pixels);
      g_free (kbitmap);
      g_free (kregion);
    }
  }

  return rflow;
}
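
When the hide time is not known, the code above stashes the decoded SPU pieces instead of encoding them right away. A simplified sketch of that bookkeeping (hypothetical struct and helper; libkate types as used above):

#include <glib.h>
#include <gst/gst.h>
#include <kate/kate.h>

/* Sketch: remember a SPU whose end time is not yet known so it can be
 * encoded later, when a subsequent SPU, a new segment, EOS or a timeout
 * supplies the end time. */
typedef struct {
  gboolean pending;
  GstClockTime start;
  kate_bitmap *bitmap;
  kate_palette *palette;
  kate_region *region;
} ExampleDelayedSpu;

static void
example_delay_spu (ExampleDelayedSpu * d, GstClockTime start,
    kate_bitmap * kbitmap, kate_palette * kpalette, kate_region * kregion)
{
  d->pending = TRUE;
  d->start = start;
  d->bitmap = kbitmap;
  d->palette = kpalette;
  d->region = kregion;
}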
Code example #21
File: gstvdpsink.c Project: LCW523/gst-plugins-bad
static GstFlowReturn
gst_vdp_sink_show_frame (GstBaseSink * bsink, GstBuffer * outbuf)
{
  VdpSink *vdp_sink = GST_VDP_SINK (bsink);
  VdpStatus status;
  GstVdpDevice *device;

  g_return_val_if_fail (GST_IS_VDP_SINK (vdp_sink), FALSE);

  /* We take the flow_lock. If expose is in there we don't want to run
     concurrently from the data flow thread */
  g_mutex_lock (vdp_sink->flow_lock);

  if (G_UNLIKELY (vdp_sink->window == NULL)) {
    g_mutex_unlock (vdp_sink->flow_lock);
    return GST_FLOW_ERROR;
  }

  device = vdp_sink->device;

  if (vdp_sink->cur_image) {
    VdpOutputSurface surface =
        GST_VDP_OUTPUT_BUFFER (vdp_sink->cur_image)->surface;
    VdpPresentationQueueStatus queue_status;
    VdpTime pres_time;

    g_mutex_lock (vdp_sink->x_lock);
    status =
        device->vdp_presentation_queue_query_surface_status (vdp_sink->window->
        queue, surface, &queue_status, &pres_time);
    g_mutex_unlock (vdp_sink->x_lock);

    if (queue_status == VDP_PRESENTATION_QUEUE_STATUS_QUEUED) {
      g_mutex_unlock (vdp_sink->flow_lock);
      return GST_FLOW_OK;
    }
  }

  /* Expose sends a NULL image, we take the latest frame */
  if (!outbuf) {
    if (vdp_sink->cur_image) {
      outbuf = vdp_sink->cur_image;
    } else {
      g_mutex_unlock (vdp_sink->flow_lock);
      return GST_FLOW_OK;
    }
  }

  gst_vdp_sink_window_update_geometry (vdp_sink, vdp_sink->window);

  g_mutex_lock (vdp_sink->x_lock);

  status = device->vdp_presentation_queue_display (vdp_sink->window->queue,
      GST_VDP_OUTPUT_BUFFER (outbuf)->surface, 0, 0, 0);
  if (status != VDP_STATUS_OK) {
    GST_ELEMENT_ERROR (vdp_sink, RESOURCE, READ,
        ("Could not display frame"),
        ("Error returned from vdpau was: %s",
            device->vdp_get_error_string (status)));

    g_mutex_unlock (vdp_sink->x_lock);
    g_mutex_unlock (vdp_sink->flow_lock);
    return GST_FLOW_ERROR;
  }


  if (!vdp_sink->cur_image)
    vdp_sink->cur_image = gst_buffer_ref (outbuf);

  else if (vdp_sink->cur_image != outbuf) {
    gst_buffer_unref (vdp_sink->cur_image);
    vdp_sink->cur_image = gst_buffer_ref (outbuf);
  }

  XSync (vdp_sink->device->display, FALSE);

  g_mutex_unlock (vdp_sink->x_lock);
  g_mutex_unlock (vdp_sink->flow_lock);

  return GST_FLOW_OK;
}
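
The cur_image handling above keeps exactly one reference to the most recently displayed buffer so that an expose event can redraw it when no new buffer is supplied. A minimal sketch of that reference management (hypothetical helper):

#include <gst/gst.h>

/* Sketch: replace the cached "current image" with the buffer that was just
 * shown, holding a single reference to it. */
static void
example_update_current_image (GstBuffer ** cur_image, GstBuffer * outbuf)
{
  if (*cur_image == outbuf)
    return;

  if (*cur_image)
    gst_buffer_unref (*cur_image);
  *cur_image = gst_buffer_ref (outbuf);
}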
Code example #22
static GstFlowReturn
gst_opus_dec_handle_frame (GstAudioDecoder * adec, GstBuffer * buf)
{
  GstFlowReturn res;
  GstOpusDec *dec;

  /* no fancy draining */
  if (G_UNLIKELY (!buf))
    return GST_FLOW_OK;

  dec = GST_OPUS_DEC (adec);
  GST_LOG_OBJECT (dec,
      "Got buffer ts %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  /* If we have the streamheader and vorbiscomment from the caps already
   * ignore them here */
  if (dec->streamheader && dec->vorbiscomment) {
    if (memcmp_buffers (dec->streamheader, buf)) {
      GST_DEBUG_OBJECT (dec, "found streamheader");
      gst_audio_decoder_finish_frame (adec, NULL, 1);
      res = GST_FLOW_OK;
    } else if (memcmp_buffers (dec->vorbiscomment, buf)) {
      GST_DEBUG_OBJECT (dec, "found vorbiscomments");
      gst_audio_decoder_finish_frame (adec, NULL, 1);
      res = GST_FLOW_OK;
    } else {
      res = opus_dec_chain_parse_data (dec, buf);
    }
  } else {
    /* Otherwise fall back to packet counting and assume that the
     * first two packets might be the headers, checking magic. */
    switch (dec->packetno) {
      case 0:
        if (gst_opus_header_is_header (buf, "OpusHead", 8)) {
          GST_DEBUG_OBJECT (dec, "found streamheader");
          res = gst_opus_dec_parse_header (dec, buf);
          gst_audio_decoder_finish_frame (adec, NULL, 1);
        } else {
          res = opus_dec_chain_parse_data (dec, buf);
        }
        break;
      case 1:
        if (gst_opus_header_is_header (buf, "OpusTags", 8)) {
          GST_DEBUG_OBJECT (dec, "counted vorbiscomments");
          res = gst_opus_dec_parse_comments (dec, buf);
          gst_audio_decoder_finish_frame (adec, NULL, 1);
        } else {
          res = opus_dec_chain_parse_data (dec, buf);
        }
        break;
      default:
      {
        res = opus_dec_chain_parse_data (dec, buf);
        break;
      }
    }
  }

  dec->packetno++;

  return res;
}
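
gst_opus_header_is_header is not shown; presumably it just compares the first bytes of the buffer against the "OpusHead" / "OpusTags" magic. A sketch of such a check using the GStreamer 0.10 buffer macros seen elsewhere in these examples:

#include <string.h>
#include <gst/gst.h>

/* Sketch: TRUE if the buffer starts with the given magic string,
 * e.g. example_buffer_has_magic (buf, "OpusHead", 8). */
static gboolean
example_buffer_has_magic (GstBuffer * buf, const gchar * magic, guint len)
{
  if (GST_BUFFER_SIZE (buf) < len)
    return FALSE;
  return memcmp (GST_BUFFER_DATA (buf), magic, len) == 0;
}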
Code example #23
static void
major_copy_or_mark_object (void **ptr, SgenGrayQueue *queue)
{
	void *obj = *ptr;
	MSBlockInfo *block;

	HEAVY_STAT (++stat_copy_object_called_major);

	DEBUG (9, g_assert (obj));
	DEBUG (9, g_assert (current_collection_generation == GENERATION_OLD));

	if (ptr_in_nursery (obj)) {
		int word, bit;
		char *forwarded, *old_obj;

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}
		if (SGEN_OBJECT_IS_PINNED (obj))
			return;

		HEAVY_STAT (++stat_objects_copied_major);

	do_copy_object:
		old_obj = obj;
		obj = copy_object_no_checks (obj, queue);
		if (G_UNLIKELY (old_obj == obj)) {
			/*If we fail to evacuate an object we just stop doing it for a given block size as all other will surely fail too.*/
			if (!ptr_in_nursery (obj)) {
				int size_index;
				block = MS_BLOCK_FOR_OBJ (obj);
				size_index = block->obj_size_index;
				evacuate_block_obj_sizes [size_index] = FALSE;
				MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
			return;
		}
		*ptr = obj;

		/*
		 * FIXME: See comment for copy_object_no_checks().  If
		 * we have that, we can let the allocation function
		 * give us the block info, too, and we won't have to
		 * re-fetch it.
		 */
		block = MS_BLOCK_FOR_OBJ (obj);
		MS_CALC_MARK_BIT (word, bit, obj);
		DEBUG (9, g_assert (!MS_MARK_BIT (block, word, bit)));
		MS_SET_MARK_BIT (block, word, bit);
	} else {
		char *forwarded;
#ifndef FIXED_HEAP
		mword objsize;
#endif

		if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
			*ptr = forwarded;
			return;
		}

#ifdef FIXED_HEAP
		if (MS_PTR_IN_SMALL_MAJOR_HEAP (obj))
#else
		objsize = SGEN_ALIGN_UP (mono_sgen_safe_object_get_size ((MonoObject*)obj));

		if (objsize <= SGEN_MAX_SMALL_OBJ_SIZE)
#endif
		{
			int size_index;

			block = MS_BLOCK_FOR_OBJ (obj);
			size_index = block->obj_size_index;

			if (!block->has_pinned && evacuate_block_obj_sizes [size_index]) {
				if (block->is_to_space)
					return;
				HEAVY_STAT (++stat_major_objects_evacuated);
				goto do_copy_object;
			} else {
				MS_MARK_OBJECT_AND_ENQUEUE (obj, block, queue);
			}
		} else {
			if (SGEN_OBJECT_IS_PINNED (obj))
				return;
			binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), mono_sgen_safe_object_get_size ((MonoObject*)obj));
			SGEN_PIN_OBJECT (obj);
			/* FIXME: only enqueue if object has references */
			GRAY_OBJECT_ENQUEUE (queue, obj);
		}
	}
}
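
MS_CALC_MARK_BIT and MS_SET_MARK_BIT are macros of the mark-sweep block allocator and are not shown here. As an illustration only (invented slot size, not SGen's actual layout), addressing a mark bit typically means turning the object's offset within its block into a word index and a bit index in the block's mark bitmap:

#include <stdint.h>

#define EXAMPLE_SLOT_SIZE 8	/* invented: smallest object granularity */

/* Illustration: split an object's slot index into (word, bit) coordinates
 * within a block-local mark bitmap. */
static void
example_calc_mark_bit (char *block_start, char *obj, int *word, int *bit)
{
	uintptr_t slot = (uintptr_t) (obj - block_start) / EXAMPLE_SLOT_SIZE;

	*word = (int) (slot / (sizeof (uintptr_t) * 8));
	*bit = (int) (slot % (sizeof (uintptr_t) * 8));
}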
Code example #24
File: resample.c Project: linuxmaniac/rtpengine
AVFrame *resample_frame(resample_t *resample, AVFrame *frame, const format_t *to_format) {
	const char *err;
	int errcode = 0;

	uint64_t to_channel_layout = av_get_default_channel_layout(to_format->channels);
	fix_frame_channel_layout(frame);

	if (frame->format != to_format->format)
		goto resample;
	if (frame->sample_rate != to_format->clockrate)
		goto resample;
	if (frame->channel_layout != to_channel_layout)
		goto resample;

	return av_frame_clone(frame);

resample:

	if (G_UNLIKELY(!resample->swresample)) {
		resample->swresample = swr_alloc_set_opts(NULL,
				to_channel_layout,
				to_format->format,
				to_format->clockrate,
				frame->channel_layout,
				frame->format,
				frame->sample_rate,
				0, NULL);
		err = "failed to alloc resample context";
		if (!resample->swresample)
			goto err;

		err = "failed to init resample context";
		if ((errcode = swr_init(resample->swresample)) < 0)
			goto err;
	}

	// get a large enough buffer for resampled audio - this should be enough so we don't
	// have to loop
	int dst_samples = av_rescale_rnd(swr_get_delay(resample->swresample, to_format->clockrate)
			+ frame->nb_samples,
				to_format->clockrate, frame->sample_rate, AV_ROUND_UP);

	AVFrame *swr_frame = av_frame_alloc();

	err = "failed to alloc resampling frame";
	if (!swr_frame)
		goto err;
	av_frame_copy_props(swr_frame, frame);
	swr_frame->format = to_format->format;
	swr_frame->channel_layout = to_channel_layout;
	swr_frame->nb_samples = dst_samples;
	swr_frame->sample_rate = to_format->clockrate;
	err = "failed to get resample buffers";
	if ((errcode = av_frame_get_buffer(swr_frame, 0)) < 0)
		goto err;

	int ret_samples = swr_convert(resample->swresample, swr_frame->extended_data,
				dst_samples,
				(const uint8_t **) frame->extended_data,
				frame->nb_samples);
	err = "failed to resample audio";
	if ((errcode = ret_samples) < 0)
		goto err;

	swr_frame->nb_samples = ret_samples;
	swr_frame->pts = av_rescale(frame->pts, to_format->clockrate, frame->sample_rate);
	return swr_frame;

err:
	if (errcode)
		ilog(LOG_ERR, "Error resampling: %s (%s)", err, av_error(errcode));
	else
		ilog(LOG_ERR, "Error resampling: %s", err);
	resample_shutdown(resample);
	return NULL;
}
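
The dst_samples computation above follows FFmpeg's usual recipe for sizing the output buffer: account for samples still queued inside the resampler, rescale to the target rate, and round up so the buffer is never too small. A sketch of that estimate, following the formula from FFmpeg's resampling example (delay queried at the source rate):

#include <libswresample/swresample.h>
#include <libavutil/mathematics.h>

/* Sketch: upper bound on the number of output samples produced for one
 * input frame, including the resampler's internal delay. */
static int example_estimated_out_samples(SwrContext *swr, int in_samples,
		int in_rate, int out_rate)
{
	return (int) av_rescale_rnd(swr_get_delay(swr, in_rate) + in_samples,
			out_rate, in_rate, AV_ROUND_UP);
}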
Code example #25
static gpointer
verve_env_load_thread (gpointer user_data)
{
  VerveEnv *env = VERVE_ENV (user_data);
  gchar   **paths;
  int       i;
  
  /* Get $PATH directories */
  paths = verve_env_get_path (env);
  
  /* Iterate over paths list */
  for (i=0; !env->load_thread_cancelled && i<g_strv_length (paths); i++)
  {
    const gchar *current;
    gchar       *filename;
    GList       *lp;
    /* Try opening the directory */
    GDir *dir = g_dir_open (paths[i], 0, NULL);

    /* Continue with next directory if this one can't be opened */
    if (G_UNLIKELY (dir == NULL)) 
      continue;

    /* Iterate over files in this directory */
    while (!env->load_thread_cancelled && (current = g_dir_read_name (dir)) != NULL)
      {
        /* Convert to valid UTF-8 */
        filename = g_filename_display_name (current);

        /* Avoid duplicates */
        for (lp = g_list_first (env->binaries); lp != NULL; lp = lp->next)
          if (g_ascii_strcasecmp (lp->data, filename) == 0)
            break;
       
        /* Check details of file if it's not in the list already */
        if (G_LIKELY (lp == NULL))
          {
            /* Determine the absolute path to the file */
            gchar *path = g_build_filename (paths[i], current, NULL);

            /* Check if the path refers to an executable */
            if (g_file_test (path, G_FILE_TEST_IS_EXECUTABLE) &&
                !g_file_test (path, G_FILE_TEST_IS_DIR))
              {
                /* Add file filename to the list */
                env->binaries = g_list_prepend (env->binaries, filename);

                /* No need to free the filename later in this function */
                filename = NULL;
              }

            /* Free absolute path */
            g_free (path);
          }

        /* Release filename if necessary */
        g_free (filename);
      }

    /* Close directory */
    g_dir_close (dir);
  }

  /* Sort binaries */
  env->binaries = g_list_sort (env->binaries, (GCompareFunc) g_utf8_collate);

  /* Emit 'load-binaries' signal */
  g_signal_emit_by_name (env, "load-binaries");

  return env->binaries;
}
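
verve_env_get_path is not shown; presumably it splits the PATH environment variable into its directory components. A hedged sketch of such a helper (hypothetical name and fallback):

#include <glib.h>

/* Sketch: return a NULL-terminated vector of the directories listed in
 * $PATH; the caller frees it with g_strfreev(). */
static gchar **
example_get_path_directories (void)
{
  const gchar *path = g_getenv ("PATH");

  if (path == NULL)
    path = "/usr/local/bin:/usr/bin:/bin";

  return g_strsplit (path, G_SEARCHPATH_SEPARATOR_S, -1);
}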
Code example #26
gboolean
mm_location_gps_raw_add_trace (MMLocationGpsRaw *self,
                               const gchar *trace)
{
    GMatchInfo *match_info = NULL;

    /* Current implementation works only with $GPGGA traces */
    if (!g_str_has_prefix (trace, "$GPGGA"))
        return FALSE;

    /*
     * $GPGGA,hhmmss.ss,llll.ll,a,yyyyy.yy,a,x,xx,x.x,x.x,M,x.x,M,x.x,xxxx*hh
     * 1    = UTC of Position
     * 2    = Latitude
     * 3    = N or S
     * 4    = Longitude
     * 5    = E or W
     * 6    = GPS quality indicator (0=invalid; 1=GPS fix; 2=Diff. GPS fix)
     * 7    = Number of satellites in use [not those in view]
     * 8    = Horizontal dilution of position
     * 9    = Antenna altitude above/below mean sea level (geoid)
     * 10   = Meters  (Antenna height unit)
     * 11   = Geoidal separation (Diff. between WGS-84 earth ellipsoid and
     *        mean sea level.  -=geoid is below WGS-84 ellipsoid)
     * 12   = Meters  (Units of geoidal separation)
     * 13   = Age in seconds since last update from diff. reference station
     * 14   = Diff. reference station ID#
     * 15   = Checksum
     */
    if (G_UNLIKELY (!self->priv->gpgga_regex))
        self->priv->gpgga_regex = g_regex_new ("\\$GPGGA,(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*),(.*)\\*(.*).*",
                                               G_REGEX_RAW | G_REGEX_OPTIMIZE,
                                               0,
                                               NULL);

    if (g_regex_match (self->priv->gpgga_regex, trace, 0, &match_info)) {
        /* UTC time */
        if (self->priv->utc_time)
            g_free (self->priv->utc_time);
        self->priv->utc_time = g_match_info_fetch (match_info, 1);

        /* Latitude */
        self->priv->latitude = MM_LOCATION_GPS_RAW_LATITUDE_UNKNOWN;
        if (get_longitude_or_latitude_from_match_info (match_info, 2, &self->priv->latitude)) {
            gchar *str;

            /* N/S */
            str = g_match_info_fetch (match_info, 3);
            if (str && str[0] == 'S')
                self->priv->latitude *= -1;
            g_free (str);
        }

        /* Longitude */
        self->priv->longitude = MM_LOCATION_GPS_RAW_LONGITUDE_UNKNOWN;
        if (get_longitude_or_latitude_from_match_info (match_info, 4, &self->priv->longitude)) {
            gchar *str;

            /* E/W */
            str = g_match_info_fetch (match_info, 5);
            if (str && str[0] == 'W')
                self->priv->longitude *= -1;
            g_free (str);
        }

        /* Altitude */
        self->priv->altitude = MM_LOCATION_GPS_RAW_ALTITUDE_UNKNOWN;
        mm_get_double_from_match_info (match_info, 9, &self->priv->altitude);
    }

    g_match_info_free (match_info);

    return TRUE;
}
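
get_longitude_or_latitude_from_match_info is not shown. For context, $GPGGA encodes latitude as ddmm.mmmm and longitude as dddmm.mmmm, so the raw field has to be converted to decimal degrees; a sketch of that conversion (hypothetical helper):

#include <stdlib.h>
#include <math.h>

/* Sketch: convert an NMEA ddmm.mmmm / dddmm.mmmm field (e.g. "4807.038",
 * meaning 48 degrees 07.038 minutes) to decimal degrees. */
static double
example_nmea_to_decimal_degrees (const char *field)
{
    double value = atof (field);
    double degrees = floor (value / 100.0);
    double minutes = value - degrees * 100.0;

    return degrees + minutes / 60.0;
}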
Code example #27
File: gst-nle-source.c Project: fluendo/VAS
static GstFlowReturn
gst_nle_source_push_buffer (GstNleSource * nlesrc, GstBuffer * buf,
    gboolean is_audio)
{
  GstPad *sinkpad;
  gboolean push_buf;
  guint64 buf_ts, buf_rel_ts, last_ts;
  GstNleSrcItem *item;
  GstFlowReturn ret;

  item = (GstNleSrcItem *) g_list_nth_data (nlesrc->queue, nlesrc->index);
  buf_ts = GST_BUFFER_TIMESTAMP (buf);

  if (buf_ts < item->start) {
    GST_LOG_OBJECT (nlesrc, "Discard early %s buffer with ts: %"
        GST_TIME_FORMAT " start: %" GST_TIME_FORMAT,
        is_audio ? "audio" : "video", GST_TIME_ARGS (buf_ts),
        GST_TIME_ARGS (item->start));
    gst_buffer_unref (buf);
    return GST_FLOW_OK;
  }
  buf_rel_ts = buf_ts - item->start;

  g_mutex_lock (&nlesrc->stream_lock);

  if (is_audio) {
    push_buf = nlesrc->audio_seek_done;
    last_ts = nlesrc->audio_ts;
    nlesrc->audio_ts = buf_ts;
    sinkpad = nlesrc->audio_sinkpad;
  } else {
    push_buf = nlesrc->video_seek_done;
    last_ts = nlesrc->video_ts;
    nlesrc->video_ts = buf_ts;
    sinkpad = nlesrc->video_sinkpad;
  }

  if (push_buf && GST_BUFFER_TIMESTAMP (buf) >= last_ts) {
    /* Retimestamps buffer */
    guint64 new_ts = nlesrc->start_ts + buf_rel_ts / item->rate;

    GST_BUFFER_TIMESTAMP (buf) = new_ts;
    GST_LOG_OBJECT (nlesrc, "Pushing %s buffer with ts: %" GST_TIME_FORMAT
        " dur:%" GST_TIME_FORMAT " orig:%" GST_TIME_FORMAT,
        is_audio ? "audio" : "video", GST_TIME_ARGS (new_ts),
        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), GST_TIME_ARGS (buf_ts));
    if (GST_BUFFER_DURATION_IS_VALID (buf)) {
      new_ts += GST_BUFFER_DURATION (buf);
    }
    if (new_ts >= nlesrc->accu_time) {
      nlesrc->accu_time = new_ts;
    }

    if (G_UNLIKELY (!nlesrc->item_setup) && !is_audio) {
      GST_DEBUG_OBJECT (nlesrc,
          "Applying roi and title properties for this segment");
      gst_nle_source_update_videocrop (nlesrc, GST_BUFFER_CAPS (buf));
      gst_nle_source_update_overlay_title (nlesrc);
      nlesrc->item_setup = TRUE;
    }

    /* We need to unlock before pushing since push_buffer can block */
    g_mutex_unlock (&nlesrc->stream_lock);

    ret = gst_pad_chain (sinkpad, buf);
    if (ret != GST_FLOW_OK) {
      GST_WARNING_OBJECT (nlesrc, "pushing buffer returned %s",
          gst_flow_get_name (ret));
    }
    return ret;
  } else {
    GST_LOG_OBJECT (nlesrc, "Discard %s buffer with ts: %" GST_TIME_FORMAT,
        is_audio ? "audio" : "video", GST_TIME_ARGS (buf_ts));
    gst_buffer_unref (buf);
    g_mutex_unlock (&nlesrc->stream_lock);
    return GST_FLOW_OK;
  }
}
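
The retimestamping above maps a buffer's position inside its source clip onto the output timeline, compressing or stretching it by the clip's playback rate. A self-contained sketch of that rule:

#include <gst/gst.h>

/* Sketch: shift a clip-relative timestamp to the output segment, scaled by
 * the playback rate (rate > 1.0 plays faster, so timestamps get closer). */
static GstClockTime
example_retimestamp (GstClockTime buf_ts, GstClockTime item_start,
    GstClockTime segment_start_ts, gdouble rate)
{
  GstClockTime rel_ts = buf_ts - item_start;

  return segment_start_ts + (GstClockTime) (rel_ts / rate);
}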
Code example #28
/* this internal thread does nothing else but write samples to the audio device.
 * It will write each segment in the ringbuffer and will update the play
 * pointer.
 * The start/stop methods control the thread.
 */
static void
audioringbuffer_thread_func (GstAudioRingBuffer * buf)
{
    GstAudioSink *sink;
    GstAudioSinkClass *csink;
    GstAudioSinkRingBuffer *abuf = GST_AUDIO_SINK_RING_BUFFER_CAST (buf);
    WriteFunc writefunc;
    GstMessage *message;
    GValue val = { 0 };

    sink = GST_AUDIO_SINK (GST_OBJECT_PARENT (buf));
    csink = GST_AUDIO_SINK_GET_CLASS (sink);

    GST_DEBUG_OBJECT (sink, "enter thread");

    GST_OBJECT_LOCK (abuf);
    GST_DEBUG_OBJECT (sink, "signal wait");
    GST_AUDIO_SINK_RING_BUFFER_SIGNAL (buf);
    GST_OBJECT_UNLOCK (abuf);

    writefunc = csink->write;
    if (writefunc == NULL)
        goto no_function;

    message = gst_message_new_stream_status (GST_OBJECT_CAST (buf),
              GST_STREAM_STATUS_TYPE_ENTER, GST_ELEMENT_CAST (sink));
    g_value_init (&val, GST_TYPE_G_THREAD);
    g_value_set_boxed (&val, g_thread_self ());
    gst_message_set_stream_status_object (message, &val);
    g_value_unset (&val);
    GST_DEBUG_OBJECT (sink, "posting ENTER stream status");
    gst_element_post_message (GST_ELEMENT_CAST (sink), message);

    while (TRUE) {
        gint left, len;
        guint8 *readptr;
        gint readseg;

        /* buffer must be started */
        if (gst_audio_ring_buffer_prepare_read (buf, &readseg, &readptr, &len)) {
            gint written;

            left = len;
            do {
                written = writefunc (sink, readptr, left);
                GST_LOG_OBJECT (sink, "transfered %d bytes of %d from segment %d",
                                written, left, readseg);
                if (written < 0 || written > left) {
                    /* might not be critical, it e.g. happens when aborting playback */
                    GST_WARNING_OBJECT (sink,
                                        "error writing data in %s (reason: %s), skipping segment (left: %d, written: %d)",
                                        GST_DEBUG_FUNCPTR_NAME (writefunc),
                                        (errno > 1 ? g_strerror (errno) : "unknown"), left, written);
                    break;
                }
                left -= written;
                readptr += written;
            } while (left > 0);

            /* clear written samples */
            gst_audio_ring_buffer_clear (buf, readseg);

            /* we wrote one segment */
            gst_audio_ring_buffer_advance (buf, 1);
        } else {
            GST_OBJECT_LOCK (abuf);
            if (!abuf->running)
                goto stop_running;
            if (G_UNLIKELY (g_atomic_int_get (&buf->state) ==
                            GST_AUDIO_RING_BUFFER_STATE_STARTED)) {
                GST_OBJECT_UNLOCK (abuf);
                continue;
            }
            GST_DEBUG_OBJECT (sink, "signal wait");
            GST_AUDIO_SINK_RING_BUFFER_SIGNAL (buf);
            GST_DEBUG_OBJECT (sink, "wait for action");
            GST_AUDIO_SINK_RING_BUFFER_WAIT (buf);
            GST_DEBUG_OBJECT (sink, "got signal");
            if (!abuf->running)
                goto stop_running;
            GST_DEBUG_OBJECT (sink, "continue running");
            GST_OBJECT_UNLOCK (abuf);
        }
    }

    /* Will never be reached */
    g_assert_not_reached ();
    return;

    /* ERROR */
no_function:
    {
        GST_DEBUG_OBJECT (sink, "no write function, exit thread");
        return;
    }
stop_running:
    {
        GST_OBJECT_UNLOCK (abuf);
        GST_DEBUG_OBJECT (sink, "stop running, exit thread");
        message = gst_message_new_stream_status (GST_OBJECT_CAST (buf),
                  GST_STREAM_STATUS_TYPE_LEAVE, GST_ELEMENT_CAST (sink));
        g_value_init (&val, GST_TYPE_G_THREAD);
        g_value_set_boxed (&val, g_thread_self ());
        gst_message_set_stream_status_object (message, &val);
        g_value_unset (&val);
        GST_DEBUG_OBJECT (sink, "posting LEAVE stream status");
        gst_element_post_message (GST_ELEMENT_CAST (sink), message);
        return;
    }
}
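
The inner do/while above is the classic partial-write loop: the sink's write callback may consume fewer bytes than offered, so the remainder is retried until the segment is drained or an error occurs. A plain-C sketch of that loop (hypothetical names):

/* Sketch: repeatedly call a write callback until the whole segment has been
 * consumed; a negative or nonsensical return value aborts the segment. */
typedef int (*ExampleWriteFunc) (void *sink, const unsigned char *data, int len);

static int
example_write_all (void *sink, ExampleWriteFunc writefunc,
                   const unsigned char *data, int len)
{
    while (len > 0) {
        int written = writefunc (sink, data, len);

        if (written < 0 || written > len)
            return -1;
        data += written;
        len -= written;
    }
    return 0;
}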
Code example #29
static void
gst_droidcamsrc_dev_video_frame_callback (void *user,
    DroidMediaCameraRecordingData * video_data)
{
  GstDroidCamSrcDev *dev = (GstDroidCamSrcDev *) user;
  GstDroidCamSrc *src = GST_DROIDCAMSRC (GST_PAD_PARENT (dev->imgsrc->pad));
  void *data = droid_media_camera_recording_frame_get_data (video_data);
  GstBuffer *buffer;
  GstMemory *mem;
  GstDroidCamSrcDevVideoData *mem_data;
  gboolean drop_buffer;

  GST_DEBUG_OBJECT (src, "dev video frame callback");

  g_mutex_lock (&dev->vid->lock);

  /* TODO: not sure what to do with timestamp */

  /* unlikely but just in case */
  if (G_UNLIKELY (!data)) {
    GST_ERROR ("invalid memory from camera HAL");
    droid_media_camera_release_recording_frame (dev->cam, video_data);
    goto unlock_and_out;
  }

  /* TODO: this is bad */
  mem_data = g_slice_new0 (GstDroidCamSrcDevVideoData);
  mem_data->dev = dev;
  mem_data->data = video_data;

  buffer = gst_buffer_new ();
  mem = gst_wrapped_memory_allocator_wrap (dev->wrap_allocator,
      data, droid_media_camera_recording_frame_get_size (video_data),
      (GFunc) gst_droidcamsrc_dev_release_recording_frame, mem_data);
  gst_buffer_insert_memory (buffer, 0, mem);

  GST_BUFFER_OFFSET (buffer) = dev->vid->video_frames;
  GST_BUFFER_OFFSET_END (buffer) = ++dev->vid->video_frames;
  gst_droidcamsrc_timestamp (src, buffer);

  g_rec_mutex_lock (dev->lock);
  ++dev->vid->queued_frames;
  g_rec_mutex_unlock (dev->lock);

  drop_buffer = !dev->vid->running;

  if (drop_buffer) {
    GST_INFO_OBJECT (src,
        "dropping buffer because video recording is not running");
    gst_buffer_unref (buffer);
  } else {
    g_mutex_lock (&dev->vidsrc->lock);
    g_queue_push_tail (dev->vidsrc->queue, buffer);
    g_cond_signal (&dev->vidsrc->cond);
    g_mutex_unlock (&dev->vidsrc->lock);
  }

unlock_and_out:
  /* in case stop_video_recording() is waiting for us */
  g_cond_signal (&dev->vid->cond);
  g_mutex_unlock (&dev->vid->lock);
}
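
The handoff above is a standard producer/consumer pattern: the callback enqueues the wrapped buffer under the source pad's lock and signals the streaming thread waiting on the condition variable. A minimal sketch of the producer side (hypothetical struct):

#include <glib.h>

typedef struct {
  GMutex lock;
  GCond cond;
  GQueue *queue;
} ExampleBufferQueue;

/* Sketch: enqueue one item and wake up a consumer blocked on the cond. */
static void
example_buffer_queue_push (ExampleBufferQueue * q, gpointer item)
{
  g_mutex_lock (&q->lock);
  g_queue_push_tail (q->queue, item);
  g_cond_signal (&q->cond);
  g_mutex_unlock (&q->lock);
}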
Code example #30
File: mpegtsbase.c Project: Haifen/gst-plugins-bad
static GstFlowReturn
mpegts_base_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstFlowReturn res = GST_FLOW_OK;
  MpegTSBase *base;
  MpegTSPacketizerPacketReturn pret;
  MpegTSPacketizer2 *packetizer;
  MpegTSPacketizerPacket packet;
  MpegTSBaseClass *klass;

  base = GST_MPEGTS_BASE (parent);
  klass = GST_MPEGTS_BASE_GET_CLASS (base);

  packetizer = base->packetizer;

  if (klass->input_done)
    gst_buffer_ref (buf);

  if (GST_BUFFER_IS_DISCONT (buf)) {
    GST_DEBUG_OBJECT (base, "Got DISCONT buffer, flushing");
    res = mpegts_base_drain (base);
    if (G_UNLIKELY (res != GST_FLOW_OK))
      return res;

    mpegts_base_flush (base, FALSE);
    /* In the case of discontinuities in push-mode with TIME segment
     * we want to drop all previous observations (hard:TRUE) from
     * the packetizer */
    if (base->mode == BASE_MODE_PUSHING
        && base->segment.format == GST_FORMAT_TIME)
      mpegts_packetizer_flush (base->packetizer, TRUE);
    else
      mpegts_packetizer_flush (base->packetizer, FALSE);
  }

  mpegts_packetizer_push (base->packetizer, buf);

  while (res == GST_FLOW_OK) {
    pret = mpegts_packetizer_next_packet (base->packetizer, &packet);

    /* If we don't have enough data, return */
    if (G_UNLIKELY (pret == PACKET_NEED_MORE))
      break;

    if (G_UNLIKELY (pret == PACKET_BAD)) {
      /* bad header, skip the packet */
      GST_DEBUG_OBJECT (base, "bad packet, skipping");
      goto next;
    }

    if (klass->inspect_packet)
      klass->inspect_packet (base, &packet);

    /* If it's a known PES, push it */
    if (MPEGTS_BIT_IS_SET (base->is_pes, packet.pid)) {
      /* push the packet downstream */
      if (base->push_data)
        res = klass->push (base, &packet, NULL);
    } else if (packet.payload
        && MPEGTS_BIT_IS_SET (base->known_psi, packet.pid)) {
      /* base PSI data */
      GList *others, *tmp;
      GstMpegtsSection *section;

      section = mpegts_packetizer_push_section (packetizer, &packet, &others);
      if (section)
        mpegts_base_handle_psi (base, section);
      if (G_UNLIKELY (others)) {
        for (tmp = others; tmp; tmp = tmp->next)
          mpegts_base_handle_psi (base, (GstMpegtsSection *) tmp->data);
        g_list_free (others);
      }

      /* we need to push section packet downstream */
      if (base->push_section)
        res = klass->push (base, &packet, section);

    } else if (packet.payload && packet.pid != 0x1fff)
      GST_LOG ("PID 0x%04x Saw packet on a pid we don't handle", packet.pid);

  next:
    mpegts_packetizer_clear_packet (base->packetizer, &packet);
  }

  if (klass->input_done) {
    if (res == GST_FLOW_OK)
      res = klass->input_done (base, buf);
    else
      gst_buffer_unref (buf);
  }

  return res;
}
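
MPEGTS_BIT_IS_SET above tests membership in a PID set. An MPEG-TS PID is a 13-bit field, so all 8192 possible PIDs fit in a 1024-byte bitmap; an illustrative sketch of such a bitset (invented macros, not the ones used by mpegtsbase):

#include <glib.h>

/* Illustration: 8192 PIDs -> 8192 / 8 = 1024 bytes of flags. */
#define EXAMPLE_PIDSET_SIZE 1024

#define EXAMPLE_BIT_IS_SET(arr, pos) ((arr)[(pos) >> 3] & (1 << ((pos) & 0x7)))
#define EXAMPLE_BIT_SET(arr, pos)    ((arr)[(pos) >> 3] |= (1 << ((pos) & 0x7)))
#define EXAMPLE_BIT_UNSET(arr, pos)  ((arr)[(pos) >> 3] &= ~(1 << ((pos) & 0x7)))

static guint8 example_known_psi[EXAMPLE_PIDSET_SIZE];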