/*
 * Publish the WC3 MVE stream properties and prime the decoders:
 * one BIH header buffer goes to the video fifo, and (when an audio
 * fifo exists) one LPCM header buffer goes to the audio fifo.
 */
static void demux_mve_send_headers(demux_plugin_t *this_gen) {
  demux_mve_t   *this = (demux_mve_t *) this_gen;
  buf_element_t *hdr;

  this->video_fifo = this->stream->video_fifo;
  this->audio_fifo = this->stream->audio_fifo;
  this->status     = DEMUX_OK;

  /* advertise stream properties */
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_HAS_VIDEO, 1);
  /* this is not strictly correct -- some WC3 MVE files do not contain
   * audio, but I'm too lazy to check if that is the case */
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_HAS_AUDIO, 1);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_WIDTH,
                     this->bih.biWidth);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_HEIGHT,
                     this->bih.biHeight);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_AUDIO_CHANNELS,
                     this->wave.nChannels);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_AUDIO_SAMPLERATE,
                     this->wave.nSamplesPerSec);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_AUDIO_BITS,
                     this->wave.wBitsPerSample);

  /* send start buffers */
  _x_demux_control_start(this->stream);

  /* video decoder init: bih header plus the initial frame duration.
   * NOTE(review): content points at demuxer-owned memory (this->bih);
   * assumes the decoder consumes the header before demuxer dispose --
   * verify against the buffer ownership rules. */
  hdr = this->video_fifo->buffer_pool_alloc (this->video_fifo);
  hdr->decoder_flags   = BUF_FLAG_HEADER | BUF_FLAG_STDHEADER |
                         BUF_FLAG_FRAMERATE | BUF_FLAG_FRAME_END;
  hdr->decoder_info[0] = WC3_PTS_INC;  /* initial video_step */
  hdr->content         = (void *)&this->bih;
  hdr->size            = sizeof(this->bih);
  hdr->type            = BUF_VIDEO_WC3;
  this->video_fifo->put (this->video_fifo, hdr);

  if (this->audio_fifo) {
    /* WC3 MVE audio parameters are fixed: mono, 22050 Hz, 16-bit PCM */
    this->wave.wFormatTag      = 1;
    this->wave.nChannels       = 1;
    this->wave.nSamplesPerSec  = 22050;
    this->wave.wBitsPerSample  = 16;
    this->wave.nBlockAlign     = (this->wave.wBitsPerSample / 8) *
                                 this->wave.nChannels;
    this->wave.nAvgBytesPerSec = this->wave.nBlockAlign *
                                 this->wave.nSamplesPerSec;

    hdr = this->audio_fifo->buffer_pool_alloc (this->audio_fifo);
    hdr->type            = BUF_AUDIO_LPCM_LE;
    hdr->decoder_flags   = BUF_FLAG_HEADER | BUF_FLAG_STDHEADER |
                           BUF_FLAG_FRAME_END;
    hdr->decoder_info[0] = 0;
    hdr->decoder_info[1] = this->wave.nSamplesPerSec;
    hdr->decoder_info[2] = this->wave.wBitsPerSample;
    hdr->decoder_info[3] = this->wave.nChannels;
    hdr->content         = (void *)&this->wave;
    hdr->size            = sizeof(this->wave);
    this->audio_fifo->put (this->audio_fifo, hdr);
  }
}
/*
 * Report the (fixed) CDDA stream characteristics -- stereo, 44100 Hz,
 * 16-bit little-endian PCM -- and hand the audio decoder its init buffer.
 */
static void demux_cdda_send_headers(demux_plugin_t *this_gen) {
  demux_cdda_t  *this = (demux_cdda_t *) this_gen;
  buf_element_t *audio_header;

  this->video_fifo = this->stream->video_fifo;
  this->audio_fifo = this->stream->audio_fifo;
  this->status     = DEMUX_OK;

  /* load stream information */
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_SEEKABLE,
                     INPUT_IS_SEEKABLE(this->input));
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_HAS_VIDEO, 0);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_HAS_AUDIO, 1);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_AUDIO_CHANNELS, 2);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_AUDIO_SAMPLERATE, 44100);
  _x_stream_info_set(this->stream, XINE_STREAM_INFO_AUDIO_BITS, 16);

  /* send start buffers */
  _x_demux_control_start(this->stream);

  /* send init info to the audio decoder */
  if (this->audio_fifo) {
    audio_header = this->audio_fifo->buffer_pool_alloc (this->audio_fifo);
    audio_header->type            = BUF_AUDIO_LPCM_LE;
    audio_header->decoder_flags   = BUF_FLAG_HEADER | BUF_FLAG_STDHEADER |
                                    BUF_FLAG_FRAME_END;
    audio_header->decoder_info[0] = 0;
    audio_header->decoder_info[1] = 44100; /* sample rate */
    audio_header->decoder_info[2] = 16;    /* bits per sample */
    audio_header->decoder_info[3] = 2;     /* channels */
    audio_header->size            = 0;     /* header only, no payload */
    this->audio_fifo->put (this->audio_fifo, audio_header);
  }
}
/*
 * image_decode_data: decode a still image via GdkPixbufLoader.
 *
 * Compressed bytes are streamed into this->loader across calls; when the
 * buffer carries BUF_FLAG_FRAME_END, the loader is closed, the resulting
 * RGB(A) pixbuf is converted to a YUY2 vo frame and drawn once.
 * On any loader error the loader is torn down and the image is dropped.
 */
static void image_decode_data (video_decoder_t *this_gen, buf_element_t *buf) {
  image_decoder_t *this = (image_decoder_t *) this_gen;
  GError *error = NULL;

  /* lazily open the video out port on first data */
  if (!this->video_open) {
    lprintf("opening video\n");
    (this->stream->video_out->open) (this->stream->video_out, this->stream);
    this->video_open = 1;
  }

  /* lazily (re)create the incremental pixbuf loader */
  if (this->loader == NULL) {
    this->loader = gdk_pixbuf_loader_new ();
  }

  /* feed this buffer's bytes to the loader; on failure discard the loader
   * (close is still required before unref per GdkPixbufLoader contract) */
  if (gdk_pixbuf_loader_write (this->loader, buf->mem, buf->size, &error) == FALSE) {
    lprintf("error loading image: %s\n", error->message);
    g_error_free (error);
    gdk_pixbuf_loader_close (this->loader, NULL);
    g_object_unref (G_OBJECT (this->loader));
    this->loader = NULL;
    return;
  }

  if (buf->decoder_flags & BUF_FLAG_FRAME_END) {
    GdkPixbuf *pixbuf;
    int width, height, rowstride, n_channels;
    guchar *img_buf;
    vo_frame_t *img;
    int color_matrix, flags;
    void *rgb2yuy2;

    /*
     * this->image -> rgb data
     */
    /* finish parsing; errors here also tear the loader down */
    if (gdk_pixbuf_loader_close (this->loader, &error) == FALSE) {
      lprintf("error loading image: %s\n", error->message);
      g_error_free (error);
      g_object_unref (G_OBJECT (this->loader));
      this->loader = NULL;
      return;
    }

    /* get_pixbuf does not add a reference, so take our own before
     * dropping the loader (which owns the pixbuf until unref'd) */
    pixbuf = gdk_pixbuf_loader_get_pixbuf (this->loader);
    if (pixbuf != NULL)
      g_object_ref (G_OBJECT (pixbuf));
    g_object_unref (this->loader);
    this->loader = NULL;

    if (pixbuf == NULL) {
      lprintf("error loading image\n");
      return;
    }

    width = gdk_pixbuf_get_width (pixbuf) & ~1; /* must be even for init_yuv_planes */
    height = gdk_pixbuf_get_height (pixbuf);
    img_buf = gdk_pixbuf_get_pixels (pixbuf);

    _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_WIDTH, width);
    _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_HEIGHT, height);

    lprintf("image loaded successfully\n");

    flags = VO_BOTH_FIELDS;
    /* xine colour-matrix code: 11 when the vo supports full-range video,
     * 10 otherwise (encoded into the frame flags via VO_SET_FLAGS_CM) */
    color_matrix = this->stream->video_out->get_capabilities (this->stream->video_out) & VO_CAP_FULLRANGE ? 11 : 10;
    VO_SET_FLAGS_CM (color_matrix, flags);

    /*
     * alloc video frame
     */
    img = this->stream->video_out->get_frame (this->stream->video_out, width, height,
                                              (double)width / (double)height,
                                              XINE_IMGFMT_YUY2,
                                              flags);

    /* crop if allocated frame is smaller than requested */
    if (width > img->width)
      width = img->width;
    if (height > img->height)
      height = img->height;
    img->ratio = (double)width / (double)height;

    /* rgb data -> yuv: pick rgb vs rgba layout from the pixbuf channels */
    n_channels = gdk_pixbuf_get_n_channels (pixbuf);
    rowstride = gdk_pixbuf_get_rowstride (pixbuf);
    rgb2yuy2 = rgb2yuy2_alloc (color_matrix, n_channels > 3 ? "rgba" : "rgb");
    if (!img->proc_slice || (img->height & 15)) {
      /* do all at once (vo cannot take 16-line slices, or height not
       * a multiple of 16) */
      rgb2yuy2_slice (rgb2yuy2, img_buf, rowstride, img->base[0], img->pitches[0], width, height);
    } else {
      /* sliced: convert and hand over 16 lines at a time */
      uint8_t *sptr[1];
      int y, h = 16;
      for (y = 0; y < height; y += 16) {
        if (y + 16 > height)
          h = height & 15;   /* short final slice */
        sptr[0] = img->base[0] + y * img->pitches[0];
        rgb2yuy2_slice (rgb2yuy2, img_buf + y * rowstride, rowstride, sptr[0], img->pitches[0], width, h);
        img->proc_slice (img, sptr);
      }
    }
    rgb2yuy2_free (rgb2yuy2);
    g_object_unref (pixbuf);

    /*
     * draw video frame
     */
    img->pts = buf->pts;
    img->duration = 3600;  /* 1/25 s in 90 kHz pts units for a still image */
    img->bad_frame = 0;
    _x_stream_info_set(this->stream, XINE_STREAM_INFO_FRAME_DURATION, img->duration);
    img->draw(img, this->stream);
    img->free(img);
  }
}
/*
 * mad_decode_data: feed one demuxer buffer into libmad.
 *
 * Input bytes are appended to this->buffer; libmad then decodes as many
 * complete MPEG audio frames out of it as possible.  Each decoded frame is
 * synthesized to 16-bit signed little-endian PCM and sent to the audio out
 * port with a vpts derived from the buffer pts, shifted back by the time
 * represented by the bytes that were already queued when that pts arrived.
 * Header buffers (BUF_FLAG_HEADER) are ignored; preview buffers are decoded
 * but not output.
 */
static void mad_decode_data (audio_decoder_t *this_gen, buf_element_t *buf) {
  mad_decoder_t *this = (mad_decoder_t *) this_gen;
  int bytes_in_buffer_at_pts;

  lprintf ("decode data, size: %d, decoder_flags: %d\n", buf->size, buf->decoder_flags);

  /* never overflow the accumulation buffer: clamp the incoming size to
   * whatever still fits (dropping the excess bytes) */
  if (buf->size>(INPUT_BUF_SIZE-this->bytes_in_buffer)) {
    xprintf (this->xstream->xine, XINE_VERBOSITY_DEBUG,
             "libmad: ALERT input buffer too small (%d bytes, %d avail)!\n",
             buf->size, INPUT_BUF_SIZE-this->bytes_in_buffer);
    buf->size = INPUT_BUF_SIZE-this->bytes_in_buffer;
  }

  if ((buf->decoder_flags & BUF_FLAG_HEADER) == 0) {

    /* reset decoder on leaving preview mode */
    if ((buf->decoder_flags & BUF_FLAG_PREVIEW) == 0) {
      if (this->preview_mode) {
        mad_reset (this_gen);
      }
    } else {
      this->preview_mode = 1;
    }

    /* remember how much undecoded data preceded this buffer's pts --
     * used below to back-correct the output vpts */
    bytes_in_buffer_at_pts = this->bytes_in_buffer;

    xine_fast_memcpy (&this->buffer[this->bytes_in_buffer],
                      buf->content, buf->size);
    this->bytes_in_buffer += buf->size;

    /*
    printf ("libmad: decode data - doing it\n");
    */

    mad_stream_buffer (&this->stream, this->buffer,
                       this->bytes_in_buffer);

    /* too little data to contain a frame and no pts to record: wait */
    if (this->bytes_in_buffer < MAD_MIN_SIZE && buf->pts == 0)
      return;

    /* latch pts/padding only when the previous call did not end waiting
     * for more data (otherwise the older pts still applies) */
    if (!this->needs_more_data) {
      this->pts = buf->pts;
      if (buf->decoder_flags & BUF_FLAG_AUDIO_PADDING) {
        this->start_padding = buf->decoder_info[1];
        this->end_padding = buf->decoder_info[2];
      } else {
        this->start_padding = 0;
        this->end_padding = 0;
      }
    }

    /* decode frames until libmad runs out of data */
    while (1) {

      if (mad_frame_decode (&this->frame, &this->stream) != 0) {

        /* compact the buffer: drop everything libmad already consumed */
        if (this->stream.next_frame) {
          int num_bytes =
            this->buffer + this->bytes_in_buffer - this->stream.next_frame;

          /* printf("libmad: MAD_ERROR_BUFLEN\n"); */

          memmove(this->buffer, this->stream.next_frame, num_bytes);
          this->bytes_in_buffer = num_bytes;
        }

        switch (this->stream.error) {

        case MAD_ERROR_BUFLEN:
          /* libmad wants more data */
          this->needs_more_data = 1;
          return;

        default:
          /* recoverable stream error: re-sync on the compacted buffer
           * and retry */
          lprintf ("error 0x%04X, mad_stream_buffer %d bytes\n", this->stream.error, this->bytes_in_buffer);
          mad_stream_buffer (&this->stream, this->buffer,
                             this->bytes_in_buffer);
        }

      } else {
        /* a frame was decoded successfully */
        int mode = (this->frame.header.mode == MAD_MODE_SINGLE_CHANNEL) ? AO_CAP_MODE_MONO : AO_CAP_MODE_STEREO;

        /* (re)open the output port whenever rate or channel mode changed */
        if (!this->output_open
            || (this->output_sampling_rate != this->frame.header.samplerate)
            || (this->output_mode != mode)) {

          lprintf ("audio sample rate %d mode %08x\n", this->frame.header.samplerate, mode);

          /* the mpeg audio demuxer can set audio bitrate */
          if (! _x_stream_info_get(this->xstream, XINE_STREAM_INFO_AUDIO_BITRATE)) {
            _x_stream_info_set(this->xstream, XINE_STREAM_INFO_AUDIO_BITRATE, this->frame.header.bitrate);
          }

          /* the mpeg audio demuxer can set this meta info */
          if (! _x_meta_info_get(this->xstream, XINE_META_INFO_AUDIOCODEC)) {
            switch (this->frame.header.layer) {
            case MAD_LAYER_I:
              _x_meta_info_set_utf8(this->xstream, XINE_META_INFO_AUDIOCODEC,
                                    "MPEG audio layer 1 (lib: MAD)");
              break;
            case MAD_LAYER_II:
              _x_meta_info_set_utf8(this->xstream, XINE_META_INFO_AUDIOCODEC,
                                    "MPEG audio layer 2 (lib: MAD)");
              break;
            case MAD_LAYER_III:
              _x_meta_info_set_utf8(this->xstream, XINE_META_INFO_AUDIOCODEC,
                                    "MPEG audio layer 3 (lib: MAD)");
              break;
            default:
              _x_meta_info_set_utf8(this->xstream, XINE_META_INFO_AUDIOCODEC,
                                    "MPEG audio (lib: MAD)");
            }
          }

          if (this->output_open) {
            this->xstream->audio_out->close (this->xstream->audio_out, this->xstream);
            this->output_open = 0;
          }
          if (!this->output_open) {
            this->output_open = (this->xstream->audio_out->open) (this->xstream->audio_out,
                                                                  this->xstream, 16,
                                                                  this->frame.header.samplerate,
                                                                  mode) ;
          }
          if (!this->output_open) {
            return;
          }
          this->output_sampling_rate = this->frame.header.samplerate;
          this->output_mode = mode;
        }

        mad_synth_frame (&this->synth, &this->frame);

        /* preview frames are decoded (to keep state) but never output */
        if ( (buf->decoder_flags & BUF_FLAG_PREVIEW) == 0 ) {

          unsigned int nchannels, nsamples;
          mad_fixed_t const *left_ch, *right_ch;
          struct mad_pcm *pcm = &this->synth.pcm;
          audio_buffer_t *audio_buffer;
          uint16_t *output;
          int bitrate;
          int pts_offset;

          audio_buffer = this->xstream->audio_out->get_buffer (this->xstream->audio_out);
          output = audio_buffer->mem;

          nchannels = pcm->channels;
          nsamples = pcm->length;
          left_ch = pcm->samples[0];
          right_ch = pcm->samples[1];

          /* padding: trim encoder delay/padding samples from the frame */
          if (this->start_padding || this->end_padding) {
            /* check padding validity */
            if (nsamples < (this->start_padding + this->end_padding)) {
              lprintf("invalid padding data");
              this->start_padding = 0;
              this->end_padding = 0;
            }
            lprintf("nsamples=%d, start_padding=%d, end_padding=%d\n",
                    nsamples, this->start_padding, this->end_padding);
            nsamples -= this->start_padding + this->end_padding;
            left_ch += this->start_padding;
            right_ch += this->start_padding;
          }
          audio_buffer->num_frames = nsamples;
          audio_buffer->vpts = this->pts;

          while (nsamples--) {
            /* output sample(s) in 16-bit signed little-endian PCM */
            *output++ = scale(*left_ch++);
            if (nchannels == 2)
              *output++ = scale(*right_ch++);
          }

          /* NOTE(review): this overwrites the padding-adjusted frame count
           * set above with the untrimmed pcm->length -- looks suspicious
           * when padding was applied; confirm against upstream xine-lib */
          audio_buffer->num_frames = pcm->length;

          /* pts computing */
          if (this->frame.header.bitrate > 0) {
            bitrate = this->frame.header.bitrate;
          } else {
            bitrate = _x_stream_info_get(this->xstream, XINE_STREAM_INFO_AUDIO_BITRATE);
            lprintf("offset %d bps\n", bitrate);
          }
          /* NOTE(review): replaces the this->pts value assigned before the
           * sample loop with buf->pts -- verify this ordering is intended */
          audio_buffer->vpts = buf->pts;
          if (audio_buffer->vpts && (bitrate > 0)) {
            /* shift the pts back by the playing time of the bytes that
             * were already buffered when this pts was attached (90 kHz
             * units; bitrate/1000 = bits per millisecond) */
            pts_offset = (bytes_in_buffer_at_pts * 8 * 90) / (bitrate / 1000);
            lprintf("pts: %"PRId64", offset: %d pts, %d bytes\n", buf->pts, pts_offset, bytes_in_buffer_at_pts);
            if (audio_buffer->vpts < pts_offset)
              pts_offset = audio_buffer->vpts;
            audio_buffer->vpts -= pts_offset;
          }

          this->xstream->audio_out->put_buffer (this->xstream->audio_out, audio_buffer, this->xstream);

          /* consume the pts so subsequent frames from this same buffer
           * do not reuse it */
          this->pts = buf->pts;
          buf->pts = 0;
          if (buf->decoder_flags & BUF_FLAG_AUDIO_PADDING) {
            this->start_padding = buf->decoder_info[1];
            this->end_padding = buf->decoder_info[2];
            buf->decoder_info[1] = 0;
            buf->decoder_info[2] = 0;
          } else {
            this->start_padding = 0;
            this->end_padding = 0;
          }
        }
        lprintf ("decode worked\n");
      }
    }
  }
}
static void rgb_decode_data (video_decoder_t *this_gen, buf_element_t *buf) { rgb_decoder_t *this = (rgb_decoder_t *) this_gen; xine_bmiheader *bih; palette_entry_t *palette; int i; int pixel_ptr, row_ptr; int palette_index; int buf_ptr; unsigned int packed_pixel; unsigned char r, g, b; int pixels_left; unsigned char pixel_byte = 0; vo_frame_t *img; /* video out frame */ /* a video decoder does not care about this flag (?) */ if (buf->decoder_flags & BUF_FLAG_PREVIEW) return; if ((buf->decoder_flags & BUF_FLAG_SPECIAL) && (buf->decoder_info[1] == BUF_SPECIAL_PALETTE)) { palette = (palette_entry_t *)buf->decoder_info_ptr[2]; for (i = 0; i < buf->decoder_info[2]; i++) { this->yuv_palette[i * 4 + 0] = COMPUTE_Y(palette[i].r, palette[i].g, palette[i].b); this->yuv_palette[i * 4 + 1] = COMPUTE_U(palette[i].r, palette[i].g, palette[i].b); this->yuv_palette[i * 4 + 2] = COMPUTE_V(palette[i].r, palette[i].g, palette[i].b); } } if (buf->decoder_flags & BUF_FLAG_FRAMERATE) { this->video_step = buf->decoder_info[0]; _x_stream_info_set(this->stream, XINE_STREAM_INFO_FRAME_DURATION, this->video_step); } if (buf->decoder_flags & BUF_FLAG_STDHEADER) { /* need to initialize */ (this->stream->video_out->open) (this->stream->video_out, this->stream); bih = (xine_bmiheader *) buf->content; this->width = (bih->biWidth + 3) & ~0x03; this->height = (bih->biHeight + 3) & ~0x03; if (this->height < 0) { this->upside_down = 1; this->height = -this->height; } else { this->upside_down = 0; } this->ratio = (double)this->width/(double)this->height; this->bit_depth = bih->biBitCount; if (this->bit_depth > 32) this->bit_depth &= 0x1F; /* round this number up in case of 15 */ lprintf("width = %d, height = %d, bit_depth = %d\n", this->width, this->height, this->bit_depth); this->bytes_per_pixel = (this->bit_depth + 1) / 8; free (this->buf); /* minimal buffer size */ this->bufsize = this->width * this->height * this->bytes_per_pixel; this->buf = calloc(1, this->bufsize); this->size = 0; 
init_yuv_planes(&this->yuv_planes, this->width, this->height); (this->stream->video_out->open) (this->stream->video_out, this->stream); this->decoder_ok = 1; /* load the stream/meta info */ _x_meta_info_set_utf8(this->stream, XINE_META_INFO_VIDEOCODEC, "Raw RGB"); return; } else if (this->decoder_ok) { if (this->size + buf->size > this->bufsize) { this->bufsize = this->size + 2 * buf->size; this->buf = realloc (this->buf, this->bufsize); } xine_fast_memcpy (&this->buf[this->size], buf->content, buf->size); this->size += buf->size; if (buf->decoder_flags & BUF_FLAG_FRAME_END) { img = this->stream->video_out->get_frame (this->stream->video_out, this->width, this->height, this->ratio, XINE_IMGFMT_YUY2, VO_BOTH_FIELDS); img->duration = this->video_step; img->pts = buf->pts; img->bad_frame = 0; /* iterate through each row */ buf_ptr = 0; if (this->upside_down) { for (row_ptr = this->yuv_planes.row_width * (this->yuv_planes.row_count - 1); row_ptr >= 0; row_ptr -= this->yuv_planes.row_width) { for (pixel_ptr = 0; pixel_ptr < this->width; pixel_ptr++) { if (this->bytes_per_pixel == 1) { palette_index = this->buf[buf_ptr++]; this->yuv_planes.y[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 0]; this->yuv_planes.u[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 1]; this->yuv_planes.v[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 2]; } else if (this->bytes_per_pixel == 2) { /* ABGR1555 format, little-endian order */ packed_pixel = _X_LE_16(&this->buf[buf_ptr]); buf_ptr += 2; UNPACK_BGR15(packed_pixel, r, g, b); this->yuv_planes.y[row_ptr + pixel_ptr] = COMPUTE_Y(r, g, b); this->yuv_planes.u[row_ptr + pixel_ptr] = COMPUTE_U(r, g, b); this->yuv_planes.v[row_ptr + pixel_ptr] = COMPUTE_V(r, g, b); } else { /* BGR24 or BGRA32 */ b = this->buf[buf_ptr++]; g = this->buf[buf_ptr++]; r = this->buf[buf_ptr++]; /* the next line takes care of 'A' in the 32-bit case */ buf_ptr += this->bytes_per_pixel - 3; this->yuv_planes.y[row_ptr + pixel_ptr] = 
COMPUTE_Y(r, g, b); this->yuv_planes.u[row_ptr + pixel_ptr] = COMPUTE_U(r, g, b); this->yuv_planes.v[row_ptr + pixel_ptr] = COMPUTE_V(r, g, b); } } } } else { for (row_ptr = 0; row_ptr < this->yuv_planes.row_width * this->yuv_planes.row_count; row_ptr += this->yuv_planes.row_width) { pixels_left = 0; for (pixel_ptr = 0; pixel_ptr < this->width; pixel_ptr++) { if (this->bit_depth == 1) { if (pixels_left == 0) { pixels_left = 8; pixel_byte = *this->buf++; } if (pixel_byte & 0x80) { this->yuv_planes.y[row_ptr + pixel_ptr] = this->yuv_palette[1 * 4 + 0]; this->yuv_planes.u[row_ptr + pixel_ptr] = this->yuv_palette[1 * 4 + 1]; this->yuv_planes.v[row_ptr + pixel_ptr] = this->yuv_palette[1 * 4 + 2]; } else { this->yuv_planes.y[row_ptr + pixel_ptr] = this->yuv_palette[0 * 4 + 0]; this->yuv_planes.u[row_ptr + pixel_ptr] = this->yuv_palette[0 * 4 + 1]; this->yuv_planes.v[row_ptr + pixel_ptr] = this->yuv_palette[0 * 4 + 2]; } pixels_left--; pixel_byte <<= 1; } else if (this->bit_depth == 2) { if (pixels_left == 0) { pixels_left = 4; pixel_byte = *this->buf++; } palette_index = (pixel_byte & 0xC0) >> 6; this->yuv_planes.y[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 0]; this->yuv_planes.u[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 1]; this->yuv_planes.v[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 2]; pixels_left--; pixel_byte <<= 2; } else if (this->bit_depth == 4) { if (pixels_left == 0) { pixels_left = 2; pixel_byte = *this->buf++; } palette_index = (pixel_byte & 0xF0) >> 4; this->yuv_planes.y[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 0]; this->yuv_planes.u[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 1]; this->yuv_planes.v[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 2]; pixels_left--; pixel_byte <<= 4; } else if (this->bytes_per_pixel == 1) { palette_index = this->buf[buf_ptr++]; this->yuv_planes.y[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 0]; 
this->yuv_planes.u[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 1]; this->yuv_planes.v[row_ptr + pixel_ptr] = this->yuv_palette[palette_index * 4 + 2]; } else if (this->bytes_per_pixel == 2) { /* ARGB1555 format, big-endian order */ packed_pixel = _X_BE_16(&this->buf[buf_ptr]); buf_ptr += 2; UNPACK_RGB15(packed_pixel, r, g, b); this->yuv_planes.y[row_ptr + pixel_ptr] = COMPUTE_Y(r, g, b); this->yuv_planes.u[row_ptr + pixel_ptr] = COMPUTE_U(r, g, b); this->yuv_planes.v[row_ptr + pixel_ptr] = COMPUTE_V(r, g, b); } else { /* RGB24 or ARGB32; the next line takes care of 'A' in the * 32-bit case */ buf_ptr += this->bytes_per_pixel - 3; r = this->buf[buf_ptr++]; g = this->buf[buf_ptr++]; b = this->buf[buf_ptr++]; this->yuv_planes.y[row_ptr + pixel_ptr] = COMPUTE_Y(r, g, b); this->yuv_planes.u[row_ptr + pixel_ptr] = COMPUTE_U(r, g, b); this->yuv_planes.v[row_ptr + pixel_ptr] = COMPUTE_V(r, g, b); } } } }
static void *audio_decoder_loop (void *stream_gen) { buf_element_t *buf = NULL; buf_element_t *first_header = NULL; buf_element_t *last_header = NULL; int replaying_headers = 0; xine_stream_t *stream = (xine_stream_t *) stream_gen; xine_ticket_t *running_ticket = stream->xine->port_ticket; int running = 1; int prof_audio_decode = -1; uint32_t buftype_unknown = 0; int audio_channel_user = stream->audio_channel_user; if (prof_audio_decode == -1) prof_audio_decode = xine_profiler_allocate_slot ("audio decoder/output"); while (running) { lprintf ("audio_loop: waiting for package...\n"); if( !replaying_headers ) buf = stream->audio_fifo->get (stream->audio_fifo); lprintf ("audio_loop: got package pts = %"PRId64", type = %08x\n", buf->pts, buf->type); _x_extra_info_merge( stream->audio_decoder_extra_info, buf->extra_info ); stream->audio_decoder_extra_info->seek_count = stream->video_seek_count; switch (buf->type) { case BUF_CONTROL_HEADERS_DONE: pthread_mutex_lock (&stream->counter_lock); stream->header_count_audio++; pthread_cond_broadcast (&stream->counter_changed); pthread_mutex_unlock (&stream->counter_lock); break; case BUF_CONTROL_START: lprintf ("start\n"); /* decoder dispose might call port functions */ running_ticket->acquire(running_ticket, 0); if (stream->audio_decoder_plugin) { lprintf ("close old decoder\n"); stream->keep_ao_driver_open = !!(buf->decoder_flags & BUF_FLAG_GAPLESS_SW); _x_free_audio_decoder (stream, stream->audio_decoder_plugin); stream->audio_decoder_plugin = NULL; stream->audio_track_map_entries = 0; stream->audio_type = 0; stream->keep_ao_driver_open = 0; } running_ticket->release(running_ticket, 0); if( !(buf->decoder_flags & BUF_FLAG_GAPLESS_SW) ) stream->metronom->handle_audio_discontinuity (stream->metronom, DISC_STREAMSTART, 0); buftype_unknown = 0; break; case BUF_CONTROL_END: /* free all held header buffers, see comments below */ if( first_header ) { buf_element_t *cur, *next; cur = first_header; while( cur ) { next = cur->next; 
cur->free_buffer (cur); cur = next; } first_header = last_header = NULL; } /* * wait the output fifos to run dry before sending the notification event * to the frontend. this test is only valid if there is only a single * stream attached to the current output port. */ while(1) { int num_bufs, num_streams; running_ticket->acquire(running_ticket, 0); num_bufs = stream->audio_out->get_property(stream->audio_out, AO_PROP_BUFS_IN_FIFO); num_streams = stream->audio_out->get_property(stream->audio_out, AO_PROP_NUM_STREAMS); running_ticket->release(running_ticket, 0); if( num_bufs > 0 && num_streams == 1 && !stream->early_finish_event) xine_usec_sleep (10000); else break; } /* wait for video to reach this marker, if necessary */ pthread_mutex_lock (&stream->counter_lock); stream->finished_count_audio++; lprintf ("reached end marker # %d\n", stream->finished_count_audio); pthread_cond_broadcast (&stream->counter_changed); if (stream->video_thread_created) { while (stream->finished_count_video < stream->finished_count_audio) { struct timeval tv; struct timespec ts; gettimeofday(&tv, NULL); ts.tv_sec = tv.tv_sec + 1; ts.tv_nsec = tv.tv_usec * 1000; /* use timedwait to workaround buggy pthread broadcast implementations */ pthread_cond_timedwait (&stream->counter_changed, &stream->counter_lock, &ts); } } pthread_mutex_unlock (&stream->counter_lock); stream->audio_channel_auto = -1; break; case BUF_CONTROL_QUIT: /* decoder dispose might call port functions */ running_ticket->acquire(running_ticket, 0); if (stream->audio_decoder_plugin) { _x_free_audio_decoder (stream, stream->audio_decoder_plugin); stream->audio_decoder_plugin = NULL; stream->audio_track_map_entries = 0; stream->audio_type = 0; } running_ticket->release(running_ticket, 0); running = 0; break; case BUF_CONTROL_NOP: break; case BUF_CONTROL_RESET_DECODER: lprintf ("reset\n"); _x_extra_info_reset( stream->audio_decoder_extra_info ); if (stream->audio_decoder_plugin) { running_ticket->acquire(running_ticket, 0); 
stream->audio_decoder_plugin->reset (stream->audio_decoder_plugin); running_ticket->release(running_ticket, 0); } break; case BUF_CONTROL_DISCONTINUITY: if (stream->audio_decoder_plugin) { running_ticket->acquire(running_ticket, 0); stream->audio_decoder_plugin->discontinuity (stream->audio_decoder_plugin); running_ticket->release(running_ticket, 0); } stream->metronom->handle_audio_discontinuity (stream->metronom, DISC_RELATIVE, buf->disc_off); break; case BUF_CONTROL_NEWPTS: if (stream->audio_decoder_plugin) { running_ticket->acquire(running_ticket, 0); stream->audio_decoder_plugin->discontinuity (stream->audio_decoder_plugin); running_ticket->release(running_ticket, 0); } if (buf->decoder_flags & BUF_FLAG_SEEK) { stream->metronom->handle_audio_discontinuity (stream->metronom, DISC_STREAMSEEK, buf->disc_off); } else { stream->metronom->handle_audio_discontinuity (stream->metronom, DISC_ABSOLUTE, buf->disc_off); } break; case BUF_CONTROL_AUDIO_CHANNEL: { xprintf(stream->xine, XINE_VERBOSITY_DEBUG, "audio_decoder: suggested switching to stream_id %02x\n", buf->decoder_info[0]); stream->audio_channel_auto = buf->decoder_info[0] & 0xff; } break; case BUF_CONTROL_RESET_TRACK_MAP: if (stream->audio_track_map_entries) { xine_event_t ui_event; stream->audio_track_map_entries = 0; ui_event.type = XINE_EVENT_UI_CHANNELS_CHANGED; ui_event.data_length = 0; xine_event_send(stream, &ui_event); } break; default: if (_x_stream_info_get(stream, XINE_STREAM_INFO_IGNORE_AUDIO)) break; xine_profiler_start_count (prof_audio_decode); running_ticket->acquire(running_ticket, 0); if ( (buf->type & 0xFF000000) == BUF_AUDIO_BASE ) { uint32_t audio_type = 0; int i,j; uint32_t chan=buf->type&0x0000FFFF; /* printf("audio_decoder: buf_type=%08x auto=%08x user=%08x\n", buf->type, stream->audio_channel_auto, audio_channel_user); */ /* update track map */ i = 0; while ( (i<stream->audio_track_map_entries) && ((stream->audio_track_map[i]&0x0000FFFF)<chan) ) i++; if ( 
(i==stream->audio_track_map_entries) || ((stream->audio_track_map[i]&0x0000FFFF)!=chan) ) { xine_event_t ui_event; j = stream->audio_track_map_entries; if (j >= 50) break; while (j>i) { stream->audio_track_map[j] = stream->audio_track_map[j-1]; j--; } stream->audio_track_map[i] = buf->type; stream->audio_track_map_entries++; /* implicit channel change - reopen decoder below */ if ((i == 0) && (audio_channel_user == -1) && (stream->audio_channel_auto < 0)) stream->audio_decoder_streamtype = -1; ui_event.type = XINE_EVENT_UI_CHANNELS_CHANGED; ui_event.data_length = 0; xine_event_send (stream, &ui_event); } /* find out which audio type to decode */ lprintf ("audio_channel_user = %d, map[0]=%08x\n", audio_channel_user, stream->audio_track_map[0]); if (audio_channel_user > -2) { if (audio_channel_user == -1) { /* auto */ lprintf ("audio_channel_auto = %d\n", stream->audio_channel_auto); if (stream->audio_channel_auto>=0) { if ((buf->type & 0xFF) == stream->audio_channel_auto) { audio_type = buf->type; } else audio_type = -1; } else audio_type = stream->audio_track_map[0]; } else { if (audio_channel_user <= stream->audio_track_map_entries) audio_type = stream->audio_track_map[audio_channel_user]; else audio_type = -1; } /* now, decode stream buffer if it's the right audio type */ if (buf->type == audio_type) { int streamtype = (buf->type>>16) & 0xFF; /* close old decoder of audio type has changed */ if( buf->type != buftype_unknown && (stream->audio_decoder_streamtype != streamtype || !stream->audio_decoder_plugin) ) { if (stream->audio_decoder_plugin) { _x_free_audio_decoder (stream, stream->audio_decoder_plugin); } stream->audio_decoder_streamtype = streamtype; stream->audio_decoder_plugin = _x_get_audio_decoder (stream, streamtype); _x_stream_info_set(stream, XINE_STREAM_INFO_AUDIO_HANDLED, (stream->audio_decoder_plugin != NULL)); } if (audio_type != stream->audio_type) { if (stream->audio_decoder_plugin) { xine_event_t event; stream->audio_type = audio_type; 
event.type = XINE_EVENT_UI_CHANNELS_CHANGED; event.data_length = 0; xine_event_send(stream, &event); } } /* finally - decode data */ if (stream->audio_decoder_plugin) stream->audio_decoder_plugin->decode_data (stream->audio_decoder_plugin, buf); if (buf->type != buftype_unknown && !_x_stream_info_get(stream, XINE_STREAM_INFO_AUDIO_HANDLED)) { xine_log (stream->xine, XINE_LOG_MSG, _("audio_decoder: no plugin available to handle '%s'\n"), _x_buf_audio_name( buf->type ) ); if( !_x_meta_info_get(stream, XINE_META_INFO_AUDIOCODEC) ) _x_meta_info_set_utf8(stream, XINE_META_INFO_AUDIOCODEC, _x_buf_audio_name( buf->type )); buftype_unknown = buf->type; /* fatal error - dispose plugin */ if (stream->audio_decoder_plugin) { _x_free_audio_decoder (stream, stream->audio_decoder_plugin); stream->audio_decoder_plugin = NULL; } } } } } else if( buf->type != buftype_unknown ) {
static void jpeg_decode_data (video_decoder_t *this_gen, buf_element_t *buf) { jpeg_decoder_t *this = (jpeg_decoder_t *) this_gen; if (!this->video_open) { lprintf("opening video\n"); (this->stream->video_out->open) (this->stream->video_out, this->stream); this->video_open = 1; } xine_buffer_copyin(this->image, this->index, buf->mem, buf->size); this->index += buf->size; if (buf->decoder_flags & BUF_FLAG_FRAME_END && this->index > 0) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; JSAMPARRAY buffer; int i, linesize; int width, height; vo_frame_t *img; int max_width, max_height; uint8_t *slice_start[1] = {NULL}; int slice_line = 0; /* query max. image size vo can handle */ max_width = this->stream->video_out->get_property( this->stream->video_out, VO_PROP_MAX_VIDEO_WIDTH); max_height = this->stream->video_out->get_property( this->stream->video_out, VO_PROP_MAX_VIDEO_HEIGHT); /* init and parse header */ cinfo.err = jpeg_std_error(&jerr); jpeg_create_decompress(&cinfo); jpeg_memory_src(&cinfo, this->image, this->index); jpeg_read_header(&cinfo, TRUE); _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_WIDTH, cinfo.image_width); _x_stream_info_set(this->stream, XINE_STREAM_INFO_VIDEO_HEIGHT, cinfo.image_height); lprintf("header parsed\n"); /* set decoding parameters */ cinfo.out_color_space = JCS_YCbCr; /* request scaling when image is too large for vo */ if (this->cls->enable_downscaling) { cinfo.output_width = cinfo.image_width; cinfo.output_height = cinfo.image_height; cinfo.scale_num = 1; cinfo.scale_denom = 1; while ((max_width > 0 && cinfo.output_width > max_width) || (max_height > 0 && cinfo.output_height > max_height)) { cinfo.scale_denom <<= 1; cinfo.output_width >>= 1; cinfo.output_height >>= 1; } if (cinfo.scale_denom > 1) { xprintf(this->stream->xine, XINE_VERBOSITY_LOG, LOG_MODULE ": downscaling image by 1:%d to %dx%d\n", cinfo.scale_denom, cinfo.output_width, cinfo.output_height); } } /* start decompress */ 
jpeg_start_decompress(&cinfo); width = cinfo.output_width; height = cinfo.output_height; /* crop when image is too large for vo */ if (max_width > 0 && cinfo.output_width > max_width) width = max_width; if (max_height > 0 && cinfo.output_height > max_height) height = max_height; img = this->stream->video_out->get_frame (this->stream->video_out, width, height, (double)width/(double)height, XINE_IMGFMT_YUY2, VO_BOTH_FIELDS); linesize = cinfo.output_width * cinfo.output_components; buffer = (cinfo.mem->alloc_sarray)((void*)&cinfo, JPOOL_IMAGE, linesize, 1); if (img->proc_slice && !(img->height & 0xf)) { slice_start[0] = img->base[0]; } /* cut to frame width */ if (cinfo.output_width > img->width) { lprintf("cut right border %d pixels\n", cinfo.output_width - img->width); linesize = img->width * 3; } /* YUV444->YUV422 simple */ while (cinfo.output_scanline < cinfo.output_height) { uint8_t *dst = img->base[0] + img->pitches[0] * cinfo.output_scanline; jpeg_read_scanlines(&cinfo, buffer, 1); /* cut to frame height */ if (cinfo.output_scanline > img->height) { lprintf("cut bottom scanline %d\n", cinfo.output_scanline - 1); continue; } for (i = 0; i < linesize; i += 3) { *dst++ = buffer[0][i]; if (i & 1) { *dst++ = buffer[0][i + 2]; } else { *dst++ = buffer[0][i + 1]; } } if (slice_start[0]) { slice_line++; if (slice_line == 16) { img->proc_slice(img, slice_start); slice_start[0] += 16 * img->pitches[0]; slice_line = 0; } } } /* final slice */ if (slice_start[0] && slice_line) { img->proc_slice(img, slice_start); } jpeg_finish_decompress(&cinfo); jpeg_destroy_decompress(&cinfo); img->pts = buf->pts; img->duration = 3600; img->bad_frame = 0; _x_stream_info_set(this->stream, XINE_STREAM_INFO_FRAME_DURATION, img->duration); img->draw(img, this->stream); img->free(img); this->index = 0; }