#include <stdio.h>
#include <stdlib.h>

int main(int argc, char* argv[]){
	if(argc < 2){
		printf("Usage: %s <codec>\n", argv[0]);
		return 1;
	}
	char* codec = argv[1];
	init_decoder(codec, "1234");
	printf("Begin testing ...\n");
	/*
	 * Opening test file
	 */
	FILE* f = fopen("sample.mp4", "rb");
	if(f == NULL){
		printf("Couldn't open sample.mp4\n");
		return 1;
	}
	
	fseek(f, 0, SEEK_END);
	unsigned long flen = ftell(f);
	fseek(f, 0, SEEK_SET);

	char* buffer = (char*)malloc(flen);
	if(buffer == NULL){
		fclose(f);
		return 1;
	}
	/* with an element size of 1, fread() returns the number of bytes read */
	size_t nbytes = fread(buffer, 1, flen, f);

	/*
	 * Testing decode video
	 */
	decode_video2(buffer, nbytes);
	printf("Complete testing ...\n");

	free(buffer);
	fclose(f);
	return 0;
}
Example #2
sheepshaver_cpu::sheepshaver_cpu()
{
	init_decoder();

#if PPC_ENABLE_JIT
	if (PrefsFindBool("jit"))
		enable_jit();
#endif
}
Example #3
int main( int argc, char** argv )
{
	_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
	Player *pPlayer = player_new();
	player_set_decoder( pPlayer, init_decoder() );
	player_set_display( pPlayer, init_display() );
	SDL_CreateThread( player_play, pPlayer );
	player_run( pPlayer );
	_CrtDumpMemoryLeaks();
	getchar();
	return 0;
}
Example #4
static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
                                  const uint8_t **data, unsigned int data_sz,
                                  void *user_priv, int64_t deadline) {
  YV12_BUFFER_CONFIG sd = { 0 };
  int64_t time_stamp = 0, time_end_stamp = 0;
  vp9_ppflags_t flags = {0};
  VP9_COMMON *cm = NULL;

  ctx->img_avail = 0;

  // Determine the stream parameters. Note that we rely on peek_si to
  // validate that we have a buffer that does not wrap around the top
  // of the heap.
  if (!ctx->si.h) {
    const vpx_codec_err_t res =
        ctx->base.iface->dec.peek_si(*data, data_sz, &ctx->si);
    if (res != VPX_CODEC_OK)
      return res;
  }

  // Initialize the decoder instance on the first frame
  if (!ctx->decoder_init) {
    init_decoder(ctx);
    if (ctx->pbi == NULL)
      return VPX_CODEC_ERROR;

    ctx->decoder_init = 1;
  }

  cm = &ctx->pbi->common;

  if (vp9_receive_compressed_data(ctx->pbi, data_sz, data, deadline))
    return update_error_state(ctx, &cm->error);

  if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)
    set_ppflags(ctx, &flags);

  if (vp9_get_raw_frame(ctx->pbi, &sd, &time_stamp, &time_end_stamp, &flags))
    return update_error_state(ctx, &cm->error);

  yuvconfig2image(&ctx->img, &sd, user_priv);
  ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
  ctx->img_avail = 1;

  return VPX_CODEC_OK;
}
Example #5
bool XzDecompressor::decompress(QBuffer *in, QBuffer *out)
{
	lzma_stream strm = LZMA_STREAM_INIT;
	bool success;

	if (!init_decoder(&strm)) {
		return false;
	}

	success = internal_decompress(&strm, in, out);

	// Free the memory allocated for the decoder. This only needs to be
	// done after the last file.
	lzma_end(&strm);

	return success;
}
Example #6
static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
                                  const uint8_t **data, unsigned int data_sz,
                                  void *user_priv, int64_t deadline) {
  vp9_ppflags_t flags = {0};
  VP9_COMMON *cm = NULL;

  (void)deadline;

  // Determine the stream parameters. Note that we rely on peek_si to
  // validate that we have a buffer that does not wrap around the top
  // of the heap.
  if (!ctx->si.h) {
    const vpx_codec_err_t res =
        decoder_peek_si_internal(*data, data_sz, &ctx->si, ctx->decrypt_cb,
                                 ctx->decrypt_state);
    if (res != VPX_CODEC_OK)
      return res;

    if (!ctx->si.is_kf)
      return VPX_CODEC_ERROR;
  }

  // Initialize the decoder instance on the first frame
  if (ctx->pbi == NULL) {
    init_decoder(ctx);
    if (ctx->pbi == NULL)
      return VPX_CODEC_ERROR;
  }

  // Set these even if already initialized.  The caller may have changed the
  // decrypt config between frames.
  ctx->pbi->decrypt_cb = ctx->decrypt_cb;
  ctx->pbi->decrypt_state = ctx->decrypt_state;

  cm = &ctx->pbi->common;

  if (vp9_receive_compressed_data(ctx->pbi, data_sz, data))
    return update_error_state(ctx, &cm->error);

  if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)
    set_ppflags(ctx, &flags);

  return VPX_CODEC_OK;
}
Example #7
int main(void)
{
    AVCodec *enc = NULL, *dec = NULL;
    AVCodecContext *enc_ctx = NULL, *dec_ctx = NULL;
    uint64_t channel_layouts[] = {AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_5POINT1_BACK, AV_CH_LAYOUT_SURROUND, AV_CH_LAYOUT_STEREO_DOWNMIX};
    int sample_rates[] = {8000, 44100, 48000, 192000};
    int cl, sr;

    avcodec_register_all();

    enc = avcodec_find_encoder(AV_CODEC_ID_FLAC);
    if (!enc)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't find encoder\n");
        return 1;
    }

    dec = avcodec_find_decoder(AV_CODEC_ID_FLAC);
    if (!dec)
    {
        av_log(NULL, AV_LOG_ERROR, "Can't find decoder\n");
        return 1;
    }

    for (cl = 0; cl < FF_ARRAY_ELEMS(channel_layouts); cl++)
    {
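        // for each channel layout, run an encode/decode round trip at every sample rate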
        for (sr = 0; sr < FF_ARRAY_ELEMS(sample_rates); sr++)
        {
            if (init_encoder(enc, &enc_ctx, channel_layouts[cl], sample_rates[sr]) != 0)
                return 1;
            if (init_decoder(dec, &dec_ctx, channel_layouts[cl]) != 0)
                return 1;
            if (run_test(enc, dec, enc_ctx, dec_ctx) != 0)
                return 1;
            close_encoder(&enc_ctx);
            close_decoder(&dec_ctx);
        }
    }

    return 0;
}
Example #8
int decode_update(struct viddec_state **vdsp, const struct vidcodec *vc,
		  const char *fmtp)
{
	struct viddec_state *st;
	int err = 0;

	if (!vdsp || !vc)
		return EINVAL;

	if (*vdsp)
		return 0;

	(void)fmtp;

	st = mem_zalloc(sizeof(*st), destructor);
	if (!st)
		return ENOMEM;

	st->mb = mbuf_alloc(1024);
	if (!st->mb) {
		err = ENOMEM;
		goto out;
	}

	err = init_decoder(st, vc->name);
	if (err) {
		warning("avcodec: %s: could not init decoder\n", vc->name);
		goto out;
	}

	debug("avcodec: video decoder %s (%s)\n", vc->name, fmtp);

 out:
	if (err)
		mem_deref(st);
	else
		*vdsp = st;

	return err;
}
Example #9
int main(int argc, char **argv) {
    char *hexaeskey = 0, *hexaesiv = 0;
    char *fmtpstr = 0;
    char *arg;
    int i;
    assert(RAND_MAX >= 0x10000);    // XXX move this to compile time
    while ( (arg = *++argv) ) {
        if (!strcasecmp(arg, "iv")) {
            hexaesiv = *++argv;
            argc--;
        } else
        if (!strcasecmp(arg, "key")) {
            hexaeskey = *++argv;
            argc--;
        } else
        if (!strcasecmp(arg, "fmtp")) {
            fmtpstr = *++argv;
        } else
        if (!strcasecmp(arg, "cport")) {
            controlport = atoi(*++argv);
        } else
        if (!strcasecmp(arg, "tport")) {
            timingport = atoi(*++argv);
        } else
        if (!strcasecmp(arg, "dport")) {
            dataport = atoi(*++argv);
        } else
        if (!strcasecmp(arg, "host")) {
            rtphost = *++argv;
        } else
        if (!strcasecmp(arg, "pipe")) {
            if (libao_driver || libao_devicename || libao_deviceid ) {
                die("Option 'pipe' may not be combined with 'ao_driver', 'ao_devicename' or 'ao_deviceid'");
            }

            pipename = *++argv;
        } else
        if (!strcasecmp(arg, "ao_driver")) {
            if (pipename) {
                die("Option 'ao_driver' may not be combined with 'pipe'");
            }

            libao_driver = *++argv;
        } else
        if (!strcasecmp(arg, "ao_devicename")) {
            if (pipename || libao_deviceid ) {
                die("Option 'ao_devicename' may not be combined with 'pipe' or 'ao_deviceid'");
            }

            libao_devicename = *++argv;
        } else
        if (!strcasecmp(arg, "ao_deviceid")) {
            if (pipename || libao_devicename) {
                die("Option 'ao_deviceid' may not be combined with 'pipe' or 'ao_devicename'");
            }

            libao_deviceid = *++argv;
        }
#ifdef FANCY_RESAMPLING
        else
        if (!strcasecmp(arg, "resamp")) {
            fancy_resampling = atoi(*++argv);
        }
#endif
    }

    if (!hexaeskey || !hexaesiv)
        die("Must supply AES key and IV!");

    if (hex2bin(aesiv, hexaesiv))
        die("can't understand IV");
    if (hex2bin(aeskey, hexaeskey))
        die("can't understand key");
    AES_set_decrypt_key(aeskey, 128, &aes);

    memset(fmtp, 0, sizeof(fmtp));
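    // parse the space/tab-separated fmtp fields into the fmtp[] array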
    i = 0;
    while ( (arg = strsep(&fmtpstr, " \t")) )
        fmtp[i++] = atoi(arg);

    init_decoder();
    init_buffer();
    init_rtp();      // open a UDP listen port and start a listener; decode into ring buffer
    fflush(stdout);
    init_output();              // resample and output from ring buffer

    char line[128];
    int in_line = 0;
    int n;
    double f;
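    // read control commands from stdin, reassembling lines split across fgets() calls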
    while (fgets(line + in_line, sizeof(line) - in_line, stdin)) {
        n = strlen(line);
        if (line[n-1] != '\n') {
            in_line = strlen(line) - 1;
            if (n == sizeof(line)-1)
                in_line = 0;
            continue;
        }
        if (sscanf(line, "vol: %lf\n", &f)) {
            assert(f<=0);
            if (debug)
                fprintf(stderr, "VOL: %lf\n", f);
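            // convert the dB value to a linear gain (f <= 0, so volume <= 1.0)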
            volume = pow(10.0,0.05*f);
            fix_volume = 65536.0 * volume;
            continue;
        }
        if (!strcmp(line, "exit\n")) {
            exit(0);
        }
        if (!strcmp(line, "flush\n")) {
            pthread_mutex_lock(&ab_mutex);
            ab_resync();
            pthread_mutex_unlock(&ab_mutex);
            if (debug)
                fprintf(stderr, "FLUSH\n");
        }
    }
    fprintf(stderr, "bye!\n");
    fflush(stderr);

    return EXIT_SUCCESS;
}
Example #10
int hairtunes_init(char *pAeskey, char *pAesiv, char *fmtpstr, int pCtrlPort, int pTimingPort,
         int pDataPort, char *pRtpHost, char*pPipeName, char *pLibaoDriver, char *pLibaoDeviceName, char *pLibaoDeviceId)
{
    if(pAeskey != NULL)    
        memcpy(aeskey, pAeskey, sizeof(aeskey));
    if(pAesiv != NULL)
        memcpy(aesiv, pAesiv, sizeof(aesiv));
    if(pRtpHost != NULL)
        rtphost = pRtpHost;
    if(pPipeName != NULL)
        pipename = pPipeName;
    if(pLibaoDriver != NULL)
        libao_driver = pLibaoDriver;
    if(pLibaoDeviceName != NULL)
        libao_devicename = pLibaoDeviceName;
    if(pLibaoDeviceId != NULL)
        libao_deviceid = pLibaoDeviceId;
    
    controlport = pCtrlPort;
    timingport = pTimingPort;
    dataport = pDataPort;

    AES_set_decrypt_key(aeskey, 128, &aes);

    memset(fmtp, 0, sizeof(fmtp));
    int i = 0;
    char *arg;
    while ( (arg = strsep(&fmtpstr, " \t")) )
        fmtp[i++] = atoi(arg);

    init_decoder();
    init_buffer();
    init_rtp();      // open a UDP listen port and start a listener; decode into ring buffer
    fflush(stdout);
    init_output();              // resample and output from ring buffer

    char line[128];
    int in_line = 0;
    int n;
    double f;
    while (fgets(line + in_line, sizeof(line) - in_line, stdin)) {
        n = strlen(line);
        if (line[n-1] != '\n') {
            in_line = strlen(line) - 1;
            if (n == sizeof(line)-1)
                in_line = 0;
            continue;
        }
        if (sscanf(line, "vol: %lf\n", &f)) {
            assert(f<=0);
            if (debug)
                fprintf(stderr, "VOL: %lf\n", f);
            volume = pow(10.0,0.05*f);
            fix_volume = 65536.0 * volume;
            continue;
        }
        if (!strcmp(line, "exit\n")) {
            exit(0);
        }
        if (!strcmp(line, "flush\n")) {
            pthread_mutex_lock(&ab_mutex);
            ab_resync();
            pthread_mutex_unlock(&ab_mutex);
            if (debug)
                fprintf(stderr, "FLUSH\n");
        }
    }
    fprintf(stderr, "bye!\n");
    fflush(stderr);

    return EXIT_SUCCESS;
}
Example #11
void CLASS nikon_compressed_load_raw() // used when tag 0x103 of subifd1 == 0x8799 (34713)
{
  static const uchar nikon_tree[][32] = {
    { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0,	/* 12-bit lossy */
      5,4,3,6,2,7,1,0,8,9,11,10,12 },
    { 0,1,5,1,1,1,1,1,1,2,0,0,0,0,0,0,	/* 12-bit lossy after split */
      0x39,0x5a,0x38,0x27,0x16,5,4,3,2,1,0,11,12,12 },
    { 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,  /* 12-bit lossless */
      5,4,6,3,7,2,8,1,9,0,10,11,12 },
    { 0,1,4,3,1,1,1,1,1,2,0,0,0,0,0,0,	/* 14-bit lossy */
      5,6,4,7,8,3,9,2,1,0,10,11,12,13,14 },
    { 0,1,5,1,1,1,1,1,1,1,2,0,0,0,0,0,	/* 14-bit lossy after split */
      8,0x5c,0x4b,0x3a,0x29,7,6,5,4,3,2,1,0,13,14 },
    { 0,1,4,2,2,3,1,2,0,0,0,0,0,0,0,0,	/* 14-bit lossless */
      7,6,8,5,9,4,10,3,11,12,2,0,1,13,14 } };
  struct decode *dindex;
  ushort ver0, ver1, vpred[2][2], hpred[2], csize;
  int i, min, max, step=0, huff=0, split=0, row, col, len, shl, diff;

  fseek (ifp, meta_offset, SEEK_SET); // linearization curve (0x96)
  ver0 = fgetc(ifp);
  ver1 = fgetc(ifp);
  // ver0=0x44, ver1=0x20 for 12bits and 14bits lossy (d300)
  // 0x46, 0x30 for 12bits and 14 lossless (d300 and d700)
  printf("meta_offset=%d, tiff_bps=%d, ver0=%d, ver1=%d\n", meta_offset, tiff_bps, ver0, ver1);
  if (ver0 == 0x49 || ver1 == 0x58) // never seen. firmware update or nikon raw software?
    fseek (ifp, 2110, SEEK_CUR);
  if (ver0 == 0x46) huff = 2; // lossless (implicitly 12bits). have seen a d3x nef with ver0=0x46 and ver1=0x30 (exif 0x131="ver1.00")
  // with d300 lossless : ver0=0x46, ver1=0x30. d700/14b/lossless : ver0=0x46, ver1=0x30

  if (tiff_bps == 14) huff += 3; // 14bits lossy (if huff was ==0) or 14bits lossless if ver0==0x46
  read_shorts (vpred[0], 4); // vertical predictor values ?
  
  max = 1 << tiff_bps & 0x7fff;
  if ((csize = get2()) > 1) // curve size. 567 with D100/12bits/lossy. 32 with d3x/12bits/lossless. 
    step = max / (csize-1);
  if (ver0 == 0x44 && ver1 == 0x20 && step > 0) { // lossy (d300, d90 and d5000). 
  //tag 0x93 = 2. stored curve needs interpolation
    for (i=0; i < csize; i++) // read curve
      curve[i*step] = get2();
      // curve interpolation
    for (i=0; i < max; i++)
      curve[i] = ( curve[i-i%step]*(step-i%step) +
		   curve[i-i%step+step]*(i%step) ) / step;
		   
    fseek (ifp, meta_offset+562, SEEK_SET); // csize seems 257 for recent models (0x44/0x20) like d90 and d300
    // type 2 has the split value and uses a second huffman table
    split = get2();
  } else if (ver0 != 0x46 && csize <= 0x4001) // if not lossless. 
  // with D100/D200/D2X/D40/D80/D60 12bits/lossy : ver0==0x44 && ver1==0x10
    read_shorts (curve, max=csize);
  printf("csize=%d, step=%d, split=%d, huff=%d\n", csize, step, split, huff);

/*
0x96 (linearization table) tag format 

offset how_many   type   name
----+-----------+------+---------------------------------------------------------------------------------------------
0    1           byte   version0
1    1           byte   version1
                         ver0=0x44, ver1=0x20 for 12bits and 14bits lossy (d300)
                         0x44, 0x20 : lossy (d300, d90 and d5000)
                         0x46, 0x30 for 12bits and 14 lossless (d300 and d700)
                         0x46, 0x30 : d3x/12b/lossless
                         0x46, 0x30. with d300 lossless. and d700/14b/lossless
                         0x44, 0x10 : with D100/D200/D2X/D40/D80/D60 12bits/lossy 
                         tag 0x93 = 3 for lossless (0x46/0x30).
                         tag 0x93 = 4 for lossy type 2 (0x44/0x20) 
                         tag 0x93 = 1 for lossy type 1 (0x44/0x10)
2    4           shorts vpred[2][2] (when ver0 == 0x49 || ver1 == 0x58, fseek (ifp, 2110, SEEK_CUR) before)
0x0a 1           short  curve_size. 
                         32 with d3x/12bits/lossless, d300/12bits/lossless
                         34 with 14bits/lossless (d300 and d700)
                         257 with d300/12+14b/lossy.  
                         257 with 12b/lossy for d90
                         567 with D100/12bits/lossy. 
                         683 with 12b/lossy for d200,d2x,d40x,d40,d80,d60
0x0c curve_size  shorts curve[]
                         for lossy type 2, if curve_size == 257 (d90 and d300), end of curve table is 1+257*2 = 526
562  1           short  split_value (for 0x44/0x20 only (lossy type 2), d90 and d300) 
                         
 */

  while (curve[max-2] == curve[max-1]) max--; // trim the flat tail of the linearization curve
  init_decoder();
  make_decoder (nikon_tree[huff], 0);
  fseek (ifp, data_offset, SEEK_SET);
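  // getbits(-1) presumably resets the bit-buffer state before Huffman decoding starts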
  getbits(-1);
  for (min=row=0; row < height; row++) {
    if (split && row == split) {
      // for lossy type 2 (0x44/0x20)
      init_decoder();
      make_decoder (nikon_tree[huff+1], 0);
      max += (min = 16) << 1;
    }
    for (col=0; col < raw_width; col++) {
      for (dindex=first_decode; dindex->branch[0]; )
	      dindex = dindex->branch[getbits(1)]; // read 12 or 14bits value bit per bit and walking through the huffman tree to find the leaf
      len = dindex->leaf & 15; // length = 4 left most bits
      shl = dindex->leaf >> 4; // shift length? = 8 or 10bits
      diff = ((getbits(len-shl) << 1) + 1) << shl >> 1; // read diff value
      if ((diff & (1 << (len-1))) == 0) // left most bit is certainly the sign 
	      diff -= (1 << len) - !shl;
      if (col < 2) 
        hpred[col] = vpred[row & 1][col] += diff; // vpred used for columns 0 and 1
      else	   
        hpred[col & 1] += diff;
      // very close to jpeg lossless decompression (ljpeg_diff and ljpeg_row), except for the shl value...
      if ((ushort)(hpred[col & 1] + min) >= max) derror();
      if ((unsigned) (col-left_margin) < width)
	      BAYER(row,col-left_margin) = curve[LIM((short)hpred[col & 1],0,0x3fff)];
    }
  }
}
Example #12
static int alloc(struct vidcodec_st **stp, struct vidcodec *vc,
		 const char *name, struct vidcodec_prm *encp,
		 const char *fmtp, vidcodec_enq_h *enqh,
		 vidcodec_send_h *sendh, void *arg)
{
	struct vidcodec_st *st;
	int err = 0;

	if (!encp)
		return EINVAL;

	st = mem_zalloc(sizeof(*st), destructor);
	if (!st)
		return ENOMEM;

	st->vc = mem_ref(vc);
	st->encprm = *encp;

	if (0 == str_casecmp(name, "H263"))
		st->codec_id = CODEC_ID_H263;
	else if (0 == str_casecmp(name, "H264"))
		st->codec_id = CODEC_ID_H264;
	else if (0 == str_casecmp(name, "MP4V-ES"))
		st->codec_id = CODEC_ID_MPEG4;
	else {
		err = EINVAL;
		goto out;
	}

	st->enc.mb  = mbuf_alloc(FF_MIN_BUFFER_SIZE * 20);
	st->dec.mb  = mbuf_alloc(1024);
	st->mb_frag = mbuf_alloc(1024);
	if (!st->enc.mb || !st->dec.mb || !st->mb_frag) {
		err = ENOMEM;
		goto out;
	}

	st->enc.sz_max = st->enc.mb->size;
	st->dec.sz_max = st->dec.mb->size;

	if (st->codec_id == CODEC_ID_H264) {
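		/* when built with x264 support (USE_X264), the H.264 encoder is presumably initialized elsewhere */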
#ifndef USE_X264
		err = init_encoder(st);
#endif
	}
	else
		err = init_encoder(st);
	if (err) {
		DEBUG_WARNING("%s: could not init encoder\n", name);
		goto out;
	}

	err = init_decoder(st);
	if (err) {
		DEBUG_WARNING("%s: could not init decoder\n", name);
		goto out;
	}

	if (str_isset(fmtp)) {
		struct pl sdp_fmtp;

		pl_set_str(&sdp_fmtp, fmtp);

		fmt_param_apply(&sdp_fmtp, param_handler, st);
	}

	st->enqh  = enqh;
	st->sendh = sendh;
	st->arg = arg;

	re_printf("video codec %s: %d fps, %d bit/s\n", name,
		  encp->fps, encp->bitrate);

 out:
	if (err)
		mem_deref(st);
	else
		*stp = st;

	return err;
}
Example #13
void Decoder::decode(const Packet::ConstPtr &packet,
		sensor_msgs::ImagePtr& image, int &got_image)
{
	/* Declarations */
	int size;
	AVPacket pkt;
	AVFrame* frame_in;
	AVFrame* frame_out;

	const int out_width = width_out_ == -1 ? packet->width : width_out_;
	const int out_height = height_out_ == -1 ? packet->height : height_out_;
	const int out_pix_fmt = pix_fmt_out_ == -1 ? packet->pix_fmt : pix_fmt_out_;

	/* Check if the codec context has to be reinitialized */
	if (!codec_context_ || packet->codec_ID != codec_context_->codec_id
			|| packet->compressed_pix_fmt != codec_context_->pix_fmt
			|| packet->compressed_width != codec_context_->width
			|| packet->compressed_height != codec_context_->height)
	{
		free_context();
		init_decoder(packet->compressed_width, packet->compressed_height,
				packet->compressed_pix_fmt, packet->codec_ID);
	}

	/* Get local references to the AVFrame structs */
	frame_in = frame_in_->get_frame();

	if (out_width == packet->compressed_width
			&& out_height == packet->compressed_height
			&& out_pix_fmt == packet->compressed_pix_fmt)
		frame_out = frame_in_->get_frame();
	else
	{
		/* Check if the output frame has to be reinitialized */
		frame_out = frame_out_ ? frame_out_->get_frame() : NULL;

		if (!frame_out_ || frame_out->width != out_width
				|| frame_out->height != out_height
				|| frame_out->format != out_pix_fmt)
		{
			frame_out_ = boost::make_shared<Frame>(out_width, out_height,
					out_pix_fmt);
			frame_out = frame_out_->get_frame();
		}
	}

	/* Check if the received packet is valid */
	if (previous_packet_ + 1 != packet->seq)
		has_keyframe_ = false;

	previous_packet_ = packet->seq;

	/* Check if there is a valid keyframe stored */
	if (!has_keyframe_)
	{
		if (packet->keyframe)
			has_keyframe_ = true;
		else
		{
			got_image = 0;
			return;
		}
	}

	/* Fill the AVPacket */
	if (av_new_packet(&pkt, packet->data.size())
			|| static_cast<unsigned int>(pkt.size) != packet->data.size())
		throw std::runtime_error("Could not allocate AV packet data.");

	memcpy(pkt.data, &packet->data[0], pkt.size);

	pkt.pts = packet->pts;
	pkt.flags = packet->keyframe ? AV_PKT_FLAG_KEY : 0;

	/* Decode packet */
	if (avcodec_decode_video2(codec_context_, frame_in, &got_image, &pkt) < 0)
		std::cout << "[decode] Could not decode packet." << std::endl;

	/* Free the packet data */
	if (pkt.destruct)
		pkt.destruct(&pkt);
	else
		av_free_packet(&pkt);

	if (!got_image)
		return;

	if (frame_in != frame_out)
	{
		/* Get SWS Context */
		sws_context_ = sws_getCachedContext(sws_context_, frame_in->width,
				frame_in->height, (enum AVPixelFormat) frame_in->format,
				frame_out->width, frame_out->height,
				(enum AVPixelFormat) frame_out->format, SWS_BICUBIC, NULL, NULL,
				NULL);
		if (!sws_context_)
			throw std::runtime_error("Could not initialize sws context.");

		/* Transform image */
		sws_scale(sws_context_, frame_in->data, frame_in->linesize, 0,
				frame_in->height, frame_out->data, frame_out->linesize);
	}

	/* Retrieve the PTS for the AVFrame */
	image->header.stamp = ros::Time(
			static_cast<uint32_t>(frame_in->pkt_pts >> 32),
			static_cast<uint32_t>(frame_in->pkt_pts));

	/* Store image */
	image->header.seq = packet->seq;
	image->width = frame_out->width;
	image->height = frame_out->height;
	image->step = frame_out->linesize[0];

	if (!pix_fmt_libav2ros(frame_out->format, image->encoding,
			image->is_bigendian))
		throw std::runtime_error(
				"Can not handle requested output pixel format.");

	size = frame_out->linesize[0] * frame_out->height;
	image->data.resize(size);
	image->data.assign(frame_out->data[0], frame_out->data[0] + size);
}
Example #14
static void split_video(const char *infilename,
                        const char *outfmt,
                        int gop_size,
                        int chunk_size,
                        int skip,
                        long long length,
                        AVDictionary *_opt)
{
    DecoderContext *dc;
    EncoderContext *ec;

    AVFrame *frame;
    int width, height;
    long long frame_count = 0, out_frame_num = 0;
    int chunk_count = 0;
    char outfilename[MAX_FILENAME_LEN];
    AVDictionary *opt = NULL;
    AVRational framerate;
    enum AVPixelFormat pix_fmt;

    av_dict_copy(&opt, _opt, 0);

    // Initialize the decoder
    dc = init_decoder(infilename);

    // Extract the parameters needed by the encoder
    width = dc->codecCtx->width;
    height = dc->codecCtx->height;
    framerate = dc->codecCtx->framerate;
    pix_fmt = dc->codecCtx->pix_fmt;

    // Skip input frames

    if (skip > 0)
        fprintf(stderr, "Skipping %d frames\n", skip);

    while (skip > 0) {
        // TODO: I'd rather not decode the frames, but this will take some work to
        //       refactor
        if (!read_frame(dc)) {
            fprintf(stderr, "No more frames available, skip = %d\n", skip);
            exit(0);
        }
        --skip;
    }

    // Initialize output
    fprintf(stderr, "\rWriting chunk %05d", chunk_count);
    fflush(stderr);

    snprintf(outfilename, MAX_FILENAME_LEN, outfmt, chunk_count++);
    ec = init_encoder(outfilename, gop_size, width, height, framerate, pix_fmt, opt);

    while (length <= 0 || frame_count < length) {
        frame = read_frame(dc);
        if (!frame)
            break;

        if (out_frame_num == chunk_size) {
            close_encoder(ec);

            fprintf(stderr, "\rWriting chunk %05d", chunk_count);
            fflush(stderr);

            snprintf(outfilename, MAX_FILENAME_LEN, outfmt, chunk_count++);
            ec = init_encoder(outfilename, gop_size, width, height, framerate, pix_fmt, opt);
            out_frame_num = 0;
        }

        set_pict_type(frame, gop_size, out_frame_num);
        frame->pts = out_frame_num++;
        frame_count++;

        write_video_frame(ec, frame);
    }

    close_encoder(ec);
    close_decoder(dc);

    fprintf(stderr, "\nRead %lld frames\n", frame_count);
    fprintf(stderr, "Wrote %d chunks of %d frames each (last chunk: %lld frames)\n", chunk_count, chunk_size, out_frame_num);
    fprintf(stderr, "  for a total of %lld frames\n", (chunk_count-1) * chunk_size + out_frame_num);
}
Example #15
DecoderStatus VorbisDecoder::process(Packet * packet) {
  FXASSERT(packet);

#ifdef HAVE_VORBIS_PLUGIN
  FXfloat ** pcm=NULL;
  FXfloat * buf32=NULL;
#else // HAVE_TREMOR_PLUGIN
  FXint ** pcm=NULL;
  FXshort * buf32=NULL;
#endif

  FXint p,navail=0;

  FXint ngiven,ntotalsamples,nsamples,sample,c,s;

  FXbool  eos=packet->flags&FLAG_EOS;
  FXuint   id=packet->stream;
  FXlong  len=packet->stream_length;


  OggDecoder::process(packet);

  /// Init Decoder
  if (!has_dsp) {
    if (!init_decoder())
      return DecoderError;
    if (!has_dsp)
      return DecoderOk;
    }

  /// Find Stream Position
  if (stream_position==-1 && !find_stream_position())
    return DecoderOk;


  if (out) {
    navail = out->availableFrames();
    }

  while(get_next_packet()) {

    if (__unlikely(is_vorbis_header())) {
      GM_DEBUG_PRINT("[vorbis] unexpected vorbis header found. Resetting decoder\n");
      push_back_packet();
      reset_decoder();
      return DecoderOk;
      }

    if (vorbis_synthesis(&block,&op)==0)
      vorbis_synthesis_blockin(&dsp,&block);

    while((ngiven=vorbis_synthesis_pcmout(&dsp,&pcm))>0) {
      if (len>0) FXASSERT(stream_position+ngiven<=len);

      if (__unlikely(stream_position<stream_decode_offset)) {
        FXlong offset = FXMIN(ngiven,stream_decode_offset - stream_position);
        GM_DEBUG_PRINT("[vorbis] stream decode offset %ld. Skipping %ld of %ld \n",stream_decode_offset,offset,stream_decode_offset-stream_position);
        ngiven-=offset;
        stream_position+=offset;
        sample=offset;
        vorbis_synthesis_read(&dsp,offset);
        if (ngiven==0) continue;
        }
      else {
        sample=0;
        }

      for (ntotalsamples=ngiven;ntotalsamples>0;) {

        /// Get new buffer
        if (out==NULL) {
          out = engine->decoder->get_output_packet();
          if (out==NULL) return DecoderInterrupted;
          out->stream_position=stream_position;
          out->stream_length=len;
          out->af=af;
          navail = out->availableFrames();
          }

#ifdef HAVE_VORBIS_PLUGIN
        buf32 = out->flt();
#else // HAVE_TREMOR_PLUGIN
        buf32 = out->s16();
#endif
        /// Copy Samples
        nsamples = FXMIN(ntotalsamples,navail);
        for (p=0,s=sample;s<(nsamples+sample);s++){
          for (c=0;c<info.channels;c++,p++) {
#ifdef HAVE_VORBIS_PLUGIN
            buf32[p]=pcm[c][s];
#else
            buf32[p]=CLIP_TO_15(pcm[c][s]>>9);
#endif
            }
          }

        /// Update sample counts
        out->wroteFrames(nsamples);

        sample+=nsamples;
        navail-=nsamples;
        ntotalsamples-=nsamples;
        stream_position+=nsamples;

        /// Send out packet if full
        ///FIXME handle EOS.
        if (navail==0) {
          engine->output->post(out);
          out=NULL;
          }
        }
      vorbis_synthesis_read(&dsp,ngiven);
      }
    }

  if (eos) {
    if (out && out->numFrames())  {
      engine->output->post(out);
      out=NULL;
      }
    engine->output->post(new ControlEvent(End,id));
    }
  return DecoderOk;
  }
Example #16
static switch_status_t switch_dahdi_decode(switch_codec_t *codec,
										   switch_codec_t *other_codec,
										   void *encoded_data,
										   uint32_t encoded_data_len,
										   uint32_t encoded_rate, void *decoded_data, uint32_t *decoded_data_len, uint32_t *decoded_rate,
										   unsigned int *flag)
{
	int32_t res;
	short *dbuf_linear;
	// we can only decode up to half as many ulaw bytes as the destination linear buffer holds
	unsigned char dbuf_ulaw[*decoded_data_len / 2];
	unsigned char *ebuf_g729;
	uint32_t i;
	struct dahdi_context *context;
	switch_status_t status;

#ifdef DEBUG_DAHDI_CODEC
	switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "Switch DAHDI decode called to decode %d bytes.\n", encoded_data_len);
#endif

	context = codec->private_info;
	dbuf_linear = decoded_data;
	ebuf_g729 = encoded_data;

	if (context->decoding_fd == -1) {
		if ((status = init_decoder(codec)) != SWITCH_STATUS_SUCCESS) {
			return status;
		}
	}

	if (*flag & SWITCH_CODEC_FLAG_SILENCE) {
		memset(dbuf_linear, 0, codec->implementation->decoded_bytes_per_packet);
		*decoded_data_len = codec->implementation->decoded_bytes_per_packet;
#ifdef DEBUG_DAHDI_CODEC
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "Switch DAHDI decode in silence returned %d bytes.\n", *decoded_data_len);
#endif
		return SWITCH_STATUS_SUCCESS;
	}
#ifdef DEBUG_DAHDI_CODEC
	switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "Writing %d bytes to decode.\n", encoded_data_len);
#endif
	res = write(context->decoding_fd, ebuf_g729, encoded_data_len);
	if (-1 == res) {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Failed to write to %s decoder device: %s.\n", transcoder_name, strerror(errno));
		return SWITCH_STATUS_FALSE;
	}
	if (encoded_data_len != res) {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Requested to write %d bytes to %s decoder device, but only wrote %d bytes.\n",
						  encoded_data_len, transcoder_name, res);
		return SWITCH_STATUS_FALSE;
	}
#ifdef DEBUG_DAHDI_CODEC
	switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "Attempting to read from device %d bytes of decoded ulaw data.\n", sizeof(dbuf_ulaw));
#endif
	res = wait_for_transcoder(context->decoding_fd);
	if (-1 == res) {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Failed to poll on %s decoder device: %s.\n", transcoder_name, strerror(errno));
		return SWITCH_STATUS_FALSE;
	}
	if (0 == res) {
		memset(dbuf_linear, 0, codec->implementation->decoded_bytes_per_packet);
		*decoded_data_len = codec->implementation->decoded_bytes_per_packet;
#ifdef DEBUG_DAHDI_CODEC
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "No output on %s decoder device, returning silence frame of %d bytes.\n", transcoder_name,
						  *decoded_data_len);
#endif
		return SWITCH_STATUS_SUCCESS;
	}
	res = read(context->decoding_fd, dbuf_ulaw, sizeof(dbuf_ulaw));
	if (-1 == res) {
		if (EAGAIN == errno || EWOULDBLOCK == errno) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "No output on %s decoder device (%s).\n", transcoder_name, strerror(errno));
			*decoded_data_len = 0;
			return SWITCH_STATUS_SUCCESS;
		}
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Failed to read from %s decoder device: %s.\n", transcoder_name, strerror(errno));
		return SWITCH_STATUS_FALSE;
	}
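	/* expand each ulaw byte into a 16-bit linear sample */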
	for (i = 0; i < res; i++) {
		dbuf_linear[i] = ulaw_to_linear(dbuf_ulaw[i]);
	}
	*decoded_data_len = i * 2;
#ifdef DEBUG_DAHDI_CODEC
	switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "Switch DAHDI decode returned %d bytes.\n", *decoded_data_len);
#endif
	return SWITCH_STATUS_SUCCESS;
}
Example #17
static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
                                      const uint8_t *data, unsigned int data_sz,
                                      void *user_priv, long deadline) {
  const uint8_t *data_start = data;
  const uint8_t * const data_end = data + data_sz;
  vpx_codec_err_t res;
  uint32_t frame_sizes[8];
  int frame_count;

  if (data == NULL && data_sz == 0) {
    ctx->flushed = 1;
    return VPX_CODEC_OK;
  }

  // Reset flushed when receiving a valid frame.
  ctx->flushed = 0;

  // Initialize the decoder workers on the first frame.
  if (ctx->frame_workers == NULL) {
    const vpx_codec_err_t res = init_decoder(ctx);
    if (res != VPX_CODEC_OK)
      return res;
  }

  res = vp9_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
                                   ctx->decrypt_cb, ctx->decrypt_state);
  if (res != VPX_CODEC_OK)
    return res;

  if (ctx->frame_parallel_decode) {
    // Decode in frame parallel mode. When decoding in this mode, the frame
    // passed to the decoder must be either a normal frame or a superframe with
    // superframe index so the decoder could get each frame's start position
    // in the superframe.
    if (frame_count > 0) {
      int i;

      for (i = 0; i < frame_count; ++i) {
        const uint8_t *data_start_copy = data_start;
        const uint32_t frame_size = frame_sizes[i];
        if (data_start < data
            || frame_size > (uint32_t) (data_end - data_start)) {
          set_error_detail(ctx, "Invalid frame size in index");
          return VPX_CODEC_CORRUPT_FRAME;
        }

        if (ctx->available_threads == 0) {
          // No more threads for decoding. Wait until the next output worker
          // finishes decoding. Then copy the decoded frame into cache.
          if (ctx->num_cache_frames < FRAME_CACHE_SIZE) {
            wait_worker_and_cache_frame(ctx);
          } else {
            // TODO(hkuang): Add unit test to test this path.
            set_error_detail(ctx, "Frame output cache is full.");
            return VPX_CODEC_ERROR;
          }
        }

        res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
                         deadline);
        if (res != VPX_CODEC_OK)
          return res;
        data_start += frame_size;
      }
    } else {
      if (ctx->available_threads == 0) {
        // No more threads for decoding. Wait until the next output worker
        // finishes decoding. Then copy the decoded frame into cache.
        if (ctx->num_cache_frames < FRAME_CACHE_SIZE) {
          wait_worker_and_cache_frame(ctx);
        } else {
          // TODO(hkuang): Add unit test to test this path.
          set_error_detail(ctx, "Frame output cache is full.");
          return VPX_CODEC_ERROR;
        }
      }

      res = decode_one(ctx, &data, data_sz, user_priv, deadline);
      if (res != VPX_CODEC_OK)
        return res;
    }
  } else {
    // Decode in serial mode.
    if (frame_count > 0) {
      int i;

      for (i = 0; i < frame_count; ++i) {
        const uint8_t *data_start_copy = data_start;
        const uint32_t frame_size = frame_sizes[i];
        vpx_codec_err_t res;
        if (data_start < data
            || frame_size > (uint32_t) (data_end - data_start)) {
          set_error_detail(ctx, "Invalid frame size in index");
          return VPX_CODEC_CORRUPT_FRAME;
        }

        res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
                         deadline);
        if (res != VPX_CODEC_OK)
          return res;

        data_start += frame_size;
      }
    } else {
      while (data_start < data_end) {
        const uint32_t frame_size = (uint32_t) (data_end - data_start);
        const vpx_codec_err_t res = decode_one(ctx, &data_start, frame_size,
                                               user_priv, deadline);
        if (res != VPX_CODEC_OK)
          return res;

        // Account for suboptimal termination by the encoder.
        while (data_start < data_end) {
          const uint8_t marker = read_marker(ctx->decrypt_cb,
                                             ctx->decrypt_state, data_start);
          if (marker)
            break;
          ++data_start;
        }
      }
    }
  }

  return res;
}
Example #18
static switch_status_t switch_vpx_decode(switch_codec_t *codec, switch_frame_t *frame)
{
	vpx_context_t *context = (vpx_context_t *)codec->private_info;
	switch_size_t len;
	vpx_codec_ctx_t *decoder = NULL;
	switch_status_t status = SWITCH_STATUS_SUCCESS;
	int is_start = 0, is_keyframe = 0, get_refresh = 0;

	if (context->is_vp9) {
		is_start = is_keyframe = IS_VP9_KEY_FRAME(*(unsigned char *)frame->data);
	} else { // vp8
		is_start = (*(unsigned char *)frame->data & 0x10);
		is_keyframe = IS_VP8_KEY_FRAME((uint8_t *)frame->data);
	}

	// if (is_keyframe) switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "got key %d\n", is_keyframe);

	if (context->need_decoder_reset != 0) {
		vpx_codec_destroy(&context->decoder);
		context->decoder_init = 0;
		init_decoder(codec);
		context->need_decoder_reset = 0;
	}
	
	if (!context->decoder_init) {
		init_decoder(codec);
	}

	if (!context->decoder_init) {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "VPX decoder is not initialized!\n");
		return SWITCH_STATUS_FALSE;
	}

	decoder = &context->decoder;
	
	// switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "len: %d ts: %u mark:%d\n", frame->datalen, frame->timestamp, frame->m);

	// context->last_received_timestamp = frame->timestamp;
	context->last_received_complete_picture = frame->m ? SWITCH_TRUE : SWITCH_FALSE;

	if (is_start) {
		context->got_start_frame = 1;
	}
	
	if (is_keyframe) {
		if (context->got_key_frame <= 0) {
			context->got_key_frame = 1;
			if (!is_keyframe) {
				get_refresh = 1;
			}
		} else {
			context->got_key_frame++;
		}
	} else if (context->got_key_frame <= 0) {
		if ((--context->got_key_frame % 200) == 0) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG1, "Waiting for key frame %d\n", context->got_key_frame);
		}
		if (!context->got_start_frame) {
			switch_goto_status(SWITCH_STATUS_MORE_DATA, end);
		}
	}


	status = context->is_vp9 ? buffer_vp9_packets(context, frame) : buffer_vp8_packets(context, frame);


	if (context->dec_iter && (frame->img = (switch_image_t *) vpx_codec_get_frame(decoder, &context->dec_iter))) {
		switch_goto_status(SWITCH_STATUS_SUCCESS, end);
	}

	//printf("READ buf:%ld got_key:%d st:%d m:%d\n", switch_buffer_inuse(context->vpx_packet_buffer), context->got_key_frame, status, frame->m);

	len = switch_buffer_inuse(context->vpx_packet_buffer);

	//if (frame->m && (status != SWITCH_STATUS_SUCCESS || !len)) {
		//switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "WTF????? %d %ld\n", status, len);
	//}


	if (status == SWITCH_STATUS_SUCCESS && frame->m && len) {
		uint8_t *data;
		int corrupted = 0;
		int err;

		switch_buffer_peek_zerocopy(context->vpx_packet_buffer, (void *)&data);

		context->dec_iter = NULL;
		err = vpx_codec_decode(decoder, data, (unsigned int)len, NULL, 0);

		if (err != VPX_CODEC_OK) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG1, "Error decoding %" SWITCH_SIZE_T_FMT " bytes, [%d:%s:%s]\n",
							  len, err, vpx_codec_error(decoder), vpx_codec_error_detail(decoder));
			switch_goto_status(SWITCH_STATUS_RESTART, end);
		}

		if (vpx_codec_control(decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted) != VPX_CODEC_OK) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "VPX control error!\n");
			switch_goto_status(SWITCH_STATUS_RESTART, end);
		}
		
		if (corrupted) {
			frame->img = NULL;
		} else {
			frame->img = (switch_image_t *) vpx_codec_get_frame(decoder, &context->dec_iter);
		}
		
		switch_buffer_zero(context->vpx_packet_buffer);
		
		if (!frame->img) {
			//context->need_decoder_reset = 1;
			context->got_key_frame = 0;
			context->got_start_frame = 0;
			status = SWITCH_STATUS_RESTART;
		}
	}

end:

	if (status == SWITCH_STATUS_RESTART) {
		switch_buffer_zero(context->vpx_packet_buffer);
		//context->need_decoder_reset = 1;
		context->got_key_frame = 0;
		context->got_start_frame = 0;
		//switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "RESET VPX\n");
	}

	if (!frame->img || status == SWITCH_STATUS_RESTART) {
		status = SWITCH_STATUS_MORE_DATA;
	}

	if (context->got_key_frame <= 0 || get_refresh) {
		switch_set_flag(frame, SFF_WAIT_KEY_FRAME);
	}

	return status;
}