Example #1
u16 r8712_read16(struct _adapter *adapter, u32 addr)
{
	struct io_queue *pio_queue = adapter->pio_queue;
	struct intf_hdl *pintfhdl = &(pio_queue->intf);
	u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);

	_read16 = pintfhdl->io_ops._read16;
	return _read16(pintfhdl, addr);
}
Example #2
u16 _rtw_read16(_adapter *adapter, u32 addr)
{
	u16 r_val;
	//struct	io_queue	*pio_queue = (struct io_queue *)adapter->pio_queue;
	struct io_priv *pio_priv = &adapter->iopriv;
	struct	intf_hdl		*pintfhdl = &(pio_priv->intf);
	u16	(*_read16)(struct intf_hdl *pintfhdl, u32 addr);

	_read16 = pintfhdl->io_ops._read16;

	r_val = _read16(pintfhdl, addr);
	return rtw_le16_to_cpu(r_val);
}
Example #3
u16 read16(_adapter *adapter, u32 addr)
{
	u16 r_val;
	struct	io_queue  	*pio_queue = (struct io_queue *)adapter->pio_queue;
	struct	intf_hdl		*pintfhdl = &(pio_queue->intf);
	u16 	(*_read16)(struct intf_hdl *pintfhdl, u32 addr);
	_func_enter_;
	_read16 = pintfhdl->io_ops._read16;

	r_val = _read16(pintfhdl, addr);
	_func_exit_;
	return r_val;
}
Example #4
u16 _rtw_read16(struct adapter *adapter, u32 addr)
{
	u16 r_val;
	struct io_priv *pio_priv = &adapter->iopriv;
	struct	intf_hdl		*pintfhdl = &(pio_priv->intf);
	u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
_func_enter_;
	_read16 = pintfhdl->io_ops._read16;

	r_val = _read16(pintfhdl, addr);
_func_exit_;
	return r_val;
}
u16 rtw_read16(_adapter *adapter, u32 addr)
{
	u16 r_val;
	//struct	io_queue  	*pio_queue = (struct io_queue *)adapter->pio_queue;
	struct io_priv *pio_priv = &adapter->iopriv;
	struct	intf_hdl		*pintfhdl = &(pio_priv->intf);
	u16 	(*_read16)(struct intf_hdl *pintfhdl, u32 addr);
	_irqL irqL;
	_func_enter_;
	_read16 = pintfhdl->io_ops._read16;
	_enter_critical_mutex(&pintfhdl->io_mutex, &irqL);
	r_val = _read16(pintfhdl, addr);
	_exit_critical_mutex(&pintfhdl->io_mutex, &irqL);
	_func_exit_;
	return r_val;
}
int32_t aac_rtp_dump(stream_state_t* audio){//pkt_format_data_t *pktdata, indinfo_t ind){
    uint8_t *buf,*pld,*auh;
    uint32_t pkt_size,offset,numNALs,i;
    uint16_t word16,bits16;

    FILE* fout;
    pkt_format_data_t *pktdata;
    bitstream_state_t *bs;
    pktdata = &audio->pkt;
    buf = pktdata->data;
    pkt_size = pktdata->size;
    fout = pktdata->fout;
    pld = buf+12;
    bits16 = _read16(pld,0);
    /*
      Each NAL would have to be preceded by an ADTS header.
      NAL boundaries are identified as follows:
      x---RTP header(12)---x---AU header(y) --x--NAL1 NAL2 .... NALk--x
      AU header
      |- Let s = sizeLength,d = indexDeltaLength, i = indexLength in bits obtained from SDP.
      |-> x--AU hdr size y in bits (16 bits) --x--NAL1len(s)SeqnumofNAL1(d)--x...x--NALklen(s)SeqnumofNALk(d)--x
    */
    word16=bits16/8;
    numNALs = (int32_t)(bits16/(audio->ind.sizelen+audio->ind.deltalen));
    //    numNALs/=8;
    auh = buf+12+2;
    //    auh+=2; //account for size of auh
    bs = bio_init_bitstream(auh,word16);
    offset = 12+2+word16;
    pkt_size-=offset;
    pld = buf+offset;
    for(i=0;i<numNALs;i++){
	audio->adts.sample_len = bio_read_bits(bs,audio->ind.sizelen+audio->ind.deltalen);
	audio->adts.sample_len>>=audio->ind.deltalen;
	//	audio->adts.sample_len/=8;
	aac_add_adts(audio);
	nkn_vfs_fwrite(pld,audio->adts.sample_len,sizeof(uint8_t),fout);
	pld+=audio->adts.sample_len;
	auh+=((audio->ind.sizelen+audio->ind.deltalen)/8);
    }
    return VPE_SUCCESS;
}
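The AU-header layout described in the comment inside aac_rtp_dump (a big-endian 16-bit AU-headers-length in bits, followed by one sizeLength+indexDeltaLength field per access unit, as in the RFC 3640 AAC-hbr mode) can also be walked without the bio_* bitstream helpers. The following is a minimal standalone sketch of that walk; be16, parse_au_headers, and the toy packet in main are illustrative names and data, not part of the code above, and sizelen/deltalen are assumed to come from the SDP fmtp line just as in aac_rtp_dump.

#include <stdint.h>
#include <stdio.h>

/* read a big-endian 16-bit value, like the _read16(pld,0) call above */
static uint16_t be16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

static void parse_au_headers(const uint8_t *payload, uint32_t sizelen,
                             uint32_t deltalen)
{
    uint16_t hdr_bits = be16(payload);                    /* AU-headers-length, in bits */
    uint32_t num_aus  = hdr_bits / (sizelen + deltalen);  /* one header per access unit */
    const uint8_t *au = payload + 2 + (hdr_bits + 7) / 8; /* first raw AAC frame        */
    uint32_t bitpos   = 16;                               /* bit cursor past the length */
    uint32_t i, b;

    for (i = 0; i < num_aus; i++) {
        uint32_t au_size = 0;
        /* AU-size is the top sizelen bits; the deltalen index bits are skipped */
        for (b = 0; b < sizelen; b++, bitpos++)
            au_size = (au_size << 1) |
                      ((payload[bitpos >> 3] >> (7 - (bitpos & 7))) & 1u);
        bitpos += deltalen;

        printf("AU %u: %u bytes at offset %u\n", i, au_size,
               (unsigned)(au - payload));
        au += au_size;
    }
}

int main(void)
{
    /* one access unit of 5 bytes, with sizelen=13 and deltalen=3 bits:
       AU-headers-length = 16 bits, AU header = 5 << 3 = 0x0028 */
    uint8_t pkt[] = { 0x00, 0x10, 0x00, 0x28, 1, 2, 3, 4, 5 };
    parse_au_headers(pkt, 13, 3);
    return 0;
}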
int32_t
get_nal_size( uint8_t *data,uint8_t NALlen)
{
    uint32_t nal_uint;
    uint16_t nal_short;
    uint8_t nal_char;
    int32_t nal_size = 0; /* default when NALlen is not 1, 2, or 4 */
    if(NALlen == 4){
	nal_uint = _read32(data,0);
	nal_size = (int32_t)(nal_uint);
    }
    if(NALlen == 2){
	nal_short = _read16(data,0);
	nal_size = (int32_t)(nal_short);
    }
    if(NALlen == 1){
	nal_char = *data;
	nal_size = (int32_t)(nal_char);
    }
    return nal_size;
}
int32_t 
get_num_NALs_AU(uint8_t* data,uint8_t NALlen,int32_t data_size)
{
    int32_t data_left = data_size,nal_size,num_NALS = 0;
    while(data_left>0){
	if(NALlen ==4){
	    nal_size = _read32(data,0);
	    num_NALS++;
	    data_left-=nal_size+4;
	    if(data_left<0)
		break;
	    data+=nal_size+4;
	}
	else if(NALlen == 2){
            nal_size = _read16(data,0);
            num_NALS++;
            data_left-=nal_size+2;

            if(data_left<0)
                break;

            data+=nal_size+2;
	}
	else{ 
            nal_size = *data;
            num_NALS++;
            data_left-=nal_size+1;

            if(data_left<0)
                break;

            data+=nal_size+1;
	}
    }

    return num_NALS;
}
Example #9
int
_read (int fd, void *ptr, size_t len)
{
  return _read16 ((short)fd, ptr, (short)len);
}
int32_t h264_rtp_dump(pkt_format_data_t *pktdata){
    /*
      Core-level function that blindly extracts the H.264 payload and dumps it into the file specified.
      Does not check sequence numbers:
      1. Special handling required for SPS/PPS can be part of a separate function.
      2. For each NAL - represented by the NAL octet in the RTP payload - add the Annex B start code 0x00000001 for compliance.

    */

    uint8_t *buf,*pld,uword;
    uint32_t pkt_size,pos=0,word32,ts;
    static uint32_t second_ts = 0;
    FILE* fout;


    buf = pktdata->data;
    pkt_size = pktdata->size;
    fout = pktdata->fout;
    ts = _read32(buf,4);
    //if(ts == 511281197)
    //{
    //    ts =511281197;
    //    ts= ts;
    //}
#if 0
    if(pktdata->prev_time==0 && (ts!= pktdata->prev_time)){
	pktdata->frame_rate = 90000/(ts-pktdata->prev_time);
	pktdata->prev_time = ts;
    }
#endif
#if 0//by suma
    if(pktdata->prev_time==0 && pktdata->frame_rate ==0)
	pktdata->prev_time = ts;
    if(pktdata->prev_time!=ts &&pktdata->frame_rate ==0){
	second_ts = ts;
	pktdata->frame_rate = 90000/(ts-pktdata->prev_time);
    }
    if(ts<second_ts && pktdata->frame_rate && (ts>pktdata->prev_time)){
        second_ts = ts;
        pktdata->frame_rate = 90000/(ts-pktdata->prev_time);
    }
#else

    if(pktdata->prev_time != ts)
	{
	    pktdata->num_frames++;

	}
    pktdata->prev_time = ts;
    //nkn_vfs_fwrite(ts,1,sizeof(uint32_t),fout_data);
    //nkn_vfs_fwrite(pktdata5->prev_time,1,sizeof(uint32_t),fout_data);
#endif

    pld = buf+12;
    uword = pld[pos]&0x1F;
    if(uword == 0)
	return VPE_ERROR;
    if((uword <=MAX_SINGLE_NALU) && (uword >=MIN_SINGLE_NALU)){
	/*single NALU*/
	word32 = NALU_DELIMIT;
	word32 = htonl(word32); 
	nkn_vfs_fwrite(&word32,1,sizeof(uint32_t),fout);
        nkn_vfs_fwrite(pld,pkt_size-12,sizeof(uint8_t),fout);
    }
   else if(uword ==STAPA){
        /* STAP-A */
       uint32_t wsize;
       uint16_t NALsize;
      
       pos++;
       wsize = pkt_size-12-1;
       while(wsize){
	   NALsize = _read16(pld,pos);
	  
	   pos+=2;
	  
	   word32 = NALU_DELIMIT;
	   word32 = htonl(word32);
	   nkn_vfs_fwrite(&word32,1,sizeof(uint32_t),fout);
	   nkn_vfs_fwrite(&pld[pos],NALsize,sizeof(uint8_t),fout);
	   pos+=NALsize;
	  
	   wsize= wsize - NALsize - 2;
       }
      

    }   
   else if(uword == FUA){
       /*Fragmented unit*/
       uint8_t S,nal_h=0;
       uint32_t wsize;
       nal_h = pld[pos]>>5;
       nal_h<<=5;
       pos++;
       nal_h|=(pld[pos]&0x1F);
       uword = pld[pos++];
       S = uword >> 7;
       if(S){
	   word32 = NALU_DELIMIT;
	   word32 = htonl(word32);
	   nkn_vfs_fwrite(&word32,1,sizeof(uint32_t),fout);
	   nkn_vfs_fwrite(&nal_h,1,1,fout);
       }
       wsize = pkt_size-12-2;
       nkn_vfs_fwrite(&pld[pos],wsize,sizeof(uint8_t),fout);
    }
    return VPE_SUCCESS;
}
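The comment at the top of h264_rtp_dump says each NAL is written with the Annex B start code 0x00000001, and the FU-A branch rebuilds the NAL header octet from the FU indicator (F and NRI bits) and the FU header (type bits). Below is a minimal standalone sketch of just that step; rebuild_fua_nal, the plain fwrite calls, and the toy fragment in main are illustrative only, since the code above writes through nkn_vfs_fwrite instead.

#include <stdint.h>
#include <stdio.h>

static void rebuild_fua_nal(const uint8_t *pld, uint32_t pld_size, FILE *out)
{
    uint8_t fu_indicator = pld[0];   /* F | NRI | type = 28 (FU-A)    */
    uint8_t fu_header    = pld[1];   /* S | E | R | original NAL type */
    uint8_t start_bit    = fu_header >> 7;
    uint8_t nal_octet    = (uint8_t)((fu_indicator & 0xE0) | (fu_header & 0x1F));
    static const uint8_t annexb[4] = { 0x00, 0x00, 0x00, 0x01 };

    if (start_bit) {
        fwrite(annexb, 1, sizeof(annexb), out); /* start code once per NAL  */
        fwrite(&nal_octet, 1, 1, out);          /* reconstructed NAL header */
    }
    fwrite(pld + 2, 1, pld_size - 2, out);      /* payload fragment as-is   */
}

int main(void)
{
    /* toy start fragment: indicator 0x7C (NRI=3, type 28), header 0x85
       (S=1, original type 5 = IDR slice), followed by 3 payload bytes */
    uint8_t frag[] = { 0x7C, 0x85, 0xAA, 0xBB, 0xCC };
    rebuild_fua_nal(frag, sizeof(frag), stdout);
    return 0;
}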
//#define VPE_DUMP_FILE "/nkn/vpe/dump_file"
int32_t rtp_format_convert(rtp_formatizer_t *rtp){
    uint8_t *data, *data_init;
    int32_t datalen = 0,i,j;
    uint32_t cid,rtpsize,pos = 0,count = 0;
    int32_t seek_box_size = 0;
    uint8_t *seek_data;
    int32_t temp=0;
    FILE *fout_data;

    if(!rtp)//||rtp->fin==NULL))
	return VPE_ERROR;

    /*Malloc for audio and video streams*/
    if(allocate_rtp_streams(rtp)!=VPE_SUCCESS)
	return VPE_ERROR;

    /*Write the SPS PPS in the file*/
    // fout_data = fopen(VPE_DUMP_FILE, "wb");
    //if(fout_data == NULL)
    //printf("Error in opening dump file \n");

    for(i=0;i<rtp->num_video_streams;i++){
        if(rtp->video[i].codec_type == VID_H264)
            write_h264_ps(rtp->video[i].sdp,rtp->video[i].fout);
	rtp->video[i].pkt.prev_time = 0;
    }

    data_init = data = (uint8_t*)malloc(sizeof(uint8_t)*rtp->rtp_data_size);
    datalen = rtp->rtp_data_size;

    //fread(data,datalen,1,rtp->fin);
    temp = read(rtp->fin, data, datalen);
#if 0
    if(temp != datalen) {
      assert(0);
    }
    
    {

        uint8_t *zero_data;
        zero_data = (uint8_t*)calloc(1, 100);
        if( !memcmp(data + 16384, zero_data, 100) ) {
	    /* hit error case */
	    lseek(rtp->fin, -datalen, SEEK_CUR);
	    read(rtp->fin, data, datalen);
	}
    }
#endif
    //nkn_vfs_fwrite(data,datalen,sizeof(uint8_t),fout_data);   
    //fclose(fout_data);
    /*Skip the seek boxes*/

    pos = 4;
    seek_box_size = _read32(data,pos);
    seek_box_size = ntohl(seek_box_size);
    pos+=4;
#ifdef _HACK_
    pos+=4; ///Remove MFC
#endif
    data+=pos;
    seek_data = data;
    while(datalen>0){
	/*'$' separated RTP packets*/
	count++;
	pos =0;	
#ifdef NKN_DEBUG_PRINT
	printf("dataleft = %u\n",datalen);
	if(data[pos]!=0x24)
	    printf("$ missing here\n");
#endif
	pos++;
	cid = data[pos];
	pos++;
	rtpsize= _read16(data,pos);
	pos+=2;
#ifndef _HACK_
	rtpsize = ntohs(rtpsize);
	pos+=4; //Skip timestamp
#endif
	for(i = 0;i <rtp->num_audio_streams;i++){
	    if(cid == rtp->audio[i].cid){
		switch(rtp->audio[i].codec_type){
		    case AUD_AAC:
			rtp->audio[i].pkt.data = data+pos;
			rtp->audio[i].pkt.size = rtpsize;
			rtp->audio[i].pkt.fout = rtp->audio[i].fout;
			//			aac_add_adts(&rtp->audio[i]);
			aac_rtp_dump(&rtp->audio[i]);
			break;
		    default:
			break;

		}
		break;
	    }
	}
	for(j=0;j <rtp->num_video_streams;j++){
	    if(cid == rtp->video[j].cid){
                switch(rtp->video[j].codec_type){
                    case VID_H264:
                        rtp->video[j].pkt.data = data+pos;
                        rtp->video[j].pkt.size = rtpsize;
                        rtp->video[j].pkt.fout = rtp->video[j].fout;
                        h264_rtp_dump(&rtp->video[j].pkt);
                        break;
                    default:
                        break;

                }
                break;
	    }
	}
	pos+=rtpsize;
	if((uint32_t)datalen<pos+1){	    
	    int iiii = 0;
	    break;
	}
	data+=pos;
	/*Skip the seek boxes*/
	if((int32_t)(data-seek_data)>= seek_box_size){
	    int32_t pos1 ;
	    pos1 = 4;
	    seek_box_size = _read32(data,pos1);
	    seek_box_size = ntohl(seek_box_size);
	    pos1+=4;	    
	    data+=pos1;
	    seek_data = data;
	    pos+=pos1;
	}
	datalen-=pos;
    }

#ifdef NKN_DEBUG_PRINT
    printf("Deleting All temp Files\n");
#endif
    /*Close all the raw files*/
    for(j=0;j <rtp->num_video_streams;j++){
	if( rtp->video[j].fout!=NULL)
	    fclose(rtp->video[j].fout);
    }
    for(j=0;j <rtp->num_audio_streams;j++){
        if( rtp->audio[j].fout!=NULL)
            fclose(rtp->audio[j].fout);
    }

    if(data_init) {
	free(data_init);
    }
    return VPE_SUCCESS;
}
Example #12
static void
_load_frames (struct ov_animation *anim,
              unsigned char *data,
              struct _iqm_header *h)
{
    int i;
    int k;
    int n;
    struct chan *chans = malloc (h->num_joints * sizeof(struct chan));
    unsigned char *p = data + h->ofs_frames;

    anim->num_frames = h->num_frames;
    anim->poses = malloc(h->num_frames * sizeof (struct iqm_pose *));
    int ofs_poses = h->ofs_poses;
    for (k=0; k<h->num_joints; k++) {
        chans[k].mask = _read32(data + ofs_poses + 4);
        for (n=0; n<10; n++) {
            chans[k].offset[n] = _readfloat(data + ofs_poses + 8 + n * 4);
            chans[k].scale[n] = _readfloat(data + ofs_poses + 8 + 10 * 4 + n * 4);
        }
        ofs_poses += 22*4;
    }

    for (i=0; i<h->num_frames; i++) {
        anim->poses[i] = malloc (h->num_joints * sizeof(struct iqm_pose));
        for (k=0; k<h->num_joints; k++) {
            for (n=0; n<3; n++) {
                anim->poses[i][k].translate[n] = chans[k].offset[n];
                anim->poses[i][k].rotate[n] = chans[k].offset[3+n];
                anim->poses[i][k].scale[n]     = chans[k].offset[7+n];
            }
            anim->poses[i][k].rotate[3] = chans[k].offset[6];

            if (chans[k].mask & 0x01) {
                anim->poses[i][k].translate[0] += _read16(p) * chans[k].scale[0]; p += 2;
            }
            if (chans[k].mask & 0x02) {
                anim->poses[i][k].translate[1] += _read16(p) * chans[k].scale[1]; p += 2;
            }
            if (chans[k].mask & 0x04) {
                anim->poses[i][k].translate[2] += _read16(p) * chans[k].scale[2]; p += 2;
            }
            if (chans[k].mask & 0x08) {
                anim->poses[i][k].rotate[0] += _read16(p) * chans[k].scale[3]; p += 2;
            }
            if (chans[k].mask & 0x10) {
                anim->poses[i][k].rotate[1] += _read16(p) * chans[k].scale[4]; p += 2;
            }
            if (chans[k].mask & 0x20) {
                anim->poses[i][k].rotate[2] += _read16(p) * chans[k].scale[5]; p += 2;
            }
            if (chans[k].mask & 0x40) {
                anim->poses[i][k].rotate[3] += _read16(p) * chans[k].scale[6]; p += 2;
            }
            if (chans[k].mask & 0x80) {
                anim->poses[i][k].scale[0] += _read16(p) * chans[k].scale[7]; p += 2;
            }
            if (chans[k].mask & 0x100) {
                anim->poses[i][k].scale[1] += _read16(p) * chans[k].scale[8]; p += 2;
            }
            if (chans[k].mask & 0x200) {
                anim->poses[i][k].scale[2] += _read16(p) * chans[k].scale[9]; p += 2;
            }
        }
    }
    free (chans);
}