/* Write an AMF boolean: the BOOL type marker followed by the flag
   normalized to exactly 0 or 1. */
static void put_amf_bool( ByteIOContext *pb, int b )
{
    LogStr("Init");
    put_byte(pb, AMF_DATA_TYPE_BOOL);
    put_byte(pb, b ? 1 : 0); /* collapse any non-zero value to 1 */
    LogStr("Exit");
}
/*
 * Fetch the pts/dts for the frame that starts at s->next_frame_offset + off
 * from the ring of stored packet descriptors, and compute the byte offset
 * of the frame inside its source packet.
 *
 * @param s      parser context holding the descriptor ring
 * @param off    byte offset added to the next frame position before matching
 * @param remove when non-zero, invalidate a matched descriptor so it is not
 *               reused for a later frame
 */
void ff_fetch_timestamp( AVCodecParserContext *s, int off, int remove )
{
    LogStr("Init");
    int i;
    /* default: no timestamp found */
    s->dts = s->pts = AV_NOPTS_VALUE;
    s->offset = 0;
    for (i = 0; i < AV_PARSER_PTS_NB; i++)
    {
        /* match a descriptor whose packet covers the frame start and that
           was not already consumed by an earlier frame */
        if (s->next_frame_offset + off >= s->cur_frame_offset[i]
            && (s-> frame_offset < s->cur_frame_offset[i] || !s->frame_offset)
            //check is disabled because mpeg-ts doesn't send complete PES packets
            && /*s->next_frame_offset + off <*/s->cur_frame_end[i])
        {
            s->dts = s->cur_frame_dts[i];
            s->pts = s->cur_frame_pts[i];
            /* byte offset of this frame within the matched packet */
            s->offset = s->next_frame_offset - s->cur_frame_offset[i];
            if (remove)
            {
                /* mark descriptor as consumed */
                s->cur_frame_offset[i] = INT64_MAX;
            }
        }
    }
    LogStr("Exit");
}
/* Write an AMF number: type marker, then the IEEE-754 double encoded
   as a big-endian 64-bit integer. */
static void put_amf_double( ByteIOContext *pb, double d )
{
    LogStr("Init");
    const uint64_t encoded = av_dbl2int(d);
    put_byte(pb, AMF_DATA_TYPE_NUMBER);
    put_be64(pb, encoded);
    LogStr("Exit");
}
/**
 * Feed one packet of input into the parser and retrieve at most one
 * complete output frame.
 *
 * @param buf input
 * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output)
 * @param pts input presentation timestamp
 * @param dts input decoding timestamp
 * @param poutbuf will contain a pointer to the first byte of the output frame
 * @param poutbuf_size will contain the length of the output frame
 * @return the number of bytes of the input bitstream used
 *
 * Example:
 * @code
 *   while(in_len){
 *       len = av_parser_parse(myparser, AVCodecContext, &data, &size,
 *                             in_data, in_len,
 *                             pts, dts);
 *       in_data += len;
 *       in_len  -= len;
 *
 *       if(size)
 *          decode_frame(data, size);
 *   }
 * @endcode
 */
int av_parser_parse( AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts )
{
    LogStr("Init");
    int index, i;
    uint8_t dummy_buf[FF_INPUT_BUFFER_PADDING_SIZE];
    if (buf_size == 0)
    {
        /* padding is always necessary even if EOF, so we add it here */
        memset(dummy_buf, 0, sizeof(dummy_buf));
        buf = dummy_buf;
    }
    else
    {
        /* add a new packet descriptor so ff_fetch_timestamp() can later map
           frame positions back to this packet's pts/dts */
        if (pts != AV_NOPTS_VALUE || dts != AV_NOPTS_VALUE)
        {
            /* ring index; AV_PARSER_PTS_NB must be a power of two */
            i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1);
            s->cur_frame_start_index = i;
            s->cur_frame_offset[i] = s->cur_offset;
            s->cur_frame_end[i] = s->cur_offset + buf_size;
            s->cur_frame_pts[i] = pts;
            s->cur_frame_dts[i] = dts;
        }
    }
    if (s->fetch_timestamp)
    {
        /* resolve the timestamp for the frame starting at next_frame_offset */
        s->fetch_timestamp = 0;
        s->last_pts = s->pts;
        s->last_dts = s->dts;
        ff_fetch_timestamp(s, 0, 0);
    }
    /* WARNING: the returned index can be negative */
    index = s->parser->parser_parse(s, avctx, (const uint8_t **) poutbuf, poutbuf_size, buf, buf_size);
    //av_log(NULL, AV_LOG_DEBUG, "parser: in:%"PRId64", %"PRId64", out:%"PRId64", %"PRId64", in:%d out:%d id:%d\n", pts, dts, s->last_pts, s->last_dts, buf_size, *poutbuf_size, avctx->codec_id);
    /* update the file pointer */
    if (*poutbuf_size)
    {
        /* fill the data for the current frame */
        s->frame_offset = s->next_frame_offset;
        /* offset of the next frame */
        s->next_frame_offset = s->cur_offset + index;
        /* a frame was emitted: fetch timestamps on the next call */
        s->fetch_timestamp = 1;
    }
    /* negative index means the parser consumed nothing this round */
    if (index < 0)
    {
        index = 0;
    }
    s->cur_offset += index;
    LogStr("Exit");
    return index;
}
/* Write an AMF string: 16-bit big-endian length prefix followed by the
   raw bytes (no NUL terminator is written). */
static void put_amf_string( ByteIOContext *pb, const char *str )
{
    LogStr("Init");
    const size_t length = strlen(str);
    put_be16(pb, length);
    put_buffer(pb, str, length);
    LogStr("Exit");
}
/* Release the frame-assembly buffer owned by a ParseContext. */
void ff_parse_close( AVCodecParserContext *s )
{
    LogStr("Init");
    ParseContext *ctx = s->priv_data;
    av_free(ctx->buffer);
    LogStr("Exit");
}
/* Release the buffer and the embedded codec context of a ParseContext1. */
void ff_parse1_close( AVCodecParserContext *s )
{
    LogStr("Init");
    ParseContext1 *ctx = s->priv_data;
    av_free(ctx->pc.buffer);
    av_free(ctx->enc);
    LogStr("Exit");
}
/* Destroy a parser context: run the parser-specific close hook (if any),
   then free the private data and the context itself. NULL is a no-op. */
void av_parser_close( AVCodecParserContext *s )
{
    LogStr("Init");
    if (!s)
    {
        LogStr("Exit");
        return;
    }
    if (s->parser->parser_close)
        s->parser->parser_close(s);
    av_free(s->priv_data);
    av_free(s);
    LogStr("Exit");
}
void combatManager::CombatMessage(char* msg, int doUpdate, int keepPrevMessage, int a5) { // It already does this logging if gbNoShowCombat is true if (!gbNoShowCombat) { LogStr(msg); } this->CombatMessage_orig(msg, doUpdate, keepPrevMessage, a5); }
/*
 * Read one packet from the stream and decode/resample it into m_audioBuf.
 * @return XTrue when a complete frame was decoded and converted; XFalse on
 *         decode failure, an incomplete frame, or end of file (in which
 *         case the stream is rewound to the beginning).
 */
XBool XAudioStream::getAFrame()
{
    if(!m_isLoaded) return XFalse;
    av_free_packet(&m_dataPacket);  //release the packet obtained on the previous call
    av_init_packet(&m_dataPacket);
    if(av_read_frame(m_pFormatCtx,&m_dataPacket) == 0)
    {   //decode the audio data
        m_pFrame = av_frame_alloc();
        int isFinished;
        if(avcodec_decode_audio4(m_pAudioCodecCtx,m_pFrame,&isFinished,&m_dataPacket) < 0)
        {
            //BUGFIX: the freshly allocated frame was leaked on this path
            av_frame_free(&m_pFrame);
            return XFalse;
        }
        if(isFinished)
        {
            uint8_t *out[] = {m_audioBuf};
            //capacity of m_audioBuf expressed in samples per channel
            int outSize = ((AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2) / m_frameDataSum;
            m_dataLen = swr_convert(m_pSwrContext,out,outSize,(const uint8_t **)m_pFrame->extended_data,m_pFrame->nb_samples);
            m_dataLen = m_dataLen * m_frameDataSum; //convert samples to bytes
            av_frame_free(&m_pFrame);
            return XTrue;   //decoded successfully
        }
        av_frame_free(&m_pFrame);
    }else
    {
        gotoFrame(0.0f);    //rewind to the start
        LogStr("File end!");
        return XFalse;
    }
    return XFalse;
}
/*
 * Iterate over registered parsers.
 * @param p current parser, or NULL to start from the head of the list
 * @return the parser following p, or the first registered parser when
 *         p is NULL
 *
 * Note: the original version carried an unreachable trailing
 * LogStr("Exit") after both return branches; it has been removed.
 */
AVCodecParser* av_parser_next( AVCodecParser *p )
{
    LogStr("Init");
    AVCodecParser *next = p ? p->next : av_first_parser;
    LogStr("Exit");
    return next;
}
bool turnOnSystem(const void *MAC) { if (MAC == NULL) return false; unsigned char data[128]; int offset = 6; memset(data, 0xff, 6); for (int i = 0; i < 16; ++i) { memcpy(data + offset, MAC, 6); offset += 6; } //启动WSA WSADATA WSAData; if (WSAStartup(MAKEWORD(2, 0), &WSAData) != 0) { LogNull("WSAStartup failed: %d\n", GetLastError()); return false; } //创建socket SOCKET sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock == INVALID_SOCKET) { LogNull("Socket create error: %d\n", GetLastError()); return false; } //设置为广播发送 BOOL bOptVal = TRUE; int iOptLen = sizeof(BOOL); if (setsockopt(sock, SOL_SOCKET, SO_BROADCAST, (char*)&bOptVal, iOptLen) == SOCKET_ERROR) { LogNull("setsockopt error: %d\n", WSAGetLastError()); closesocket(sock); WSACleanup(); return false; } sockaddr_in to; to.sin_family = AF_INET; to.sin_port = htons(0); #ifdef WITH_LOCAL_BOARDCAST_IP to.sin_addr.s_addr = inet_addr(BOARDCASR_IP); #else to.sin_addr.s_addr = htonl(INADDR_BROADCAST); #endif //发送Magic Packet if (sendto(sock, (const char *)data, offset, 0, (const struct sockaddr *)&to, sizeof(to)) == SOCKET_ERROR) LogNull("Magic packet send error: %d", WSAGetLastError()); else LogStr("Magic packet send!"); closesocket(sock); WSACleanup(); return true; }
/*
 * Allocate and initialize a parser context for the given codec id.
 * @return the new context, or NULL when no parser matches the codec id
 *         or allocation/initialization fails.
 */
AVCodecParserContext *av_parser_init( int codec_id )
{
    LogStr("Init");
    AVCodecParserContext *s;
    AVCodecParser *parser;
    int ret;
    if (codec_id == CODEC_ID_NONE)
    {
        LogStr("Exit");
        return NULL;
    }
    /* scan the registered parsers; each advertises up to 5 codec ids */
    for (parser = av_first_parser; parser != NULL; parser = parser->next)
    {
        int matched = 0;
        for (int k = 0; k < 5; k++)
        {
            if (parser->codec_ids[k] == codec_id)
            {
                matched = 1;
                break;
            }
        }
        if (matched)
            goto found;
    }
    LogStr("Exit");
    return NULL;
found:
    s = av_mallocz(sizeof(AVCodecParserContext));
    if (!s)
    {
        LogStr("Exit");
        return NULL;
    }
    s->parser = parser;
    s->priv_data = av_mallocz(parser->priv_data_size);
    if (!s->priv_data)
    {
        av_free(s);
        LogStr("Exit");
        return NULL;
    }
    if (parser->parser_init)
    {
        ret = parser->parser_init(s);
        if (ret != 0)
        {
            /* undo both allocations on init failure */
            av_free(s->priv_data);
            av_free(s);
            LogStr("Exit");
            return NULL;
        }
    }
    /* fetch timestamps on the first parse call */
    s->fetch_timestamp = 1;
    s->pict_type = FF_I_TYPE;
    LogStr("Exit");
    return s;
}
/**
 * Initialize LZW encoder state and its bit writer.
 * @param s LZW state
 * @param outbuf Output buffer
 * @param outsize Size of output buffer
 * @param maxbits Maximum length of code in bits (valid range 9..12)
 */
void ff_lzw_encode_init(LZWEncodeState * s, uint8_t * outbuf, int outsize, int maxbits)
{
    LogStr("Init");
    s->clear_code = 256;
    s->end_code = 257;
    s->maxbits = maxbits;
    init_put_bits(&s->pb, outbuf, outsize);
    s->bufsize = outsize;
    /* BUGFIX: the upper-bound check compared s->maxbits against itself
       (always true); LZW code words are at most 12 bits wide. */
    assert(9 <= s->maxbits && s->maxbits <= 12);
    s->maxcode = 1 << s->maxbits;
    s->output_bytes = 0;
    s->last_code = LZW_PREFIX_EMPTY;
    s->bits = 9; /* codes start at 9 bits and grow as the table fills */
    LogStr("Exit");
}
/*
 * Locate the end of the MPEG-4 header data: returns the offset of the
 * first GOP (0x000001B3) or VOP (0x000001B6) start code, or 0 when no
 * such start code is found in the buffer.
 */
int ff_mpeg4video_split( AVCodecContext *avctx, const uint8_t *buf, int buf_size )
{
    LogStr("Init");
    uint32_t state = -1;
    for (int pos = 0; pos < buf_size; pos++)
    {
        /* rolling 32-bit window over the byte stream */
        state = (state << 8) | buf[pos];
        if (state == 0x1B3 || state == 0x1B6)
        {
            LogStr("Exit");
            return pos - 3; /* step back to the start-code prefix */
        }
    }
    LogStr("Exit");
    return 0;
}
/*
 * Set up the Flash Screen Video encoder: validate dimensions and allocate
 * the block scratch buffer and the whole-frame encode buffer.
 * @return 0 on success, -1 on oversized input or allocation failure
 */
static av_cold int flashsv_encode_init( AVCodecContext *avctx )
{
    LogStr("Init");
    FlashSVContext *s = avctx->priv_data;
    s->avctx = avctx;
    /* the FLV bitstream caps dimensions at 12 bits each */
    if (avctx->width > 4095 || avctx->height > 4095)
    {
        av_log(avctx, AV_LOG_ERROR, "Input dimensions too large, input must be max 4096x4096 !\n");
        LogStr("Exit");
        return -1;
    }
    if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0)
    {
        LogStr("Exit");
        return -1;
    }
    // Needed if zlib unused or init aborted before deflateInit
    memset(&s->zstream, 0, sizeof(z_stream));
    s->last_key_frame = 0;
    s->image_width = avctx->width;
    s->image_height = avctx->height;
    /* 256x256 BGR scratch block plus a full-frame encode buffer */
    s->tmpblock = av_mallocz(3 * 256 * 256);
    s->encbuffer = av_mallocz(s->image_width * s->image_height * 3);
    if (s->tmpblock == NULL || s->encbuffer == NULL)
    {
        av_log(avctx, AV_LOG_ERROR, "Memory allocation failed.\n");
        LogStr("Exit");
        return -1;
    }
    LogStr("Exit");
    return 0;
}
bool setRegRun(bool stat) { HKEY hKey; //打开指定子键 DWORD dwDisposition = REG_OPENED_EXISTING_KEY; // 如果不存在不创建 if (RegCreateKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run", 0, NULL, REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, NULL, &hKey, &dwDisposition) != ERROR_SUCCESS) return false; //创建一个新的键值,设置键值数据为文件名 if (stat) { if (createLink()) { std::string filename = XFile::getCurrentExeFileFullPath(); if (filename.size() >= 3) { memcpy(&filename[filename.size() - 3], &"lnk", 3); //filename[filename.size() - 3] = 'l'; //filename[filename.size() - 2] = 'n'; //filename[filename.size() - 1] = 'k'; } else return false; //读取注册表中该键的键值,如果已经相同则不重复设置。 if (RegSetValueEx(hKey, XEG.m_windowData.windowTitle.c_str(), 0, REG_SZ, (BYTE*)filename.c_str(), filename.length()) == ERROR_SUCCESS) LogStr("自动启动设置成功!"); } else { RegCloseKey(hKey); return false; } } else { if (RegDeleteValue(hKey, XEG.m_windowData.windowTitle.c_str()) == ERROR_SUCCESS) LogStr("取消自动启动!"); } // 关闭子键句柄 RegCloseKey(hKey); return true; }
/** * * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed * @deprecated use AVBitstreamFilter */ int av_parser_change( AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe ) { LogStr("Init"); if (s && s->parser->split) { if ((avctx->flags & CODEC_FLAG_GLOBAL_HEADER) || (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)) { int i = s->parser->split(avctx, buf, buf_size); buf += i; buf_size -= i; } } /* cast to avoid warning about discarding qualifiers */ *poutbuf = (uint8_t *) buf; *poutbuf_size = buf_size; if (avctx->extradata) { if ((keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)) /*||(s->pict_type != FF_I_TYPE && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_NOKEY))*/ /*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/) { int size = buf_size + avctx->extradata_size; *poutbuf_size = size; *poutbuf = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); memcpy(*poutbuf, avctx->extradata, avctx->extradata_size); memcpy((*poutbuf) + avctx->extradata_size, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE); LogStr("Exit"); return 1; } } LogStr("Exit"); return 0; }
/*
 * Finalize the FLV file: back-patch the "duration" and "filesize"
 * metadata fields that the header wrote as placeholders, then restore
 * the file position to the end.
 */
static int flv_write_trailer( AVFormatContext *s )
{
    LogStr("Init");
    ByteIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    const int64_t file_size = url_ftell(pb);
    /* duration is tracked in milliseconds; AMF wants seconds */
    url_fseek(pb, flv->duration_offset, SEEK_SET);
    put_amf_double(pb, flv->duration / (double) 1000);
    url_fseek(pb, flv->filesize_offset, SEEK_SET);
    put_amf_double(pb, file_size);
    url_fseek(pb, file_size, SEEK_SET);
    LogStr("Exit");
    return 0;
}
void XAudioStream::setSpeed(float speed) { if(m_speed == speed || speed <= 0.0f) return; m_speed = speed; swr_free(&m_pSwrContext); m_pSwrContext = swr_alloc(); if(m_pSwrContext == NULL) return; if(m_pAudioCodecCtx->channel_layout == 0) { swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed, av_get_default_channel_layout(m_pAudioCodecCtx->channels),m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL); }else { swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed, m_pAudioCodecCtx->channel_layout,m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL); } if(swr_init(m_pSwrContext) < 0) { LogStr("swr_init() fail"); return; } }
/*
 * Write the FLV file header, the onMetaData script tag (with placeholder
 * duration/filesize values that flv_write_trailer() patches later), and,
 * for AAC/H.264 streams, their codec configuration (sequence header) tags.
 * @return 0 on success, -1 when a stream's codec cannot be stored in FLV
 */
static int flv_write_header( AVFormatContext *s )
{
    LogStr("Init");
    ByteIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL;
    int i;
    double framerate = 0.0;
    int metadata_size_pos, data_size;
    /* validate each stream and remember the audio/video codec contexts */
    for (i = 0; i < s->nb_streams; i++)
    {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == CODEC_TYPE_VIDEO)
        {
            if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num)
            {
                framerate = av_q2d(s->streams[i]->r_frame_rate);
            }
            else
            {
                /* fall back to the inverse of the codec time base */
                framerate = 1 / av_q2d(s->streams[i]->codec->time_base);
            }
            video_enc = enc;
            if (enc->codec_tag == 0)
            {
                av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
                LogStr("Exit");
                return -1;
            }
        }
        else
        {
            audio_enc = enc;
            if (get_audio_flags(enc) < 0)
            {
                LogStr("Exit");
                return -1;
            }
        }
        av_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */
    }
    /* FLV file header: signature, version, A/V presence flags, header size,
       and the first "previous tag size" (always 0) */
    put_tag(pb, "FLV");
    put_byte(pb, 1);
    put_byte(pb, FLV_HEADER_FLAG_HASAUDIO * !!audio_enc + FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    put_be32(pb, 9);
    put_be32(pb, 0);
    for (i = 0; i < s->nb_streams; i++)
    {
        if (s->streams[i]->codec->codec_tag == 5)
        {
            put_byte(pb, 8); // message type
            put_be24(pb, 0); // include flags
            put_be24(pb, 0); // time stamp
            put_be32(pb, 0); // reserved
            put_be32(pb, 11); // size
            flv->reserved = 5;
        }
    }
    /* write meta_tag */
    put_byte(pb, 18); // tag type META
    metadata_size_pos = url_ftell(pb); /* patched after the tag body is known */
    put_be24(pb, 0); // size of data part (sum of all parts below)
    put_be24(pb, 0); // time stamp
    put_be32(pb, 0); // reserved
    /* now data of data_size size */
    /* first event name as a string */
    put_byte(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes
    /* mixed array (hash) with size and string/type/data tuples */
    put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
    put_be32(pb, 5 * !!video_enc + 4 * !!audio_enc + 2); // +2 for duration and file size
    put_amf_string(pb, "duration");
    flv->duration_offset = url_ftell(pb); /* back-patched by the trailer */
    put_amf_double(pb, 0); // delayed write
    if (video_enc)
    {
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);
        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);
        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, s->bit_rate / 1024.0);
        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);
        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }
    if (audio_enc)
    {
        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);
        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == CODEC_ID_PCM_S8 ? 8 : 16);
        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);
        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }
    put_amf_string(pb, "filesize");
    flv->filesize_offset = url_ftell(pb); /* back-patched by the trailer */
    put_amf_double(pb, 0); // delayed write
    /* empty string + end marker terminates the mixed array */
    put_amf_string(pb, "");
    put_byte(pb, AMF_END_OF_OBJECT);
    /* write total size of tag */
    data_size = url_ftell(pb) - metadata_size_pos - 10;
    url_fseek(pb, metadata_size_pos, SEEK_SET);
    put_be24(pb, data_size);
    url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
    put_be32(pb, data_size + 11); /* previous tag size trailer */
    /* emit codec configuration tags for AAC / H.264 streams */
    for (i = 0; i < s->nb_streams; i++)
    {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == CODEC_ID_AAC || enc->codec_id == CODEC_ID_H264)
        {
            offset_t pos;
            put_byte(pb, enc->codec_type == CODEC_TYPE_VIDEO ? FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            put_be24(pb, 0); // size patched later
            put_be24(pb, 0); // ts
            put_byte(pb, 0); // ts ext
            put_be24(pb, 0); // streamid
            pos = url_ftell(pb);
            if (enc->codec_id == CODEC_ID_AAC)
            {
                put_byte(pb, get_audio_flags(enc));
                put_byte(pb, 0); // AAC sequence header
                put_buffer(pb, enc->extradata, enc->extradata_size);
            }
            else
            {
                put_byte(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                put_byte(pb, 0); // AVC sequence header
                put_be24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            /* patch the tag size now that the body length is known */
            data_size = url_ftell(pb) - pos;
            url_fseek(pb, -data_size - 10, SEEK_CUR);
            put_be24(pb, data_size);
            url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
            put_be32(pb, data_size + 11); // previous tag size
        }
    }
    LogStr("Exit");
    return 0;
}
/*
 * Write one packet as an FLV tag: tag header (type, size, timestamp,
 * stream id), per-codec flags, optional AAC/AVC packet-type prefix,
 * the payload, and the trailing "previous tag size" field.
 * @return 0 on success, -1 when the codec cannot be stored in FLV or
 *         H.264 NAL repacking fails
 */
static int flv_write_packet( AVFormatContext *s, AVPacket *pkt )
{
    LogStr("Init");
    //Fernando:
    //printf("-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
    //printf("************* proc: %s - line: %d\n", __func__, __LINE__);
    //dumpPacket(&pkt);
    //getchar();
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    unsigned ts;
    int size = pkt->size;
    int flags, flags_size;
    //    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);
    /* number of per-tag flag/prefix bytes written before the payload */
    if (enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F || enc->codec_id == CODEC_ID_AAC)
    {
        flags_size = 2;
    }
    else if (enc->codec_id == CODEC_ID_H264)
    {
        flags_size = 5;
    }
    else
    {
        flags_size = 1;
    }
    if (enc->codec_type == CODEC_TYPE_VIDEO)
    {
        put_byte(pb, FLV_TAG_TYPE_VIDEO);
        flags = enc->codec_tag;
        if (flags == 0)
        {
            av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n", enc->codec_id);
            LogStr("Exit");
            return -1;
        }
        flags |= pkt->flags & PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
    }
    else
    {
        assert(enc->codec_type == CODEC_TYPE_AUDIO);
        flags = get_audio_flags(enc);
        assert(size);
        put_byte(pb, FLV_TAG_TYPE_AUDIO);
    }
    if (enc->codec_id == CODEC_ID_H264 &&
        /* check if extradata looks like mp4 formated */
        enc->extradata_size > 0 && *(uint8_t*) enc->extradata != 1)
    {
        /* convert Annex B start codes to length-prefixed NAL units */
        if (ff_avc_parse_nal_units(pkt->data, &pkt->data, &pkt->size) < 0)
        {
            LogStr("Exit");
            return -1;
        }
        assert(pkt->size);
        size = pkt->size;
        /* cast needed to get negative value */
        if (!flv->delay && pkt->dts < 0)
        {
            flv->delay = -pkt->dts;
        }
    }
    ts = pkt->dts + flv->delay; // add delay to force positive dts
    put_be24(pb, size + flags_size);
    put_be24(pb, ts);
    put_byte(pb, (ts >> 24) & 0x7F); // timestamps are 32bits _signed_
    put_be24(pb, flv->reserved);
    put_byte(pb, flags);
    if (enc->codec_id == CODEC_ID_VP6)
    {
        put_byte(pb, 0);
    }
    if (enc->codec_id == CODEC_ID_VP6F)
    {
        /* horizontal/vertical adjustment byte from extradata */
        put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0);
    }
    else if (enc->codec_id == CODEC_ID_AAC)
    {
        put_byte(pb, 1); // AAC raw
    }
    else if (enc->codec_id == CODEC_ID_H264)
    {
        put_byte(pb, 1); // AVC NALU
        put_be24(pb, pkt->pts - pkt->dts); /* composition time offset */
    }
    put_buffer(pb, pkt->data, size);
    put_be32(pb, size + flags_size + 11); // previous tag size
    /* track the running duration for the trailer's back-patch */
    flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);
    put_flush_packet(pb);
    LogStr("Exit");
    return 0;
}
/**
 * combines the (truncated) bitstream to a complete frame
 * @param pc parser buffer state (buffer, index, overread bookkeeping)
 * @param next offset of the next frame relative to the current input, or
 *             END_NOT_FOUND when no frame boundary was detected yet
 * @param buf in: current input chunk; out: points at the assembled frame
 *            when one is complete
 * @param buf_size in: input length; out: assembled frame length
 * @returns -1 if no complete frame could be created, AVERROR(ENOMEM) if there was a memory allocation error
 */
int ff_combine_frame( ParseContext *pc, int next, const uint8_t **buf, int *buf_size )
{
    LogStr("Init");
#if 0
    if(pc->overread)
    {
        printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
        printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
    }
#endif
    /* Copy overread bytes from last frame into buffer. */
    for (; pc->overread > 0; pc->overread--)
    {
        pc->buffer[pc->index++] = pc->buffer[pc->overread_index++];
    }
    /* flush remaining if EOF */
    if (!*buf_size && next == END_NOT_FOUND)
    {
        next = 0;
    }
    pc->last_index = pc->index;
    /* copy into buffer end return: no frame boundary yet, so stash the
       whole input and report "incomplete" */
    if (next == END_NOT_FOUND)
    {
        void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!new_buffer)
        {
            LogStr("Exit");
            return AVERROR(ENOMEM);
        }
        pc->buffer = new_buffer;
        memcpy(&pc->buffer[pc->index], *buf, *buf_size);
        pc->index += *buf_size;
        LogStr("Exit");
        return -1;
    }
    /* next may be negative: the boundary lies inside previously buffered
       data, so the frame ends pc->index + next bytes in */
    *buf_size = pc->overread_index = pc->index + next;
    /* append to buffer: merge buffered bytes with the head of the input
       and point the caller at the internal buffer */
    if (pc->index)
    {
        void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!new_buffer)
        {
            LogStr("Exit");
            return AVERROR(ENOMEM);
        }
        pc->buffer = new_buffer;
        memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
        pc->index = 0;
        *buf = pc->buffer;
    }
    /* store overread bytes: bytes past the frame end that were already
       consumed; they are replayed at the top of the next call */
    for (; next < 0; next++)
    {
        pc->state = (pc->state << 8) | pc->buffer[pc->last_index + next];
        pc->overread++;
    }
#if 0
    if(pc->overread)
    {
        printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
        printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
    }
#endif
    LogStr("Exit");
    return 0;
}
/*
 * Open an audio file, set up the decoder and the resampler, and allocate
 * the output sample buffer.
 * @param filename path of the media file to open
 * @return XTrue on success, XFalse on any failure (all resources opened by
 *         the failing call are released — previously they leaked)
 */
XBool XAudioStream::load(const char * filename)
{
    if(m_isLoaded || filename == NULL) return XFalse;
    av_register_all();
    m_pFormatCtx = NULL;
    if(avformat_open_input(&m_pFormatCtx,filename,NULL,NULL) != 0)
    {
        LogStr("File open error!");
        return XFalse;
    }
    if(avformat_find_stream_info(m_pFormatCtx,NULL) < 0) //inspect the stream information
    {
        LogStr("can not find stream information!");
        avformat_close_input(&m_pFormatCtx); //BUGFIX: input was leaked on failure
        return XFalse;
    }
    //NOTE(review): assumes the audio data is in stream 0 — confirm for
    //containers that place a video stream first.
    m_pAudioCodecCtx = m_pFormatCtx->streams[0]->codec;
    AVCodec *aCodec = avcodec_find_decoder(m_pAudioCodecCtx->codec_id);
    if(aCodec == NULL)
    {//no matching audio decoder
        LogStr("can not find audio decoder information!");
        avformat_close_input(&m_pFormatCtx);
        return XFalse;
    }
    if(avcodec_open2(m_pAudioCodecCtx,aCodec,NULL) < 0)
    {//the audio decoder could not be opened
        LogStr("can not open audio decoder!");
        avformat_close_input(&m_pFormatCtx);
        return XFalse;
    }
    //bytes per sample frame across all output channels
    m_frameDataSum = XEG.getAudioChannelSum() * av_get_bytes_per_sample(getSampleFormat());
    XMem::XDELETE_ARRAY(m_audioBuf);
    m_audioBuf = XMem::createArrayMem<uint8_t>((AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2);
    if(m_audioBuf == NULL)
    {
        avformat_close_input(&m_pFormatCtx);
        return XFalse;
    }
    //m_pFrame = av_frame_alloc();
    //if(m_pFrame == NULL)
    //{
    //	printf("malloc Frame failed!\n");
    //	return XFalse;
    //}
    m_allFrameSum = m_pFormatCtx->streams[0]->duration;
    av_init_packet(&m_dataPacket);
    m_pSwrContext = swr_alloc();
    if(m_pSwrContext == NULL)
    {
        avformat_close_input(&m_pFormatCtx);
        return XFalse;
    }
    //fall back to the default layout when the codec does not report one
    if(m_pAudioCodecCtx->channel_layout == 0)
    {
        swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed,
            av_get_default_channel_layout(m_pAudioCodecCtx->channels),m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL);
    }else
    {
        swr_alloc_set_opts(m_pSwrContext,av_get_default_channel_layout(XEG.getAudioChannelSum()),getSampleFormat(),XEG.getAudioSampleRate() * m_speed,
            m_pAudioCodecCtx->channel_layout,m_pAudioCodecCtx->sample_fmt,m_pAudioCodecCtx->sample_rate,0,NULL);
    }
    if(swr_init(m_pSwrContext) < 0)
    {
        LogStr("swr_init() fail");
        swr_free(&m_pSwrContext);            //BUGFIX: release the resampler
        avformat_close_input(&m_pFormatCtx); //BUGFIX: release the input
        return XFalse;
    }
    m_isLoaded = XTrue;
    return XTrue;
}
/*
 * Build the FLV audio-tag flags byte (codec id, sample rate, sample size,
 * channel layout) for the given codec context.
 * @return the flags byte, or -1 when the sample rate or codec cannot be
 *         represented in FLV
 */
static int get_audio_flags( AVCodecContext *enc )
{
    LogStr("Init");
    int flags = (enc->bits_per_sample == 16) ? FLV_SAMPLESSIZE_16BIT : FLV_SAMPLESSIZE_8BIT;
    if (enc->codec_id == CODEC_ID_AAC) // specs force these parameters
    {
        /* BUGFIX: this early return used to skip the LogStr("Exit") that
           every other exit path of this function emits */
        LogStr("Exit");
        return FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ | FLV_SAMPLESSIZE_16BIT | FLV_STEREO;
    }
    else
    {
        switch (enc->sample_rate)
        {
            case 44100:
                flags |= FLV_SAMPLERATE_44100HZ;
                break;
            case 22050:
                flags |= FLV_SAMPLERATE_22050HZ;
                break;
            case 11025:
                flags |= FLV_SAMPLERATE_11025HZ;
                break;
            case 8000: //nellymoser only
            case 5512: //not mp3
                if (enc->codec_id != CODEC_ID_MP3)
                {
                    flags |= FLV_SAMPLERATE_SPECIAL;
                    break;
                }
                /* MP3 at these rates deliberately falls through to the error */
            default:
                av_log(enc, AV_LOG_ERROR, "flv does not support that sample rate, choose from (44100, 22050, 11025).\n");
                LogStr("Exit");
                return -1;
        }
    }
    if (enc->channels > 1)
    {
        flags |= FLV_STEREO;
    }
    switch (enc->codec_id)
    {
        case CODEC_ID_MP3:
            flags |= FLV_CODECID_MP3 | FLV_SAMPLESSIZE_16BIT;
            break;
        case CODEC_ID_PCM_S8:
            flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_8BIT;
            break;
        case CODEC_ID_PCM_S16BE:
            flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_16BIT;
            break;
        case CODEC_ID_PCM_S16LE:
            flags |= FLV_CODECID_PCM_LE | FLV_SAMPLESSIZE_16BIT;
            break;
        case CODEC_ID_ADPCM_SWF:
            flags |= FLV_CODECID_ADPCM | FLV_SAMPLESSIZE_16BIT;
            break;
        case CODEC_ID_NELLYMOSER:
            flags |= FLV_CODECID_NELLYMOSER | FLV_SAMPLESSIZE_16BIT;
            break;
        case 0:
            /* unknown codec id: trust the container-provided tag */
            flags |= enc->codec_tag << 4;
            break;
        default:
            av_log(enc, AV_LOG_ERROR, "codec not compatible with flv\n");
            LogStr("Exit");
            return -1;
    }
    LogStr("Exit");
    return flags;
}
/*
 * Reassemble length-prefixed packets from a raw receive buffer.
 * Each packet is PACKET_HEAD_LEN bytes of header (1 type byte + 4-byte
 * payload length) followed by the payload. Partial headers/payloads are
 * carried over between calls via m_packetHeadData / m_recvPacket /
 * m_recvPacketSize; complete packets are queued on m_recvDataBuff and the
 * function recurses on the remaining bytes.
 * @param buff raw bytes just received
 * @param len  number of valid bytes in buff
 * @return XTrue when the bytes were consumed, XFalse on allocation failure
 */
XBool XNetServer::getDataPacket(unsigned char *buff,int len)
{
    if(len <= 0) return XTrue;
    int offset = 0;
    if(m_recvPacketSize == 0)
    {
        if(len >= PACKET_HEAD_LEN)
        {//a complete header is available
            m_recvPacket = XMem::createMem<XNetData>();
            if(m_recvPacket == NULL) return XFalse;
            m_recvPacket->type = (XNetDataType)buff[0];
            memcpy(&(m_recvPacket->dataLen),buff + 1,sizeof(int));
            m_recvPacket->data = XMem::createArrayMem<unsigned char>(m_recvPacket->dataLen);
            if(m_recvPacket->data == NULL) return XFalse;
            if(len - PACKET_HEAD_LEN >= m_recvPacket->dataLen)
            {//the payload is complete
                m_recvPacket->isEnable = XTrue;
                memcpy(m_recvPacket->data,buff + PACKET_HEAD_LEN,m_recvPacket->dataLen);
                offset = PACKET_HEAD_LEN + m_recvPacket->dataLen;
                m_mutex.Lock();
                m_recvDataBuff.push_back(m_recvPacket);
                //drop the oldest packet when the queue is congested
                if(m_recvDataBuff.size() > MAX_RECV_DATA_BUFF)
                {
                    XNetData *tmp = m_recvDataBuff[0];
                    m_recvDataBuff.pop_front();
                    LogStr("XNetServer接收队列数据发生拥堵,丢弃较老的数据!");
                    XMem::XDELETE_ARRAY(tmp->data);
                    XMem::XDELETE(tmp);
                }
                m_mutex.Unlock();
                m_recvPacketSize = 0;
                //recurse on the remaining bytes
                return getDataPacket(buff + offset,len - offset);
            }else
            {//the payload is incomplete
                m_recvPacket->isEnable = XFalse;   //packet not complete yet
                memcpy(m_recvPacket->data,buff + PACKET_HEAD_LEN,len - PACKET_HEAD_LEN);
                m_recvPacketSize = len;
                return XTrue;
            }
        }else
        {//only part of the header arrived; stash it for the next call
            m_recvPacketSize = len;
            memcpy(m_packetHeadData,buff,len);
            return XTrue;
        }
    }else
    {
        if(m_recvPacketSize >= PACKET_HEAD_LEN)
        {//the header was already received in full; continue the payload
            if(len >= m_recvPacket->dataLen - m_recvPacketSize + PACKET_HEAD_LEN)
            {//this chunk completes the payload
                memcpy(m_recvPacket->data + m_recvPacketSize - PACKET_HEAD_LEN,buff,m_recvPacket->dataLen - m_recvPacketSize + PACKET_HEAD_LEN);
                m_recvPacket->isEnable = XTrue;
                offset = m_recvPacket->dataLen - m_recvPacketSize + PACKET_HEAD_LEN;
                m_mutex.Lock();
                m_recvDataBuff.push_back(m_recvPacket);
                //drop the oldest packet when the queue is congested
                if(m_recvDataBuff.size() > MAX_RECV_DATA_BUFF)
                {
                    XNetData *tmp = m_recvDataBuff[0];
                    m_recvDataBuff.pop_front();
                    m_mutex.Unlock();
                    LogStr("XNetServer接收队列数据发生拥堵,丢弃较老的数据!");
                    XMem::XDELETE_ARRAY(tmp->data);
                    XMem::XDELETE(tmp);
                }else
                    m_mutex.Unlock();
                m_recvPacketSize = 0;
                //recurse on the remaining bytes
                return getDataPacket(buff + offset,len - offset);
            }else
            {//still incomplete; append and wait for more data
                memcpy(m_recvPacket->data + m_recvPacketSize - PACKET_HEAD_LEN,buff,len);
                m_recvPacketSize += len;
                return XTrue;
            }
        }else
        {//the header itself is still incomplete
            if(m_recvPacketSize + len >= PACKET_HEAD_LEN)
            {//this chunk completes the header
                memcpy(m_packetHeadData + m_recvPacketSize,buff,PACKET_HEAD_LEN - m_recvPacketSize);
                //parse the header
                m_recvPacket = XMem::createMem<XNetData>();
                if(m_recvPacket == NULL) return XFalse;
                m_recvPacket->type = (XNetDataType)m_packetHeadData[0];
                memcpy(&(m_recvPacket->dataLen),m_packetHeadData + 1,sizeof(int));
                m_recvPacket->data = XMem::createArrayMem<unsigned char>(m_recvPacket->dataLen);
                if(m_recvPacket->data == NULL) return XFalse;
                //consume the rest of the payload from this chunk
                if(len - (PACKET_HEAD_LEN - m_recvPacketSize) >= m_recvPacket->dataLen)
                {//the payload is complete
                    m_recvPacket->isEnable = XTrue;
                    memcpy(m_recvPacket->data,buff + (PACKET_HEAD_LEN - m_recvPacketSize),m_recvPacket->dataLen);
                    offset = PACKET_HEAD_LEN - m_recvPacketSize + m_recvPacket->dataLen;
                    m_mutex.Lock();
                    //NOTE(review): unlike the other enqueue sites, this path
                    //performs no MAX_RECV_DATA_BUFF congestion check — confirm
                    //whether that is intentional.
                    m_recvDataBuff.push_back(m_recvPacket);
                    m_mutex.Unlock();
                    m_recvPacketSize = 0;
                    //recurse on the remaining bytes
                    return getDataPacket(buff + offset,len - offset);
                }else
                {//the payload is incomplete
                    m_recvPacket->isEnable = XFalse;   //packet not complete yet
                    memcpy(m_recvPacket->data,buff + (PACKET_HEAD_LEN - m_recvPacketSize),len - (PACKET_HEAD_LEN - m_recvPacketSize));
                    m_recvPacketSize += len;
                    return XTrue;
                }
            }else
            {//the header is still incomplete; keep accumulating it
                memcpy(m_packetHeadData + m_recvPacketSize,buff,len);
                m_recvPacketSize += len;
                return XTrue;
            }
        }
    }
}