/*!
 * \brief Parses the config file if any and clears the existing structure
 */
GF_Err gf_cfg_parse_config_file(GF_Config *tmp, const char *filePath, const char *file_name)
{
	IniSection *p;
	IniKey *k;
	FILE *file;
	char *ret;
	char *line;
	u32 line_alloc = MAX_INI_LINE;
	char fileName[GF_MAX_PATH];

	gf_cfg_clear(tmp);

	if (filePath && ((filePath[strlen(filePath)-1] == '/') || (filePath[strlen(filePath)-1] == '\\')) ) {
		strcpy(fileName, filePath);
		strcat(fileName, file_name);
	} else if (filePath) {
		sprintf(fileName, "%s%c%s", filePath, GF_PATH_SEPARATOR, file_name);
	} else {
		strcpy(fileName, file_name);
	}
	tmp->fileName = gf_strdup(fileName);
	tmp->sections = gf_list_new();
	file = gf_f64_open(fileName, "rt");
	if (!file) return GF_IO_ERR;

	/* load the file */
	p = NULL;
	line = gf_malloc(sizeof(char)*line_alloc);
	memset(line, 0, sizeof(char)*line_alloc);
	while (!feof(file)) {
		u32 read;
		ret = fgets(line, line_alloc, file);
		read = strlen(line);
		/* grow the line buffer in MAX_INI_LINE steps until the whole line fits */
		while (read + 1 == line_alloc) {
			line_alloc += MAX_INI_LINE;
			line = gf_realloc(line, sizeof(char)*line_alloc);
			ret = fgets(line+read, MAX_INI_LINE, file);
			read = strlen(line);
		}
		if (!ret) continue;

		/* get rid of the end-of-line characters */
		while (1) {
			u32 len = strlen(line);
			if (!len) break;
			if ((line[len-1] != '\n') && (line[len-1] != '\r')) break;
			line[len-1] = 0;
		}
		if (!strlen(line)) continue;
		if (line[0] == '#') continue;

		/* new section */
		if (line[0] == '[') {
			p = (IniSection *) gf_malloc(sizeof(IniSection));
			p->keys = gf_list_new();
			p->section_name = gf_strdup(line + 1);
			p->section_name[strlen(line) - 2] = 0;
			while (p->section_name[strlen(p->section_name) - 1] == ']' || p->section_name[strlen(p->section_name) - 1] == ' ')
				p->section_name[strlen(p->section_name) - 1] = 0;
			gf_list_add(tmp->sections, p);
		}
		/* key=value entry */
		else if (strlen(line) && (strchr(line, '=') != NULL) ) {
			if (!p) {
				gf_list_del(tmp->sections);
				gf_free(tmp->fileName);
				gf_free(tmp);
				fclose(file);
				gf_free(line);
				return GF_IO_ERR;
			}
			k = (IniKey *) gf_malloc(sizeof(IniKey));
			memset((void *)k, 0, sizeof(IniKey));
			ret = strchr(line, '=');
			if (ret) {
				ret[0] = 0;
				k->name = gf_strdup(line);
				while (k->name[strlen(k->name) - 1] == ' ')
					k->name[strlen(k->name) - 1] = 0;
				ret[0] = '=';
				ret += 1;
				while (ret[0] == ' ') ret++;
				if (ret[0] != 0) {
					k->value = gf_strdup(ret);
					while (k->value[strlen(k->value) - 1] == ' ')
						k->value[strlen(k->value) - 1] = 0;
				} else {
					k->value = gf_strdup("");
				}
			}
			gf_list_add(p->keys, k);
		}
	}
	gf_free(line);
	fclose(file);
	return GF_OK;
}
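/* The IniSection and IniKey structures are declared elsewhere in the config
 * module. The shapes below are only inferred from how the fields are used in
 * gf_cfg_parse_config_file above (an assumption, not the actual GPAC header,
 * which may carry additional fields). */
typedef struct
{
	char *name;          /* key name: text left of '=' with trailing spaces stripped */
	char *value;         /* key value: text right of '=' with surrounding spaces stripped */
} IniKey;

typedef struct
{
	char *section_name;  /* name between '[' and ']' */
	GF_List *keys;       /* list of IniKey entries */
} IniSection;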
int main(int argc, char **argv) { /* The ISO progressive reader */ ISOProgressiveReader reader; /* Error indicator */ GF_Err e; /* input file to be read in the data buffer */ FILE *input; /* number of bytes read from the file at each read operation */ u32 read_bytes; /* number of bytes read from the file (total) */ u64 total_read_bytes; /* size of the input file */ u64 file_size; /* number of bytes required to finish the current ISO Box reading (not used here)*/ u64 missing_bytes; /* Thread used to run the ISO parsing in */ GF_Thread *reading_thread; /* Return value for the program */ int ret = 0; /* Usage */ if (argc != 2) { fprintf(stdout, "Usage: %s filename\n", argv[0]); return 1; } /* Initializing GPAC framework */ /* Enables GPAC memory tracking in debug mode only */ #if defined(DEBUG) || defined(_DEBUG) gf_sys_init(GF_MemTrackerSimple); gf_log_set_tool_level(GF_LOG_ALL, GF_LOG_WARNING); gf_log_set_tool_level(GF_LOG_MEMORY, GF_LOG_INFO); #else gf_sys_init(GF_MemTrackerNone); gf_log_set_tool_level(GF_LOG_ALL, GF_LOG_WARNING); #endif /* This is an input file to read data from. Could be replaced by any other method to retrieve the data (e.g. JavaScript, socket, ...)*/ input = gf_fopen(argv[1], "rb"); if (!input) { fprintf(stdout, "Could not open file %s for reading.\n", argv[1]); gf_sys_close(); return 1; } gf_fseek(input, 0, SEEK_END); file_size = gf_ftell(input); gf_fseek(input, 0, SEEK_SET); /* Initializing the progressive reader */ memset(&reader, 0, sizeof(ISOProgressiveReader)); reading_thread = gf_th_new("ISO reading thread"); reader.mutex = gf_mx_new("ISO Segment"); reader.do_run = GF_TRUE; /* we want to parse the first track */ reader.track_id = 1; /* start the async parsing */ gf_th_run(reading_thread, iso_progressive_read_thread, &reader); /* start the data reading */ reader.data_size = BUFFER_BLOCK_SIZE; reader.data = (u8 *)gf_malloc(reader.data_size); reader.valid_data_size = 0; total_read_bytes = 0; while (1) { /* block the parser until we are done manipulating the data buffer */ gf_mx_p(reader.mutex); if (reader.valid_data_size + BUFFER_BLOCK_SIZE > MAX_BUFFER_SIZE) { /* regulate the reader to limit the max buffer size and let some time to the parser to release buffer data */ fprintf(stdout, "Buffer full (%d/%d)- waiting to read next data \r", reader.valid_data_size, reader.data_size); gf_mx_v(reader.mutex); //gf_sleep(10); } else { /* make sure we have enough space in the buffer to read the next bloc of data */ if (reader.valid_data_size + BUFFER_BLOCK_SIZE > reader.data_size) { reader.data = (u8 *)gf_realloc(reader.data, reader.data_size + BUFFER_BLOCK_SIZE); reader.data_size += BUFFER_BLOCK_SIZE; } /* read the next bloc of data and update the data buffer url */ read_bytes = fread(reader.data+reader.valid_data_size, 1, BUFFER_BLOCK_SIZE, input); total_read_bytes += read_bytes; fprintf(stdout, "Read "LLD" bytes of "LLD" bytes from input file %s (buffer status: %5d/%5d)\r", total_read_bytes, file_size, argv[1], reader.valid_data_size, reader.data_size); if (read_bytes) { reader.valid_data_size += read_bytes; sprintf(reader.data_url, "gmem://%d@%p", reader.valid_data_size, reader.data); } else { /* end of file we can quit */ gf_mx_v(reader.mutex); break; } /* if the file is not yet opened (no movie), open it in progressive mode (to update its data later on) */ if (!reader.movie) { /* let's initialize the parser */ e = gf_isom_open_progressive(reader.data_url, 0, 0, &reader.movie, &missing_bytes); if (reader.movie) { gf_isom_set_single_moof_mode(reader.movie, GF_TRUE); } /* 
we can let parser try to work now */ gf_mx_v(reader.mutex); if ((e == GF_OK || e == GF_ISOM_INCOMPLETE_FILE) && reader.movie) { /* nothing to do, this is normal */ } else { fprintf(stdout, "Error opening fragmented mp4 in progressive mode: %s (missing "LLD" bytes)\n", gf_error_to_string(e), missing_bytes); ret = 1; goto exit; } } else { /* let inform the parser that the buffer has been updated with new data */ e = gf_isom_refresh_fragmented(reader.movie, &missing_bytes, reader.data_url); /* we can let parser try to work now */ gf_mx_v(reader.mutex); if (e != GF_OK && e != GF_ISOM_INCOMPLETE_FILE) { fprintf(stdout, "Error refreshing fragmented mp4: %s (missing "LLD" bytes)\n", gf_error_to_string(e), missing_bytes); ret = 1; goto exit; } } //gf_sleep(1); } } exit: /* stop the parser */ reader.do_run = GF_FALSE; gf_th_stop(reading_thread); /* clean structures */ gf_th_del(reading_thread); gf_mx_del(reader.mutex); gf_free(reader.data); gf_isom_close(reader.movie); gf_fclose(input); gf_sys_close(); return ret; }
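/* The reading thread function iso_progressive_read_thread referenced by
 * gf_th_run() above is not shown in this listing. The skeleton below is only
 * a sketch of the locking pattern main() relies on (lock reader.mutex before
 * touching the buffer or the movie, poll while do_run is set); the actual
 * sample extraction logic is assumed, not reproduced. */
static u32 iso_progressive_read_thread_sketch(void *par)
{
	ISOProgressiveReader *reader = (ISOProgressiveReader *)par;
	while (reader->do_run) {
		gf_mx_p(reader->mutex);
		if (reader->movie) {
			/* ... pull samples for reader->track_id from reader->movie and
			   release consumed data here ... */
		}
		gf_mx_v(reader->mutex);
		gf_sleep(1);
	}
	return 0;
}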
static void realloc_chain(GF_List *ptr)
{
	GF_LIST_REALLOC(ptr->allocSize);
	ptr->slots = gf_realloc(ptr->slots, ptr->allocSize*sizeof(void*));
}
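/* GF_LIST_REALLOC is a macro defined elsewhere in the list module; its exact
 * growth policy is not visible here. The stand-in below is an assumption that
 * is merely consistent with its use above (it only has to enlarge the
 * allocation counter before gf_realloc is called). */
#ifndef GF_LIST_REALLOC
#define GF_LIST_REALLOC(_cur_alloc)	\
	( (_cur_alloc) = (_cur_alloc) ? (2 * (_cur_alloc)) : 10 )
#endif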
GF_EXPORT
GF_TextSample *gf_isom_parse_texte_sample(GF_BitStream *bs)
{
	GF_TextSample *s = gf_isom_new_text_sample();

	/*empty sample*/
	if (!bs || !gf_bs_available(bs)) return s;

	s->len = gf_bs_read_u16(bs);
	if (s->len) {
		/*2 extra bytes for UTF-16 term char just in case (we don't know if a BOM marker
		is present or not since this may be a sample carried over RTP)*/
		s->text = (char *)gf_malloc(sizeof(char)*(s->len + 2));
		s->text[s->len] = 0;
		s->text[s->len + 1] = 0;
		gf_bs_read_data(bs, s->text, s->len);
	}
	while (gf_bs_available(bs)) {
		GF_Box *a;
		GF_Err e = gf_isom_parse_box(&a, bs);
		if (!e) {
			switch (a->type) {
			case GF_ISOM_BOX_TYPE_STYL:
				if (s->styles) {
					GF_TextStyleBox *st2 = (GF_TextStyleBox *)a;
					if (!s->styles->entry_count) {
						gf_isom_box_del((GF_Box*)s->styles);
						s->styles = st2;
					} else {
						/* merge the style records of the new box into the existing one */
						s->styles->styles = (GF_StyleRecord*)gf_realloc(s->styles->styles, sizeof(GF_StyleRecord) * (s->styles->entry_count + st2->entry_count));
						memcpy(&s->styles->styles[s->styles->entry_count], st2->styles, sizeof(GF_StyleRecord) * st2->entry_count);
						s->styles->entry_count += st2->entry_count;
						gf_isom_box_del(a);
					}
				} else {
					s->styles = (GF_TextStyleBox*)a;
				}
				break;
			case GF_ISOM_BOX_TYPE_KROK:
				s->cur_karaoke = (GF_TextKaraokeBox*)a;
				/* fall through: karaoke boxes are also kept in the generic box list */
			case GF_ISOM_BOX_TYPE_HLIT:
			case GF_ISOM_BOX_TYPE_HREF:
			case GF_ISOM_BOX_TYPE_BLNK:
				gf_list_add(s->others, a);
				break;
			case GF_ISOM_BOX_TYPE_HCLR:
				if (s->highlight_color) gf_isom_box_del(a);
				else s->highlight_color = (GF_TextHighlightColorBox *)a;
				break;
			case GF_ISOM_BOX_TYPE_DLAY:
				if (s->scroll_delay) gf_isom_box_del(a);
				else s->scroll_delay = (GF_TextScrollDelayBox*)a;
				break;
			case GF_ISOM_BOX_TYPE_TBOX:
				if (s->box) gf_isom_box_del(a);
				else s->box = (GF_TextBoxBox *)a;
				break;
			case GF_ISOM_BOX_TYPE_TWRP:
				if (s->wrap) gf_isom_box_del(a);
				else s->wrap = (GF_TextWrapBox*)a;
				break;
			default:
				gf_isom_box_del(a);
				break;
			}
		}
	}
	return s;
}
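/* Illustration only: the payload layout gf_isom_parse_texte_sample expects is
 * a 16-bit text length, the raw text bytes, then optional serialized boxes
 * ('styl', 'hlit', ...). The helper name below is hypothetical and not part of
 * the GPAC API; to feed the parser one would typically retrieve the buffer
 * with gf_bs_get_content() and reopen it as a GF_BITSTREAM_READ stream. */
static GF_BitStream *make_text_sample_payload(const char *utf8_text)
{
	u16 len = (u16) strlen(utf8_text);
	GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	gf_bs_write_u16(bs, len);                       /* text length */
	gf_bs_write_data(bs, (char *) utf8_text, len);  /* text bytes, no terminator */
	/* optional text modifier boxes would be appended here */
	return bs;
}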
static GF_Err CENC_ProcessData(ISMAEAPriv *priv, GF_IPMPEvent *evt)
{
	GF_Err e;
	GF_BitStream *plaintext_bs, *ciphertext_bs, *sai_bs;
	char IV[17];
	bin128 KID;
	char *buffer;
	u32 max_size, i, subsample_count;
	GF_CENCSampleAuxInfo *sai;

	plaintext_bs = ciphertext_bs = sai_bs = NULL;
	buffer = NULL;
	max_size = 4096;
	if (!priv->crypt) return GF_SERVICE_ERROR;

	if (!evt->is_encrypted || !evt->IV_size || !evt->saiz) return GF_OK;

	ciphertext_bs = gf_bs_new(evt->data, evt->data_size, GF_BITSTREAM_READ);
	sai_bs = gf_bs_new(evt->sai, evt->saiz, GF_BITSTREAM_READ);
	plaintext_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	buffer = (char*)gf_malloc(sizeof(char) * max_size);
	sai = (GF_CENCSampleAuxInfo *)gf_malloc(sizeof(GF_CENCSampleAuxInfo));
	if (!sai) {
		e = GF_IO_ERR;
		goto exit;
	}
	memset(sai, 0, sizeof(GF_CENCSampleAuxInfo));
	sai->IV_size = evt->IV_size;

	/*read sample auxiliary information from bitstream*/
	gf_bs_read_data(sai_bs, (char *)KID, 16);
	gf_bs_read_data(sai_bs, (char *)sai->IV, sai->IV_size);
	sai->subsample_count = gf_bs_read_u16(sai_bs);
	if (sai->subsample_count) {
		sai->subsamples = (GF_CENCSubSampleEntry *)gf_malloc(sai->subsample_count*sizeof(GF_CENCSubSampleEntry));
		for (i = 0; i < sai->subsample_count; i++) {
			sai->subsamples[i].bytes_clear_data = gf_bs_read_u16(sai_bs);
			sai->subsamples[i].bytes_encrypted_data = gf_bs_read_u32(sai_bs);
		}
	}

	/*select the decryption key matching the sample KID*/
	for (i = 0; i < priv->KID_count; i++) {
		if (!strncmp((const char *)KID, (const char *)priv->KIDs[i], 16)) {
			memmove(priv->key, priv->keys[i], 16);
			break;
		}
	}

	if (priv->first_crypted_samp) {
		memmove(IV, sai->IV, sai->IV_size);
		if (sai->IV_size == 8)
			memset(IV+8, 0, sizeof(char)*8);
		e = gf_crypt_init(priv->crypt, priv->key, 16, IV);
		if (e) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[CENC] Cannot initialize AES-128 %s (%s)\n", priv->is_cenc ? "CTR" : "CBC", gf_error_to_string(e)) );
			e = GF_IO_ERR;
			goto exit;
		}
		priv->first_crypted_samp = GF_FALSE;
	} else {
		if (priv->is_cenc) {
			GF_BitStream *bs;
			bs = gf_bs_new(IV, 17, GF_BITSTREAM_WRITE);
			gf_bs_write_u8(bs, 0);	/*begin of counter*/
			gf_bs_write_data(bs, (char *)sai->IV, sai->IV_size);
			if (sai->IV_size == 8)
				gf_bs_write_u64(bs, 0); /*0-padded if IV_size == 8*/
			gf_bs_del(bs);
			gf_crypt_set_state(priv->crypt, IV, 17);
		}
		e = gf_crypt_set_key(priv->crypt, priv->key, 16, IV);
		if (e) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_AUTHOR, ("[CENC] Cannot set key AES-128 %s (%s)\n", priv->is_cenc ? "CTR" : "CBC", gf_error_to_string(e)) );
			e = GF_IO_ERR;
			goto exit;
		}
	}

	//sub-sample encryption
	if (sai->subsample_count) {
		subsample_count = 0;
		while (gf_bs_available(ciphertext_bs)) {
			if (subsample_count >= sai->subsample_count)
				break;

			/*read clear data and write it to the plaintext bitstream*/
			if (max_size < sai->subsamples[subsample_count].bytes_clear_data) {
				buffer = (char*)gf_realloc(buffer, sizeof(char)*sai->subsamples[subsample_count].bytes_clear_data);
				max_size = sai->subsamples[subsample_count].bytes_clear_data;
			}
			gf_bs_read_data(ciphertext_bs, buffer, sai->subsamples[subsample_count].bytes_clear_data);
			gf_bs_write_data(plaintext_bs, buffer, sai->subsamples[subsample_count].bytes_clear_data);

			/*now read encrypted data, decrypt it and write it to the plaintext bitstream*/
			if (max_size < sai->subsamples[subsample_count].bytes_encrypted_data) {
				buffer = (char*)gf_realloc(buffer, sizeof(char)*sai->subsamples[subsample_count].bytes_encrypted_data);
				max_size = sai->subsamples[subsample_count].bytes_encrypted_data;
			}
			gf_bs_read_data(ciphertext_bs, buffer, sai->subsamples[subsample_count].bytes_encrypted_data);
			gf_crypt_decrypt(priv->crypt, buffer, sai->subsamples[subsample_count].bytes_encrypted_data);
			gf_bs_write_data(plaintext_bs, buffer, sai->subsamples[subsample_count].bytes_encrypted_data);

			subsample_count++;
		}

		if (buffer) gf_free(buffer);
		gf_bs_get_content(plaintext_bs, &buffer, &evt->data_size);
	}
	//full sample encryption
	else {
		if (max_size < evt->data_size) {
			buffer = (char*)gf_realloc(buffer, sizeof(char)*evt->data_size);
		}
		gf_bs_read_data(ciphertext_bs, buffer, evt->data_size);
		gf_crypt_decrypt(priv->crypt, buffer, evt->data_size);
	}

	memmove(evt->data, buffer, evt->data_size);

exit:
	if (plaintext_bs) gf_bs_del(plaintext_bs);
	if (sai_bs) gf_bs_del(sai_bs);
	if (ciphertext_bs) gf_bs_del(ciphertext_bs);
	if (buffer) gf_free(buffer);
	if (sai && sai->subsamples) gf_free(sai->subsamples);
	if (sai) gf_free(sai);
	return e;
}
GF_Err gf_webvtt_parser_parse(GF_WebVTTParser *parser, u32 duration) { char szLine[2048]; char *sOK; u32 len; GF_Err e; Bool do_parse = GF_TRUE; GF_WebVTTCue *cue = NULL; u32 start = 0; u32 end = 0; char *prevLine = NULL; char *header = NULL; u32 header_len = 0; Bool had_marks = GF_FALSE; if (!parser) return GF_BAD_PARAM; if (parser->is_srt) { parser->on_header_parsed(parser->user, "WEBVTT\n"); } while (do_parse) { sOK = gf_text_get_utf8_line(szLine, 2048, parser->vtt_in, parser->unicode_type); REM_TRAIL_MARKS(szLine, "\r\n") len = (u32) strlen(szLine); switch (parser->state) { case WEBVTT_PARSER_STATE_WAITING_SIGNATURE: if (!sOK || len < 6 || strnicmp(szLine, "WEBVTT", 6) || (len > 6 && szLine[6] != ' ' && szLine[6] != '\t')) { e = GF_CORRUPTED_DATA; parser->report_message(parser->user, e, "Bad WEBVTT file signature %s", szLine); goto exit; } else { if (had_marks) { szLine[len] = '\n'; len++; } header = gf_strdup(szLine); header_len = len; parser->state = WEBVTT_PARSER_STATE_WAITING_HEADER; } break; /* proceed to next line */ case WEBVTT_PARSER_STATE_WAITING_HEADER: if (prevLine) { u32 prev_len = (u32) strlen(prevLine); header = (char *)gf_realloc(header, header_len + prev_len + 1); strcpy(header+header_len,prevLine); header_len += prev_len; gf_free(prevLine); prevLine = NULL; } if (sOK && len) { if (strstr(szLine, "-->")) { parser->on_header_parsed(parser->user, header); /* continue to the next state without breaking */ parser->state = WEBVTT_PARSER_STATE_WAITING_CUE_TIMESTAMP; /* no break, continue to the next state*/ } else { if (had_marks) { szLine[len] = '\n'; len++; } prevLine = gf_strdup(szLine); break; /* proceed to next line */ } } else { parser->on_header_parsed(parser->user, header); if (!sOK) { /* end of file, parsing is done */ do_parse = GF_FALSE; break; } else { /* empty line means end of header */ parser->state = WEBVTT_PARSER_STATE_WAITING_CUE; /* no break, continue to the next state*/ } } case WEBVTT_PARSER_STATE_WAITING_CUE: if (sOK && len) { if (strstr(szLine, "-->")) { parser->state = WEBVTT_PARSER_STATE_WAITING_CUE_TIMESTAMP; /* continue to the next state without breaking */ } else { /* discard the previous line */ /* should we do something with it ? callback ?*/ if (prevLine) { gf_free(prevLine); prevLine = NULL; } /* save this new line */ if (had_marks) { szLine[len] = '\n'; len++; } prevLine = gf_strdup(szLine); /* stay in the same state */ break; } } else { /* discard the previous line */ /* should we do something with it ? 
callback ?*/ if (prevLine) { gf_free(prevLine); prevLine = NULL; } if (!sOK) { do_parse = GF_FALSE; break; } else { /* remove empty lines and stay in the same state */ break; } } case WEBVTT_PARSER_STATE_WAITING_CUE_TIMESTAMP: if (sOK && len) { if (cue == NULL) { cue = gf_webvtt_cue_new(); } if (prevLine) { gf_webvtt_cue_add_property(cue, WEBVTT_ID, prevLine, (u32) strlen(prevLine)); gf_free(prevLine); prevLine = NULL; } e = gf_webvtt_parser_parse_timings_settings(parser, cue, szLine, len); if (e) { if (cue) gf_webvtt_cue_del(cue); cue = NULL; parser->state = WEBVTT_PARSER_STATE_WAITING_CUE; } else { start = (u32)gf_webvtt_timestamp_get(&cue->start); end = (u32)gf_webvtt_timestamp_get(&cue->end); parser->state = WEBVTT_PARSER_STATE_WAITING_CUE_PAYLOAD; } } else { /* not possible */ assert(0); } break; case WEBVTT_PARSER_STATE_WAITING_CUE_PAYLOAD: if (sOK && len) { if (had_marks) { szLine[len] = '\n'; len++; } gf_webvtt_cue_add_property(cue, WEBVTT_PAYLOAD, szLine, len); /* remain in the same state as a cue payload can have multiple lines */ break; } else { /* end of the current cue */ gf_webvtt_add_cue_to_samples(parser, parser->samples, cue); cue = NULL; gf_set_progress("Importing WebVTT", gf_ftell(parser->vtt_in), parser->file_size); if ((duration && (end >= duration)) || !sOK) { do_parse = GF_FALSE; break; } else { /* empty line, move to next cue */ parser->state = WEBVTT_PARSER_STATE_WAITING_CUE; break; } } } if (duration && (start >= duration)) { break; } } /* no more cues to come, flush everything */ if (cue) { gf_webvtt_add_cue_to_samples(parser, parser->samples, cue); cue = NULL; } while (gf_list_count(parser->samples) > 0) { GF_WebVTTSample *sample = (GF_WebVTTSample *)gf_list_get(parser->samples, 0); parser->last_duration = sample->end - sample->start; gf_list_rem(parser->samples, 0); parser->on_sample_parsed(parser->user, sample); } gf_set_progress("Importing WebVTT", parser->file_size, parser->file_size); e = GF_OK; exit: if (cue) gf_webvtt_cue_del(cue); if (prevLine) gf_free(prevLine); if (header) gf_free(header); return e; }
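/* REM_TRAIL_MARKS is a macro defined elsewhere in the WebVTT module. From its
 * use above it strips trailing characters listed in its second argument and
 * records in had_marks whether anything was removed. The definition below is a
 * plausible reconstruction under that assumption, not the verbatim GPAC macro
 * (had_marks must be a Bool in scope at the call site). */
#define REM_TRAIL_MARKS(__str, __sep) while (1) {	\
		u32 _len = (u32) strlen(__str);	\
		if (!_len) break;	\
		_len--;	\
		if (strchr(__sep, __str[_len])) {	\
			had_marks = GF_TRUE;	\
			__str[_len] = 0;	\
		} else break;	\
	}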
static void MediaDecoder_GetNextAU(GF_Codec *codec, GF_Channel **activeChannel, GF_DBUnit **nextAU) { GF_Channel *ch; GF_DBUnit *AU; u32 count, minDTS, i; count = gf_list_count(codec->inChannels); *nextAU = NULL; *activeChannel = NULL; if (!count) return; minDTS = 0; /*browse from base to top layer*/ for (i=0;i<count;i++) { ch = (GF_Channel*)gf_list_get(codec->inChannels, i); if ((codec->type==GF_STREAM_OCR) && ch->IsClockInit) { /*check duration - we assume that scalable OCR streams are just pure nonsense...*/ if (ch->is_pulling && codec->odm->duration) { if (gf_clock_time(codec->ck) > codec->odm->duration) gf_es_on_eos(ch); } return; } AU = gf_es_get_au(ch); if (!AU) { if (! (*activeChannel)) *activeChannel = ch; continue; } /*aggregate all AUs with the same timestamp on the base AU and delete the upper layers)*/ if (! *nextAU) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d AU CTS %d selected as first layer\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, AU->CTS)); *nextAU = AU; *activeChannel = ch; minDTS = AU->DTS; } else if (AU->DTS == minDTS) { GF_DBUnit *baseAU = *nextAU; assert(baseAU); baseAU->data = gf_realloc(baseAU->data, baseAU->dataLength + AU->dataLength); memcpy(baseAU->data + baseAU->dataLength , AU->data, AU->dataLength); baseAU->dataLength += AU->dataLength; GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[%s] ODM%d#CH%d AU CTS %d reaggregated on base layer %d\n", codec->decio->module_name, codec->odm->OD->objectDescriptorID, ch->esd->ESID, AU->CTS, (*activeChannel)->esd->ESID)); gf_es_drop_au(ch); } else { break; } } if (codec->is_reordering && *nextAU && codec->first_frame_dispatched) { if ((*activeChannel)->esd->slConfig->no_dts_signaling) { u32 CTS = (*nextAU)->CTS; /*reordering !!*/ u32 prev_ts_diff; u32 diff = 0; if (codec->recomputed_cts && (codec->recomputed_cts > (*nextAU)->CTS)) { diff = codec->recomputed_cts - CTS; } prev_ts_diff = (CTS > codec->last_unit_cts) ? (CTS - codec->last_unit_cts) : (codec->last_unit_cts - CTS); if (!diff) diff = prev_ts_diff; else if (prev_ts_diff && (prev_ts_diff < diff) ) diff = prev_ts_diff; if (!codec->min_au_duration || (diff < codec->min_au_duration)) codec->min_au_duration = diff; } else { codec->min_au_duration = 0; /*FIXME - we're breaking sync (couple of frames delay)*/ (*nextAU)->CTS = (*nextAU)->DTS; } } }
/*dispatch the AU in the DB*/ static void Channel_DispatchAU(GF_Channel *ch, u32 duration) { u32 time; GF_DBUnit *au; if (!ch->buffer || !ch->len) { if (ch->buffer) { gf_free(ch->buffer); ch->buffer = NULL; } return; } au = gf_db_unit_new(); if (!au) { gf_free(ch->buffer); ch->buffer = NULL; ch->len = 0; return; } au->CTS = ch->CTS; au->DTS = ch->DTS; if (ch->IsRap) au->flags |= GF_DB_AU_RAP; if (ch->CTS_past_offset) { au->CTS = ch->CTS_past_offset; au->flags |= GF_DB_AU_CTS_IN_PAST; ch->CTS_past_offset = 0; } if (ch->no_timestamps) { au->flags |= GF_DB_AU_NO_TIMESTAMPS; ch->no_timestamps=0; } au->data = ch->buffer; au->dataLength = ch->len; au->PaddingBits = ch->padingBits; ch->IsRap = 0; ch->padingBits = 0; au->next = NULL; ch->buffer = NULL; if (ch->len + ch->media_padding_bytes != ch->allocSize) { au->data = (char*)gf_realloc(au->data, sizeof(char) * (au->dataLength + ch->media_padding_bytes)); } if (ch->media_padding_bytes) memset(au->data + au->dataLength, 0, sizeof(char)*ch->media_padding_bytes); ch->len = ch->allocSize = 0; gf_es_lock(ch, 1); if (ch->service && ch->service->cache) { GF_SLHeader slh; memset(&slh, 0, sizeof(GF_SLHeader)); slh.accessUnitEndFlag = slh.accessUnitStartFlag = 1; slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1; slh.decodingTimeStamp = ch->net_dts; slh.compositionTimeStamp = ch->net_cts; slh.randomAccessPointFlag = (au->flags & GF_DB_AU_RAP) ? 1 : 0; ch->service->cache->Write(ch->service->cache, ch, au->data, au->dataLength, &slh); } if (!ch->AU_buffer_first) { ch->AU_buffer_first = au; ch->AU_buffer_last = au; ch->AU_Count = 1; } else { if (ch->AU_buffer_last->DTS<=au->DTS) { ch->AU_buffer_last->next = au; ch->AU_buffer_last = ch->AU_buffer_last->next; } /*enable deinterleaving only for audio channels (some video transport may not be able to compute DTS, cf MPEG1-2/RTP) HOWEVER, we must recompute a monotone increasing DTS in case the decoder does perform frame reordering in which case the DTS is used for presentation time!!*/ else if (ch->esd->decoderConfig->streamType!=GF_STREAM_AUDIO) { #if 0 GF_DBUnit *au_prev, *ins_au; u32 DTS; #endif au->DTS = 0; /*append AU*/ ch->AU_buffer_last->next = au; ch->AU_buffer_last = ch->AU_buffer_last->next; #if 0 GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] Media deinterleaving OD %d ch %d\n", ch->esd->ESID, ch->odm->OD->objectDescriptorID)); DTS = au->DTS; au_prev = ch->AU_buffer_first; /*locate first AU in buffer with DTS greater than new unit CTS*/ while (au_prev->next && (au_prev->DTS < DTS) ) au_prev = au_prev->next; /*remember insertion point*/ ins_au = au_prev; /*shift all following frames DTS*/ while (au_prev->next) { au_prev->next->DTS = au_prev->DTS; au_prev = au_prev->next; } /*and apply*/ ins_au->DTS = DTS; #endif } else { GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] Audio deinterleaving OD %d ch %d\n", ch->esd->ESID, ch->odm->OD->objectDescriptorID)); /*de-interleaving of AUs*/ if (ch->AU_buffer_first->DTS > au->DTS) { au->next = ch->AU_buffer_first; ch->AU_buffer_first = au; } else { GF_DBUnit *au_prev = ch->AU_buffer_first; while (au_prev->next && au_prev->next->DTS<au->DTS) { au_prev = au_prev->next; } assert(au_prev); if (au_prev->next->DTS==au->DTS) { gf_free(au->data); gf_free(au); } else { au->next = au_prev->next; au_prev->next = au; } } } ch->AU_Count += 1; } Channel_UpdateBufferTime(ch); ch->au_duration = 0; if (duration) ch->au_duration = (u32) ((u64)1000 * duration / ch->ts_res); GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d - Dispatch AU DTS %d - CTS %d - size %d time %d 
Buffer %d Nb AUs %d - First AU relative timing %d\n", ch->esd->ESID, au->DTS, au->CTS, au->dataLength, gf_clock_real_time(ch->clock), ch->BufferTime, ch->AU_Count, ch->AU_buffer_first ? ch->AU_buffer_first->DTS - gf_clock_time(ch->clock) : 0 )); /*little optimisation: if direct dispatching is possible, try to decode the AU we must lock the media scheduler to avoid deadlocks with other codecs accessing the scene or media resources*/ if (ch->dispatch_after_db) { u32 retry = 100; u32 current_frame; GF_Terminal *term = ch->odm->term; ch_buffer_off(ch); gf_es_lock(ch, 0); if (gf_mx_try_lock(term->mm_mx)) { switch (ch->esd->decoderConfig->streamType) { case GF_STREAM_OD: gf_codec_process(ch->odm->subscene->od_codec, 100); break; case GF_STREAM_SCENE: if (ch->odm->codec) gf_codec_process(ch->odm->codec, 100); else gf_codec_process(ch->odm->subscene->scene_codec, 100); break; } gf_mx_v(term->mm_mx); } gf_es_lock(ch, 1); current_frame = term->compositor->frame_number; /*wait for initial setup to complete before giving back the hand to the caller service*/ while (retry) { /*Scene bootstrap: if the scene is attached, wait for first frame to complete so that initial PLAY on objects can be evaluated*/ if (term->compositor->scene && (term->compositor->frame_number==current_frame) ) { retry--; gf_sleep(1); continue; } /*Media bootstrap: wait for all pending requests on media objects are processed*/ if (gf_list_count(term->media_queue)) { retry--; gf_sleep(1); continue; } break; } } time = gf_term_get_time(ch->odm->term); if (ch->BufferOn) { ch->last_au_time = time; Channel_UpdateBuffering(ch, 1); } else { /*trigger the data progress every 500 ms*/ if (ch->last_au_time + 500 > time) { gf_term_service_media_event(ch->odm, GF_EVENT_MEDIA_DATA_PROGRESS); ch->last_au_time = time; } } gf_es_lock(ch, 0); }
/*handles reception of an SL-PDU, logical or physical*/ void gf_es_receive_sl_packet(GF_ClientService *serv, GF_Channel *ch, char *payload, u32 payload_size, GF_SLHeader *header, GF_Err reception_status) { GF_SLHeader hdr; u32 nbAU, OldLength, size, AUSeqNum; Bool EndAU, NewAU; if (ch->bypass_sl_and_db) { GF_SceneDecoder *sdec; ch->IsClockInit = 1; if (ch->odm->subscene) { sdec = (GF_SceneDecoder *)ch->odm->subscene->scene_codec->decio; } else { sdec = (GF_SceneDecoder *)ch->odm->codec->decio; } gf_mx_p(ch->mx); sdec->ProcessData(sdec, payload, payload_size, ch->esd->ESID, 0, 0); gf_mx_v(ch->mx); return; } if (ch->es_state != GF_ESM_ES_RUNNING) return; if (ch->skip_sl) { Channel_ReceiveSkipSL(serv, ch, payload, payload_size); return; } if (ch->is_raw_channel) { ch->CTS = ch->DTS = (u32) (ch->ts_offset + (header->compositionTimeStamp - ch->seed_ts) * 1000 / ch->ts_res); if (!ch->IsClockInit) { gf_es_check_timing(ch); } if (payload) gf_es_dispatch_raw_media_au(ch, payload, payload_size, ch->CTS); return; } /*physical SL-PDU - depacketize*/ if (!header) { u32 SLHdrLen; if (!payload_size) return; gf_sl_depacketize(ch->esd->slConfig, &hdr, payload, payload_size, &SLHdrLen); payload_size -= SLHdrLen; payload += SLHdrLen; } else { hdr = *header; } /*we ignore OCRs for the moment*/ if (hdr.OCRflag) { if (!ch->IsClockInit) { /*channel is the OCR, re-initialize the clock with the proper OCR*/ if (gf_es_owns_clock(ch)) { u32 OCR_TS; /*if SL is mapped from network module(eg not coded), OCR=PCR shall be given in 27Mhz units*/ if (hdr.m2ts_pcr) { OCR_TS = (u32) ( hdr.objectClockReference / 27000); } else { OCR_TS = (u32) ( (s64) (hdr.objectClockReference) * ch->ocr_scale); } ch->clock->clock_init = 0; gf_clock_set_time(ch->clock, OCR_TS); /*many TS streams deployed with HLS have broken PCRs - we will check their consistency when receiving the first AU with DTS/CTS on this channel*/ ch->clock->probe_ocr = 1; GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: initializing clock at STB %d from OCR TS %d (origial TS "LLD") - %d buffering - OTB %d\n", ch->esd->ESID, gf_term_get_time(ch->odm->term), OCR_TS, hdr.objectClockReference, ch->clock->Buffering, gf_clock_time(ch->clock) )); if (ch->clock->clock_init) ch->IsClockInit = 1; } } #if 0 /*adjust clock if M2TS PCR discontinuity*/ else if (hdr.m2ts_pcr==2) { u32 ck; u32 OCR_TS = (u32) ( hdr.objectClockReference / 27000); ck = gf_clock_time(ch->clock); GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: At OTB %d - OCR Discontinuity OCR: adjusting to %d (origial TS "LLD") - original clock %d\n", ch->esd->ESID, gf_clock_real_time(ch->clock), OCR_TS, hdr.objectClockReference, ck)); // gf_clock_set_time(ch->clock, (u32) OCR_TS); } /*compute clock drift*/ else { u32 ck; u32 OCR_TS; if (hdr.m2ts_pcr) { OCR_TS = (u32) ( hdr.objectClockReference / 27000); } else { OCR_TS = (u32) ( (s64) (hdr.objectClockReference) * ch->ocr_scale); } ck = gf_clock_time(ch->clock); GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: At OTB %d adjusting OCR to %d (origial TS "LLD") - diff %d\n", ch->esd->ESID, gf_clock_real_time(ch->clock), OCR_TS, hdr.objectClockReference, (s32) OCR_TS - (s32) ck)); // gf_clock_set_time(ch->clock, (u32) OCR_TS); } #else { u32 ck; u32 OCR_TS; if (hdr.m2ts_pcr) { OCR_TS = (u32) ( hdr.objectClockReference / 27000); } else { OCR_TS = (u32) ( (s64) (hdr.objectClockReference) * ch->ocr_scale); } ck = gf_clock_time(ch->clock); GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: At OTB %d got OCR %d (origial TS "LLD") - diff %d%s\n", ch->esd->ESID, 
gf_clock_real_time(ch->clock), OCR_TS, hdr.objectClockReference, (s32) OCR_TS - (s32) ck, (hdr.m2ts_pcr==2) ? " - PCR Discontinuity flag" : "" )); } #endif if (!payload_size) return; } /*check state*/ if (!ch->codec_resilient && (reception_status==GF_CORRUPTED_DATA)) { Channel_WaitRAP(ch); return; } if (!ch->esd->slConfig->useAccessUnitStartFlag) { /*no AU signaling - each packet is an AU*/ if (!ch->esd->slConfig->useAccessUnitEndFlag) hdr.accessUnitEndFlag = hdr.accessUnitStartFlag = 1; /*otherwise AU are signaled by end of previous packet*/ else hdr.accessUnitStartFlag = ch->NextIsAUStart; } /*get RAP*/ if (ch->esd->slConfig->hasRandomAccessUnitsOnlyFlag) { hdr.randomAccessPointFlag = 1; } else if ((ch->carousel_type!=GF_ESM_CAROUSEL_MPEG2) && (!ch->esd->slConfig->useRandomAccessPointFlag || ch->codec_resilient) ) { ch->stream_state = 0; } if (ch->esd->slConfig->packetSeqNumLength) { if (ch->pck_sn && hdr.packetSequenceNumber) { /*repeated -> drop*/ if (ch->pck_sn == hdr.packetSequenceNumber) { GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: repeated packet, droping\n", ch->esd->ESID)); return; } /*if codec has no resiliency check packet drops*/ if (!ch->codec_resilient && !hdr.accessUnitStartFlag) { if (ch->pck_sn == (u32) (1<<ch->esd->slConfig->packetSeqNumLength) ) { if (hdr.packetSequenceNumber) { GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: packet loss, droping & wait RAP\n", ch->esd->ESID)); Channel_WaitRAP(ch); return; } } else if (ch->pck_sn + 1 != hdr.packetSequenceNumber) { GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: packet loss, droping & wait RAP\n", ch->esd->ESID)); Channel_WaitRAP(ch); return; } } } ch->pck_sn = hdr.packetSequenceNumber; } /*if empty, skip the packet*/ if (hdr.paddingFlag && !hdr.paddingBits) { GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: Empty packet - skipping\n", ch->esd->ESID)); return; } /*IDLE stream shall be processed*/ NewAU = 0; if (hdr.accessUnitStartFlag) { NewAU = 1; ch->NextIsAUStart = 0; ch->skip_carousel_au = 0; /*if we have a pending AU, add it*/ if (ch->buffer) { if (ch->esd->slConfig->useAccessUnitEndFlag) { GF_LOG(GF_LOG_WARNING, GF_LOG_SYNC, ("[SyncLayer] ES%d: missed end of AU (DTS %d)\n", ch->esd->ESID, ch->DTS)); } if (ch->codec_resilient) { if (!ch->IsClockInit) gf_es_check_timing(ch); Channel_DispatchAU(ch, 0); } else { gf_free(ch->buffer); ch->buffer = NULL; ch->AULength = 0; ch->len = ch->allocSize = 0; } } AUSeqNum = hdr.AU_sequenceNumber; /*Get CTS */ if (ch->esd->slConfig->useTimestampsFlag) { if (hdr.compositionTimeStampFlag) { ch->net_dts = ch->net_cts = hdr.compositionTimeStamp; /*get DTS */ if (hdr.decodingTimeStampFlag) ch->net_dts = hdr.decodingTimeStamp; #if 0 /*until clock is not init check seed ts*/ if (!ch->IsClockInit && (ch->net_dts < ch->seed_ts)) ch->seed_ts = ch->net_dts; #endif if (ch->net_cts<ch->seed_ts) { u64 diff = ch->seed_ts - ch->net_cts; ch->CTS_past_offset = (u32) (diff * 1000 / ch->ts_res) + ch->ts_offset; ch->net_dts = ch->net_cts = 0; ch->CTS = ch->DTS = gf_clock_time(ch->clock); } else { if (ch->net_dts>ch->seed_ts) ch->net_dts -= ch->seed_ts; else ch->net_dts=0; ch->net_cts -= ch->seed_ts; ch->CTS_past_offset = 0; /*TS Wraping not tested*/ ch->CTS = (u32) (ch->ts_offset + (s64) (ch->net_cts) * 1000 / ch->ts_res); ch->DTS = (u32) (ch->ts_offset + (s64) (ch->net_dts) * 1000 / ch->ts_res); } if (ch->clock->probe_ocr && gf_es_owns_clock(ch)) { s32 diff_ts = ch->DTS; diff_ts -= ch->clock->init_time; if (ABS(diff_ts) > 10000) { GF_LOG(GF_LOG_ERROR, GF_LOG_SYNC, 
("[SyncLayer] ES%d: invalid clock reference detected - DTS %d OCR %d - using DTS as OCR\n", ch->DTS, ch->clock->init_time)); ch->clock->clock_init = 0; gf_clock_set_time(ch->clock, ch->DTS-1000); } ch->clock->probe_ocr = 0; } ch->no_timestamps = 0; } else { ch->no_timestamps = 1; } } else { /*use CU duration*/ if (!ch->IsClockInit) ch->DTS = ch->CTS = ch->ts_offset; if (!ch->esd->slConfig->AUSeqNumLength) { if (!ch->au_sn) { ch->CTS = ch->ts_offset; ch->au_sn = 1; } else { ch->CTS += ch->esd->slConfig->CUDuration; } } else { //use the sequence number to get the TS if (AUSeqNum < ch->au_sn) { nbAU = ( (1<<ch->esd->slConfig->AUSeqNumLength) - ch->au_sn) + AUSeqNum; } else { nbAU = AUSeqNum - ch->au_sn; } ch->CTS += nbAU * ch->esd->slConfig->CUDuration; } } /*if the AU Length is carried in SL, get its size*/ if (ch->esd->slConfig->AULength > 0) { ch->AULength = hdr.accessUnitLength; } else { ch->AULength = 0; } /*carousel for repeated AUs.*/ if (ch->carousel_type) { /* not used : Bool use_rap = hdr.randomAccessPointFlag; */ if (ch->carousel_type==GF_ESM_CAROUSEL_MPEG2) { AUSeqNum = hdr.m2ts_version_number_plus_one-1; /*mpeg-2 section carrouseling does not take into account the RAP nature of the tables*/ if (AUSeqNum==ch->au_sn) { if (ch->stream_state) { ch->stream_state=0; GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: MPEG-2 Carousel: tuning in\n", ch->esd->ESID)); } else { ch->skip_carousel_au = 1; GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: MPEG-2 Carousel: repeated AU (TS %d) - skipping\n", ch->esd->ESID, ch->CTS)); return; } } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: MPEG-2 Carousel: updated AU (TS %d)\n", ch->esd->ESID, ch->CTS)); ch->stream_state=0; ch->au_sn = AUSeqNum; } } else { if (hdr.randomAccessPointFlag) { /*initial tune-in*/ if (ch->stream_state==1) { GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - tuning in\n", ch->esd->ESID, ch->CTS)); ch->au_sn = AUSeqNum; ch->stream_state = 0; } /*carousel RAP*/ else if (AUSeqNum == ch->au_sn) { /*error recovery*/ if (ch->stream_state==2) { GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - recovering\n", ch->esd->ESID, ch->CTS)); ch->stream_state = 0; } else { ch->skip_carousel_au = 1; GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - skipping\n", ch->esd->ESID, ch->CTS)); return; } } /*regular RAP*/ else { if (ch->stream_state==2) { GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP Carousel found (TS %d) - recovering from previous errors\n", ch->esd->ESID, ch->CTS)); } ch->au_sn = AUSeqNum; ch->stream_state = 0; } } /*regular AU but waiting for RAP*/ else if (ch->stream_state) { #if 0 ch->skip_carousel_au = 1; GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Waiting for RAP Carousel - skipping\n", ch->esd->ESID)); return; #else GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Tuning in before RAP\n", ch->esd->ESID)); #endif } /*previous packet(s) loss: check for critical or non-critical AUs*/ else if (reception_status == GF_REMOTE_SERVICE_ERROR) { if (ch->au_sn == AUSeqNum) { GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Lost a non critical packet\n", ch->esd->ESID)); } /*Packet lost are critical*/ else { ch->stream_state = 2; GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Lost a critical packet - skipping\n", ch->esd->ESID)); return; } } else { ch->au_sn = AUSeqNum; GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: NON-RAP AU received (TS %d)\n", ch->esd->ESID, ch->DTS)); } 
} } /*no carousel signaling, tune-in at first RAP*/ else if (hdr.randomAccessPointFlag) { ch->stream_state = 0; GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: RAP AU received\n", ch->esd->ESID)); } /*waiting for RAP, return*/ else if (ch->stream_state) { GF_LOG(GF_LOG_INFO, GF_LOG_SYNC, ("[SyncLayer] ES%d: Waiting for RAP - skipping AU (DTS %d)\n", ch->esd->ESID, ch->DTS)); return; } } /*update the RAP marker on a packet base (to cope with AVC/H264 NALU->AU reconstruction)*/ if (hdr.randomAccessPointFlag) ch->IsRap = 1; /*get AU end state*/ OldLength = ch->buffer ? ch->len : 0; EndAU = hdr.accessUnitEndFlag; if (ch->AULength == OldLength + payload_size) EndAU = 1; if (EndAU) ch->NextIsAUStart = 1; if (EndAU && !ch->IsClockInit) gf_es_check_timing(ch); /* we need to skip all the packets of the current AU in the carousel scenario */ if (ch->skip_carousel_au == 1) return; if (!payload_size && EndAU && ch->buffer) { GF_LOG(GF_LOG_DEBUG, GF_LOG_SYNC, ("[SyncLayer] ES%d: Empty packet, flushing buffer\n", ch->esd->ESID)); Channel_DispatchAU(ch, 0); return; } if (!payload_size) return; /*missed begining, unusable*/ if (!ch->buffer && !NewAU) { if (ch->esd->slConfig->useAccessUnitStartFlag) { GF_LOG(GF_LOG_ERROR, GF_LOG_SYNC, ("[SyncLayer] ES%d: missed begin of AU\n", ch->esd->ESID)); } if (ch->codec_resilient) NewAU = 1; else return; } /*Write the Packet payload to the buffer*/ if (NewAU) { /*we should NEVER have a bitstream at this stage*/ assert(!ch->buffer); /*ignore length fields*/ size = payload_size + ch->media_padding_bytes; ch->buffer = (char*)gf_malloc(sizeof(char) * size); if (!ch->buffer) { assert(0); return; } ch->allocSize = size; memset(ch->buffer, 0, sizeof(char) * size); ch->len = 0; } if (!ch->esd->slConfig->usePaddingFlag) hdr.paddingFlag = 0; if (ch->ipmp_tool) { GF_Err e; GF_IPMPEvent evt; memset(&evt, 0, sizeof(evt)); evt.event_type=GF_IPMP_TOOL_PROCESS_DATA; evt.channel = ch; evt.data = payload; evt.data_size = payload_size; evt.is_encrypted = hdr.isma_encrypted; evt.isma_BSO = hdr.isma_BSO; e = ch->ipmp_tool->process(ch->ipmp_tool, &evt); /*we discard undecrypted AU*/ if (e) { if (e==GF_EOS) { gf_es_on_eos(ch); /*restart*/ if (evt.restart_requested) { if (ch->odm->parentscene->is_dynamic_scene) { gf_scene_restart_dynamic(ch->odm->parentscene, 0); } else { mediacontrol_restart(ch->odm); } } } return; } } if (hdr.paddingFlag && !EndAU) { /*to do - this shouldn't happen anyway */ } else { /*check if enough space*/ size = ch->allocSize; if (size && (payload_size + ch->len <= size)) { memcpy(ch->buffer+ch->len, payload, payload_size); ch->len += payload_size; } else { size = payload_size + ch->len + ch->media_padding_bytes; ch->buffer = (char*)gf_realloc(ch->buffer, sizeof(char) * size); memcpy(ch->buffer+ch->len, payload, payload_size); ch->allocSize = size; ch->len += payload_size; } if (hdr.paddingFlag) ch->padingBits = hdr.paddingBits; } if (EndAU) Channel_DispatchAU(ch, hdr.au_duration); }
static void SAF_NetIO(void *cbk, GF_NETIO_Parameter *param) { GF_Err e; Bool is_rap, go; SAFChannel *ch; u32 cts, au_sn, au_size, type, i, stream_id; u64 bs_pos; GF_BitStream *bs; GF_SLHeader sl_hdr; SAFIn *read = (SAFIn *) cbk; e = param->error; /*done*/ if (param->msg_type==GF_NETIO_DATA_TRANSFERED) { if (read->stream && (read->saf_type==SAF_FILE_REMOTE)) read->saf_type = SAF_FILE_LOCAL; return; } else { /*handle service message*/ gf_service_download_update_stats(read->dnload); if (param->msg_type!=GF_NETIO_DATA_EXCHANGE) { if (e<0) { if (read->needs_connection) { read->needs_connection = 0; gf_service_connect_ack(read->service, NULL, e); } return; } if (read->needs_connection) { u32 total_size; gf_dm_sess_get_stats(read->dnload, NULL, NULL, &total_size, NULL, NULL, NULL); if (!total_size) read->saf_type = SAF_LIVE_STREAM; } return; } } if (!param->size) return; if (!read->run_state) return; if (read->alloc_size < read->saf_size + param->size) { read->saf_data = (char*)gf_realloc(read->saf_data, sizeof(char)*(read->saf_size + param->size) ); read->alloc_size = read->saf_size + param->size; } memcpy(read->saf_data + read->saf_size, param->data, sizeof(char)*param->size); read->saf_size += param->size; /*first AU not complete yet*/ if (read->saf_size<10) return; bs = gf_bs_new(read->saf_data, read->saf_size, GF_BITSTREAM_READ); bs_pos = 0; go = 1; while (go) { u64 avail = gf_bs_available(bs); bs_pos = gf_bs_get_position(bs); if (avail<10) break; is_rap = gf_bs_read_int(bs, 1); au_sn = gf_bs_read_int(bs, 15); gf_bs_read_int(bs, 2); cts = gf_bs_read_int(bs, 30); au_size = gf_bs_read_int(bs, 16); avail-=8; if (au_size > avail) break; assert(au_size>=2); is_rap = 1; type = gf_bs_read_int(bs, 4); stream_id = gf_bs_read_int(bs, 12); au_size -= 2; ch = saf_get_channel(read, stream_id, NULL); switch (type) { case 1: case 2: case 7: if (ch) { gf_bs_skip_bytes(bs, au_size); } else { SAFChannel *first = (SAFChannel *)gf_list_get(read->channels, 0); GF_SAFEALLOC(ch, SAFChannel); ch->stream_id = stream_id; ch->esd = gf_odf_desc_esd_new(0); ch->esd->ESID = stream_id; ch->esd->OCRESID = first ? 
first->stream_id : stream_id; ch->esd->slConfig->useRandomAccessPointFlag = 1; ch->esd->slConfig->AUSeqNumLength = 0; ch->esd->decoderConfig->objectTypeIndication = gf_bs_read_u8(bs); ch->esd->decoderConfig->streamType = gf_bs_read_u8(bs); ch->ts_res = ch->esd->slConfig->timestampResolution = gf_bs_read_u24(bs); ch->esd->decoderConfig->bufferSizeDB = gf_bs_read_u16(bs); au_size -= 7; if ((ch->esd->decoderConfig->objectTypeIndication == 0xFF) && (ch->esd->decoderConfig->streamType == 0xFF) ) { u16 mimeLen = gf_bs_read_u16(bs); gf_bs_skip_bytes(bs, mimeLen); au_size -= mimeLen+2; } if (type==7) { u16 urlLen = gf_bs_read_u16(bs); ch->esd->URLString = (char*)gf_malloc(sizeof(char)*(urlLen+1)); gf_bs_read_data(bs, ch->esd->URLString, urlLen); ch->esd->URLString[urlLen] = 0; au_size -= urlLen+2; } if (au_size) { ch->esd->decoderConfig->decoderSpecificInfo->dataLength = au_size; ch->esd->decoderConfig->decoderSpecificInfo->data = (char*)gf_malloc(sizeof(char)*au_size); gf_bs_read_data(bs, ch->esd->decoderConfig->decoderSpecificInfo->data, au_size); } if (ch->esd->decoderConfig->streamType==4) ch->buffer_min=100; else if (ch->esd->decoderConfig->streamType==5) ch->buffer_min=400; else ch->buffer_min=0; if (read->needs_connection && (ch->esd->decoderConfig->streamType==GF_STREAM_SCENE)) { gf_list_add(read->channels, ch); read->needs_connection = 0; gf_service_connect_ack(read->service, NULL, GF_OK); } else if (read->needs_connection) { gf_odf_desc_del((GF_Descriptor *) ch->esd); gf_free(ch); ch = NULL; } else { GF_ObjectDescriptor *od; gf_list_add(read->channels, ch); od = (GF_ObjectDescriptor*)gf_odf_desc_new(GF_ODF_OD_TAG); gf_list_add(od->ESDescriptors, ch->esd); ch->esd = NULL; od->objectDescriptorID = ch->stream_id; gf_service_declare_media(read->service, (GF_Descriptor*)od, 0); } } break; case 4: if (ch) { bs_pos = gf_bs_get_position(bs); memset(&sl_hdr, 0, sizeof(GF_SLHeader)); sl_hdr.accessUnitLength = au_size; sl_hdr.AU_sequenceNumber = au_sn; sl_hdr.compositionTimeStampFlag = 1; sl_hdr.compositionTimeStamp = cts; sl_hdr.randomAccessPointFlag = is_rap; if (read->start_range && (read->start_range*ch->ts_res>cts*1000)) { sl_hdr.compositionTimeStamp = read->start_range*ch->ts_res/1000; } gf_service_send_packet(read->service, ch->ch, read->saf_data+bs_pos, au_size, &sl_hdr, GF_OK); } gf_bs_skip_bytes(bs, au_size); break; case 3: if (ch) gf_service_send_packet(read->service, ch->ch, NULL, 0, NULL, GF_EOS); break; case 5: go = 0; read->run_state = 0; i=0; while ((ch = (SAFChannel *)gf_list_enum(read->channels, &i))) { gf_service_send_packet(read->service, ch->ch, NULL, 0, NULL, GF_EOS); } break; } } gf_bs_del(bs); if (bs_pos) { u32 remain = (u32) (read->saf_size - bs_pos); if (remain) memmove(read->saf_data, read->saf_data+bs_pos, sizeof(char)*remain); read->saf_size = remain; } SAF_Regulate(read); }
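/* Each SAF access unit parsed in SAF_NetIO above starts with a fixed header
 * read bit by bit. The writer below is only an illustration of that layout,
 * mirroring the read order (1-bit RAP flag, 15-bit AU sequence number,
 * 2 reserved bits, 30-bit CTS, 16-bit AU size, then a 4-bit type and 12-bit
 * stream id at the start of the AU payload); it is not part of the GPAC API. */
static void write_saf_au_header_sketch(GF_BitStream *bs, Bool is_rap, u32 au_sn,
                                       u32 cts, u32 au_size, u32 type, u32 stream_id)
{
	gf_bs_write_int(bs, is_rap ? 1 : 0, 1); /* random access flag */
	gf_bs_write_int(bs, au_sn, 15);         /* AU sequence number */
	gf_bs_write_int(bs, 0, 2);              /* reserved */
	gf_bs_write_int(bs, cts, 30);           /* composition timestamp */
	gf_bs_write_int(bs, au_size, 16);       /* AU size, including the 2 bytes below */
	gf_bs_write_int(bs, type, 4);           /* payload type (1,2,7=config, 4=AU, 3=EOS, 5=session end) */
	gf_bs_write_int(bs, stream_id, 12);     /* SAF stream id */
}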
int live_session(int argc, char **argv) { GF_Err e; u32 i; char *filename = NULL; char *dst = NULL; char *ifce_addr = NULL; char *sdp_name = "session.sdp"; u16 dst_port = 7000; u32 load_type=0; u32 check; u32 ttl = 1; u32 path_mtu = 1450; s32 next_time; u64 last_src_modif, mod_time; char *src_name = NULL; Bool run, has_carousel, no_rap; Bool udp = 0; u16 sk_port=0; GF_Socket *sk = NULL; LiveSession livesess; RTPChannel *ch; char *update_buffer = NULL; u32 update_buffer_size = 0; u16 aggregate_on_stream; Bool adjust_carousel_time, force_rap, aggregate_au, discard_pending, signal_rap, version_inc; Bool update_context; u32 period, ts_delta, signal_critical; u16 es_id; e = GF_OK; aggregate_au = 1; es_id = 0; no_rap = 0; gf_sys_init(GF_FALSE); memset(&livesess, 0, sizeof(LiveSession)); gf_log_set_tool_level(GF_LOG_ALL, GF_LOG_INFO); for (i=1; i<(u32) argc; i++) { char *arg = argv[i]; if (arg[0] != '-') filename = arg; else if (!strnicmp(arg, "-dst=", 5)) dst = arg+5; else if (!strnicmp(arg, "-port=", 6)) dst_port = atoi(arg+6); else if (!strnicmp(arg, "-sdp=", 5)) sdp_name = arg+5; else if (!strnicmp(arg, "-mtu=", 5)) path_mtu = atoi(arg+5); else if (!strnicmp(arg, "-ttl=", 5)) ttl = atoi(arg+5); else if (!strnicmp(arg, "-ifce=", 6)) ifce_addr = arg+6; else if (!strnicmp(arg, "-no-rap", 7)) no_rap = 1; else if (!strnicmp(arg, "-dims", 5)) load_type = GF_SM_LOAD_DIMS; else if (!strnicmp(arg, "-src=", 5)) src_name = arg+5; else if (!strnicmp(arg, "-udp=", 5)) { sk_port = atoi(arg+5); udp = 1; } else if (!strnicmp(arg, "-tcp=", 5)) { sk_port = atoi(arg+5); udp = 0; } } if (!filename) { fprintf(stderr, "Missing filename\n"); PrintLiveUsage(); return 1; } if (dst_port && dst) livesess.streams = gf_list_new(); livesess.seng = gf_seng_init(&livesess, filename, load_type, NULL, (load_type == GF_SM_LOAD_DIMS) ? 1 : 0); if (!livesess.seng) { fprintf(stderr, "Cannot create scene engine\n"); return 1; } if (livesess.streams) live_session_setup(&livesess, dst, dst_port, path_mtu, ttl, ifce_addr, sdp_name); has_carousel = 0; last_src_modif = src_name ? gf_file_modification_time(src_name) : 0; if (sk_port) { sk = gf_sk_new(udp ? 
GF_SOCK_TYPE_UDP : GF_SOCK_TYPE_TCP); if (udp) { e = gf_sk_bind(sk, NULL, sk_port, NULL, 0, 0); if (e != GF_OK) { if (sk) gf_sk_del(sk); sk = NULL; } } else { } } for (i=0; i<(u32) argc; i++) { char *arg = argv[i]; if (!strnicmp(arg, "-rap=", 5)) { u32 period, id, j; RTPChannel *ch; period = id = 0; if (strchr(arg, ':')) { sscanf(arg, "-rap=ESID=%u:%u", &id, &period); e = gf_seng_enable_aggregation(livesess.seng, id, 1); if (e) { fprintf(stderr, "Cannot enable aggregation on stream %u: %s\n", id, gf_error_to_string(e)); goto exit; } } else { sscanf(arg, "-rap=%u", &period); } j=0; while (NULL != (ch = gf_list_enum(livesess.streams, &j))) { if (!id || (ch->ESID==id)) ch->carousel_period = period; } has_carousel = 1; } } i=0; while (NULL != (ch = gf_list_enum(livesess.streams, &i))) { if (ch->carousel_period) { has_carousel = 1; break; } } update_context = 0; if (has_carousel || !no_rap) { livesess.carousel_generation = 1; gf_seng_encode_context(livesess.seng, live_session_callback); livesess.carousel_generation = 0; } live_session_send_carousel(&livesess, NULL); check = 10; run = 1; while (run) { check--; if (!check) { check = 10; if (gf_prompt_has_input()) { char c = gf_prompt_get_char(); switch (c) { case 'q': run=0; break; case 'U': livesess.critical = 1; case 'u': { GF_Err e; char szCom[8192]; fprintf(stderr, "Enter command to send:\n"); szCom[0] = 0; if (1 > scanf("%[^\t\n]", szCom)) { fprintf(stderr, "No command entered properly, aborting.\n"); break; } /*stdin flush bug*/ while (getchar()!='\n') {} e = gf_seng_encode_from_string(livesess.seng, 0, 0, szCom, live_session_callback); if (e) fprintf(stderr, "Processing command failed: %s\n", gf_error_to_string(e)); e = gf_seng_aggregate_context(livesess.seng, 0); livesess.critical = 0; update_context = 1; } break; case 'E': livesess.critical = 1; case 'e': { GF_Err e; char szCom[8192]; fprintf(stderr, "Enter command to send:\n"); szCom[0] = 0; if (1 > scanf("%[^\t\n]", szCom)) { printf("No command entered properly, aborting.\n"); break; } /*stdin flush bug*/ while (getchar()!='\n') {} e = gf_seng_encode_from_string(livesess.seng, 0, 1, szCom, live_session_callback); if (e) fprintf(stderr, "Processing command failed: %s\n", gf_error_to_string(e)); livesess.critical = 0; e = gf_seng_aggregate_context(livesess.seng, 0); } break; case 'p': { char rad[GF_MAX_PATH]; fprintf(stderr, "Enter output file name - \"std\" for stderr: "); if (1 > scanf("%s", rad)) { fprintf(stderr, "No ouput file name entered, aborting.\n"); break; } e = gf_seng_save_context(livesess.seng, !strcmp(rad, "std") ? 
NULL : rad); fprintf(stderr, "Dump done (%s)\n", gf_error_to_string(e)); } break; case 'F': update_context = 1; case 'f': livesess.force_carousel = 1; break; } e = GF_OK; } } /*process updates from file source*/ if (src_name) { mod_time = gf_file_modification_time(src_name); if (mod_time != last_src_modif) { FILE *srcf; char flag_buf[201], *flag; fprintf(stderr, "Update file modified - processing\n"); last_src_modif = mod_time; srcf = gf_fopen(src_name, "rt"); if (!srcf) continue; /*checks if we have a broadcast config*/ if (!fgets(flag_buf, 200, srcf)) flag_buf[0] = '\0'; gf_fclose(srcf); aggregate_on_stream = (u16) -1; adjust_carousel_time = force_rap = discard_pending = signal_rap = signal_critical = 0; aggregate_au = version_inc = 1; period = -1; ts_delta = 0; es_id = 0; /*find our keyword*/ flag = strstr(flag_buf, "gpac_broadcast_config "); if (flag) { flag += strlen("gpac_broadcast_config "); /*move to next word*/ while (flag && (flag[0]==' ')) flag++; while (1) { char *sep = strchr(flag, ' '); if (sep) sep[0] = 0; if (!strnicmp(flag, "esid=", 5)) es_id = atoi(flag+5); else if (!strnicmp(flag, "period=", 7)) period = atoi(flag+7); else if (!strnicmp(flag, "ts=", 3)) ts_delta = atoi(flag+3); else if (!strnicmp(flag, "carousel=", 9)) aggregate_on_stream = atoi(flag+9); else if (!strnicmp(flag, "restamp=", 8)) adjust_carousel_time = atoi(flag+8); else if (!strnicmp(flag, "discard=", 8)) discard_pending = atoi(flag+8); else if (!strnicmp(flag, "aggregate=", 10)) aggregate_au = atoi(flag+10); else if (!strnicmp(flag, "force_rap=", 10)) force_rap = atoi(flag+10); else if (!strnicmp(flag, "rap=", 4)) signal_rap = atoi(flag+4); else if (!strnicmp(flag, "critical=", 9)) signal_critical = atoi(flag+9); else if (!strnicmp(flag, "vers_inc=", 9)) version_inc = atoi(flag+9); if (sep) { sep[0] = ' '; flag = sep+1; } else { break; } } set_broadcast_params(&livesess, es_id, period, ts_delta, aggregate_on_stream, adjust_carousel_time, force_rap, aggregate_au, discard_pending, signal_rap, signal_critical, version_inc); } e = gf_seng_encode_from_file(livesess.seng, es_id, aggregate_au ? 0 : 1, src_name, live_session_callback); if (e) fprintf(stderr, "Processing command failed: %s\n", gf_error_to_string(e)); e = gf_seng_aggregate_context(livesess.seng, 0); update_context = no_rap ? 
0 : 1; } } /*process updates from socket source*/ if (sk) { char buffer[2049]; u32 bytes_read; u32 update_length; u32 bytes_received; e = gf_sk_receive(sk, buffer, 2048, 0, &bytes_read); if (e == GF_OK) { u32 hdr_length = 0; u8 cmd_type = buffer[0]; bytes_received = 0; switch (cmd_type) { case 0: { GF_BitStream *bs = gf_bs_new(buffer, bytes_read, GF_BITSTREAM_READ); gf_bs_read_u8(bs); es_id = gf_bs_read_u16(bs); aggregate_on_stream = gf_bs_read_u16(bs); if (aggregate_on_stream==0xFFFF) aggregate_on_stream = -1; adjust_carousel_time = gf_bs_read_int(bs, 1); force_rap = gf_bs_read_int(bs, 1); aggregate_au = gf_bs_read_int(bs, 1); discard_pending = gf_bs_read_int(bs, 1); signal_rap = gf_bs_read_int(bs, 1); signal_critical = gf_bs_read_int(bs, 1); version_inc = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 1); period = gf_bs_read_u16(bs); if (period==0xFFFF) period = -1; ts_delta = gf_bs_read_u16(bs); update_length = gf_bs_read_u32(bs); hdr_length = 12; gf_bs_del(bs); } set_broadcast_params(&livesess, es_id, period, ts_delta, aggregate_on_stream, adjust_carousel_time, force_rap, aggregate_au, discard_pending, signal_rap, signal_critical, version_inc); break; default: update_length = 0; break; } if (update_buffer_size <= update_length) { update_buffer = gf_realloc(update_buffer, update_length+1); update_buffer_size = update_length+1; } if (update_length && (bytes_read>hdr_length) ) { memcpy(update_buffer, buffer+hdr_length, bytes_read-hdr_length); bytes_received = bytes_read-hdr_length; } while (bytes_received<update_length) { e = gf_sk_receive(sk, buffer, 2048, 0, &bytes_read); switch (e) { case GF_IP_NETWORK_EMPTY: break; case GF_OK: memcpy(update_buffer+bytes_received, buffer, bytes_read); bytes_received += bytes_read; break; default: fprintf(stderr, "Error with UDP socket : %s\n", gf_error_to_string(e)); break; } } update_buffer[update_length] = 0; if (update_length) { e = gf_seng_encode_from_string(livesess.seng, es_id, aggregate_au ? 0 : 1, update_buffer, live_session_callback); if (e) fprintf(stderr, "Processing command failed: %s\n", gf_error_to_string(e)); e = gf_seng_aggregate_context(livesess.seng, 0); update_context = 1; } } } if (update_context) { livesess.carousel_generation=1; e = gf_seng_encode_context(livesess.seng, live_session_callback ); livesess.carousel_generation=0; update_context = 0; } if (livesess.force_carousel) { live_session_send_carousel(&livesess, NULL); livesess.force_carousel = 0; continue; } if (!has_carousel) { gf_sleep(10); continue; } ch = next_carousel(&livesess, (u32 *) &next_time); if ((ch==NULL) || (next_time > 20)) { gf_sleep(20); continue; } if (next_time) gf_sleep(next_time); live_session_send_carousel(&livesess, ch); } exit: live_session_shutdown(&livesess); if (update_buffer) gf_free(update_buffer); if (sk) gf_sk_del(sk); gf_sys_close(); return e ? 1 : 0; }
void m2ts_net_io(void *cbk, GF_NETIO_Parameter *param)
{
	GF_Err e;
	M2TSIn *m2ts = (M2TSIn *) cbk;
	assert( m2ts );

	/*handle service message*/
	gf_term_download_update_stats(m2ts->ts->dnload);

	if (param->msg_type==GF_NETIO_DATA_TRANSFERED) {
		e = GF_EOS;
	} else if (param->msg_type==GF_NETIO_DATA_EXCHANGE) {
		e = GF_OK;
		assert( m2ts->ts);
		if (param->size > 0) {
			/*process chunk*/
			assert(param->data);
			if (m2ts->network_buffer_size < param->size) {
				m2ts->network_buffer = gf_realloc(m2ts->network_buffer, sizeof(char) * param->size);
				m2ts->network_buffer_size = param->size;
			}
			assert( m2ts->network_buffer );
			memcpy(m2ts->network_buffer, param->data, param->size);
			gf_m2ts_process_data(m2ts->ts, m2ts->network_buffer, param->size);
		}

		/*if asked to regulate, wait until we get a play request*/
		if (m2ts->ts->run_state && !m2ts->ts->nb_playing && (m2ts->ts->file_regulate==1)) {
			while (m2ts->ts->run_state && !m2ts->ts->nb_playing && (m2ts->ts->file_regulate==1) ) {
				gf_sleep(50);
				continue;
			}
		} else {
			gf_sleep(1);
		}

#if 1 //see commit 3642: crashes when reload quickly with http
		if (!m2ts->ts->run_state) {
			if (m2ts->ts->dnload) gf_term_download_del( m2ts->ts->dnload );
			m2ts->ts->dnload = NULL;
		}
#endif

	} else {
		e = param->error;
	}

	switch (e) {
	case GF_EOS:
		if (!m2ts->is_connected) {
			gf_term_on_connect(m2ts->service, NULL, GF_OK);
		}
		return;
	case GF_OK:
		return;
	default:
		if (!m2ts->ts_setup) {
			m2ts->ts_setup = 1;
		}
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[MPEGTSIn] : Error while getting data : %s\n", gf_error_to_string(e)));
		gf_term_on_connect(m2ts->service, NULL, e);
	}
}
/**
 * A function which takes FFmpeg H265 extradata (SPS/PPS) and makes it ready to be pushed to the MP4 muxer.
 * @param extradata the FFmpeg extradata buffer (Annex B formatted)
 * @param extradata_size size of the extradata buffer in bytes
 * @param dst_cfg the GF_HEVCConfig to fill
 * @returns GF_OK if the extradata was parsed and is valid, other values otherwise.
 */
static GF_Err hevc_import_ffextradata(const u8 *extradata, const u64 extradata_size, GF_HEVCConfig *dst_cfg)
{
#ifdef GPAC_DISABLE_AV_PARSERS
	return GF_OK;
#else
	HEVCState hevc;
	GF_HEVCParamArray *vpss = NULL, *spss = NULL, *ppss = NULL;
	GF_BitStream *bs;
	char *buffer = NULL;
	u32 buffer_size = 0;
	if (!extradata || (extradata_size < sizeof(u32)))
		return GF_BAD_PARAM;
	bs = gf_bs_new(extradata, extradata_size, GF_BITSTREAM_READ);
	if (!bs)
		return GF_BAD_PARAM;

	memset(&hevc, 0, sizeof(HEVCState));
	hevc.sps_active_idx = -1;

	while (gf_bs_available(bs)) {
		s32 idx;
		GF_AVCConfigSlot *slc;
		u8 nal_unit_type, temporal_id, layer_id;
		u64 nal_start;
		u32 nal_size;

		if (gf_bs_read_u32(bs) != 0x00000001) {
			gf_bs_del(bs);
			return GF_BAD_PARAM;
		}

		nal_start = gf_bs_get_position(bs);
		nal_size = gf_media_nalu_next_start_code_bs(bs);
		if (nal_start + nal_size > extradata_size) {
			gf_bs_del(bs);
			return GF_BAD_PARAM;
		}

		if (nal_size > buffer_size) {
			buffer = (char*)gf_realloc(buffer, nal_size);
			buffer_size = nal_size;
		}
		gf_bs_read_data(bs, buffer, nal_size);
		gf_bs_seek(bs, nal_start);

		gf_media_hevc_parse_nalu(bs, &hevc, &nal_unit_type, &temporal_id, &layer_id);
		if (layer_id) {
			gf_bs_del(bs);
			gf_free(buffer);
			return GF_BAD_PARAM;
		}

		switch (nal_unit_type) {
		case GF_HEVC_NALU_VID_PARAM:
			idx = gf_media_hevc_read_vps(buffer, nal_size, &hevc);
			if (idx < 0) {
				gf_bs_del(bs);
				gf_free(buffer);
				return GF_BAD_PARAM;
			}

			assert(hevc.vps[idx].state == 1); //we don't expect multiple VPS
			if (hevc.vps[idx].state == 1) {
				hevc.vps[idx].state = 2;
				hevc.vps[idx].crc = gf_crc_32(buffer, nal_size);

				dst_cfg->avgFrameRate = hevc.vps[idx].rates[0].avg_pic_rate;
				dst_cfg->constantFrameRate = hevc.vps[idx].rates[0].constand_pic_rate_idc;
				dst_cfg->numTemporalLayers = hevc.vps[idx].max_sub_layers;
				dst_cfg->temporalIdNested = hevc.vps[idx].temporal_id_nesting;

				if (!vpss) {
					GF_SAFEALLOC(vpss, GF_HEVCParamArray);
					vpss->nalus = gf_list_new();
					gf_list_add(dst_cfg->param_array, vpss);
					vpss->array_completeness = 1;
					vpss->type = GF_HEVC_NALU_VID_PARAM;
				}

				slc = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
				slc->size = nal_size;
				slc->id = idx;
				slc->data = (char*)gf_malloc(sizeof(char)*slc->size);
				memcpy(slc->data, buffer, sizeof(char)*slc->size);

				gf_list_add(vpss->nalus, slc);
			}
			break;
		case GF_HEVC_NALU_SEQ_PARAM:
			idx = gf_media_hevc_read_sps(buffer, nal_size, &hevc);
			if (idx < 0) {
				gf_bs_del(bs);
				gf_free(buffer);
				return GF_BAD_PARAM;
			}

			assert(!(hevc.sps[idx].state & AVC_SPS_DECLARED)); //we don't expect multiple SPS
			if ((hevc.sps[idx].state & AVC_SPS_PARSED) && !(hevc.sps[idx].state & AVC_SPS_DECLARED)) {
				hevc.sps[idx].state |= AVC_SPS_DECLARED;
				hevc.sps[idx].crc = gf_crc_32(buffer, nal_size);
			}

			dst_cfg->configurationVersion = 1;
			dst_cfg->profile_space = hevc.sps[idx].ptl.profile_space;
			dst_cfg->tier_flag = hevc.sps[idx].ptl.tier_flag;
			dst_cfg->profile_idc = hevc.sps[idx].ptl.profile_idc;
			dst_cfg->general_profile_compatibility_flags = hevc.sps[idx].ptl.profile_compatibility_flag;
			dst_cfg->progressive_source_flag = hevc.sps[idx].ptl.general_progressive_source_flag;
			dst_cfg->interlaced_source_flag = hevc.sps[idx].ptl.general_interlaced_source_flag;
			dst_cfg->non_packed_constraint_flag = hevc.sps[idx].ptl.general_non_packed_constraint_flag;
			dst_cfg->frame_only_constraint_flag = hevc.sps[idx].ptl.general_frame_only_constraint_flag;
			dst_cfg->constraint_indicator_flags = hevc.sps[idx].ptl.general_reserved_44bits;
			dst_cfg->level_idc = hevc.sps[idx].ptl.level_idc;
			dst_cfg->chromaFormat = hevc.sps[idx].chroma_format_idc;
			dst_cfg->luma_bit_depth = hevc.sps[idx].bit_depth_luma;
			dst_cfg->chroma_bit_depth = hevc.sps[idx].bit_depth_chroma;

			if (!spss) {
				GF_SAFEALLOC(spss, GF_HEVCParamArray);
				spss->nalus = gf_list_new();
				gf_list_add(dst_cfg->param_array, spss);
				spss->array_completeness = 1;
				spss->type = GF_HEVC_NALU_SEQ_PARAM;
			}

			slc = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
			slc->size = nal_size;
			slc->id = idx;
			slc->data = (char*)gf_malloc(sizeof(char)*slc->size);
			memcpy(slc->data, buffer, sizeof(char)*slc->size);

			gf_list_add(spss->nalus, slc);
			break;
		case GF_HEVC_NALU_PIC_PARAM:
			idx = gf_media_hevc_read_pps(buffer, nal_size, &hevc);
			if (idx < 0) {
				gf_bs_del(bs);
				gf_free(buffer);
				return GF_BAD_PARAM;
			}

			assert(hevc.pps[idx].state == 1); //we don't expect multiple PPS
			if (hevc.pps[idx].state == 1) {
				hevc.pps[idx].state = 2;
				hevc.pps[idx].crc = gf_crc_32(buffer, nal_size);

				if (!ppss) {
					GF_SAFEALLOC(ppss, GF_HEVCParamArray);
					ppss->nalus = gf_list_new();
					gf_list_add(dst_cfg->param_array, ppss);
					ppss->array_completeness = 1;
					ppss->type = GF_HEVC_NALU_PIC_PARAM;
				}

				slc = (GF_AVCConfigSlot*)gf_malloc(sizeof(GF_AVCConfigSlot));
				slc->size = nal_size;
				slc->id = idx;
				slc->data = (char*)gf_malloc(sizeof(char)*slc->size);
				memcpy(slc->data, buffer, sizeof(char)*slc->size);

				gf_list_add(ppss->nalus, slc);
			}
			break;
		default:
			break;
		}

		if (gf_bs_seek(bs, nal_start + nal_size)) {
			assert(nal_start + nal_size <= gf_bs_get_size(bs));
			break;
		}
	}

	gf_bs_del(bs);
	gf_free(buffer);

	return GF_OK;
#endif
}
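/* Illustrative sketch (not part of the original source): typical use of
 * hevc_import_ffextradata() when muxing FFmpeg-decoded HEVC into an ISOBMFF
 * track. gf_odf_hevc_cfg_new(), gf_isom_hevc_config_new() and
 * gf_odf_hevc_cfg_del() are standard GPAC calls; 'codec_ctx' (an FFmpeg
 * AVCodecContext), 'isofile' and 'track' are assumptions for the example. */
static GF_Err setup_hevc_track_from_ffmpeg(GF_ISOFile *isofile, u32 track, AVCodecContext *codec_ctx)
{
	u32 di;
	GF_Err e;
	GF_HEVCConfig *hevc_cfg = gf_odf_hevc_cfg_new();
	e = hevc_import_ffextradata(codec_ctx->extradata, codec_ctx->extradata_size, hevc_cfg);
	if (e == GF_OK) {
		/*creates the hvcC sample description on the given track*/
		e = gf_isom_hevc_config_new(isofile, track, hevc_cfg, NULL, NULL, &di);
	}
	gf_odf_hevc_cfg_del(hevc_cfg);
	return e;
}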
GF_Err LSR_UpdateESD(GF_LASeRSampleEntryBox *lsr, GF_ESD *esd)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)lsr, GF_TRUE);
	if (lsr->descr) gf_isom_box_del((GF_Box *) lsr->descr);
	lsr->descr = NULL;
	btrt->avgBitrate = esd->decoderConfig->avgBitrate;
	btrt->maxBitrate = esd->decoderConfig->maxBitrate;
	btrt->bufferSizeDB = esd->decoderConfig->bufferSizeDB;

	if (gf_list_count(esd->IPIDataSet)
	        || gf_list_count(esd->IPMPDescriptorPointers)
	        || esd->langDesc
	        || gf_list_count(esd->extensionDescriptors)
	        || esd->ipiPtr || esd->qos || esd->RegDescriptor) {

		lsr->descr = (GF_MPEG4ExtensionDescriptorsBox *)gf_isom_box_new(GF_ISOM_BOX_TYPE_M4DS);
		if (esd->RegDescriptor) {
			gf_list_add(lsr->descr->descriptors, esd->RegDescriptor);
			esd->RegDescriptor = NULL;
		}
		if (esd->qos) {
			gf_list_add(lsr->descr->descriptors, esd->qos);
			esd->qos = NULL;
		}
		if (esd->ipiPtr) {
			gf_list_add(lsr->descr->descriptors, esd->ipiPtr);
			esd->ipiPtr = NULL;
		}

		while (gf_list_count(esd->IPIDataSet)) {
			GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->IPIDataSet, 0);
			gf_list_rem(esd->IPIDataSet, 0);
			gf_list_add(lsr->descr->descriptors, desc);
		}
		while (gf_list_count(esd->IPMPDescriptorPointers)) {
			GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->IPMPDescriptorPointers, 0);
			gf_list_rem(esd->IPMPDescriptorPointers, 0);
			gf_list_add(lsr->descr->descriptors, desc);
		}
		if (esd->langDesc) {
			gf_list_add(lsr->descr->descriptors, esd->langDesc);
			esd->langDesc = NULL;
		}
		while (gf_list_count(esd->extensionDescriptors)) {
			GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->extensionDescriptors, 0);
			gf_list_rem(esd->extensionDescriptors, 0);
			gf_list_add(lsr->descr->descriptors, desc);
		}
	}

	/*update LASeR config*/
	if (!lsr->lsr_config) lsr->lsr_config = (GF_LASERConfigurationBox *)gf_isom_box_new(GF_ISOM_BOX_TYPE_LSRC);
	if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
		lsr->lsr_config->hdr = gf_realloc(lsr->lsr_config->hdr, sizeof(char) * esd->decoderConfig->decoderSpecificInfo->dataLength);
		lsr->lsr_config->hdr_size = esd->decoderConfig->decoderSpecificInfo->dataLength;
		memcpy(lsr->lsr_config->hdr, esd->decoderConfig->decoderSpecificInfo->data, sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength);
	}
	gf_odf_desc_del((GF_Descriptor *)esd);
	return GF_OK;
}
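/* Illustrative sketch (not part of the original source): LSR_UpdateESD() takes
 * ownership of the ESD and destroys it, so callers hand over a freshly built
 * descriptor. 'lsr_entry', 'lsr_dsi' and 'lsr_dsi_size' are assumptions; inside
 * the library the sample entry comes from the track's sample description table. */
static GF_Err update_lsr_entry(GF_LASeRSampleEntryBox *lsr_entry, const char *lsr_dsi, u32 lsr_dsi_size)
{
	GF_ESD *esd = gf_odf_desc_esd_new(2);
	esd->decoderConfig->streamType = GF_STREAM_SCENE;
	esd->decoderConfig->avgBitrate = 24000;
	esd->decoderConfig->maxBitrate = 48000;
	/*the LASeR decoder specific info becomes the LsrConfiguration header*/
	esd->decoderConfig->decoderSpecificInfo = (GF_DefaultDescriptor *)gf_odf_desc_new(GF_ODF_DSI_TAG);
	esd->decoderConfig->decoderSpecificInfo->dataLength = lsr_dsi_size;
	esd->decoderConfig->decoderSpecificInfo->data = (char *)gf_malloc(lsr_dsi_size);
	memcpy(esd->decoderConfig->decoderSpecificInfo->data, lsr_dsi, lsr_dsi_size);
	return LSR_UpdateESD(lsr_entry, esd); /*esd is freed by the call*/
}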
/* Rewrite mode:
 * mode = 0: playback
 * mode = 1: streaming
 */
GF_Err gf_isom_nalu_sample_rewrite(GF_MediaBox *mdia, GF_ISOSample *sample, u32 sampleNumber, GF_MPEGVisualSampleEntryBox *entry)
{
	Bool is_hevc = 0;
	GF_Err e = GF_OK;
	GF_ISOSample *ref_samp;
	GF_BitStream *src_bs, *ref_bs, *dst_bs;
	u64 offset;
	u32 ref_nalu_size, data_offset, data_length, copy_size, nal_size, max_size, di, nal_unit_size_field, cur_extract_mode, extractor_mode;
	Bool rewrite_ps, rewrite_start_codes;
	u8 ref_track_ID, ref_track_num;
	s8 sample_offset, nal_type;
	u32 nal_hdr;
	char *buffer;
	GF_ISOFile *file = mdia->mediaTrack->moov->mov;

	src_bs = ref_bs = dst_bs = NULL;
	ref_samp = NULL;
	buffer = NULL;
	rewrite_ps = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG) ? 1 : 0;
	if (! sample->IsRAP) rewrite_ps = 0;
	rewrite_start_codes = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG) ? 1 : 0;
	extractor_mode = mdia->mediaTrack->extractor_mode & 0x0000FFFF;

	if (extractor_mode == GF_ISOM_NALU_EXTRACT_INSPECT) {
		if (!rewrite_ps && !rewrite_start_codes) return GF_OK;
	}
	if (!entry) return GF_BAD_PARAM;

	nal_unit_size_field = 0;
	/*if svc rewrite*/
	if (entry->svc_config && entry->svc_config->config) nal_unit_size_field = entry->svc_config->config->nal_unit_size;
	/*if mvc rewrite*/
	/*otherwise do nothing*/
	else if (!rewrite_ps && !rewrite_start_codes) {
		return GF_OK;
	}

	if (!nal_unit_size_field) {
		if (entry->avc_config) nal_unit_size_field = entry->avc_config->config->nal_unit_size;
		else if (entry->hevc_config) {
			nal_unit_size_field = entry->hevc_config->config->nal_unit_size;
			is_hevc = 1;
		}
	}
	if (!nal_unit_size_field) return GF_ISOM_INVALID_FILE;

	dst_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	src_bs = gf_bs_new(sample->data, sample->dataLength, GF_BITSTREAM_READ);
	max_size = 4096;

	/*rewrite start code with NALU delim*/
	if (rewrite_start_codes) {
		gf_bs_write_int(dst_bs, 1, 32);
		if (is_hevc) {
			gf_bs_write_int(dst_bs, 0, 1);
			gf_bs_write_int(dst_bs, GF_HEVC_NALU_ACCESS_UNIT, 6);
			gf_bs_write_int(dst_bs, 0, 9);
			/*pic-type - by default we signal all slice types possible*/
			gf_bs_write_int(dst_bs, 2, 3);
			gf_bs_write_int(dst_bs, 0, 5);
		} else {
			gf_bs_write_int(dst_bs, (sample->data[0] & 0x60) | GF_AVC_NALU_ACCESS_UNIT, 8);
			gf_bs_write_int(dst_bs, 0xF0, 8); /*7 "all supported NALUs" (=111) + rbsp trailing (10000)*/
		}
	}

	if (rewrite_ps) {
		if (is_hevc) {
			u32 i, count;
			count = gf_list_count(entry->hevc_config->config->param_array);
			for (i=0; i<count; i++) {
				GF_HEVCParamArray *ar = gf_list_get(entry->hevc_config->config->param_array, i);
				rewrite_nalus_list(ar->nalus, dst_bs, rewrite_start_codes, nal_unit_size_field);
			}

			/*little optimization if we are not asked to rewrite start codes: copy over the sample*/
			if (!rewrite_start_codes) {
				gf_bs_write_data(dst_bs, sample->data, sample->dataLength);
				gf_free(sample->data);
				sample->data = NULL;
				gf_bs_get_content(dst_bs, &sample->data, &sample->dataLength);
				gf_bs_del(src_bs);
				gf_bs_del(dst_bs);
				return GF_OK;
			}
		} else {
			/*this is an SVC track: get all SPS/PPS from this track down to the base layer and rewrite them*/
			if (mdia->mediaTrack->has_base_layer) {
				u32 j;
				GF_List *nalu_sps = gf_list_new();
				GF_List *nalu_pps = gf_list_new();
				GF_TrackReferenceTypeBox *dpnd = NULL;
				Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &dpnd);

#if 0
				/*get all upper layers with SCAL reference to this track*/
				for (j = 0; j < gf_isom_get_track_count(file); j++) {
					if (gf_isom_has_track_reference(file, j+1, GF_ISOM_REF_SCAL, mdia->mediaTrack->Header->trackID)) {
						u32 tkID;
						GF_TrackBox *base_track;
						GF_MPEGVisualSampleEntryBox *base_entry;
						gf_isom_get_reference_ID(file, j+1, GF_ISOM_REF_SCAL, 1, &tkID);
						base_track = GetTrackbyID(mdia->mediaTrack->moov, tkID);
						base_entry = base_track ? gf_list_get(base_track->Media->information->sampleTable->SampleDescription->other_boxes, 0) : NULL;
						if (base_entry) merge_nalus(base_entry, nalu_sps, nalu_pps);
					}
				}
#endif
				merge_nalus(entry, nalu_sps, nalu_pps);
				if (dpnd) {
					for (j=0; j<dpnd->trackIDCount; j++) {
						GF_TrackBox *base_track = GetTrackbyID(mdia->mediaTrack->moov, dpnd->trackIDs[j]);
						GF_MPEGVisualSampleEntryBox *base_entry = base_track ? gf_list_get(base_track->Media->information->sampleTable->SampleDescription->other_boxes, 0) : NULL;
						if (base_entry) merge_nalus(base_entry, nalu_sps, nalu_pps);
					}
				}
				//rewrite nalus
				rewrite_nalus_list(nalu_sps, dst_bs, rewrite_start_codes, nal_unit_size_field);
				rewrite_nalus_list(nalu_pps, dst_bs, rewrite_start_codes, nal_unit_size_field);
				gf_list_del(nalu_sps);
				gf_list_del(nalu_pps);
			} else {
				if (entry->avc_config) {
					rewrite_nalus_list(entry->avc_config->config->sequenceParameterSets, dst_bs, rewrite_start_codes, nal_unit_size_field);
					rewrite_nalus_list(entry->avc_config->config->pictureParameterSets, dst_bs, rewrite_start_codes, nal_unit_size_field);
					rewrite_nalus_list(entry->avc_config->config->sequenceParameterSetExtensions, dst_bs, rewrite_start_codes, nal_unit_size_field);
				}
				/*add svc config*/
				if (entry->svc_config) {
					rewrite_nalus_list(entry->svc_config->config->sequenceParameterSets, dst_bs, rewrite_start_codes, nal_unit_size_field);
					rewrite_nalus_list(entry->svc_config->config->pictureParameterSets, dst_bs, rewrite_start_codes, nal_unit_size_field);
				}
				/*little optimization if we are not asked to rewrite extractors or start codes: copy over the sample*/
				if (!entry->svc_config && !rewrite_start_codes) {
					gf_bs_write_data(dst_bs, sample->data, sample->dataLength);
					gf_free(sample->data);
					sample->data = NULL;
					gf_bs_get_content(dst_bs, &sample->data, &sample->dataLength);
					gf_bs_del(src_bs);
					gf_bs_del(dst_bs);
					return GF_OK;
				}
			}
		}
	}

	buffer = (char *)gf_malloc(sizeof(char)*max_size);
	while (gf_bs_available(src_bs)) {
		nal_size = gf_bs_read_int(src_bs, 8*nal_unit_size_field);
		if (nal_size > max_size) {
			buffer = (char*) gf_realloc(buffer, sizeof(char)*nal_size);
			max_size = nal_size;
		}
		if (is_hevc) {
			nal_hdr = gf_bs_read_u16(src_bs);
			nal_type = (nal_hdr & 0x7E00) >> 9;
		} else {
			nal_hdr = gf_bs_read_u8(src_bs);
			nal_type = nal_hdr & 0x1F;
		}

		if (is_hevc) {
			/*we already wrote this stuff*/
			if (nal_type==GF_HEVC_NALU_ACCESS_UNIT) {
				gf_bs_skip_bytes(src_bs, nal_size-2);
				continue;
			}
			/*rewrite nal*/
			gf_bs_read_data(src_bs, buffer, nal_size-2);
			if (rewrite_start_codes)
				gf_bs_write_u32(dst_bs, 1);
			else
				gf_bs_write_int(dst_bs, nal_size, 8*nal_unit_size_field);
			gf_bs_write_u16(dst_bs, nal_hdr);
			gf_bs_write_data(dst_bs, buffer, nal_size-2);
			continue;
		}

		/*we already wrote this stuff*/
		if (nal_type==GF_AVC_NALU_ACCESS_UNIT) {
			gf_bs_skip_bytes(src_bs, nal_size-1);
			continue;
		}

		//extractor
		if (nal_type == 31) {
			switch (extractor_mode) {
			case 0:
				gf_bs_read_int(src_bs, 24); //3 bytes of NALUHeader in extractor
				ref_track_ID = gf_bs_read_u8(src_bs);
				sample_offset = (s8) gf_bs_read_int(src_bs, 8);
				data_offset = gf_bs_read_u32(src_bs);
				data_length = gf_bs_read_u32(src_bs);

				ref_track_num = gf_isom_get_track_by_id(file, ref_track_ID);
				if (!ref_track_num) {
					e = GF_BAD_PARAM;
					goto exit;
				}
				cur_extract_mode = gf_isom_get_nalu_extract_mode(file, ref_track_num);
				gf_isom_set_nalu_extract_mode(file, ref_track_num, GF_ISOM_NALU_EXTRACT_INSPECT);
				ref_samp = gf_isom_get_sample(file, ref_track_num, sampleNumber+sample_offset, &di);
				if (!ref_samp) {
					e = GF_IO_ERR;
					goto exit;
				}
				ref_bs = gf_bs_new(ref_samp->data, ref_samp->dataLength, GF_BITSTREAM_READ);
				offset = 0;
				while (gf_bs_available(ref_bs)) {
					if (gf_bs_get_position(ref_bs) < data_offset) {
						ref_nalu_size = gf_bs_read_int(ref_bs, 8*nal_unit_size_field);
						offset += ref_nalu_size + nal_unit_size_field;
						if ((offset > data_offset) || (offset >= gf_bs_get_size(ref_bs))) {
							e = GF_BAD_PARAM;
							goto exit;
						}
						e = gf_bs_seek(ref_bs, offset);
						if (e) goto exit;
						continue;
					}
					ref_nalu_size = gf_bs_read_int(ref_bs, 8*nal_unit_size_field);
					copy_size = data_length ? data_length : ref_nalu_size;
					assert(copy_size <= ref_nalu_size);
					nal_hdr = gf_bs_read_u8(ref_bs); //rewrite NAL type
					if ((copy_size-1) > max_size) {
						buffer = (char*)gf_realloc(buffer, sizeof(char)*(copy_size-1));
						max_size = copy_size-1;
					}
					gf_bs_read_data(ref_bs, buffer, copy_size-1);

					if (rewrite_start_codes)
						gf_bs_write_u32(dst_bs, 1);
					else
						gf_bs_write_int(dst_bs, copy_size, 8*nal_unit_size_field);
					gf_bs_write_u8(dst_bs, nal_hdr);
					gf_bs_write_data(dst_bs, buffer, copy_size-1);
				}

				gf_isom_sample_del(&ref_samp);
				ref_samp = NULL;
				gf_bs_del(ref_bs);
				ref_bs = NULL;
				gf_isom_set_nalu_extract_mode(file, ref_track_num, cur_extract_mode);
				break;
			default:
				//skip to end of this NALU
				gf_bs_skip_bytes(src_bs, nal_size-1);
				continue;
			}
		} else {
			gf_bs_read_data(src_bs, buffer, nal_size-1);
			if (rewrite_start_codes)
				gf_bs_write_u32(dst_bs, 1);
			else
				gf_bs_write_int(dst_bs, nal_size, 8*nal_unit_size_field);
			gf_bs_write_u8(dst_bs, nal_hdr);
			gf_bs_write_data(dst_bs, buffer, nal_size-1);
		}
	}

	/*flush the rewritten access unit back into the sample and release resources
	  (minimal completion of the function tail, assuming the usual GPAC cleanup path,
	  so that the goto exit target exists)*/
	gf_free(sample->data);
	sample->data = NULL;
	gf_bs_get_content(dst_bs, &sample->data, &sample->dataLength);

exit:
	if (ref_samp) gf_isom_sample_del(&ref_samp);
	if (src_bs) gf_bs_del(src_bs);
	if (ref_bs) gf_bs_del(ref_bs);
	if (dst_bs) gf_bs_del(dst_bs);
	if (buffer) gf_free(buffer);
	return e;
}
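/* Illustrative sketch (not part of the original source): the rewrite above is
 * reached through the public NALU extraction API. Asking for Annex B output
 * with in-band parameter sets makes gf_isom_get_sample() return samples that
 * went through gf_isom_nalu_sample_rewrite(). 'isofile', 'track' and the
 * sample number are assumptions for the example. */
static void dump_first_au_annexb(GF_ISOFile *isofile, u32 track)
{
	u32 di;
	GF_ISOSample *samp;
	gf_isom_set_nalu_extract_mode(isofile, track,
	        GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG | GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG);
	samp = gf_isom_get_sample(isofile, track, 1, &di);
	if (samp) {
		/*samp->data now holds a start-code delimited access unit with SPS/PPS in band*/
		gf_isom_sample_del(&samp);
	}
}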
static char *xml_translate_xml_string(char *str)
{
	char *value;
	u32 size, i, j;
	if (!str || !strlen(str)) return NULL;

	value = (char *)gf_malloc(sizeof(char) * 500);
	size = 500;
	i = j = 0;
	while (str[i]) {
		if (j+20 >= size) {
			size += 500;
			value = (char *)gf_realloc(value, sizeof(char)*size);
		}
		if (str[i] == '&') {
			if (str[i+1]=='#') {
				char szChar[20], *end;
				u16 wchar[2];
				u32 val;
				const unsigned short *srcp;
				strncpy(szChar, str+i, 10);
				szChar[10] = 0; /*strncpy does not null-terminate when the source is longer*/
				end = strchr(szChar, ';');
				if (!end) break;
				end[1] = 0;
				i += strlen(szChar);
				wchar[1] = 0;
				if (szChar[2]=='x') sscanf(szChar, "&#x%x;", &val);
				else sscanf(szChar, "&#%u;", &val);
				wchar[0] = val;
				srcp = wchar;
				j += gf_utf8_wcstombs(&value[j], 20, &srcp);
			}
			else if (!strnicmp(&str[i], "&amp;", sizeof(char)*5)) {
				value[j] = '&';
				j++;
				i += 5;
			}
			else if (!strnicmp(&str[i], "&lt;", sizeof(char)*4)) {
				value[j] = '<';
				j++;
				i += 4;
			}
			else if (!strnicmp(&str[i], "&gt;", sizeof(char)*4)) {
				value[j] = '>';
				j++;
				i += 4;
			}
			else if (!strnicmp(&str[i], "&apos;", sizeof(char)*6)) {
				value[j] = '\'';
				j++;
				i += 6;
			}
			else if (!strnicmp(&str[i], "&quot;", sizeof(char)*6)) {
				value[j] = '\"';
				j++;
				i += 6;
			}
			else {
				value[j] = str[i];
				j++;
				i++;
			}
		} else {
			value[j] = str[i];
			j++;
			i++;
		}
	}
	value[j] = 0;
	return value;
}
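/* Illustrative sketch (not part of the original source): expected behaviour of
 * xml_translate_xml_string(), which is static to this parser. The returned
 * buffer is owned by the caller and released with gf_free(). */
static void xml_entity_example(void)
{
	char input[] = "Tom &amp; Jerry &lt;&#x21;&gt;";
	char *txt = xml_translate_xml_string(input);
	/*txt now contains: Tom & Jerry <!>*/
	if (txt) gf_free(txt);
}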