bool interleave(string s1, string s2, string s3) { // Start typing your C/C++ solution below // DO NOT write int main() function int len1 = s1.length(), len2 = s2.length(), len3 = s3.length(); if (len3 == 0) return len1 == 0 && len2 == 0; if (mem[len1][len2] != -1) return mem[len1][len2]; if (s1.length() == 0) { return mem[len1][len2] = (s3 == s2); } if (s2.length() == 0) { return mem[len1][len2] = (s3 == s1); } bool matched = false; if (s3[0] == s1[0]) matched = interleave(s1.substr(1), s2, s3.substr(1)); if (matched) return mem[len1][len2] = true; else { if (s3[0] == s2[0]) matched = interleave(s1, s2.substr(1), s3.substr(1)); else return mem[len1][len2] = false; } return mem[len1][len2] = matched; }
// Build the thread exploration order for partial-order reduction.
// When a purely local transition exists it is explored alone;
// otherwise every other thread is scheduled (highest index first)
// and the currently running thread is appended last.
void mono_partial_order_reductiont::operator()(
  std::vector<unsigned>& interleaving)
{
  interleaving.reserve(nr_threads);
  interleaving.clear();

  const unsigned local_t=local_transition();

  if(local_t!=unsigned(-1))
  {
    // A local transition cannot interact with other threads, so
    // exploring it on its own is sufficient.
    interleave(interleaving, local_t);
    return;
  }

  const int active=state.get_current_thread();

  // All other threads, from the highest index down...
  for(int t=nr_threads-1; t>=0; --t)
    if(t!=active)
      interleave(interleaving, t);

  // ...and finally the currently active thread.
  interleave(interleaving, active);
}
void test_interleaving() { //This part tests interleaving uint8_t message[51]; uint8_t check_message[51]; unsigned int i; for(i = 0; i < 51; i++) message[i] = rand(); memcpy(check_message, message, 51); raw_data rd; rd.length = 51; rd.data = message; for(i = 0; i < 51; i++) printf("%x ", rd.data[i] & 0xff); printf("\n"); interleave(rd); for(i = 0; i < 51; i++) printf("%x ", rd.data[i] & 0xff); printf("\n"); deinterleave(rd); for(i = 0; i < 51; i++) printf("%x ", rd.data[i] & 0xff); printf("\n"); if(!memcmp(message, check_message, 51)) printf("success!\n"); else printf("Failure!\n"); }
// Build a bottom-up quadtree node over `count` point indices.
//
// Leaves (count <= MAX_CAPACITY) sort their indices by rank and keep the
// best MAX_CANDIDATES as candidates; interior nodes split along the wider
// bounding-box axis and merge the children's candidate lists.
//
// Bug fix: `bounds` was previously constructed from the `indices` member,
// but members are initialized in declaration order, not initializer-list
// order, so `bounds` could read `indices` before it was assigned. The
// `_indices` constructor argument holds the same value and is always valid.
BUQTree::BUQTree(PointSet ps, int32_t *_indices, int count, int MAX_CAPACITY, int MAX_CANDIDATES)
    : bounds(ps, _indices, count), best(), indices(_indices), num_indices(count), left(NULL), right(NULL) {
    if (num_indices <= MAX_CAPACITY) {
        // Leaf: order by rank so the best candidates form a prefix.
        std::sort(indices, indices + num_indices,
                  [&ps](int32_t i, int32_t j){ return (ps.rank[i] < ps.rank[j]); });
        min_value = num_indices > 0 ? ps.rank[indices[0]] : 0x7FFFFFFF;
        int num_keep = min(MAX_CANDIDATES, num_indices);
        best.reserve(num_keep);
        for (int i = 0; i < num_keep; i++)
            best.push_back(indices[i]);
    } else {
        // Interior node: split along the longer bounding-box axis so the
        // two halves stay as compact as possible.
        if ((bounds.Mx - bounds.mx) > (bounds.My - bounds.my))
            std::sort(indices, indices + num_indices,
                      [&ps](int32_t i, int32_t j){ return (ps.x[i] < ps.x[j]); });
        else
            std::sort(indices, indices + num_indices,
                      [&ps](int32_t i, int32_t j){ return (ps.y[i] < ps.y[j]); });
        int half = num_indices / 2;
        left = new BUQTree(ps, indices, half, MAX_CAPACITY, MAX_CANDIDATES);
        right = new BUQTree(ps, indices + half, num_indices - half, MAX_CAPACITY, MAX_CANDIDATES);
        min_value = min(left->min_value, right->min_value);
        // Merge the children's candidate lists into this node's `best`.
        interleave(ps, left->best, right->best, best, MAX_CANDIDATES);
    }
}
/** Return the Morton code of Point @a p.
 * @pre bounding_box().contains(@a p)
 * @post cell(result).contains(@a p)
 */
code_type code(point_type p) const {
  // Map the point into cell coordinates: translate so pmin_ is the
  // origin, then scale so each cell has unit extent.
  point_type cell_coords = p;
  cell_coords -= pmin_;
  cell_coords /= cell_size_;
  //for (unsigned i = 0; i < DIM; ++i) FMMTL_ASSERT(cell_coords[i] < cells_per_side);
  // The Morton code is the bit-interleaving of cell_coords[0], cell_coords[1], ...
  return interleave(cell_coords);
}
// Apply a lambda: pair its formal parameters with the supplied actual
// arguments, substitute those bindings into the body S-expression, and
// evaluate the result in the given environment.
object *fn_lambda (object *args, object *env) {
  lambda_object *fn = (lambda_object *) car(args);
  object *actuals = cdr(args);
  // Pair each formal parameter with the corresponding actual argument.
  object *bindings = interleave(fn->args, actuals);
  // Substitute the bindings into the body, then evaluate it.
  return eval(replace_atom(fn->sexp, bindings), env);
}
// poly makes either a polygon or polyline void poly(VGfloat * x, VGfloat * y, VGint n, VGbitfield flag) { VGfloat points[n * 2]; VGPath path = newpath(); interleave(x, y, n, points); vguPolygon(path, points, n, VG_FALSE); vgDrawPath(path, flag); vgDestroyPath(path); }
// Demo driver: interleave two sample strings and print the result.
int main(){
    char *first  = "clipsoool";
    char *second = "aloe";
    printf("%s\n", interleave(first, second));
    return 0;
}
/* Print every interleaving of str1 and str2 -- every merge of the two
 * strings that preserves the internal character order of each.
 * For str1 = "AB", str2 = "MN" the output is:
 *   ABMN AMBN AMNB MABN MANB MNAB
 *
 * str points at the next free slot of an output buffer whose start is
 * str - len, where len is the combined length of the two inputs; the
 * buffer is printed once both sources are exhausted.
 */
void interleave(char* str1, char* str2, char* str, int len)
{
    // Both sources consumed: the buffer holds one complete interleaving.
    if (*str1 == '\0' && *str2 == '\0') {
        printf("%s\n", str - len);
        return;
    }

    // Branch 1: place the next character of str1 at this position.
    if (*str1 != '\0') {
        *str = *str1;
        interleave(str1 + 1, str2, str + 1, len);
    }

    // Branch 2: place the next character of str2 at this position.
    if (*str2 != '\0') {
        *str = *str2;
        interleave(str1, str2 + 1, str + 1, len);
    }
}
// Entry point: decide whether s3 is an interleaving of s1 and s2.
// Resets the shared memo table to (len1+1) x (len2+1) entries of -1
// ("unknown") before delegating to the recursive helper.
bool isInterleave(string s1, string s2, string s3) {
    const int rows = s1.length() + 1;
    const int cols = s2.length() + 1;
    mem.clear();
    mem.resize(rows);
    for (auto& row : mem)
        row.resize(cols, -1);
    return interleave(s1, s2, s3);
}
// Apply a lambda object to its actual arguments and evaluate the result.
object *fn_lambda(object *args, object *env) { // Lambda objects hold two lists, the parameters and the function.
    object *lambda = first(args);
    args = second(args); // Extract the list of arguments
    // Pair each formal parameter with the corresponding actual argument.
    object *list = interleave((((lambda_object *) (lambda))->args), args);
    // Extract the function S-Expression and substitute the argument
    // bindings into it before evaluating in the given environment.
    object* sexp = replace_atom((((lambda_object *) (lambda))->sexp), list);
    return eval(sexp, env);
}
/* Inverse horizontal Haar wavelet compose (integer lifting) for one row.
 *
 * b holds w/2 subband samples followed by the other w/2 (first half
 * appears to be the low band, judging by the lifting macros -- confirm
 * against the corresponding decomposition code). Both lifting steps are
 * evaluated into temp, then interleave() merges temp (first half) and
 * temp+w2 (second half) back into b, shifting both halves by `shift`. */
static av_always_inline void horizontal_compose_haari(IDWTELEM *b, IDWTELEM *temp, int w, int shift)
{
    const int w2 = w >> 1;
    int x;

    for (x = 0; x < w2; x++) {
        /* The second lifting step consumes temp[x], the value just
         * produced by the first step in the same iteration. */
        temp[x   ] = COMPOSE_HAARiL0(b[x   ], b[x+w2]);
        temp[x+w2] = COMPOSE_HAARiH0(b[x+w2], temp[x]);
    }

    interleave(b, temp, temp+w2, w2, shift, shift);
}
/* Inverse horizontal 5/3 (Dirac variant) wavelet compose for one row.
 *
 * b holds w/2 samples of one subband followed by w/2 of the other.
 * The first and last outputs are special-cased to mirror the signal at
 * the row boundaries; interior samples use their two neighbours.
 * interleave() then merges temp (first half) and temp+w2 (second half)
 * back into b with a shift of 1 on both halves. */
static void horizontal_compose_dirac53i(IDWTELEM *b, IDWTELEM *temp, int w)
{
    const int w2 = w >> 1;
    int x;

    /* Left boundary: b[w2] stands in for the missing left neighbour. */
    temp[0] = COMPOSE_53iL0(b[w2], b[0], b[w2]);
    for (x = 1; x < w2; x++) {
        /* The high-pass step at x-1 consumes temp[x-1] and temp[x],
         * both produced by the low-pass step -- order matters here. */
        temp[x     ] = COMPOSE_53iL0     (b[x+w2-1], b[x     ], b[x+w2]);
        temp[x+w2-1] = COMPOSE_DIRAC53iH0(temp[x-1], b[x+w2-1], temp[x]);
    }
    /* Right boundary: temp[w2-1] stands in for the missing right neighbour. */
    temp[w-1] = COMPOSE_DIRAC53iH0(temp[w2-1], b[w-1], temp[w2-1]);

    interleave(b, temp, temp+w2, w2, 1, 1);
}
/**
 * find the nth elements of a nElem permutation
 *
 * Walks the factorial-base decomposition of `nth`: at each step
 * interleave(reach, i) supplies the next coefficient k, and k*fact(i)
 * is subtracted from the remainder. Coefficients are accumulated as
 * decimal digits of `res` (res = res*10 + k).
 *
 * NOTE(review): packing coefficients as decimal digits only works while
 * every coefficient is <= 9, i.e. nElem <= 10 -- confirm with callers.
 * NOTE(review): the final `res*10` appends a 0 digit for the last
 * element, since the loop stops at i == 1.
 * The printf below looks like a debug trace left in place.
 */
long unsigned int position(int nth, int nElem) {
    int i;
    long int res = 0;
    int reach = nth;  /* remainder of nth still to be decomposed */
    for (i = nElem-1; i > 0; i--) {
        int k = interleave(reach, i);
        res = res*10 + k;
        reach = reach - k*fact(i);
        printf("\nreach -> %d; i -> %d; res -> %lu", reach, i, res);
    }
    return res*10;
}
// Demo driver for interleave(): prints every interleaving of "AB" and
// "MNO" using a scratch buffer sized for the combined strings.
int main()
{
    char* str1 = "AB";
    char* str2 = "MNO";
    int len1 = strlen(str1);
    int len2 = strlen(str2);
    int len = len1 + len2;

    // +1 for the terminating NUL; zero-fill so the buffer is a valid
    // C string at every point while it is being filled in.
    char* str = (char*)malloc(len + 1);
    if (str == NULL)            /* previously unchecked allocation */
        return 1;
    memset(str, 0, len + 1);

    interleave(str1, str2, str, len);

    free(str);                  /* previously leaked (harmless at exit, but tidy) */
    return 0;
}
std::vector<LDPCEncoder::symbol> LDPCEncoder::modulate(std::vector<int> coded) { coded = interleave(coded); std::vector<LDPCEncoder::symbol> qam; int modSize = log2(symbols); for (int i = 0; i < coded.size(); i += modSize) { std::vector<int> next; for (int j = 0; j < modSize; j++) { if (i + j >= coded.size()) { break; } next.push_back(coded.at(i + j)); } qam.push_back(modulateSymbol(next)); } return qam; }
// PortAudio stream callback: bridges PortAudio's interleaved device
// buffers and the AudioIO object's per-channel (deinterleaved) buffers.
// Copies input in, invokes the user audio callback, sanitizes the
// output (NaN removal, clipping) as configured, then interleaves the
// result back into PortAudio's output buffer. Always returns 0
// (paContinue).
int paCallback( const void *input, void *output, unsigned long frameCount, const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, void * userData )
{
    AudioIO& io = *(AudioIO *)userData;

    const float * paI = (const float *)input;
    float * paO = (float *)output;

    bool bDeinterleave = true;

    if(bDeinterleave){
        // Split PortAudio's interleaved input into AudioIO's channel buffers.
        deinterleave(const_cast<float *>(&io.in(0,0)), paI, io.framesPerBuffer(), io.channelsInDevice() );
        //deinterleave(&io.out(0,0), paO, io.framesPerBuffer(), io.channelsOutDevice());
    }

    if(io.autoZeroOut()) io.zeroOut();

    io();   // call callback

    // kill pesky nans so we don't hurt anyone's ears
    if(io.zeroNANs()){
        for(int i=0; i<io.framesPerBuffer()*io.channelsOutDevice(); ++i){
            float& s = (&io.out(0,0))[i];
            if(isnan(s)) s = 0.f;
        }
    }

    // Hard-clip the output to [-1, 1] if requested.
    if(io.clipOut()){
        for(int i=0; i<io.framesPerBuffer()*io.channelsOutDevice(); ++i){
            float& s = (&io.out(0,0))[i];
            if      (s<-1.f) s =-1.f;
            else if (s> 1.f) s = 1.f;
        }
    }

    if(bDeinterleave){
        // Merge AudioIO's channel buffers back into PortAudio's
        // interleaved output buffer.
        interleave(paO, &io.out(0,0), io.framesPerBuffer(), io.channelsOutDevice());
    }

    return 0;
}
// Shift a new F3 frame into the two-frame delay buffer and, once two
// frames are available, run the C1 stage (interleave + error correction).
void C1Circ::pushF3Frame(F3Frame f3Frame)
{
    // Age the current frame's data/error symbols into the "previous"
    // slots before overwriting them -- presumably both generations are
    // consumed by interleave()/errorCorrect() below (confirm in their
    // implementations).
    previousF3Data = currentF3Data;
    currentF3Data = f3Frame.getDataSymbols();

    previousF3Errors = currentF3Errors;
    currentF3Errors = f3Frame.getErrorSymbols();

    c1BufferLevel++;
    if (c1BufferLevel > 1) {
        c1BufferLevel = 2;  // clamp: only the last two frames are ever needed

        // Interleave the F3 data and perform C1 error correction
        interleave();
        errorCorrect();
    }
}
// Blit a decoded frame into the SDL YUV overlay, honouring the
// overlay's per-plane pitch (row stride). When a plane's pitch equals
// its width the copy is a single memcpy; otherwise interleave() copies
// row by row. `ptr` is assumed to hold planar YV12 data: Y (w*h bytes)
// followed by the two chroma planes at quarter size -- TODO confirm
// against the caller.
uint8_t sdlAccelRender::display(uint8_t *ptr, uint32_t w, uint32_t h,renderZoom zoom)
{
#ifdef __WIN32
        // DirectX playback doesn't refresh correctly if the parent window is moved.
        // Detect when the parent window has moved and force a coordinate update.
        if (strcmp(getenv("SDL_VIDEODRIVER"), "directx") == 0)
        {
                static RECT lastPos;
                RECT currentPos;

                // NOTE(review): "¤tPos" below appears to be a mis-encoded
                // "&currentPos" (HTML entity &curren;) -- must be repaired for
                // this to compile.
                GetWindowRect(sdlWin32, ¤tPos);

                if (currentPos.left != lastPos.left || currentPos.top != lastPos.top)
                {
                        // By default SetWindowPos doesn't work if the new coordinates are the same as the
                        // current so use SWP_FRAMECHANGED to force an update.
                        SetWindowPos(sdlWin32, NULL, 0, 0, 0, 0, SWP_NOSIZE | SWP_NOZORDER | SWP_FRAMECHANGED);

                        lastPos = currentPos;
                }
        }
#endif
        int pitch;
        int page=w*h;  // size of the luma plane in bytes

        ADM_assert(sdl_overlay);
        SDL_LockYUVOverlay(sdl_overlay);

        pitch=sdl_overlay->pitches[0];
        // printf("SDL: new pitch :%d\n",pitch);
        if(useYV12)
        {
                // Y plane: direct copy when pitch matches, strided copy otherwise.
                if(pitch==w)
                        memcpy(sdl_overlay->pixels[0],ptr,w*h);
                else
                        interleave(sdl_overlay->pixels[0],ptr,w,pitch,h);

                // First chroma plane (half width / half height).
                pitch=sdl_overlay->pitches[1];
                if(pitch==(w>>1))
                        memcpy(sdl_overlay->pixels[1],ptr+page,(w*h)>>2);
                else
                        interleave(sdl_overlay->pixels[1],ptr+page,w>>1,pitch,h>>1);

                // Second chroma plane, starting 1.25 * page into the buffer.
                pitch=sdl_overlay->pitches[2];
                if(pitch==(w>>1))
                        memcpy(sdl_overlay->pixels[2],ptr+(page*5)/4,(w*h)>>2);
                else
                // NOTE(review): source chunk is truncated here -- the second
                // chroma interleave path continues beyond this view.
/** * This test application takes in input the convolutionally encoded bits of the 802.11-2012 * standard (annex J), and performs the interleaving step. */ int main(int argc, char **argv) { if (argc != 2) { printf("error: missing input file\n"); return 1; } //encoded psdu loaded from data file char encoded_psdu[1000]; //ofdm encoding parameters struct OFDM_PARAMETERS params = get_ofdm_parameter(BW_20_DR_36_MBPS); //transmission parameters struct TX_PARAMETERS tx_params; //encoded data field char *interleaved_data; //read the scrambled psdu from text file int rb = read_bits_from_file(argv[1], encoded_psdu, 1000); if (rb == ERR_CANNOT_READ_FILE) { printf("Cannot read file \"%s\": file not found?\n", argv[1]); return 1; } if (rb == ERR_INVALID_FORMAT) { printf("Invalid file format\n"); return 1; } //the size of the psdu in the 802.11-2012 example is 100 bytes tx_params = get_tx_parameters(params.data_rate, 100); //after interleaving, the size is the same after encoding interleaved_data = (char *)calloc(rb, sizeof(char)); //perform interleaving interleave(encoded_psdu, interleaved_data, rb, params.n_cbps, params.n_bpsc); //output bits and we're done print_bits_array(interleaved_data, rb, '\n'); free(interleaved_data); return 0; }
// Run the full decode pipeline for this source: decode to PCM, resample,
// then interleave, logging the wall-clock time spent in each stage.
// Returns true only if every stage succeeds.
bool AudioDecoder::start()
{
    auto oldTime = clockNow();
    auto nowTime = oldTime;
    bool ret;
    // do { ... } while(false) gives a single-exit "break on first
    // failure" chain without goto.
    do
    {
        // Stage 1: decode the compressed source to raw PCM.
        ret = decodeToPcm();
        if (!ret)
        {
            ALOGE("decodeToPcm (%s) failed!", _url.c_str());
            break;
        }
        nowTime = clockNow();
        ALOGD("Decoding (%s) to pcm data wasted %fms", _url.c_str(), intervalInMS(oldTime, nowTime));
        oldTime = nowTime;

        // Stage 2: resample to the target rate/format.
        ret = resample();
        if (!ret)
        {
            ALOGE("resample (%s) failed!", _url.c_str());
            break;
        }
        nowTime = clockNow();
        ALOGD("Resampling (%s) wasted %fms", _url.c_str(), intervalInMS(oldTime, nowTime));
        oldTime = nowTime;

        // Stage 3: interleave the channel data for playback.
        ret = interleave();
        if (!ret)
        {
            ALOGE("interleave (%s) failed!", _url.c_str());
            break;
        }
        nowTime = clockNow();
        ALOGD("Interleave (%s) wasted %fms", _url.c_str(), intervalInMS(oldTime, nowTime));
    } while(false);

    ALOGV_IF(!ret, "%s returns false, decode (%s)", __FUNCTION__, _url.c_str());
    return ret;
}
/* JACK process handler for one deck: feed captured timecode audio to
 * the timecoder and collect the player's output, working in blocks of
 * at most MAX_BLOCK frames so the timecoder is applied in small steps. */
static void process_deck(struct device *dv, jack_nframes_t nframes)
{
    int n;
    jack_default_audio_sample_t *in[DEVICE_CHANNELS], *out[DEVICE_CHANNELS];
    jack_nframes_t remain;
    struct jack *jack = (struct jack*)dv->local;

    assert(dv->timecoder != NULL);
    assert(dv->player != NULL);

    /* Fetch JACK's per-channel buffers for this cycle. */
    for (n = 0; n < DEVICE_CHANNELS; n++) {
        in[n] = jack_port_get_buffer(jack->input_port[n], nframes);
        assert(in[n] != NULL);
        out[n] = jack_port_get_buffer(jack->output_port[n], nframes);
        assert(out[n] != NULL);
    }

    /* For large values of nframes, communicate with the timecoder and
     * player in smaller blocks */

    remain = nframes;
    while (remain > 0) {
        signed short buf[MAX_BLOCK * DEVICE_CHANNELS];
        jack_nframes_t block;

        if (remain < MAX_BLOCK)
            block = remain;
        else
            block = MAX_BLOCK;

        /* Timecode input */

        interleave(buf, in, block);
        device_submit(dv, buf, block);

        /* Audio output is handled in the inner loop, so that
         * we get the timecoder applied in small steps */

        device_collect(dv, buf, block);
        uninterleave(out, buf, block);

        remain -= block;
    }
}
/* Inverse horizontal fidelity-filter wavelet compose for one row.
 *
 * b holds w/2 samples of one subband followed by w/2 of the other.
 * Each output sample is an 8-tap weighted combination of neighbours;
 * av_clip() mirrors indices at the row boundaries. Note the H0 pass
 * runs first (into tmp[0..w2)) and the L0 pass then reads those fresh
 * tmp values, so the pass order matters. The final interleave() merges
 * tmp+w2 and tmp back into b with no shift. */
static void horizontal_compose_fidelityi(IDWTELEM *b, IDWTELEM *tmp, int w)
{
    const int w2 = w >> 1;
    int i, x;
    IDWTELEM v[8];

    /* Pass 1: high-band update from a clamped 8-sample neighbourhood of b. */
    for (x = 0; x < w2; x++) {
        for (i = 0; i < 8; i++)
            v[i] = b[av_clip(x-3+i, 0, w2-1)];
        tmp[x] = COMPOSE_FIDELITYiH0(v[0], v[1], v[2], v[3], b[x+w2], v[4], v[5], v[6], v[7]);
    }

    /* Pass 2: low-band update reading the tmp values produced above. */
    for (x = 0; x < w2; x++) {
        for (i = 0; i < 8; i++)
            v[i] = tmp[av_clip(x-4+i, 0, w2-1)];
        tmp[x+w2] = COMPOSE_FIDELITYiL0(v[0], v[1], v[2], v[3], b[x], v[4], v[5], v[6], v[7]);
    }

    interleave(b, tmp+w2, tmp, w2, 0, 0);
}
/* JACK process handler for one deck: submit captured audio to the
 * timecoder (when one is attached) and collect the player's output,
 * working in blocks of at most MAX_BLOCK frames. Reads the file-scope
 * `rate` for player_collect(). */
static void process_deck(struct device_t *dv, jack_nframes_t nframes)
{
    int n;
    signed short buf[MAX_BLOCK * DEVICE_CHANNELS];
    jack_default_audio_sample_t *in[DEVICE_CHANNELS], *out[DEVICE_CHANNELS];
    jack_nframes_t remain, block;
    struct jack_t *jack = (struct jack_t*)dv->local;

    /* Fetch JACK's per-channel buffers for this cycle. */
    for (n = 0; n < DEVICE_CHANNELS; n++) {
        in[n] = jack_port_get_buffer(jack->input_port[n], nframes);
        assert(in[n] != NULL);
        out[n] = jack_port_get_buffer(jack->output_port[n], nframes);
        assert(out[n] != NULL);
    }

    /* For large values of nframes, communicate with the timecoder and
     * player in smaller blocks */

    remain = nframes;
    while (remain > 0) {
        if (remain < MAX_BLOCK)
            block = remain;
        else
            block = MAX_BLOCK;

        /* Timecode input */

        interleave(buf, in, block);
        if (dv->timecoder)
            timecoder_submit(dv->timecoder, buf, block);

        /* Audio output -- handle in the same loop for finer granularity */

        player_collect(dv->player, buf, block, rate);
        uninterleave(out, buf, block);

        remain -= block;
    }
}
/* Encode in_str: convert it to a binary string, right-shift it by a
 * key-derived amount, interleave the bits in 8-bit groups, hex-encode
 * the result, and prefix it with "ASUSHARE".
 *
 * Returns a freshly malloc'd string that the CALLER must free.
 * NOTE(review): the out_str parameter's incoming value is ignored --
 * the assignment below only rebinds the local pointer, so any buffer
 * passed in is never written (and would leak if caller-allocated);
 * callers must use the return value. Confirm call sites before
 * changing the signature. */
char* x123_encode(const char* in_str, const char* key, char* out_str) {
    //Cdbg(DBE, "in_str=%s, key=%s", in_str, key);

    /* Stage 1: input -> binary string (heap-allocated by callee). */
    char* ibinstr;
    str2binstr( in_str, &ibinstr );
    //Cdbg(DBE, "ibinstr = %s", ibinstr);

    /* Stage 2: key-dependent shift amount. */
    int shiftamount = getshiftamount( key, ibinstr );
    //Cdbg(DBE, "shiftamount = %d", shiftamount);

    char* unshiftbinstr = NULL;
    strrightshift( ibinstr, shiftamount, &unshiftbinstr );
    //Cdbg(DBE, "unshiftbinstr = %s", unshiftbinstr);

    /* Stage 3: interleave the bits in groups of 8. */
    char* binstr;
    interleave( unshiftbinstr, 8, &binstr );
    //Cdbg(DBE, "binstr = %s", binstr);

    /* Stage 4: binary string -> hex string. */
    char* hexstr;
    binstr2hexstr(binstr, &hexstr);
    //Cdbg(DBE, "hexstr = %s", hexstr);

    /* 8 bytes for the "ASUSHARE" prefix + 1 for the terminating NUL. */
    int out_len = strlen(hexstr)+9;
    //Cdbg(DBE, "out_len = %d, %d", out_len, strlen(hexstr));
    out_str = (char*)malloc(out_len);
    memset(out_str , '\0', out_len);
    strcpy(out_str, "ASUSHARE");
    strcat(out_str, hexstr);

    /* Release all intermediate buffers produced by the helpers. */
    free(unshiftbinstr);
    free(ibinstr);
    free(binstr);
    free(hexstr);

    return out_str;
}
/* Generate all permutations of `string` into the file-scope `list`
 * (listLength entries): recursively permute the tail, then interleave
 * the head character into every tail permutation via interleave(),
 * which appears to fill workingList/workingListLength; the working
 * list then replaces the global list.
 *
 * NOTE(review): malloc(100) is unchecked and assumes strings < 100
 * chars. NOTE(review): free(list) releases the pointer array but not
 * the strings it holds -- likely a leak unless interleave() adopts
 * them; confirm. NOTE(review): `listLength = 1` in the base case
 * assumes the list was empty on entry. */
void permute (char * string, int length)
{
    if (length == 1)
    {
        /* Base case: a single character has exactly one permutation. */
        list[listLength] = malloc (100);
        strcpy (list[listLength], string);
        listLength = 1;
        //printf ("permute finished....\n\n");
        return;
    }

    // if it's not length 1
    // permute it locking in this first character
    // interleave the first character into the global list
    //printf ("permuting on %s with length %d\n", string+1, length-1);
    permute (string+1, length-1);

    // now for all those permutations,
    // interleave our character with all the permutations
    int globalIdx = 0;
    char interleaver = string[0];
    while (globalIdx < listLength)
    {
        /* firstCall flags the first interleave of this round so the
         * callee can (re)initialize the working list. */
        int firstCall = 0;
        if (globalIdx == 0)
            firstCall = 1;
        //printf ("calling interleave with %s interleaving char %c\n", list[globalIdx], interleaver);
        interleave (list[globalIdx], interleaver, firstCall);
        globalIdx++;
    }

    // replace global list with working list
    free (list);
    list = workingList;
    listLength = workingListLength;
    workingListLength = 0;
    //printf ("permute finished....\n\n");
    return;
}
// Trellis-encode a DMR payload: expand it to 49 tribits, run each
// tribit through the trellis state machine to pick a constellation
// point, convert the points to dibits, and interleave them into the
// output buffer.
void CDMRTrellis::encode(const unsigned char* payload, unsigned char* data)
{
	assert(payload != NULL);
	assert(data != NULL);

	// Stage 1: payload bits -> 49 tribits.
	unsigned char tribits[49U];
	bitsToTribits(payload, tribits);

	// Stage 2: run the trellis. The encoder state is simply the
	// previous tribit; each (state, tribit) pair selects a point.
	unsigned char points[49U];
	unsigned char previous = 0U;
	for (unsigned int n = 0U; n < 49U; n++) {
		const unsigned char current = tribits[n];
		points[n] = ENCODE_TABLE[previous * 8U + current];
		previous = current;
	}

	// Stage 3: points -> dibits, then interleave into the output frame.
	signed char dibits[98U];
	pointsToDibits(points, dibits);
	interleave(dibits, data);
}
/* Resampling filter entry point: append the incoming samples to the
 * per-channel cache, resample each channel with av_resample(), carry
 * any unconsumed input over to the next call, and push the resampled
 * buffer downstream.
 *
 * NOTE(review): the fixed `int16_t *out[8]` limits the deinterleave
 * path to 8 channels -- confirm nb_channels can never exceed that.
 * NOTE(review): av_realloc() assigns over the old pointer, so on
 * allocation failure the previous buffer leaks and the function
 * silently returns. */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
{
    AResampleContext *aresample = inlink->dst->priv;
    AVFilterLink * const outlink = inlink->dst->outputs[0];
    int i,
        in_nb_samples            = insamplesref->audio->nb_samples,
        /* cached = new input + leftovers from the previous call */
        cached_nb_samples        = in_nb_samples + aresample->unconsumed_nb_samples,
        /* expected output size (ratio is the rate conversion factor) */
        requested_out_nb_samples = aresample->ratio * cached_nb_samples,
        nb_channels              = av_get_channel_layout_nb_channels(inlink->channel_layout);

    /* Grow the per-channel cache and output scratch buffers on demand. */
    if (cached_nb_samples > aresample->max_cached_nb_samples) {
        for (i = 0; i < nb_channels; i++) {
            aresample->cached_data[i]    = av_realloc(aresample->cached_data[i], cached_nb_samples * sizeof(int16_t));
            aresample->resampled_data[i] = av_realloc(aresample->resampled_data[i],
                                                      FFALIGN(sizeof(int16_t) * requested_out_nb_samples, 16));
            if (aresample->cached_data[i] == NULL || aresample->resampled_data[i] == NULL)
                return;
        }
        aresample->max_cached_nb_samples = cached_nb_samples;

        if (aresample->outsamplesref)
            avfilter_unref_buffer(aresample->outsamplesref);

        aresample->outsamplesref = avfilter_get_audio_buffer(outlink, AV_PERM_WRITE, requested_out_nb_samples);
        outlink->out_buf = aresample->outsamplesref;
    }

    avfilter_copy_buffer_ref_props(aresample->outsamplesref, insamplesref);
    aresample->outsamplesref->audio->sample_rate = outlink->sample_rate;
    /* Rescale the timestamp into the output sample-rate time base. */
    aresample->outsamplesref->pts =
        av_rescale(outlink->sample_rate, insamplesref->pts, inlink->sample_rate);

    /* av_resample() works with planar audio buffers */
    if (!inlink->planar && nb_channels > 1) {
        int16_t *out[8];
        for (i = 0; i < nb_channels; i++)
            out[i] = aresample->cached_data[i] + aresample->unconsumed_nb_samples;
        deinterleave(out, (int16_t *)insamplesref->data[0],
                     nb_channels, in_nb_samples);
    } else {
        /* Already planar (or mono): append directly to each channel cache. */
        for (i = 0; i < nb_channels; i++)
            memcpy(aresample->cached_data[i] + aresample->unconsumed_nb_samples,
                   insamplesref->data[i],
                   in_nb_samples * sizeof(int16_t));
    }

    /* Resample channel by channel; is_last lets av_resample flush state. */
    for (i = 0; i < nb_channels; i++) {
        int consumed_nb_samples;
        const int is_last = i+1 == nb_channels;

        aresample->outsamplesref->audio->nb_samples =
            av_resample(aresample->resample,
                        aresample->resampled_data[i], aresample->cached_data[i],
                        &consumed_nb_samples,
                        cached_nb_samples, requested_out_nb_samples,
                        is_last);

        /* move unconsumed data back to the beginning of the cache */
        aresample->unconsumed_nb_samples = cached_nb_samples - consumed_nb_samples;
        memmove(aresample->cached_data[i],
                aresample->cached_data[i] + consumed_nb_samples,
                aresample->unconsumed_nb_samples * sizeof(int16_t));
    }

    /* copy resampled data to the output samplesref */
    if (!inlink->planar && nb_channels > 1) {
        interleave((int16_t *)aresample->outsamplesref->data[0],
                   aresample->resampled_data,
                   nb_channels, aresample->outsamplesref->audio->nb_samples);
    } else {
        for (i = 0; i < nb_channels; i++)
            memcpy(aresample->outsamplesref->data[i], aresample->resampled_data[i],
                   aresample->outsamplesref->audio->nb_samples * sizeof(int16_t));
    }

    avfilter_filter_samples(outlink, avfilter_ref_buffer(aresample->outsamplesref, ~0));
    avfilter_unref_buffer(insamplesref);
}
// Telecide (inverse-telecine) filter: produce output frame `frame` by
// detecting interlacing and, if present, recombining fields from the
// current frame and its neighbour to find the best field match.
// Returns 1 on success (frame written to `data`), 0 on cache failure.
//
// NOTE(review): `next` is fetched with getImage(frame-1), i.e. the
// PREVIOUS frame despite its name -- confirm intent.
// NOTE(review): in the nmatch > n2match branch, hasMotion() is applied
// to _uncompressed which still holds the n2match composition from the
// probe above -- looks inconsistent with the Shifted-O branch, which
// re-interleaves first; verify.
// `dummylen` and `flags` are unused here.
uint8_t ADMVideoTelecide::getFrameNumberNoAlloc(uint32_t frame,
                        uint32_t *len,
                        ADMImage *data,
                        uint32_t *flags)
{
    uint32_t uvlen;
    uint32_t dummylen;
    uint8_t motion;
    uint32_t cmatch,nmatch,n2match;
    ADMImage *cur,*next;

    if(frame>=_info.nb_frames) return 0;
    uvlen= _info.width*_info.height;       // luma plane size in bytes
    *len=uvlen+(uvlen>>1);                 // full YV12 frame size

    cur=vidCache->getImage(frame);
    if(!cur) return 0;
    data->copyInfo(cur);

    // First and last frames have no neighbour to borrow fields from:
    // pass them through untouched.
    if(!frame || frame==_info.nb_frames-1)
    {
        data->duplicate(cur);
        vidCache->unlockAll();
        return 1;
    }
    next=vidCache->getImage(frame-1);
    if(!next)
    {
        vidCache->unlockAll();
        return 0;
    }
    // for u & v , no action -> copy it as is
    memcpy(UPLANE(data),UPLANE(cur),uvlen>>2);
    memcpy(VPLANE(data),VPLANE(cur),uvlen>>2);
    data->copyInfo(cur);

    // No interleaving detected
    if(!(motion=hasMotion(data)) )
    {
        printf("\n Not interlaced !\n");
        memcpy(YPLANE(data),YPLANE(cur),uvlen);
        vidCache->unlockAll();
        return 1; // over !
    }
    // else cmatch is the current match
    cmatch=getMatch(cur);
    /* ------------------------------------------------------------------------------------
        Try to complete with next frame fields: probe both ways of pairing
        the current frame's fields with the neighbour's fields.
    ----------------------------------------------------------------------------------- */
    // Interleav next in even field
    interleave(cur,_uncompressed,1);
    interleave(next,_uncompressed,0);
    nmatch=getMatch(_uncompressed);

    // ...and the opposite pairing.
    interleave(cur,_uncompressed,0);
    interleave(next,_uncompressed,1);
    n2match=getMatch(_uncompressed);

    printf(" Cur  : %lu \n",cmatch);
    printf(" Next : %lu \n",nmatch);
    printf(" NextP: %lu \n",n2match);

    // Current frame matches itself best: genuinely interlaced content,
    // so rebuild it from its own fields and blend.
    if((cmatch<nmatch)&&(cmatch<n2match))
    {
        printf("\n __ pure interlaced __\n");
        interleave(cur,_uncompressed,0);
        interleave(cur,_uncompressed,1);
        hasMotion(_uncompressed);
        doBlend(_uncompressed,data);
        vidCache->unlockAll();
        return 1;
    }
    if( nmatch > n2match)
    {
        printf("\n -------Shifted-P is better \n");
        if(hasMotion(_uncompressed))
        {
            doBlend(_uncompressed,data);
            printf(" but there is still motion \n");
        }
        else data->duplicate(_uncompressed);
    }
    else
    {
        printf("\n -------Shifted-O is better \n");
        // Rebuild the winning composition before testing for residual motion.
        interleave(cur,_uncompressed,1);
        interleave(next,_uncompressed,0);
        if(hasMotion(_uncompressed))
        {
            doBlend(_uncompressed,data);
            printf(" but there is still motion \n");
        }
        else data->duplicate(_uncompressed);
    }
    // which chroma is better ? from current or from next ?
    // search for a transition and see if there is also one ?
    vidCache->unlockAll();
    return 1;
}
int main(int argc, char *argv[]) { if (argc == 3) { video_filename = argv[1]; pcm_filaname = argv[2]; } else { printf("Missing input video file or output file.\n"); return 1; } int err; av_register_all(); AVFormatContext * fmt_ctx = NULL; err = avformat_open_input( &fmt_ctx, video_filename, NULL, NULL ); assert( err == 0 ); err = avformat_find_stream_info( fmt_ctx, NULL ); assert( err >= 0 ); av_dump_format( fmt_ctx, 0, argv[1], 0 ); // int const video_idx = get_stream_idx( fmt_ctx, AVMEDIA_TYPE_VIDEO ); int const audio_idx = get_stream_idx( fmt_ctx, AVMEDIA_TYPE_AUDIO ); assert(audio_idx >= 0); // AVCodecContext * video_codec_ctx = new_codec_ctx( fmt_ctx, video_idx ); AVCodecContext * audio_codec_ctx = new_codec_ctx( fmt_ctx, audio_idx ); int is_frame_finish; static AVPacket pkt; static uint8_t *pkt_data = NULL; static int pkt_size = 0; static AVFrame frame; AVFrame *decoded_frame = av_frame_alloc(); assert( decoded_frame ); while ( true ) { if ( av_read_frame( fmt_ctx, &pkt ) < 0 ) { break; } if ( pkt.stream_index == audio_idx ) { pkt_data = pkt.data; pkt_size = pkt.size; while ( pkt_size > 0 ) { int const byte_consumed = avcodec_decode_audio4( audio_codec_ctx, decoded_frame, &is_frame_finish, &pkt ); ASSERT_OR_ERRMSG(byte_consumed >= 0, "Errors when decode audio.\n"); pkt_data += byte_consumed; pkt_size -= byte_consumed; assert(pkt_size >= 0); if ( is_frame_finish ) { int const data_size = av_samples_get_buffer_size( NULL, audio_codec_ctx->channels, decoded_frame->nb_samples, audio_codec_ctx->sample_fmt, 1); if ( av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) { uint8_t *buf = malloc(data_size); interleave(decoded_frame->data, buf, audio_codec_ctx->channels, audio_codec_ctx->sample_fmt, data_size); write_pcm( buf, data_size, pcm_filaname ); free(buf); } else { write_pcm( decoded_frame->data[0], data_size, pcm_filaname ); } } } if (pkt.data) { av_free_packet( &pkt ); } } else { av_free_packet( &pkt ); } } avcodec_close(audio_codec_ctx); 
avformat_close_input(&fmt_ctx); printf("audio decode done\n"); return 0; }