/* Per-operation bookkeeping for the delay buffer.
 *
 * Called on every put/get.  Counts how many times the same operation
 * repeats in a row ("level"); when the operation switches, it records the
 * burst size, runs down a recalculation timer, and periodically adapts
 * the effective sample count (eff_cnt) to the observed burst maximum.
 * On OP_PUT it may also shrink the circular buffer to cut latency.
 *
 * NOTE(review): exact semantics of SAFE_MARGIN, AGC() and RECALC_TIME are
 * defined elsewhere in this file — not visible in this chunk.
 */
static void update(pjmedia_delay_buf *b, enum OP op)
{
    /* Sequential operation */
    if (op == b->last_op) {
        ++b->level;
        return;
    }

    /* Switching operation */
    if (b->level > b->max_level)
        b->max_level = b->level;

    /* Burn down the recalc timer by half the burst duration (ms). */
    b->recalc_timer -= (b->level * b->ptime) >> 1;

    b->last_op = op;
    b->level = 1;

    /* Recalculate effective count based on max_level */
    if (b->recalc_timer <= 0) {
        unsigned new_eff_cnt = (b->max_level+SAFE_MARGIN)*b->samples_per_frame;

        /* Smoothening effective count transition */
        AGC(b->eff_cnt, new_eff_cnt);

        /* Make sure the new effective count is multiplication of
         * channel_count, so let's round it up.
         */
        if (b->eff_cnt % b->channel_count)
            b->eff_cnt += b->channel_count - (b->eff_cnt % b->channel_count);

        TRACE__((b->obj_name,"Cur eff_cnt=%d", b->eff_cnt));

        /* Start a fresh measurement window. */
        b->max_level = 0;
        b->recalc_timer = RECALC_TIME;
    }

    /* See if we need to shrink the buffer to reduce delay */
    if (op == OP_PUT &&
        pjmedia_circ_buf_get_len(b->circ_buf) >
            b->samples_per_frame + b->eff_cnt)
    {
        /* Drop half a frame at a time to avoid audible glitches. */
        unsigned erase_cnt = b->samples_per_frame >> 1;
        unsigned old_buf_cnt = pjmedia_circ_buf_get_len(b->circ_buf);

        shrink_buffer(b, erase_cnt);
        PJ_LOG(4,(b->obj_name,"Buffer size adjusted from %d to %d (eff_cnt=%d)",
                  old_buf_cnt,
                  pjmedia_circ_buf_get_len(b->circ_buf),
                  b->eff_cnt));
    }
    /* NOTE(review): the function's closing brace lies outside this chunk —
     * the definition is truncated here by the extraction. */
/* Buffer filling thread.
 *
 * Reads the HTTP/ICY stream from desc->sock in BLOCK_SIZE chunks into a
 * private staging buffer, strips interleaved ICY metadata (stream titles)
 * into desc->metadata (guarded by desc->meta_lock), and appends the audio
 * bytes to the shared desc->buffer (guarded by desc->buffer_lock).
 * Throttles itself with cond_timedwait_relative() when the shared buffer
 * is full; exits when desc->going is cleared, the stream ends, or a read
 * error occurs.
 */
static void buffer_thread (http_desc_t *desc)
{
    /* An ICY metadata block is one length byte (value * 16) followed by
     * at most 255 * 16 bytes of text. */
    enum { ICY_META_MAX = 255 * 16 };

    pthread_mutex_t mut;        /* Temporary mutex for the timed waits. */
    int BLOCK_SIZE = HTTP_BLOCK_SIZE;
    char *ibuffer;              /* Staging buffer for one network read. */
    int rest = 0;               /* Bytes read past the metadata boundary. */
    int metasize = 0, metapos = 0, extra_read = 0;
    char *p;

    /* Init */
    pthread_mutex_init (&mut, NULL);

    /* Never read across an ICY metadata boundary in a single block. */
    if (desc->icy_metaint) {
        BLOCK_SIZE = (HTTP_BLOCK_SIZE > desc->icy_metaint) ?
                     desc->icy_metaint : HTTP_BLOCK_SIZE;
    }

    /* BUGFIX: room for one block plus a complete metadata block and the
     * terminating NUL we add, so p[metasize] = '\0' below can never
     * overrun (the old "BLOCK_SIZE << 1" could be too small). */
    ibuffer = malloc (BLOCK_SIZE + ICY_META_MAX + 1);
    if (!ibuffer) {
        desc->error = 1;
        desc->going = 0;
    }

    /* Process while main thread allows it. */
    while (desc->going) {
        char *newbuf;
        int readed;

#ifdef DEBUG_HTTP_BUFFERING
        print_debug_info (desc);
#endif
        rest = metasize = 0;

        /* Trying to shrink buffer. */
        pthread_mutex_lock (&desc->buffer_lock);
        shrink_buffer (desc);
        pthread_mutex_unlock (&desc->buffer_lock);

        /* Let them know about our state, heh. */
        status_notify (desc);

        /* Check for overflow: buffer is already full, so don't read
         * more — just wake the reader and pause. */
        if (desc->len > http_buffer_size) {
            /* Notice waiting function that the new block of data has
             * arrived. */
            desc->new_datablock = 1;
            pthread_cond_signal (&desc->new_datablock_signal);

            /* Make pause. */
            if (!desc->dont_wait) {
                pthread_mutex_lock (&mut);
                cond_timedwait_relative (&desc->dont_wait_signal, &mut,
                                         calc_time_to_wait (desc));
                pthread_mutex_unlock (&mut);
            } else {
                desc->dont_wait--;
            }
            continue;
        }

        /* Read one block into the staging buffer. */
        readed = read_data (desc->sock, ibuffer, BLOCK_SIZE);

        /* Reasons to stop. */
        if (readed == 0) {              /* EOF */
            desc->going = 0;
        } else if (readed < 0) {        /* read error */
            desc->error = 1;
            desc->going = 0;
        } else {
            /* Something was read. */

            /* Metadata stuff: the server interleaves a metadata block
             * every icy_metaint bytes of audio. */
            if (desc->icy_metaint > 0 &&
                (desc->buffer_pos + readed) > desc->icy_metaint) {
                /* Metadata block starts inside this read. */
                rest = (desc->buffer_pos + readed) - desc->icy_metaint;
                p = ibuffer + (readed - rest);
                metapos = (readed - rest);

                if (rest) {
                    /* BUGFIX: the ICY length byte is unsigned.  Reading
                     * it through int8_t made values >= 0x80 negative,
                     * which after "<<= 4" (UB on negative) and the later
                     * "rest - metasize" memcpy corrupted the heap. */
                    metasize = *(uint8_t *)p;
                    metasize <<= 4;     /* Stored as length / 16. */

                    if (rest < metasize) {
                        /* Metadata block is split across reads — fetch
                         * the remainder. */
                        extra_read = read_data (desc->sock,
                                                ibuffer + readed, metasize);
                        if (extra_read > 0) {
                            readed += extra_read;
                            rest += extra_read;
                        }
                        if (rest < metasize) {
                            /* BUGFIX: stream died inside a metadata
                             * block; the old code went on with
                             * inconsistent counters. */
                            desc->error = 1;
                            desc->going = 0;
                            break;
                        }
                    }
                    if (metasize > ICY_META_MAX) {
                        alsaplayer_error("Invalid metasize (%d)", metasize);
                    } else if (metasize > 0) {
                        p++;                    /* Skip the length byte. */
                        p[metasize] = '\0';
                        pthread_mutex_lock (&desc->meta_lock);
                        if (desc->metadata)
                            free(desc->metadata);
                        desc->metadata = malloc(strlen(p) + 1);
                        /* BUGFIX: copy the NUL as well — the old code
                         * left desc->metadata unterminated.  Also check
                         * the allocation. */
                        if (desc->metadata)
                            memcpy(desc->metadata, p, strlen(p) + 1);
                        pthread_mutex_unlock (&desc->meta_lock);
                    }
                    /* else: metadata is zero length — nothing to store */
                } else {
                    alsaplayer_error("Rest = 0???");
                }
                metasize++;     /* Account for the length byte itself. */
            } else {
                desc->buffer_pos += readed;
            }

            /* These operations are fast -> don't break reader_read. */
            /* ---------------- lock buffer ( */
            pthread_mutex_lock (&desc->buffer_lock);

            /* Enlarge buffer. */
            newbuf = malloc (desc->len + (BLOCK_SIZE * 2));
            if (!newbuf) {
                /* BUGFIX: allocation failure was previously a crash. */
                pthread_mutex_unlock (&desc->buffer_lock);
                desc->error = 1;
                desc->going = 0;
            } else {
                memcpy (newbuf, desc->buffer, desc->len);
                if (metasize) {
                    /* Append the audio on both sides of the metadata
                     * block, skipping the metadata itself. */
                    memcpy(newbuf + desc->len, ibuffer, metapos);
                    memcpy(newbuf + desc->len + metapos,
                           ibuffer + metapos + metasize, rest - metasize);
                    readed -= metasize;
                    desc->buffer_pos = rest - metasize;
                } else {
                    memcpy (newbuf + desc->len, ibuffer, readed);
                }

                /* Switch buffers. */
                free (desc->buffer);
                desc->buffer = newbuf;
                desc->len += readed;

                /* unlock buffer ) */
                pthread_mutex_unlock (&desc->buffer_lock);
            }
        }

        /* Notice waiting function that the new block of data has
         * arrived. */
        desc->new_datablock = 1;
        pthread_cond_signal (&desc->new_datablock_signal);

        /* Do wait. */
        if (desc->going && !desc->dont_wait) {
            pthread_mutex_lock (&mut);
            cond_timedwait_relative (&desc->dont_wait_signal, &mut,
                                     calc_time_to_wait (desc));
            pthread_mutex_unlock (&mut);
        }
        if (desc->dont_wait)
            desc->dont_wait--;
    }

    /* Wake any reader still blocked on the data signal before exiting. */
    desc->new_datablock = 1;
    pthread_cond_signal (&desc->new_datablock_signal);

    free (ibuffer);
    pthread_mutex_destroy (&mut);   /* BUGFIX: mutex was never destroyed. */
    pthread_exit (NULL);
} /* end of: buffer_thread */
/* Main loop of the buffering thread.
 *
 * Services requests posted to buffering_queue (start a fill, buffer /
 * reset / close a handle, USB plug-in) and, when idle, keeps the audio
 * buffer topped up: while "filling" it polls with a short timeout and
 * keeps calling fill_buffer(); otherwise it wakes on a slower timer and
 * refills only when useful data drops below the configured watermark.
 * Never returns.
 */
void buffering_thread(void)
{
    bool filling = false;       /* true while an active buffer fill runs */
    struct queue_event ev;

    while (true)
    {
        if (!filling) {
            cancel_cpu_boost();
        }
        /* Poll fast (5 ticks) while filling, slowly (HZ/2) when idle. */
        queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                /* Call buffer callbacks here because this is one of two ways
                 * to begin a full buffer fill */
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                /* |= keeps filling set if it was already true. */
                filling |= buffer_handle((int)ev.data);
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data);
                break;

            case Q_RESET_HANDLE:
                LOGFQUEUE("buffering < Q_RESET_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                reset_handle((int)ev.data);
                break;

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                /* Reply carries close_handle()'s result to the caller. */
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case Q_BASE_HANDLE:
                LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
                base_handle_id = (int)ev.data;
                break;

#ifndef SIMULATOR
            case SYS_USB_CONNECTED:
                LOGFQUEUE("buffering < SYS_USB_CONNECTED");
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&buffering_queue);
                break;
#endif

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        update_data_counters();

        /* If the buffer is low, call the callbacks to get new data */
        if (num_handles > 0 && data_counters.useful <= conf_watermark)
            send_event(BUFFER_EVENT_BUFFER_LOW, 0);

#if 0
        /* TODO: This needs to be fixed to use the idle callback, disable it
         * for simplicity until its done right */
#if MEM > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active() && queue_empty(&buffering_queue))
        {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && BUF_USED <= high_watermark)
            {
                /* This is a new fill, shrink the buffer up first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters();
            }
        }
#endif
#endif

        /* Only fill when no request is pending, so commands stay
         * responsive. */
        if (queue_empty(&buffering_queue)) {
            if (filling) {
                /* Keep filling until the buffer is full or no source
                 * data remains. */
                if (data_counters.remaining > 0 && BUF_USED < buffer_len)
                    filling = fill_buffer();
                else if (data_counters.remaining == 0)
                    filling = false;
            } else if (ev.id == SYS_TIMEOUT) {
                /* Idle wake-up: start a fresh fill if we fell below the
                 * watermark. */
                if (data_counters.remaining > 0 &&
                    data_counters.useful <= conf_watermark) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}