/*
 * Checks the state of a freshly initialised ring buffer: it must be
 * empty, not full, report a count of zero and the configured capacity.
 *
 * Fix: `()` declared an unprototyped (unspecified-arguments) function;
 * use the explicit `(void)` prototype.
 */
static void test_init_buffer_state(void)
{
  RingBuffer rb;

  _ringbuffer_init(&rb);

  assert_false(ring_buffer_is_full(&rb), "buffer should not be full");
  assert_true(ring_buffer_is_empty(&rb), "buffer should be empty");
  assert_true(ring_buffer_count(&rb) == 0, "buffer should be empty");
  /* `capacity` is a file-scope test parameter set by the harness. */
  assert_true(ring_buffer_capacity(&rb) == capacity, "invalid buffer capacity");
  ring_buffer_free(&rb);
}
/**
 * ring_buffer_tail:
 * @self: the ring buffer; its backing storage must already be allocated
 *
 * Returns the address of the element slot at the tail index, or NULL
 * when the buffer is full (i.e. no free slot is available there).
 */
gpointer
ring_buffer_tail (RingBuffer *self)
{
  g_assert(self->buffer != NULL);

  if (ring_buffer_is_full(self))
    return NULL;

  return self->buffer + self->tail * self->element_size;
}
static void test_ring_buffer_is_full() { RingBuffer rb; int i; TestData *last = NULL; _ringbuffer_init(&rb); for (i = 1; !ring_buffer_is_full(&rb); i++) { TestData *td = ring_buffer_push(&rb); assert_true(td != NULL, "ring_buffer_push failed"); td->idx = i; last = td; } assert_true(ring_buffer_count(&rb) == capacity, "buffer count(%d) is not equal to capacity(%d)", ring_buffer_count(&rb), capacity); assert_true(last->idx == capacity, "buffer is not full, number of inserted items: %d, capacity: %d", last->idx, capacity); ring_buffer_free(&rb); }
/** read from beginning of queue (and remove that element) */
/* Blocking consumer: waits on cond_can_pop while the ring buffer is empty,
 * pops one element, and wakes a possibly-blocked producer if the buffer was
 * full before the pop. All //@ and /*@ @*/ text is VeriFast proof script:
 * it tracks the logical read/write progress through the ghost text `alltext`
 * via the ghost cells id_progress_read / id_progress_write. */
int pop(struct buffer *buffer)
/*@ requires [?f]buffer(buffer, ?id_text, ?id_progress_read, ?id_progress_write)
    &*& token(id_progress_read, ?t1)
    &*& op(id_text, id_progress_read, t1, ?c, ?t2);
@*/
/*@ ensures [f]buffer(buffer, id_text, id_progress_read, id_progress_write)
    &*& token(id_progress_read, t2)
    &*& result == c;
@*/
{
    /* Acquire the lock and open the protected invariant so the proof can
     * see the ring buffer contents and the ghost progress cells. */
    //@ open buffer(buffer, _, _, _);
    //@ assert [f]buffer->mutex |-> ?mutex;
    mutex_acquire(buffer->mutex);
    //@ open buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
    //@ open token(id_progress_read, ?n_read);
    //@ assert [_]ghost_cell<list<int> >(id_text, ?alltext);
    /* Wait until the producer has made at least one element available.
     * The loop invariant re-states the protected resources; note the read
     * progress cell appears as two [1/2] halves (token half + invariant
     * half), i.e. the full cell is held inside the critical section. */
    while (ring_buffer_is_empty(buffer->ring_buffer))
    /*@ invariant
        buffer->ring_buffer |-> ?ring_buffer
        &*& [f]buffer->mutex |-> mutex
        &*& ring_buffer(ring_buffer, ?size, ?contents)
        &*& [f]buffer->cond_can_pop |-> ?cond_can_pop
        &*& [f]mutex_cond(cond_can_pop, mutex)
        &*& mutex_held(mutex, (buffer_protected)(buffer, id_text, id_progress_read, id_progress_write), currentThread, f)
        &*& [_]ghost_cell<list<int> >(id_text, alltext)
        &*& [1/2]ghost_cell<int>(id_progress_write, ?n_write)
        &*& [1/2]ghost_cell<int>(id_progress_read, n_read)
        &*& [1/2]ghost_cell<int>(id_progress_read, n_read)
        &*& take(n_write - n_read, drop(n_read, alltext)) == contents
        ;
    @*/
    {
        //@ close buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
        mutex_cond_wait(buffer->cond_can_pop, buffer->mutex);
        //@ open buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
    }
    /* Record fullness before popping: a producer can only be blocked when
     * the buffer was full, so signal cond_can_push only in that case. */
    bool was_full = ring_buffer_is_full(buffer->ring_buffer);
    int x = ring_buffer_pop(buffer->ring_buffer);
    if (was_full){
        mutex_cond_signal(buffer->cond_can_push);
    }
    /* Advance the read-progress ghost cell to t2 and re-establish the
     * protected invariant before releasing the lock. */
    //@ ghost_cell_mutate(id_progress_read, t2);
    //@ close token(id_progress_read, t2);
    //@ open op(_, _, _, c, _);
    //@ assert c == nth(t1, alltext);
    //@ assert x == head(contents);
    //@ assert x == head(take(n_write - n_read, drop(n_read, alltext)));
    /* NOTE(review): the two `assume` steps below are unproven list lemmas
     * (head/tail of a take/drop window) trusted by the proof — TODO: replace
     * with proved lemmas. */
    //@ assume (x == nth(n_read, alltext));
    //@ assert c == x;
    //@ assert take(n_write - n_read, drop(n_read, alltext)) == contents;
    //@ assume (take(n_write - (n_read + 1), drop((n_read+1), alltext)) == tail(contents));
    //@ close buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
    mutex_release(buffer->mutex);
    //@ close [f]buffer(buffer, id_text, id_progress_read, id_progress_write);
    return x;
}
/** add to end of queue */
/* Blocking producer: waits on cond_can_push while the ring buffer is full,
 * appends one element, and wakes a possibly-blocked consumer if the buffer
 * was empty before the push. Mirrors pop(); the VeriFast annotations advance
 * the write-progress ghost cell id_progress_write. */
void push(struct buffer *buffer, int x)
/*@ requires [?f]buffer(buffer, ?id_text, ?id_progress_read, ?id_progress_write)
    &*& token(id_progress_write, ?t1)
    &*& op(id_text, id_progress_write, t1, x, ?t2);
@*/
/*@ ensures [f]buffer(buffer, id_text, id_progress_read, id_progress_write)
    &*& token(id_progress_write, t2);
@*/
{
    /* Acquire the lock and open the protected invariant. */
    //@ open buffer(buffer, _, _, _);
    //@ assert [f]buffer->mutex |-> ?mutex;
    mutex_acquire(buffer->mutex);
    //@ open buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
    //@ open token(id_progress_write, ?n_write);
    //@ assert [_]ghost_cell<list<int> >(id_text, ?alltext);
    /* Wait for a free slot. As in pop(), the write progress cell appears
     * as two [1/2] halves, i.e. the full cell is held here. */
    while (ring_buffer_is_full(buffer->ring_buffer))
    /*@ invariant
        buffer->ring_buffer |-> ?ring_buffer
        &*& [f]buffer->mutex |-> mutex
        &*& ring_buffer(ring_buffer, ?size, ?contents)
        &*& [f]buffer->cond_can_push |-> ?cond_can_push
        &*& [f]mutex_cond(cond_can_push, mutex)
        &*& mutex_held(mutex, (buffer_protected)(buffer, id_text, id_progress_read, id_progress_write), currentThread, f)
        &*& [_]ghost_cell<list<int> >(id_text, alltext)
        &*& [1/2]ghost_cell<int>(id_progress_read, ?n_read)
        &*& [1/2]ghost_cell<int>(id_progress_write, n_write)
        &*& [1/2]ghost_cell<int>(id_progress_write, n_write)
        &*& take(n_write - n_read, drop(n_read, alltext)) == contents
        ;
    @*/
    {
        //@ close buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
        mutex_cond_wait(buffer->cond_can_push, buffer->mutex);
        //@ open buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
    }
    /* A consumer can only be blocked when the buffer was empty, so signal
     * cond_can_pop only in that case. */
    bool was_empty = ring_buffer_is_empty(buffer->ring_buffer);
    ring_buffer_push(buffer->ring_buffer, x);
    if (was_empty){
        mutex_cond_signal(buffer->cond_can_pop);
    }
    /* Advance the write-progress ghost cell and restore the invariant. */
    //@ ghost_cell_mutate(id_progress_write, n_write + 1);
    //@ open op(_, _, _, _, _);
    //@ assert t2 == n_write + 1;
    //@ close token(id_progress_write, n_write + 1);
    //@ assert take(n_write - n_read, drop(n_read, alltext)) == contents;
    /* NOTE(review): unproven list lemma (extending a take/drop window by one
     * appends the next element) trusted by the proof — TODO: prove. */
    //@ assume (take(n_write+1 - n_read, drop(n_read, alltext)) == append(contents, cons(nth(n_write, alltext), nil)));
    //@ close buffer_protected(buffer, id_text, id_progress_read, id_progress_write)();
    mutex_release(buffer->mutex);
    //@ close [f]buffer(buffer, id_text, id_progress_read, id_progress_write);
}
/**
 * Reads one integer from the given queue.
 *
 * This is blocking. If the queue is empty, this function waits until the queue is not empty anymore.
 */
/* VeriFast-verified consumer using a prophecy variable: `tree` carries the
 * prophecy id for the value that will be read; after the pop the prophecy is
 * resolved to the actual value and the I/O invariant is updated inside a
 * ghost critical section. */
int getchar/*@<u> @*/(struct queue *queue, struct proph_tree *tree)
//@ requires [?f_queue]queue(?queue_id, queue) &*& getchar_io<u>(queue_id, ?t1, ?c, ?t2, tree) &*& token(t1);
//@ ensures [f_queue]queue(queue_id, queue) &*& token(t2) &*& result == c;
{
    //@ open [f_queue]queue(_,_);
    //@ assert [f_queue]queue->mutex |-> ?mutex; // bind mutex so we know it won't change.
    mutex_acquire(queue->mutex);
    //@ open queue_invariant(queue_id, queue)();
    //@ open token(t1);
    /* Wait until the producer has made at least one element available. */
    while (ring_buffer_is_empty(queue->ring_buffer))
    /*@ invariant
        // from queue:
        [f_queue]queue->mutex |-> mutex
        &*& [f_queue]queue->cond_can_pop |-> ?cond_can_pop
        &*& [f_queue]mutex_cond(cond_can_pop, mutex)
        // from the mutex:
        &*& queue->ring_buffer |-> ?buffer
        &*& ring_buffer(buffer, _, ?buffer_contents)
        &*& [1/2]ghost_cell<list<int> >(queue_id, buffer_contents)
        &*& mutex_held(mutex, (queue_invariant)(queue_id, queue), currentThread, f_queue);
    @*/
    {
        //@ close queue_invariant(queue_id, queue)();
        mutex_cond_wait(queue->cond_can_pop, queue->mutex);
        //@ open queue_invariant(queue_id, queue)();
    }
    /* A producer can only be blocked when the buffer was full; signal
     * cond_can_push after we free a slot. */
    bool was_full = ring_buffer_is_full(queue->ring_buffer);
    //@ open getchar_io(queue_id, t1, c, t2, _);
    int ret = ring_buffer_pop(queue->ring_buffer);
    /* Resolve the prophecy: the predicted value is the value actually read.
     * The tree node is consumed (freed) here — ownership transfers to us. */
    //@ open proph_tree(_, _, _, _, _);
    prophecy_assign(tree->id, ret);
    free(tree);
    if (was_full){
        mutex_cond_signal(queue->cond_can_push);
    }
    /* Ghost critical section: atomically update the I/O invariant, moving
     * the abstract token from place t1 to t2 while the ghost cell's list is
     * advanced past the consumed head element. */
    /*@
    {
        predicate pre() =
            [1/2]ghost_cell<list<int> >(queue_id, buffer_contents)
            &*& c == head(buffer_contents)
            &*& token_without_invar(t1)
            &*& is_getchar_invar_updatable(?invar_updater, queue_id, t1, c, t2);
        predicate post() =
            [1/2]ghost_cell(queue_id, tail(buffer_contents))
            &*& token_without_invar(t2);
        close pre();
        produce_lemma_function_pointer_chunk(empty_lemma) : ghost_mutex_critical_section_t(place_io_invar(t1), pre, post)() {
            open pre();
            assert is_getchar_invar_updatable(?invar_updater, queue_id, t1, c, t2);
            close exists(place_io_invar(t1));
            open token_without_invar(t1);
            invar_updater();
            close token_without_invar(t2);
            close post();
            leak is_getchar_invar_updatable(_, _, _, _, _);
            call();
        } {
            ghost_mutex_use(place_mutex(t1), pre, post);
        }
        open post();
    }
    @*/
    //@ close queue_invariant(queue_id, queue)();
    mutex_release(queue->mutex);
    //@ close [f_queue]queue(queue_id, queue);
    return ret;
}