/*
 * Tear down a per-thread print buffer: flush pending output, then either
 * return the buffer to the static pool (fast-synch builds) or unlink it
 * from the global buffer list and free it.
 *
 * Must run in non-real-time context (assert_nrt) since it takes mutexes
 * and may call free().
 */
static void cleanup_buffer(struct print_buffer *buffer)
{
	struct print_buffer *prev, *next;

	assert_nrt();

	/* Detach the buffer from the calling thread first. */
	pthread_setspecific(buffer_key, NULL);

	/* Drain any deferred output still queued in the ring. */
	pthread_mutex_lock(&buffer_lock);
	print_buffers();
	pthread_mutex_unlock(&buffer_lock);

#ifdef CONFIG_XENO_FASTSYNCH
	/* Return the buffer to the pool */
	{
		unsigned long old_bitmap, bitmap;
		unsigned i, j;

		/* Heap-allocated buffers live outside the pool range. */
		if ((unsigned long)buffer - pool_start >= pool_len)
			goto dofree;

		/* Compute word index i and bit index j of this pool slot. */
		j = ((unsigned long)buffer - pool_start) / pool_buf_size;
		i = j / BITS_PER_LONG;
		j = j % BITS_PER_LONG;

		/*
		 * Lock-free release: set bit j in pool_bitmap[i] with a
		 * cmpxchg retry loop so concurrent acquire/release of other
		 * bits in the same word cannot be lost.
		 */
		old_bitmap = xnarch_atomic_get(&pool_bitmap[i]);
		do {
			bitmap = old_bitmap;
			old_bitmap = xnarch_atomic_cmpxchg(&pool_bitmap[i],
							   bitmap,
							   bitmap | (1UL << j));
		} while (old_bitmap != bitmap);

		return;
	}
  dofree:
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Heap-allocated buffer: unlink from the doubly-linked list. */
	pthread_mutex_lock(&buffer_lock);

	prev = buffer->prev;
	next = buffer->next;

	if (prev)
		prev->next = next;
	else
		first_buffer = next;
	if (next)
		next->prev = prev;

	buffers--;

	pthread_mutex_unlock(&buffer_lock);

	free(buffer->ring);
	free(buffer);
}
/*
 * Tear down a per-thread print buffer: flush pending output, then either
 * return the buffer to the static pool or unlink it from the global
 * buffer list and free it.
 *
 * Must run in non-real-time context (assert_nrt) since it takes mutexes
 * and may call free().
 */
static void cleanup_buffer(struct print_buffer *buffer)
{
	struct print_buffer *prev, *next;

	assert_nrt();

	/* Detach the buffer from the calling thread first. */
	pthread_setspecific(buffer_key, NULL);

	/* Drain any deferred output still queued in the ring. */
	pthread_mutex_lock(&buffer_lock);
	print_buffers();
	pthread_mutex_unlock(&buffer_lock);

	/* Return the buffer to the pool */
	{
		unsigned long old_bitmap, bitmap;
		unsigned i, j;

		/* Heap-allocated buffers live outside the pool range. */
		if ((unsigned long)buffer - pool_start >= pool_len)
			goto dofree;

		/* Compute word index i and bit index j of this pool slot. */
		j = ((unsigned long)buffer - pool_start) / pool_buf_size;
		i = j / __WORDSIZE;
		j = j % __WORDSIZE;

		/*
		 * Lock-free release: set bit j in pool_bitmap[i] with a
		 * cmpxchg retry loop so concurrent acquire/release of other
		 * bits in the same word cannot be lost.
		 */
		old_bitmap = atomic_long_read(&pool_bitmap[i]);
		do {
			bitmap = old_bitmap;
			old_bitmap = atomic_long_cmpxchg(&pool_bitmap[i],
							 bitmap,
							 bitmap | (1UL << j));
		} while (old_bitmap != bitmap);

		return;
	}

  dofree:
	/* Heap-allocated buffer: unlink from the doubly-linked list. */
	pthread_mutex_lock(&buffer_lock);

	prev = buffer->prev;
	next = buffer->next;

	if (prev)
		prev->next = next;
	else
		first_buffer = next;
	if (next)
		next->prev = prev;

	buffers--;

	pthread_mutex_unlock(&buffer_lock);

	free(buffer->ring);
	free(buffer);
}
/* *** Deferred Output Management *** */

/*
 * Drain every registered print buffer immediately.
 *
 * Non-real-time context only: the global buffer lock is held while the
 * rings are flushed to their output streams.
 */
void rt_print_flush_buffers(void)
{
	assert_nrt();

	pthread_mutex_lock(&buffer_lock);
	print_buffers();
	pthread_mutex_unlock(&buffer_lock);
}
/* vsyscall-based services */

/*
 * Wrapper around gettimeofday(): forwards to the plain libc service,
 * which is only legal outside real-time context.
 */
int __wrap_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	assert_nrt();

	return __STD(gettimeofday(tv, tz));
}
/*
 * Wrapper around free(): releasing heap memory may block, so it is
 * restricted to non-real-time context.
 */
void __wrap_free(void *ptr)
{
	assert_nrt();

	__STD(free(ptr));
}
/* Memory allocation services */

/*
 * Wrapper around malloc(): heap allocation may block, so it is
 * restricted to non-real-time context.
 */
void *__wrap_malloc(size_t size)
{
	void *block;

	assert_nrt();

	block = __STD(malloc(size));

	return block;
}
int rt_print_init(size_t buffer_size, const char *buffer_name) { struct print_buffer *buffer = pthread_getspecific(buffer_key); size_t size = buffer_size; unsigned long old_bitmap; unsigned j; if (!size) size = __cobalt_print_bufsz; else if (size < RT_PRINT_LINE_BREAK) return EINVAL; if (buffer) { /* Only set name if buffer size is unchanged or default */ if (size == buffer->size || !buffer_size) { set_buffer_name(buffer, buffer_name); return 0; } cleanup_buffer(buffer); buffer = NULL; } /* Find a free buffer in the pool */ do { unsigned long bitmap; unsigned i; for (i = 0; i < pool_bitmap_len; i++) { old_bitmap = atomic_long_read(&pool_bitmap[i]); if (old_bitmap) goto acquire; } goto not_found; acquire: do { bitmap = old_bitmap; j = __builtin_ffsl(bitmap) - 1; old_bitmap = atomic_long_cmpxchg(&pool_bitmap[i], bitmap, bitmap & ~(1UL << j)); } while (old_bitmap != bitmap && old_bitmap); j += i * __WORDSIZE; } while (!old_bitmap); buffer = (struct print_buffer *)(pool_start + j * pool_buf_size); not_found: if (!buffer) { assert_nrt(); buffer = malloc(sizeof(*buffer)); if (!buffer) return ENOMEM; buffer->ring = malloc(size); if (!buffer->ring) return ENOMEM; rt_print_init_inner(buffer, size); } set_buffer_name(buffer, buffer_name); pthread_setspecific(buffer_key, buffer); return 0; }
/*
 * Wrapper around clock_gettime(): forwards to the real libc service,
 * which is only legal outside real-time context.
 */
int __wrap_clock_gettime(clockid_t clk_id, struct timespec *tp)
{
	assert_nrt();

	return __real_clock_gettime(clk_id, tp);
}
/*
 * Wrapper around free(): releasing heap memory may block, so it is
 * restricted to non-real-time context.
 */
void __wrap_free(void *ptr)
{
	assert_nrt();

	__real_free(ptr);
}
/* Memory allocation services */

/*
 * Wrapper around malloc(): heap allocation may block, so it is
 * restricted to non-real-time context.
 */
void *__wrap_malloc(size_t size)
{
	void *block;

	assert_nrt();

	block = __real_malloc(size);

	return block;
}