Example #1
/* state change means moving between lists */
void change_client_state(PgSocket *client, SocketState newstate)
{
	PgPool *pool = client->pool;

	/* remove from old location */
	switch (client->state) {
	case CL_FREE:
		break;
	case CL_JUSTFREE:
		statlist_remove(&justfree_client_list, &client->head);
		break;
	case CL_LOGIN:
		if (newstate == CL_WAITING)
			newstate = CL_WAITING_LOGIN;
		statlist_remove(&login_client_list, &client->head);
		break;
	case CL_WAITING_LOGIN:
		if (newstate == CL_ACTIVE)
			newstate = CL_LOGIN;
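		/* fall through: CL_WAITING_LOGIN clients sit on the waiting list too */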
	case CL_WAITING:
		statlist_remove(&pool->waiting_client_list, &client->head);
		break;
	case CL_ACTIVE:
		statlist_remove(&pool->active_client_list, &client->head);
		break;
	case CL_CANCEL:
		statlist_remove(&pool->cancel_req_list, &client->head);
		break;
	default:
		fatal("bad cur client state: %d", client->state);
	}

	client->state = newstate;

	/* put to new location */
	switch (client->state) {
	case CL_FREE:
		varcache_clean(&client->vars);
		slab_free(client_cache, client);
		break;
	case CL_JUSTFREE:
		statlist_append(&justfree_client_list, &client->head);
		break;
	case CL_LOGIN:
		statlist_append(&login_client_list, &client->head);
		break;
	case CL_WAITING:
	case CL_WAITING_LOGIN:
		statlist_append(&pool->waiting_client_list, &client->head);
		break;
	case CL_ACTIVE:
		statlist_append(&pool->active_client_list, &client->head);
		break;
	case CL_CANCEL:
		statlist_append(&pool->cancel_req_list, &client->head);
		break;
	default:
		fatal("bad new client state: %d", client->state);
	}
}
Example #2
void ipc_call_release(call_t *call)
{
	if (atomic_predec(&call->refcnt) == 0) {
		if (call->buffer)
			free(call->buffer);
		slab_free(ipc_call_slab, call);
	}
}
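
Most examples on this page pair slab_free() with a matching slab_alloc() on a per-type cache, as in Example #2 above. For orientation, here is a minimal, self-contained free-list sketch of that pattern; it is illustrative only, and the names toy_cache, toy_alloc and toy_free are invented rather than taken from any project shown here.

#include <stdlib.h>

/* Toy fixed-size cache: freed objects are chained into a LIFO free list,
 * so both alloc and free are O(1) on the hot path. Real slab allocators
 * add slab pages, constructors, poisoning and per-CPU magazines on top. */
struct toy_cache {
	size_t obj_size;   /* must be >= sizeof(void *) to hold the link */
	void *free_list;   /* head of the chain of freed objects */
};

static void *toy_alloc(struct toy_cache *c)
{
	if (c->free_list) {
		void *obj = c->free_list;
		c->free_list = *(void **)obj;   /* pop the most recently freed object */
		return obj;
	}
	return malloc(c->obj_size);             /* cold path: grow the cache */
}

static void toy_free(struct toy_cache *c, void *obj)
{
	*(void **)obj = c->free_list;           /* push onto the free list */
	c->free_list = obj;
}

Every slab_free(cache, obj) call in the snippets below reduces to the same step: return the object to its cache instead of the general-purpose heap.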
Example #3
static void
iio_stop(void)
{
    nioDbg("Stopping IIO ...");
    free(apictx->devices);
    slab_free(&apictx->msg_pool);
    return;
}
Example #4
L4_ThreadId_t thread_new(AddrSpace_t *space)
{
    assert (space != NULL);

    L4_Word_t tno;
    L4_ThreadId_t tid;
    L4_ThreadId_t space_spec;
    L4_Word_t utcb_location;
    slab_t *sb;
    list_t *li;
    thread_t *this;

    mutex_lock(&thrlock);
    tno = threadno_find_free(bitmap, MAX_TASKS);
    if (!tno) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }

    tid = L4_GlobalId(tno, 1);
    utcb_location = UTCB_AREA_LOCATION;

    space_spec = space->tid;
    tno = threadno_find_free(space->threads, MAX_THREADS_PER_TASK);
    if (!tno) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    utcb_location += tno * UTCB_SIZE;

    sb = slab_alloc(&thrpool);
    if (!sb) {
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }
    
    if (FALSE == (L4_ThreadControl(tid, space_spec, tid, space->pager, (void *) utcb_location))) {
        slab_free(&thrpool, sb);
        mutex_unlock(&thrlock);
        return L4_nilthread;
    }

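    /* the slab's data area holds a list node whose payload is the thread_t */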
    li = LIST_TYPE(sb->data);
    this = (thread_t *) li->data;
    list_push(&thread_list, li);

    this->tid = tid;
    this->space = space;
    this->index = tno;
    this->creation = L4_SystemClock();

    threadno_alloc(bitmap, L4_ThreadNo(tid));
    threadno_alloc(space->threads, tno);
    mutex_unlock(&thrlock);
    return tid;
}
Example #5
/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_cache, slab);

//	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}
Example #6
static void EXPORT_in_circle(struct crabql *crabql, msgpack_object *o, UT_string *s, int nargs) {
    ERROR_ASSERT(nargs == 3);
    ERROR_ASSERT(o[2].type == MSGPACK_OBJECT_ARRAY);
    ERROR_ASSERT(o[2].via.array.size == 2);
    ERROR_ASSERT(o[2].via.array.ptr[0].type == MSGPACK_OBJECT_POSITIVE_INTEGER);
    ERROR_ASSERT(o[2].via.array.ptr[1].type == MSGPACK_OBJECT_POSITIVE_INTEGER);

    struct circle *circle = circle_make(*(struct point *) &o[2].via.array.ptr[0].via.u64, o[2].via.array.ptr[1].via.u64);
    char *binstr = dump_binstrescape((const char *) circle, sizeof(struct circle));

    uts_printf_concat(s, "cb_point_in_circle((uint64_t) ");
    crabql_generate_code(crabql, &o[1], s);
    uts_printf_concat(s, ", \"");
    utstring_bincpy(s, binstr, strlen(binstr));
    uts_printf_concat(s, "\")");

    slab_free(circle);
    slab_free(binstr);
}
Example #7
void __heap_free(heap_t * self, void *ptr, const char *file, int line)
{
	if (unlikely(self == NULL))
		throw_unexpected(HEAP_NULL);

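	/* round ptr down to its page; the page's cache_t header occupies the last sizeof(cache_t) bytes */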
	void *data = (void *)((uint32_t) ptr & ~(self->page_size - 1));
	cache_t *c = (cache_t *) (data + self->page_size - sizeof(cache_t));
	cache_check(c);
	slab_free(cache_slab(c), ptr);
}
Example #8
int
zlib_deinit(void **data)
{
	if (*data) {
		z_stream *zs = (z_stream *)(*data);
		deflateEnd(zs);
		slab_free(NULL, *data);
	}
	return (0);
}
Example #9
int
lz_fx_deinit(void **data)
{
	struct lzfx_params *lzdat = (struct lzfx_params *)(*data);
	
	if (lzdat) {
		slab_free(NULL, lzdat);
	}
	*data = NULL;
	return (0);
}
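
Examples #8, #9, and #16 come from the same codebase and pass NULL as the cache argument, which suggests an allocator that can route cache-less frees to the system heap or a global registry. A hedged sketch of a shim with that calling convention follows; this is an assumption for illustration, not the implementation behind those examples.

#include <stdlib.h>

/* Illustrative only: tolerate a NULL cache by treating the block as a
 * plain heap allocation. The real allocator behind Examples #8/#9/#16
 * may track such blocks differently. */
static void shim_slab_free(void *cache, void *ptr)
{
	if (cache == NULL) {
		free(ptr);    /* no per-type cache: assume plain malloc() origin */
		return;
	}
	/* ... otherwise return ptr to its per-type cache ... */
}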
Example #10
void thr_destructor_arch(thread_t *t)
{
	if (t->arch.uspace_window_buffer) {
		uintptr_t uw_buf = (uintptr_t) t->arch.uspace_window_buffer;
		/*
		 * Mind the possible alignment of the userspace window buffer
		 * belonging to a killed thread.
		 */
		slab_free(uwb_cache, (uint8_t *) ALIGN_DOWN(uw_buf,
		    UWB_ALIGNMENT));
	}
}
Example #11
/*
 * Block drivers call this function to signal completion of an I/O request.
 */
void block_request_completed(struct block_device *dev, struct request *req)
{
	req->buf->b_flags |= BUF_UPTODATE;
	release_buffer(req->buf);
	buffer_unlock(req->buf);
	slab_free(request_cachep, req);

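	/* dispatch the next queued request, if any */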
	if (request_queue_empty(dev))
		return;
	req = list_first_entry(&dev->requests, struct request, chain);
	list_del(&req->chain);
	dev->handle_request(dev, req);
}
Example #12
static void
pat_destroy(patricia *node)
{
  int n;

  if (!node)
    return;

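  /* follow only downward links; a child whose bit index is not larger points back up the trie */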
  for (n = 0; n < 2; n++)
    if (node->links[n]->bit > node->bit)
      pat_destroy(node->links[n]);

  slab_free(intmap_slab, node);
}
Example #13
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	if (rtransfer->staging)
		r600_resource_reference(&rtransfer->staging, NULL);

	slab_free(&rctx->pool_transfers, transfer);
}
Example #14
File: pipe.c Project: drewt/Telos
int pipe_release(struct inode *inode, struct file *file)
{
	struct pipe_private *pipe = file->f_private;
	if (file == pipe->read_end) {
		pipe->read_end = NULL;
		wake_all(&pipe->write_wait, 0);
	}
	if (file == pipe->write_end) {
		pipe->write_end = NULL;
		wake_all(&pipe->read_wait, 0);
	}
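	/* once both ends are closed, the shared pipe object can be recycled */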
	if (!pipe->read_end && !pipe->write_end)
		slab_free(pipe_cachep, pipe);
	return 0;
}
Example #15
int test_slab (int argc, char **argv) {
    // track runtime
    struct timeval t0, t1; 
    
    // malloc version
    gettimeofday (&t0, NULL);
    for (int p = 0; p < PASSES; ++p) {
        for (int i = 0; i < ALLOCS; ++i) {
            test_s *ts = malloc (sizeof(test_s));
            ts->a = i;
            ts->b = i;
            ts->c = (i % 2 == 0);
            tsps[i] = ts;
        }
        for (int i = 0; i < ALLOCS; ++i) {
            free (tsps[i]);
        }
    }
    gettimeofday (&t1, NULL);
    double mdt = ((t1.tv_usec + 1000000 * t1.tv_sec) - (t0.tv_usec + 1000000 * t0.tv_sec)) / 1000000.0;
    
    // slab alloc version
    gettimeofday (&t0, NULL);
    slab_init (SLAB_SIZE);
    for (int p = 0; p < PASSES; ++p) {
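        /* no-argument slab_free(): this project's variant releases the whole slab between passes */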
        slab_free ();
        for (int i = 0; i < ALLOCS; ++i) {
            test_s *ts = slab_alloc (sizeof(test_s));
            ts->a = i;
            ts->b = i;
            ts->c = (i % 2 == 0);
            tsps[i] = ts;
        }
    }    
    gettimeofday (&t1, NULL);
    double sdt = ((t1.tv_usec + 1000000 * t1.tv_sec) - (t0.tv_usec + 1000000 * t0.tv_sec)) / 1000000.0;

    /* Check that results are coherent, and collapse the wavefunction. */
    for (int i = 0; i < ALLOCS; ++i) {
        test_s ts = *(tsps[i]);
        if (ts.a != i || ts.b != i || ((i % 2 == 0) != ts.c)) {
            printf ("ERR ptr=%p, i=%d, a=%d, b=%f, c=%s\n", tsps[i], i, ts.a, ts.b, ts.c ? "EVEN" : "ODD");
        }
    }
    
    fprintf (stderr, "%f sec malloc, %f sec slab, speedup %f\n", mdt, sdt, mdt/sdt);
    return 0;
}
Example #16
int
adapt_deinit(void **data)
{
	struct adapt_data *adat = (struct adapt_data *)(*data);
	int rv = 0;

	if (adat) {
		rv = ppmd_deinit(&(adat->ppmd_data));
		if (adat->lzma_data)
			rv += lzma_deinit(&(adat->lzma_data));
		if (adat->lz4_data)
			rv += lz4_deinit(&(adat->lz4_data));
		slab_free(NULL, adat);
		*data = NULL;
	}
	return (rv);
}
Example #17
int thread_destroy(L4_ThreadId_t tid)
{
    list_t *li;
    AddrSpace_t *as;
    L4_Word_t tno;
   
    mutex_lock(&thrlock);

    li = list_find(thread_list, &tid, sizeof(L4_ThreadId_t));
    if (li == NULL) {
        mutex_unlock(&thrlock);
        return FALSE;
    }

    if (FALSE == L4_ThreadControl(tid, L4_nilthread, L4_nilthread, L4_nilthread, (void *) -1)) {
        mutex_unlock(&thrlock);
        return FALSE;
    }

    as = THREAD_TYPE(li->data)->space;
    tno = THREAD_TYPE(li->data)->index;

    list_remove(&thread_list, li);
    slab_free(&thrpool, SLAB_FROM_DATA(li));

    threadno_free(bitmap, L4_ThreadNo(tid));
    threadno_free(as->threads, tno);

    if (tid.raw == as->tid.raw) {
        for (li = thread_list; li; li = li->next) {
            if (THREAD_TYPE(li->data)->space == as) break;
        }
        if (li == NULL) {
            // task destroy notification should go here
            address_space_destroy(as);
        } else {
            as->tid = THREAD_TYPE(li->data)->tid;
        }
    }

    mutex_unlock(&thrlock);
    
    // thread destroy notification should go here
    return TRUE;
} 
Example #18
/* reposition at buffer start again */
static void sbuf_try_resync(SBuf *sbuf, bool release)
{
	IOBuf *io = sbuf->io;

	if (io) {
		log_noise("resync: done=%d, parse=%d, recv=%d",
			  io->done_pos, io->parse_pos, io->recv_pos);
	}
	AssertActive(sbuf);

	if (!io)
		return;

	if (release && iobuf_empty(io)) {
		slab_free(iobuf_cache, io);
		sbuf->io = NULL;
	} else {
		iobuf_try_resync(io, SBUF_SMALL_PKT);
	}
}
Example #19
static int slab_tests_run(int argc, char *argv[])
{
	// 1. Create slab cache
	srand(time(0));
	const unsigned pattern = 0xdeadbeef;
	slab_cache_t cache;
	int ret = slab_cache_init(&cache, sizeof(int));
	ok(ret == 0, "slab: created empty cache");

	// 2. Couple alloc/free
	bool valid_free = true;
	lives_ok({
	for(int i = 0; i < 100; ++i) {
		int* data = (int*)slab_cache_alloc(&cache);
		*data = pattern;
		slab_free(data);
		if (*data == pattern)
			valid_free = false;
	}
	}, "slab: couple alloc/free");
Example #20
void destroy_translation_map(struct vm_translation_map *map)
{
    int i;
    unsigned int *pgdir;
    int old_flags;

    old_flags = acquire_spinlock_int(&kernel_space_lock);
    list_remove_node(map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    // Free user space page tables
    pgdir = (unsigned int*) PA_TO_VA(map->page_dir);
    for (i = 0; i < 768; i++)
    {
        if (pgdir[i] & PAGE_PRESENT)
            dec_page_ref(pa_to_page(PAGE_ALIGN(pgdir[i])));
    }

    dec_page_ref(pa_to_page(map->page_dir));
    slab_free(&translation_map_slab, map);
}
Example #21
void task_destroy(AddrSpace_t *space)
{
    list_t *li, *next;
    AddrSpace_t *as;
    L4_Word_t tno;
    L4_ThreadId_t tid;
   
    assert(space != NULL);

    mutex_lock(&thrlock);

    next = NULL;
    for (li = thread_list; li; li = next) {
        as = THREAD_TYPE(li->data)->space;
        next = li->next;
        if (as != space) continue;

        tno = THREAD_TYPE(li->data)->index;
        tid = THREAD_TYPE(li->data)->tid;

        /* hope this succeeds */
        L4_ThreadControl(tid, L4_nilthread, L4_nilthread, L4_nilthread, (void *) -1);

        list_remove(&thread_list, li);
        slab_free(&thrpool, SLAB_FROM_DATA(li));
        threadno_free(bitmap, L4_ThreadNo(tid));

        // thread destroy notification should go here
    }

    address_space_destroy(space);

    mutex_unlock(&thrlock);
    
    // task destroy notification should go here
    return;
}
Example #22
void *
slab_realloc(const char *file, unsigned int line, void *ptr, size_t size)
{
    alloc_header_t *orig;
    void *newblock;
    size_t osize;

    if (!ptr)
        return slab_malloc(file, line, size);

    verify(ptr);
    orig = (alloc_header_t*)ptr - 1;
#if SLAB_DEBUG & SLAB_DEBUG_HEADER
    osize = orig->size;
#else
    osize = *orig;
#endif
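    /* never shrink in place: a smaller request keeps the original block */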
    if (osize >= size)
        return ptr;
    newblock = slab_malloc(file, line, size);
    memcpy(newblock, ptr, osize);
    slab_free(file, line, ptr);
    return newblock;
}
Example #23
static void EXPORT_in(struct crabql *crabql, msgpack_object *o, UT_string *s, int nargs) {
    ERROR_ASSERT(nargs == 3);

    if (o[2].type == MSGPACK_OBJECT_ARRAY) {

        msgpack_object *p = o[2].via.array.ptr;
        uts_printf_concat(s, "bs64((int64_t) ");
        crabql_generate_code(crabql, &o[1], s);
        uts_printf_concat(s, ", \"");
        size_t len = o[2].via.array.size;
        int64_t *nums = slab_alloc(len * 8);
        for (size_t i = 0; i < len; i++) {
            ERROR_ASSERT(p[i].type == MSGPACK_OBJECT_POSITIVE_INTEGER || p[i].type == MSGPACK_OBJECT_NEGATIVE_INTEGER);
            if (p[i].type == MSGPACK_OBJECT_POSITIVE_INTEGER)
                ERROR_ASSERT(p[i].via.u64 < INT64_MAX);

            nums[i] = p[i].via.i64;
        }

        qsort(nums, len, 8, cmp64);

        //   \x01\x00\x00\x00\x00\x00\x00\x00

        char *binstr = dump_binstrescape((const char *) nums, len * sizeof(nums[0]));
        utstring_bincpy(s, binstr, strlen(binstr));
        slab_free(nums);
        slab_free(binstr);
        uts_printf_concat(s, "\", (size_t) %zu)", o[2].via.array.size);
    }
    else if (o[2].type == MSGPACK_OBJECT_RAW) {
        char *bucket_name;
        int64_t table_id;
        if (parse_bucket_and_table(o[2].via.raw.ptr, o[2].via.raw.size, &bucket_name, &table_id) == 0) {
            struct bucket *bucket = bucket_get(bucket_name, CAN_RETURN_NULL);
            free(bucket_name);

            if (bucket) {
                struct table *table = bucket_get_table(bucket, table_id, CAN_RETURN_NULL);
                if (table) {
                    lock_table(crabql, table);
                    struct schema *schema = table->schema;
                    if (schema) {
                        struct field *field = schema_field_get_primary(schema);

                        size_t datasize = ceildiv(schema->nbits, 8);

                        uts_printf_concat(s, "bsintable(");
                        crabql_generate_code(crabql, &o[1], s);
                        uts_printf_concat(s, ", (size_t) %pULL, (size_t) %pULL, (size_t) %zu, (size_t) %zu)", table->data, field, datasize, table->len);
                    }
                    else {
                        log_warn("schema is NULL, return 0.");
                        uts_printf_concat(s, "0");
                    }
                }
                else {
                    uts_printf_concat(s, "0");
                }
            }
            else {
                log_warn("bucket is NULL, return 0.");
                uts_printf_concat(s, "0");
            }
        }
        else {
            log_warn("cannot parse bucket and table");
            crabql->error = 34;
        }
    }
}
Example #24
static int
execute_one_slave(int slave_id, int arg_cnt, ...)
{
    va_list ap;
    int fd;
    char *request_stream;
    size_t request_len;
    int nread;

    fd = crt_tcp_timeout_connect(slave_ip(slave_id), slave_port(slave_id), MASTER_CONNECT_SLAVE_TIMEOUT);
    if (fd < 0)
    {
        if (crt_errno == ECONNREFUSED)
            return RET_CONNECT_REFUSED;
        else if (crt_errno == EWOULDBLOCK)
            return RET_CONNECT_TIMEOUT;
        else
            return RET_CONNECT_FAILED;
    }

    va_start(ap, arg_cnt);
    for (int i=0; i<arg_cnt; i++)
    {
        request_stream = va_arg(ap, char *);
        request_len = va_arg(ap, size_t);
        crt_tcp_write_to(fd, request_stream, request_len, MASTER_SEND_SLAVE_TIMEOUT);
    }
    va_end(ap);

    /* The response-reading loop is truncated in the original listing:
     *     for ( ; ; ) {
     *         nread = crt_tcp_read_to(fd, ...
     */
    (void)nread;

    return 0;
}

static struct query_response_s *
fetch_result(struct query_request_s *request_ptr)
{
    switch (request_ptr->cmd_type)
    {
    case cmd_sync:
    case cmd_power:
    case cmd_set:
    case cmd_delete:
        break;

    case cmd_get:
        break;

    default:
        break;
    }
    return NULL;
}

static int
sendback_result(int fd, struct query_response_s *response_ptr)
{
    return 0;
}

static void 
sendback_error_mesg(int fd)
{
    const char *error_mesg = MC_ERR_STR;
    crt_tcp_write_to(fd, (void*)error_mesg, strlen(error_mesg), g_master_settings.write_timeout);
}

static void *
handle_request(void *arg)
{
    int fd = (intptr_t)arg;
    ssize_t nread;
    struct query_request_s request;
    struct query_response_s *response_ptr;
    char *p;
    char remote_ip[IP_STR_LEN];
    struct sockaddr_in remote_peer;
    socklen_t addrlen;
    char warn_log_mesg[1024] = {0};
    int ret;
    pre_timer();

    launch_timer();
    crt_set_nonblock(fd);
    addrlen = sizeof remote_peer;
    getpeername(fd, (struct sockaddr *)&remote_peer, &addrlen);
    inet_ntop(AF_INET, &(remote_peer.sin_addr), remote_ip, sizeof remote_ip);
    memset(&request, 0, sizeof request);
    /* {{{ recv header */
    for (int i=0; i<MAX_RESP_HEADER_SIZE; i++)
    {
        nread = crt_tcp_read_to(fd, &request.query_head_buf[i], 1, g_master_settings.read_timeout);
        if (nread == 1)
        {
            if ((p=try_parse_memcached_string_head(&request)) == NULL)
                continue;
            else
                break;
        }
        else
        {
            ret = snprintf(warn_log_mesg, sizeof warn_log_mesg, "read sock failed: %d - %s", crt_errno, strerror(crt_errno));
            assert(ret < (int)sizeof warn_log_mesg); /* NOT <= */
            assert(ret >= 0);
            goto done;
        }
    }
    /* }}} */
    /* {{{ parse header & prepare for receiving data */
    request.query_head_buf_size = strlen(request.query_head_buf);
    if ((parse_memcached_string_head(&request) == RET_SUCCESS)
         && (request.parse_status == ps_succ))
    {
        switch (request.cmd_type)
        {
            case cmd_set:
                request.data_size += 2; /* alloc for tailing '\r\n' */
                request.data = slab_alloc(request.data_size);
                assert(request.data != NULL);
                nread = crt_tcp_read(fd, request.data, request.data_size);
                if (nread != (ssize_t)request.data_size)
                {
                    ret = snprintf(warn_log_mesg, sizeof warn_log_mesg,
                                   "read request body failed: nread = %zd, error = %d - %s",
                                   nread, crt_errno, strerror(crt_errno));
                    assert(ret < (int)sizeof warn_log_mesg); /* NOT <= */
                    assert(ret >= 0);
                    goto done;
                }
                break;

            case cmd_get:
            case cmd_delete:
                /* nothing to do */
                break;

            /* TODO */
            case cmd_sync:
            case cmd_power:
            default:
                break;
        }
    }
    else
    {
        sendback_error_mesg(fd);
        ret = snprintf(warn_log_mesg, sizeof warn_log_mesg, "parse request failed, cmd type: %d", request.cmd_type);
        assert(ret < (int)sizeof warn_log_mesg); /* NOT <= */
        assert(ret >= 0);
    }
    /* }}} */
    response_ptr = fetch_result(&request);
    sendback_result(fd, response_ptr);

done:
    if (request.data != NULL)
        slab_free(request.data);
    stop_timer();
    if (warn_log_mesg[0] == '\0')
    {
        log_notice("%s <Elapsed %ld.%06lds> {from: %s, fd = %d}", cmd_type_name(request.cmd_type), 
                   __used.tv_sec, __used.tv_usec, remote_ip, fd);
    }
    else
    {
        log_warn("%s <Elapsed %ld.%06lds> {from: %s, fd = %d} %s", cmd_type_name(request.cmd_type),
                 __used.tv_sec, __used.tv_usec, remote_ip, fd, warn_log_mesg);
    }
    close(fd);
    crt_exit(NULL);
}
Example #25
static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       DRM_FORMAT_MOD_LINEAR, &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      /* Need to align the transfer region to satisfy RS restrictions, as we
       * really want to hit the RS blit path here.
       */
      unsigned w_align, h_align;

      if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
         w_align = h_align = 64;
      } else {
         w_align = ETNA_RS_WIDTH_MASK + 1;
         h_align = ETNA_RS_HEIGHT_MASK + 1;
      }
      h_align *= ctx->screen->specs.pixel_pipes;

      ptrans->box.width += ptrans->box.x & (w_align - 1);
      ptrans->box.x = ptrans->box.x & ~(w_align - 1);
      ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
      ptrans->box.height += ptrans->box.y & (h_align - 1);
      ptrans->box.y = ptrans->box.y & ~(h_align - 1);
      ptrans->box.height = align(ptrans->box.height,
                                 (ETNA_RS_HEIGHT_MASK + 1) *
                                  ctx->screen->specs.pixel_pipes);

      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /*
    * Always flush if we have the temporary resource and have a copy to this
    * outstanding. Otherwise infer flush requirement from resource access and
    * current GPU usage (reads must wait for GPU writes, writes must have
    * exclusive access to the buffer).
    */
   if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
       (!trans->rsc &&
        (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
        ((usage & PIPE_TRANSFER_WRITE) && rsc->status))))
      pctx->flush(pctx, NULL, 0);

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be ignored
    * when mapping in-place,
    * but when not in place we need to fire off the copy operation in
    * transfer_flush_region (currently
    * a no-op) instead of unmap. Need to handle this to support
    * ARB_map_buffer_range extension at least.
    */
   /* XXX we don't take care of current operations on the resource; which can
      be, at some point in the pipeline
      which is not yet executed:

      - bound as surface
      - bound through vertex buffer
      - bound through index buffer
      - bound in sampler view
      - used in clear_render_target / clear_depth_stencil operation
      - used in blit
      - used in resource_copy_region

      How do other drivers record this information over course of the rendering
      pipeline?
      Is it necessary at all? Only in case we want to provide a fast path and
      map the resource directly
      (and for PIPE_TRANSFER_MAP_DIRECTLY) and we don't want to force a sync.
      We also need to know whether the resource is in use to determine if a sync
      is needed (or just do it
      always, but that comes at the expense of performance).

      A conservative approximation without too much overhead would be to mark
      all resources that have
      been bound at some point as busy. A drawback would be that accessing
      resources that have
      been bound but are no longer in use for a while still carry a performance
      penalty. On the other hand,
      the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
      PIPE_TRANSFER_UNSYNCHRONIZED to
      avoid this in the first place...

      A) We use an in-pipe copy engine, and queue the copy operation after unmap
      so that the copy
         will be performed when all current commands have been executed.
         Using the RS is possible, not sure if always efficient. This can also
      do any kind of tiling for us.
         Only possible when PIPE_TRANSFER_DISCARD_RANGE is set.
      B) We discard the entire resource (or at least, the mipmap level) and
      allocate new memory for it.
         Only possible when mapping the entire resource or
      PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      uint32_t prep_flags = 0;

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   void *mapped = etna_bo_map(rsc->bo);
   if (!mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      return mapped + res_level->offset +
             etna_compute_offset(prsc->format, box, res_level->stride,
                                 res_level->layer_stride);
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

      mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_untile(trans->staging,
                                mapped + ptrans->box.z * res_level->layer_stride,
                                ptrans->box.x, ptrans->box.y, res_level->stride,
                                ptrans->box.width, ptrans->box.height, ptrans->stride,
                                util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}
Example #26
/*
 * get a bucket with the specified name,
 * if it does not exist, create it implicitly,
 * returns a pointer to the bucket on success,
 * returns NULL if it fails
 */
struct bucket *bucket_get(const char *name, int flags) {
    if (!is_valid_name(name)) {
        log_warn("invalid bucket name: [%s]", name);
        return NULL;
    }
    
    struct bucket *bucket;
    pthread_rwlock_rdlock(&rwlock);
    int wrlocked = 0;
again:
    HASH_FIND_STR(buckets, name, bucket);
    if (bucket == NULL) {
        if (wrlocked == 0) {
            pthread_rwlock_unlock(&rwlock);
            pthread_rwlock_wrlock(&rwlock);
            wrlocked = 1;
            goto again;
        }
        
        // find schema before malloc
        struct schema *schema = schema_get_latest(name);
        if (schema == NULL && (flags & CAN_RETURN_NULL))
            goto done;
        
        if (mkdir(name, 0755) == -1) {
            log_error("mkdir failed: %s", strerror(errno));
            if (errno != EEXIST)
                goto done;
        }

        bucket = (struct bucket *)slab_alloc(sizeof(struct bucket));
        /* XXX: free the schema that no one can be sure if it's referenced by others */
        if (!bucket) {
            log_error("can't alloc bucket %s", name);
            goto done;
        }

        memset(bucket, 0, sizeof(struct bucket));
        pthread_rwlock_init(&bucket->rwlock, NULL);

        size_t namelen = strlen(name);
        char filename[namelen + 4];
        memcpy(filename, name, namelen);
        memcpy(filename + namelen, ".db", 4);

        if (open_storage(filename, 100 * 1024 * 1024, &bucket->bsp) != 0) {
            log_error("open_storage failed");
            pthread_rwlock_destroy(&bucket->rwlock);
            slab_free(bucket);
            pthread_rwlock_unlock(&rwlock);
            return NULL;
        }

        strncpy(bucket->name, name, sizeof(bucket->name));
        bucket->schema = schema;
        bucket->tables = NULL;
        HASH_ADD_STR(buckets, name, bucket);

        if (schema)
            schema_save_to_file(schema, bucket->name);
    }
done:
    pthread_rwlock_unlock(&rwlock);
    
    return bucket;
}
Example #27
/** Free a player_dbref struct. */
static void
delete_dbref(void *data)
{
  slab_free(player_dbref_slab, data);
}
Example #28
/* destroy PgUser, for usage with btree */
static void user_node_release(struct AANode *node, void *arg)
{
	PgUser *user = container_of(node, PgUser, tree_node);
	slab_free(user_cache, user);
}
Example #29
/* state change means moving between lists */
void change_server_state(PgSocket *server, SocketState newstate)
{
	PgPool *pool = server->pool;

	/* remove from old location */
	switch (server->state) {
	case SV_FREE:
		break;
	case SV_JUSTFREE:
		statlist_remove(&justfree_server_list, &server->head);
		break;
	case SV_LOGIN:
		statlist_remove(&pool->new_server_list, &server->head);
		break;
	case SV_USED:
		statlist_remove(&pool->used_server_list, &server->head);
		break;
	case SV_TESTED:
		statlist_remove(&pool->tested_server_list, &server->head);
		break;
	case SV_IDLE:
		statlist_remove(&pool->idle_server_list, &server->head);
		break;
	case SV_ACTIVE:
		statlist_remove(&pool->active_server_list, &server->head);
		break;
	default:
		fatal("change_server_state: bad old server state: %d", server->state);
	}

	server->state = newstate;

	/* put to new location */
	switch (server->state) {
	case SV_FREE:
		varcache_clean(&server->vars);
		slab_free(server_cache, server);
		break;
	case SV_JUSTFREE:
		statlist_append(&justfree_server_list, &server->head);
		break;
	case SV_LOGIN:
		statlist_append(&pool->new_server_list, &server->head);
		break;
	case SV_USED:
		/* use LIFO */
		statlist_prepend(&pool->used_server_list, &server->head);
		break;
	case SV_TESTED:
		statlist_append(&pool->tested_server_list, &server->head);
		break;
	case SV_IDLE:
		if (server->close_needed || cf_server_round_robin) {
			/* try to avoid immediate usage then */
			statlist_append(&pool->idle_server_list, &server->head);
		} else {
			/* otherwise use LIFO */
			statlist_prepend(&pool->idle_server_list, &server->head);
		}
		break;
	case SV_ACTIVE:
		statlist_append(&pool->active_server_list, &server->head);
		break;
	default:
		fatal("bad server state");
	}
}
Example #30
static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer
    * and free the old one using a fenced free.
    * The most tricky case to implement will be: tiled or supertiled surface,
    * partial write, target not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, must push them
    * back into GPU domain before the RS execs the blit to the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* map buffer object */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];
         void *mapped = etna_bo_map(rsc->bo) + res_level->offset;

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_tile(
               mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /*
    * Transfers without a temporary are only pulled into the CPU domain if they
    * are not mapped unsynchronized. If they are, must push them back into GPU
    * domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}