static gpudata *cuda_alloc(void *c, size_t size, void *data, int flags,
                           int *ret) {
  gpudata *res = NULL, *prev = NULL;
  cuda_context *ctx = (cuda_context *)c;
  size_t asize;
  int err;

  if ((flags & GA_BUFFER_INIT) && data == NULL)
    FAIL(NULL, GA_VALUE_ERROR);
  if ((flags & (GA_BUFFER_READ_ONLY|GA_BUFFER_WRITE_ONLY)) ==
      (GA_BUFFER_READ_ONLY|GA_BUFFER_WRITE_ONLY))
    FAIL(NULL, GA_VALUE_ERROR);

  /* TODO: figure out how to make this work */
  if (flags & GA_BUFFER_HOST)
    FAIL(NULL, GA_DEVSUP_ERROR);

  /* We don't want to manage really small allocations so we round up
   * to a multiple of FRAG_SIZE.  This also ensures that if we split a
   * block, the next block starts properly aligned for any data type. */
  if (!(ctx->flags & GA_CTX_DISABLE_ALLOCATION_CACHE)) {
    asize = roundup(size, FRAG_SIZE);
    find_best(ctx, &res, &prev, asize);
  } else {
    asize = size;
  }

  if (res == NULL) {
    err = allocate(ctx, &res, &prev, asize);
    if (err != GA_NO_ERROR)
      FAIL(NULL, err);
  }

  err = extract(res, prev, asize);
  if (err != GA_NO_ERROR)
    FAIL(NULL, err);

  /* It's out of the freelist, so add a ref */
  res->ctx->refcnt++;
  /* We consider this buffer allocated and ready to go */
  res->refcnt = 1;

  if (flags & GA_BUFFER_INIT) {
    err = cuda_write(res, 0, data, size);
    if (err != GA_NO_ERROR) {
      cuda_free(res);
      FAIL(NULL, err);
    }
  }

  return res;
}
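/* A minimal usage sketch, not part of the library: since cuda_alloc() is
 * static, a caller like this would live in the same translation unit.  The
 * helper name and the host buffer are hypothetical.  GA_BUFFER_INIT makes
 * cuda_alloc() copy `size` bytes from `data` into the fresh device buffer. */
static int upload_example(cuda_context *ctx) {
  float host[256] = {0};
  int err = GA_NO_ERROR;
  gpudata *buf = cuda_alloc(ctx, sizeof(host), host, GA_BUFFER_INIT, &err);
  if (buf == NULL)
    return err;  /* FAIL() stored the error code through `ret` */
  /* ... use buf ... */
  cuda_free(buf);  /* drops the refcount taken in cuda_alloc() */
  return GA_NO_ERROR;
}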
/* Construct and send a cuda request */
int cuda_request(struct adb_request *req, void (*done)(struct adb_request *),
		 int nbytes, ...)
{
	va_list list;
	int i;

	if (via == NULL) {
		req->complete = 1;
		return -ENXIO;
	}

	req->nbytes = nbytes;
	req->done = done;
	va_start(list, nbytes);
	for (i = 0; i < nbytes; ++i)
		req->data[i] = va_arg(list, int);
	va_end(list);
	req->reply_expected = 1;
	return cuda_write(req);
}
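/* A usage sketch modeled on callers elsewhere in the kernel (e.g. the
 * PowerMac RTC path): build a two-byte CUDA_PACKET request asking the CUDA
 * chip for the current time, then spin on cuda_poll() until the transaction
 * completes.  Assumes <linux/adb.h> and <linux/cuda.h>; error handling is
 * abbreviated. */
static void cuda_get_time_example(void)
{
	struct adb_request req;

	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
		return;
	while (!req.complete)
		cuda_poll();
	/* req.reply[] now holds the CUDA chip's response. */
}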
/* Send an ADB command */
static int cuda_send_request(struct adb_request *req, int sync)
{
	int i;

	if ((via == NULL) || !cuda_fully_inited) {
		req->complete = 1;
		return -ENXIO;
	}

	req->reply_expected = 1;

	i = cuda_write(req);
	if (i)
		return i;

	if (sync) {
		while (!req->complete)
			cuda_poll();
	}
	return 0;
}
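/* For context, a sketch of how this handler is wired up: the via-cuda driver
 * exposes its entry points through a struct adb_driver ops table (field names
 * per <linux/adb.h>), with cuda_send_request as the send_request hook.  The
 * other handlers named here are assumed to be defined elsewhere in this
 * file.  Passing sync != 0 to send_request makes the call block, polling
 * until req->complete is set. */
struct adb_driver via_cuda_driver = {
	.name         = "CUDA",
	.probe        = cuda_probe,
	.init         = cuda_init,
	.send_request = cuda_send_request,
	.autopoll     = cuda_adb_autopoll,
	.poll         = cuda_poll,
	.reset_bus    = cuda_reset_adb_bus,
};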