/* increment the counter for # of contexts accessing the device.
   spins (yielding) while the device is blocked, then registers this
   access under the device's global lock. operates on the physical
   device when one backs @gdev, otherwise on @gdev itself. */
void gdev_access_start(struct gdev_device *gdev)
{
	struct gdev_device *phys = gdev_phys_get(gdev);
	struct gdev_device *dev = phys ? phys : gdev;

	for (;;) {
		gdev_lock(&dev->global_lock);
		if (!dev->blocked) {
			/* not blocked: record the access and we are done. */
			dev->accessed++;
			gdev_unlock(&dev->global_lock);
			return;
		}
		/* device is blocked: drop the lock, yield, and retry. */
		gdev_unlock(&dev->global_lock);
		SCHED_YIELD();
	}
}
/* set the flag to block any access to the device.
   spins (yielding) while any context is accessing the device or another
   block is already in effect, then marks the device blocked under its
   global lock. operates on the physical device when one backs @gdev,
   otherwise on @gdev itself. */
void gdev_block_start(struct gdev_device *gdev)
{
	struct gdev_device *phys = gdev_phys_get(gdev);
	struct gdev_device *dev = phys ? phys : gdev;

	/* we have to spin while some context is accessing the GPU. */
	for (;;) {
		gdev_lock(&dev->global_lock);
		if (!dev->accessed && !dev->blocked) {
			/* quiescent: take the block and we are done. */
			dev->blocked++;
			gdev_unlock(&dev->global_lock);
			return;
		}
		/* busy or already blocked: drop the lock, yield, retry. */
		gdev_unlock(&dev->global_lock);
		SCHED_YIELD();
	}
}
/* poll until the resource becomes available.
   busy-polls the fence value for @seq on @ctx; after 100 ms of polling
   it starts yielding the CPU between reads. returns 0 once the fence
   matches (after resetting it), or -ETIME if @timeout (non-NULL)
   elapses first. */
int gdev_poll(struct gdev_ctx *ctx, uint32_t seq, struct gdev_time *timeout)
{
	struct gdev_time start, now, elapsed, relax_threshold;
	struct gdev_vas *vas = ctx->vas;
	struct gdev_device *gdev = vas->gdev;
	struct gdev_compute *compute = gdev->compute;

	gdev_time_stamp(&start);
	gdev_time_ms(&relax_threshold, 100); /* relax polling when 100 ms elapsed. */

	for (;;) {
		if (compute->fence_read(ctx, seq) == seq)
			break;
		gdev_time_stamp(&now);
		/* elapsed = now - start */
		gdev_time_sub(&elapsed, &now, &start);
		/* relax polling after some time. */
		if (gdev_time_ge(&elapsed, &relax_threshold))
			SCHED_YIELD();
		/* check timeout. */
		if (timeout && gdev_time_ge(&elapsed, timeout))
			return -ETIME;
	}

	compute->fence_reset(ctx, seq);
	return 0;
}
/* push one indirect-buffer (IB) entry onto the context's FIFO ring and
   kick the hardware. @base is the command buffer address, @len its
   length, @flags extra entry bits. blocks (yielding) while the ring is
   full, i.e. until the hardware GET pointer advances. */
void gdev_fifo_push(struct gdev_ctx *ctx, uint64_t base, uint32_t len, int flags)
{
	/* NOTE(review): @len and @flags are both shifted by 40 bits, so they
	   overlap in the packed entry — this looks suspicious; confirm the
	   intended IB entry layout against the hardware documentation. */
	uint64_t w = base | (uint64_t)len << 40 | (uint64_t)flags << 40;
	/* ring full when advancing PUT would collide with GET: refresh GET
	   from the hardware register (0x88) and yield if it hasn't moved. */
	while (((ctx->fifo.ib_put + 1) & ctx->fifo.ib_mask) == ctx->fifo.ib_get) {
		uint32_t old = ctx->fifo.ib_get;
		ctx->fifo.ib_get = __gdev_fifo_read_reg(ctx, 0x88);
		if (old == ctx->fifo.ib_get) {
			SCHED_YIELD();
		}
	}
	/* each IB slot is two 32-bit words: low half then high half of @w. */
	ctx->fifo.ib_map[ctx->fifo.ib_put * 2] = w;
	ctx->fifo.ib_map[ctx->fifo.ib_put * 2 + 1] = w >> 32;
	ctx->fifo.ib_put++;
	ctx->fifo.ib_put &= ctx->fifo.ib_mask;
	MB(); /* is this needed? */
	/* read back a ring word so the entry writes are posted before the
	   PUT register update below. */
	ctx->dummy = ctx->fifo.ib_map[0]; /* flush writes */
	/* advance the hardware PUT pointer (register 0x8c) to submit. */
	__gdev_fifo_write_reg(ctx, 0x8c, ctx->fifo.ib_put);
}