/*
 * Play on @sock.
 * The session must be in the playing phase.
 * Return 0 when the session ended, -1 on error.
 */
int
play(int sock)
{
    /*
     * Player input flows from INPUT_FD through recv_input() into ring
     * buffer INBUF, which drains into SOCK.  This must not block.
     * Server output flows from SOCK into recv_output().  Reading SOCK
     * must not block.
     */
    struct sigaction sa;
    struct ring inbuf;		/* input buffer, draining to SOCK */
    int eof_fd0;		/* read fd 0 hit EOF? */
    int partial_line_sent;	/* partial input line sent? */
    fd_set rdfd, wrfd;
    int n;

    sa.sa_flags = 0;
    sigemptyset(&sa.sa_mask);
    sa.sa_handler = intr;
    sigaction(SIGINT, &sa, NULL);
    sa.sa_handler = SIG_IGN;
    sigaction(SIGPIPE, &sa, NULL);

    ring_init(&inbuf);
    eof_fd0 = partial_line_sent = send_eof = send_intr = 0;
    input_fd = 0;
    sysdep_stdin_init();

    for (;;) {
	FD_ZERO(&rdfd);
	FD_ZERO(&wrfd);
	/*
	 * Want to read player input only when we don't need to send
	 * cookies, and INPUT_FD is still open, and INBUF can accept
	 * some.
	 */
	if (!send_intr && !send_eof && input_fd >= 0 && ring_space(&inbuf))
	    FD_SET(input_fd, &rdfd);
	/* Want to send player input only when we have something */
	if (send_intr || send_eof || ring_len(&inbuf))
	    FD_SET(sock, &wrfd);
	/* Always want to read server output */
	FD_SET(sock, &rdfd);

	n = select(MAX(input_fd, sock) + 1, &rdfd, &wrfd, NULL, NULL);
	if (n < 0) {
	    if (errno != EINTR) {
		perror("select");
		return -1;
	    }
	}

	if ((send_eof || send_intr) && partial_line_sent
	    && ring_putc(&inbuf, '\n') != EOF)
	    partial_line_sent = 0;
	if (send_eof && !partial_line_sent
	    && ring_putm(&inbuf, EOF_COOKIE, sizeof(EOF_COOKIE) - 1) >= 0)
	    send_eof--;
	if (send_intr && !partial_line_sent
	    && ring_putm(&inbuf, INTR_COOKIE, sizeof(INTR_COOKIE) - 1) >= 0) {
	    send_intr = 0;
	    if (input_fd) {
		/* execute aborted, switch back to fd 0 */
		close(input_fd);
		input_fd = eof_fd0 ? -1 : 0;
	    }
	}
	if (n < 0)
	    continue;

	/* read player input */
	if (input_fd >= 0 && FD_ISSET(input_fd, &rdfd)) {
	    n = recv_input(input_fd, &inbuf);
	    if (n < 0) {
		perror("read stdin"); /* FIXME stdin misleading, could be execing */
		n = 0;
	    }
	    if (n == 0) {
		/* EOF on input */
		send_eof++;
		if (input_fd) {
		    /* execute done, switch back to fd 0 */
		    close(input_fd);
		    input_fd = eof_fd0 ? -1 : 0;
		} else {
		    /* stop reading input, drain socket ring buffers */
		    eof_fd0 = 1;
		    input_fd = -1;
		    sa.sa_handler = SIG_DFL;
		    sigaction(SIGINT, &sa, NULL);
		}
	    } else
		partial_line_sent = ring_peek(&inbuf, -1) != '\n';
	}

	/* send it to the server */
	if (FD_ISSET(sock, &wrfd)) {
	    n = ring_to_file(&inbuf, sock);
	    if (n < 0) {
		perror("write socket");
		return -1;
	    }
	}

	/* read server output and print it */
	if (FD_ISSET(sock, &rdfd)) {
	    n = recv_output(sock);
	    if (n < 0) {
		perror("read socket");
		return -1;
	    }
	    if (n == 0)
		return 0;
	}
    }
}
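/*
 * play() above leans on a small byte-ring API: ring_init(), ring_space(),
 * ring_len(), ring_putc(), ring_putm(), ring_peek(), ring_to_file(), plus
 * recv_input()/recv_output().  The real definitions live elsewhere in the
 * client; what follows is a minimal sketch of such a ring under semantics
 * inferred from the call sites (fixed power-of-two capacity, EOF on
 * overflow, all-or-nothing ring_putm()).  Names and sizes here are
 * illustrative, not the actual client implementation.
 */
#include <stdio.h>	/* EOF */
#include <unistd.h>	/* write(), ssize_t */

#define RING_SIZE 4096	/* assumed capacity, must be a power of two */

struct ring {
    unsigned cons, prod;		/* consumer and producer indexes */
    unsigned char buf[RING_SIZE];
};

static void ring_init(struct ring *r) { r->cons = r->prod = 0; }

/* Number of bytes currently stored. */
static int ring_len(struct ring *r) { return r->prod - r->cons; }

/* Number of bytes that can still be stored. */
static int ring_space(struct ring *r) { return RING_SIZE - ring_len(r); }

/* Append one byte; return it, or EOF when the ring is full. */
static int ring_putc(struct ring *r, unsigned char c)
{
    if (!ring_space(r))
	return EOF;
    r->buf[r->prod++ & (RING_SIZE - 1)] = c;
    return c;
}

/* Append SZ bytes from P, all or nothing; return 0, or -1 when full. */
static int ring_putm(struct ring *r, const void *p, size_t sz)
{
    const unsigned char *s = p;
    size_t i;

    if ((size_t)ring_space(r) < sz)
	return -1;
    for (i = 0; i < sz; i++)
	ring_putc(r, s[i]);
    return 0;
}

/* Peek at the N-th byte; N < 0 counts from the end.  EOF when out of range. */
static int ring_peek(struct ring *r, int n)
{
    unsigned idx = n >= 0 ? r->cons + n : r->prod + n;

    if (idx - r->cons >= (unsigned)ring_len(r))
	return EOF;
    return r->buf[idx & (RING_SIZE - 1)];
}

/* Drain as much as one write() accepts into FD; bytes written, or -1. */
static int ring_to_file(struct ring *r, int fd)
{
    unsigned from = r->cons & (RING_SIZE - 1);
    unsigned contig = RING_SIZE - from;	/* bytes before wrap-around */
    unsigned len = ring_len(r);
    ssize_t n;

    if (contig > len)
	contig = len;
    n = write(fd, r->buf + from, contig);
    if (n >= 0)
	r->cons += n;
    return (int)n;
}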
static int init_ring_common(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	u32 head;

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_REPORT_64K | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
	    I915_READ_START(ring) != obj->gtt_offset ||
	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
	}

	return 0;
}
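/*
 * init_ring_common() finishes by caching HEAD/TAIL and computing the free
 * space in the ring.  In i915 of this vintage, ring_space() is the usual
 * circular-buffer arithmetic, with a few bytes held in reserve so the tail
 * never catches up with the head; essentially:
 */
static int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);

	if (space < 0)
		space += ring->size;
	return space;
}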
int nugpgpu_ringbuffer_render_init(struct nugpgpu_private *gpu_priv)
{
  int ret;
  u32 head;
  u32 temp;	/* scratch value for dword_check() */

  printk(LOG_INFO "nugpgpu_ringbuffer_render_init\n" LOG_END);
  TRACE_IN

  RING->mmio_base = RENDER_RING_BASE;
  RING->size = PAGE_SIZE * RING_PAGES;

  /* Allocate the status page. */
  ret = allocate_object(gpu_priv, &RING->status_obj, 1);
  if (ret) {
    printk(LOG_ERR "Failed to allocate the status page\n" LOG_END);
    return 1;
  }

  RING->gva_status = nugpgpu_gtt_insert(gpu_priv, RING->status_obj.pg_list,
                                        NUGPGPU_CACHE_LLC);
  if (RING->gva_status == (unsigned int)-1) {
    printk(LOG_ERR "Failed to insert the status page in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_status : 0x%x\n" LOG_END,
         (unsigned int)RING->gva_status);

  RING->page_status = kmap(sg_page(RING->status_obj.pg_list->sgl));
  if (RING->page_status == NULL) {
    printk(LOG_ERR "Failed to map page_status\n" LOG_END);
    return 1;
  }

  memset(RING->page_status, 0, PAGE_SIZE);
  printk(LOG_INFO "RING->page_status : 0x%lx\n" LOG_END,
         (unsigned long)RING->page_status);

  /* Allocate the ringbuffer object. */
  ret = allocate_object(gpu_priv, &RING->ringbuf_obj, RING_PAGES);
  if (ret) {
    printk(LOG_ERR "Failed to allocate the ringbuffer object\n" LOG_END);
    return 1;
  }

  RING->gva_ringbuffer = nugpgpu_gtt_insert(gpu_priv,
                                            RING->ringbuf_obj.pg_list,
                                            NUGPGPU_CACHE_LLC);
  if (RING->gva_ringbuffer == (unsigned int)-1) {
    printk(LOG_ERR "Failed to insert the ringbuffer in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_ringbuffer : 0x%x\n" LOG_END,
         (unsigned int)RING->gva_ringbuffer);

  RING->page_ringbuffer = kmap(sg_page(RING->ringbuf_obj.pg_list->sgl));
  if (RING->page_ringbuffer == NULL) {
    printk(LOG_ERR "Failed to map page_ringbuffer\n" LOG_END);
    return 1;
  }

  RING->virtual_start = ioremap_wc(gpu_priv->gtt.mappable_base + PAGE_SIZE,
                                   RING->size);
  if (RING->virtual_start == NULL) {
    printk(LOG_ERR "Problem while mapping virtual start ioremap_wc\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "Allocated the ringbuffer\n" LOG_END);

  /* Initialize the ring now. */
  gpu_forcewake_get(gpu_priv);

  /* Write the status page register. */
  printk(LOG_INFO "writing status page register\n" LOG_END);
  NUGPGPU_WRITE(RENDER_HWS_PGA_GEN7, RING->gva_status);
  NUGPGPU_READ(RENDER_HWS_PGA_GEN7);
  flushtlb(gpu_priv);

  /* Stop the ring. */
  printk(LOG_INFO "stopping ring\n" LOG_END);
  RING_WRITE_CTL(RING, 0);
  RING_WRITE_HEAD(RING, 0);
  RING_WRITE_TAIL(RING, 0);

  /* The doc says reading back enforces ordering between multiple writes. */
  head = RING_READ_HEAD(RING) & RING_HEAD_ADDR;
  if (head != 0) {
    printk(LOG_ERR "failed to set head to zero\n" LOG_END);
    RING_WRITE_HEAD(RING, 0);
    if (RING_READ_HEAD(RING) & RING_HEAD_ADDR) {
      printk(LOG_ERR "failed to set ring head to zero "
                     "ctl %08x head %08x tail %08x start %08x\n" LOG_END,
             RING_READ_CTL(RING),
             RING_READ_HEAD(RING),
             RING_READ_TAIL(RING),
             RING_READ_START(RING));
    }
  }

  /* As in the i915 driver: enforce ordering by reading HEAD back. */
  RING_READ_HEAD(RING);

  /* Comment taken directly from the i915 driver:
   * Initialize the ring. This must happen _after_ we've cleared the ring
   * registers with the above sequence (the readback of the HEAD registers
   * also enforces ordering), otherwise the hw might lose the new ring
   * register values.
   */
  RING_WRITE_START(RING, RING->gva_ringbuffer);
  RING_WRITE_CTL(RING,
                 ((RING->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);

  /* If the head is still not zero, the ring is dead. */
  if (wait_for((RING_READ_CTL(RING) & RING_VALID) != 0 &&
               RING_READ_START(RING) == RING->gva_ringbuffer &&
               (RING_READ_HEAD(RING) & RING_HEAD_ADDR) == 0, 50)) {
    printk(LOG_ERR "failed to start the ring\n" LOG_END);
    return -EIO;
  }

  RING->head = RING_READ_HEAD(RING);
  RING->tail = RING_READ_TAIL(RING) & RING_TAIL_ADDR;
  RING->space = ring_space(RING);
  printk(LOG_INFO "ring->space = %d\n" LOG_END, RING->space);

  gpu_forcewake_put(gpu_priv);

  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
  RING_WRITE_MODE_GEN7(RING,
                       _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                       _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  RING_WRITE_INSTPM(RING, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

  dword_check(gpu_priv, RING, temp);

  TRACE_OUT
  return 0;
}
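/*
 * The MODE/INSTPM writes above use the masked-bit convention of Intel GPU
 * registers: the upper 16 bits of the written value select which of the
 * lower 16 bits actually take effect, so individual flags can be flipped
 * without a read-modify-write cycle.  The helpers match their definitions
 * in the i915 driver:
 */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)
/*
 * Likewise, wait_for(COND, MS) in the ring-start check above is assumed to
 * poll COND until it holds or MS milliseconds elapse and to return nonzero
 * on timeout, as the i915 macro of the same name does; that is why a
 * nonzero result is treated as a dead ring.
 */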