static VALUE buffer_outvar(VALUE self) { GET_BUFFER(); buffer->outvar = Qtrue; return self; }
void render_vertexbuffer_set_num_elements(object_t id, size_t num) {
    render_vertexbuffer_t* buffer = GET_BUFFER(id);
    if (buffer) {
        buffer->used = (buffer->allocated < num) ? buffer->allocated : num;
        buffer->flags |= RENDERBUFFER_DIRTY;
    }
}
static VALUE buffer_dirty(VALUE self) {
    GET_BUFFER();
    if (buffer->dirty == Qtrue) return Qtrue;
    if (buffer->data == NULL) return Qtrue;
    if (buffer->cachebuf == NULL) return Qtrue;
    if (RARRAY_LEN(self) != buffer->num_items) return Qtrue;
    if (SYM2ID(rb_funcall(self, id_data_type, 0)) != buffer->type) return Qtrue;
    return Qfalse;
}
void render_vertexbuffer_restore(object_t id) {
    render_vertexbuffer_t* buffer = GET_BUFFER(id);
    if (buffer) {
        buffer->backend->vtable.allocate_buffer(buffer->backend, (render_buffer_t*)buffer);
        //...
        //All loadable resources should have a stream identifier, an offset and a size
        //to be able to reopen the stream and read the raw buffer back
        //...
        buffer->flags |= RENDERBUFFER_DIRTY;
    }
}
void render_buffer_lock(object_t id, unsigned int lock) {
    render_buffer_t* buffer = GET_BUFFER(id);
    if (render_buffer_ref(id) != id)
        return;
    if (lock & RENDERBUFFER_LOCK_WRITE) {
        atomic_incr32(&buffer->locks);
        buffer->access = buffer->store;
    }
    else if (lock & RENDERBUFFER_LOCK_READ) {
        atomic_incr32(&buffer->locks);
        buffer->access = buffer->store;
    }
    buffer->flags |= (lock & RENDERBUFFER_LOCK_BITS);
}
static VALUE buffer_update_cache(VALUE self) {
    GET_BUFFER();
    if (buffer_dirty(self) == Qtrue) {
        size_t old_num_items = buffer->num_items;
        buffer->num_items = RARRAY_LEN(self);
        buffer->type = SYM2ID(rb_funcall(self, id_data_type, 0));
        buffer->member_size = FIX2INT(rb_hash_aref(rb_hTypes, ID2SYM(buffer->type)));
        if (buffer->num_items != old_num_items)
            buffer_size_changed(buffer);
        buffer->dirty = Qfalse;
        return Qtrue;
    }
    return Qnil;
}
void render_buffer_unlock(object_t id) {
    render_buffer_t* buffer = GET_BUFFER(id);
    if (!atomic_load32(&buffer->locks))
        return;
    if (atomic_decr32(&buffer->locks) == 0) {
        // Last lock released: drop the access pointer and, for a dirty write
        // lock, upload now if the policy or a force flag requires it
        buffer->access = nullptr;
        if ((buffer->flags & RENDERBUFFER_LOCK_WRITE) && !(buffer->flags & RENDERBUFFER_LOCK_NOUPLOAD)) {
            buffer->flags |= RENDERBUFFER_DIRTY;
            if ((buffer->policy == RENDERBUFFER_UPLOAD_ONUNLOCK) || (buffer->flags & RENDERBUFFER_LOCK_FORCEUPLOAD))
                render_buffer_upload(buffer);
        }
        buffer->flags &= ~RENDERBUFFER_LOCK_BITS;
    }
    // Drop the reference acquired by render_buffer_lock()
    render_buffer_destroy(id);
}
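/* A minimal usage sketch of the lock/unlock API above. Assumptions not taken
 * from this file: the vertex buffer id `vb` was created elsewhere, the same id
 * is accepted by the generic render_buffer_lock()/render_buffer_unlock() calls,
 * and my_vertex_t / fill_vertex() are hypothetical caller-side names used only
 * for illustration. */
static void example_fill_vertices(object_t vb, size_t count) {
    render_buffer_lock(vb, RENDERBUFFER_LOCK_WRITE);
    for (size_t i = 0; i < count; ++i) {
        /* render_vertexbuffer_element() offsets into buffer->access,
         * which is only valid while the buffer is locked */
        my_vertex_t* v = (my_vertex_t*)render_vertexbuffer_element(vb, i);
        fill_vertex(v, i); /* hypothetical helper writing one vertex */
    }
    render_vertexbuffer_set_num_elements(vb, count);
    /* Unlocking marks the buffer dirty and, under the
     * RENDERBUFFER_UPLOAD_ONUNLOCK policy, uploads it to the backend */
    render_buffer_unlock(vb);
}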
void render_buffer_destroy(object_t id) {
    int32_t ref;
    render_buffer_t* buffer = GET_BUFFER(id);
    if (buffer) {
        // Decrement the reference count with a CAS loop; the last reference
        // frees the object map slot, the backend storage and the buffer itself
        do {
            ref = atomic_load32(&buffer->ref);
            if ((ref > 0) && atomic_cas32(&buffer->ref, ref - 1, ref)) {
                if (ref == 1) {
                    objectmap_free(_render_map_buffer, id);
                    buffer->backend->vtable.deallocate_buffer(buffer->backend, buffer, true, true);
                    memory_deallocate(buffer);
                }
                return;
            }
        } while (ref > 0);
    }
}
static VALUE buffer_read(VALUE self, cl_command_queue queue) {
    unsigned int i, index;
    GET_BUFFER();
    if (buffer->outvar != Qtrue)
        return Qnil;
    if (queue != NULL) {
        // Blocking read of the device buffer into the host-side cache
        clEnqueueReadBuffer(queue, buffer->data, CL_TRUE, 0,
                            buffer->num_items * buffer->member_size, buffer->cachebuf, 0, NULL, NULL);
    }
    // Convert each cached native element back into a Ruby value
    for (i = 0, index = 0; i < buffer->num_items; i++, index += buffer->member_size) {
        VALUE value = type_to_ruby(buffer->cachebuf + index, buffer->type);
        rb_ary_store(self, i, value);
    }
    return self;
}
static VALUE buffer_write(VALUE self, cl_command_queue queue) {
    unsigned int i, index;
    unsigned long data_ptr[16]; // data buffer
    GET_BUFFER();
    if (NIL_P(RARRAY_PTR(self)[0]))
        return Qnil;
    // Convert each Ruby element to its native representation in the host cache
    for (i = 0, index = 0; i < buffer->num_items; i++, index += buffer->member_size) {
        VALUE item = RARRAY_PTR(self)[i];
        type_to_native(item, buffer->type, data_ptr);
        memcpy(buffer->cachebuf + index, data_ptr, buffer->member_size);
    }
    if (queue != NULL) {
        // Blocking write of the host-side cache to the device buffer
        clEnqueueWriteBuffer(queue, buffer->data, CL_TRUE, 0,
                             buffer->num_items * buffer->member_size, buffer->cachebuf, 0, NULL, NULL);
    }
    return self;
}
render_buffer_uploadpolicy_t render_vertexbuffer_upload_policy(object_t id) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? (render_buffer_uploadpolicy_t)buffer->policy : RENDERBUFFER_UPLOAD_ONDISPATCH; }
void render_vertexbuffer_set_uuid(object_t id, const uuid_t uuid) { render_vertexbuffer_t* buffer = GET_BUFFER(id); if (buffer) buffer->uuid = uuid; }
void render_vertexbuffer_set_upload_policy(object_t id, render_buffer_uploadpolicy_t policy) { render_vertexbuffer_t* buffer = GET_BUFFER(id); if (buffer) buffer->policy = policy; }
static VALUE buffer_mark_dirty(VALUE self) { GET_BUFFER(); return (buffer->dirty = Qtrue); }
void render_vertexbuffer_upload(object_t id) { render_buffer_t* buffer = GET_BUFFER(id); if (buffer) render_buffer_upload(buffer); }
static VALUE buffer_is_outvar(VALUE self) { GET_BUFFER(); return buffer->outvar; }
size_t render_vertexbuffer_num_elements(object_t id) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? buffer->used : 0; }
void* render_vertexbuffer_element(object_t id, size_t element) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? pointer_offset(buffer->access, buffer->size * element) : 0; }
size_t render_vertexbuffer_num_allocated(object_t id) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? buffer->allocated : 0; }
const render_vertex_decl_t* render_vertexbuffer_decl(object_t id) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? &buffer->decl : 0; }
size_t render_vertexbuffer_element_size(object_t id) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? buffer->size : 0; }
void render_vertexbuffer_release(object_t id, bool sys, bool aux) { render_vertexbuffer_t* buffer = GET_BUFFER(id); if (buffer) buffer->backend->vtable.deallocate_buffer(buffer->backend, (render_buffer_t*)buffer, sys, aux); }
uuid_t render_vertexbuffer_uuid(object_t id) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? buffer->uuid : uuid_null(); }
int tel_putdata(struct buffer *bp) {
    unsigned char c;
    int ret;
    int size;
    int frombuf;

    while (bp->b_hold) {
        if (Nvt.servertype == SRV_SOCKET) {
            size = min(bp->b_hold, Nvt.iosize);
            COPY_FROM_BUFFER(bp, Comobuf, size);
            if ((ret = sock_write(Comobuf, size)) != size) {
                if (ret < 0) {
                    return (ret);
                } else {
                    frombuf = size - ret;
                    REWIND_BUFFER(bp, frombuf);
                }
                break;
            }
        } else {
            /* OH OH -> Handling IAC */
            frombuf = min(bp->b_hold, Nvt.iosize);
            size = 0;
            while (frombuf--) {
                c = GET_BUFFER(bp);
                if (c == IAC) {
                    if (size == Nvt.iosize - 1) {
                        /* avoid break IAC mapping */
                        REWIND_BUFFER(bp, 1);
                        frombuf++;
                        break;
                    }
                    Comobuf[size++] = IAC;
                }
                Comobuf[size++] = c;
                if (size == Nvt.iosize) {
                    break;
                }
            }
/* the following used to be #if 1 in the linux tree and not included for HPUX */
#ifdef __linux__
            frombuf = 0;
            while (size > 0) {
                if ((ret = sock_write(&Comobuf[frombuf], size)) == size) {
                    break;
                }
                if (Debug > 2) {
                    sysmessage(MSG_DEBUG, "Sock write: %d of %d\n", ret, size);
                }
                if (ret < 0) {
                    return (ret);
                }
                frombuf += ret;
                size -= ret;
            }
#else
            if ((ret = sock_write(Comobuf, size)) != size) {
                if (Debug > 2) {
                    sysmessage(MSG_DEBUG, "Sock write: %d\n", ret);
                }
                if (ret < 0) {
                    return (ret);
                } else {
                    frombuf = 0;
                    while (size-- > ret) {
                        if ((c = Comobuf[size - 1]) == IAC) {
                            continue;
                        }
                        frombuf++;
                    }
                }
                if (Debug > 2) {
                    sysmessage(MSG_DEBUG, "Buffer rewind %d\n", frombuf);
                }
                REWIND_BUFFER(bp, frombuf);
                break;
            }
#endif
        }
    }
    if (bp->b_hold == 0) {
        RESET_BUFFER(bp);
    }
    return (0);
}
render_usage_t render_vertexbuffer_usage(object_t id) { render_vertexbuffer_t* buffer = GET_BUFFER(id); return buffer ? (render_usage_t)buffer->usage : RENDERUSAGE_INVALID; }