// Renders one frame via CUDA/D3D11 interop: maps the CUDA-registered D3D
// texture, runs the CUDA render kernel into the staging buffer, copies the
// result into the mapped resource, then draws it to the backbuffer with a
// single fullscreen triangle and presents (vsync interval 1).
// NOTE(review): `t` advances by a fixed 0.1 per frame regardless of frame
// time, so animation speed is tied to the frame rate — confirm intended.
void DirectXInterop::DrawFrame() {
    static float t = 0.0f; // animation time, advanced once per frame

    // Map the resources
    cudaGraphicsResource *resource = m_hdrTextureCuda->GetCurrentGraphicsResource();
    CE(cudaGraphicsMapResources(1, &resource)); // CE: CUDA error-check wrapper

    // Run the kernel
    RenderFrame(m_hdrTextureCuda->GetTextureData(), m_clientWidth, m_clientHeight, m_hdrTextureCuda->GetTexturePitch(), t);

    // Copy the frame over to the d3d texture
    m_hdrTextureCuda->CopyTextureDataToRegisteredResource();

    // Unmap the resources (must happen before D3D samples the texture)
    CE(cudaGraphicsUnmapResources(1, &resource));

    // Draw the frame to the screen: fullscreen-triangle VS + copy PS
    m_immediateContext->VSSetShader(m_fullscreenTriangleVS, nullptr, 0u);
    m_immediateContext->PSSetShader(m_copyCudaOutputToBackbufferPS, nullptr, 0u);
    ID3D11ShaderResourceView *hdrSRV = m_hdrTextureD3D->GetShaderResource();
    m_immediateContext->PSSetShaderResources(0, 1, &hdrSRV);
    m_immediateContext->Draw(3u, 0u); // 3 vertices, no vertex buffer needed
    m_swapChain->Present(1u, 0u);

    t += 0.1f;
}
/*
 * Debug (lock-asserting) variant with queue bookkeeping: enqueue `entry`
 * on `queue` ordered by `deadline`, panicking unless the caller holds both
 * the entry lock and the queue lock and the entry is not on some other
 * queue. Maintains the queue's cached earliest soft deadline and the
 * per-queue element counts. Returns the queue the entry was previously on
 * (NULL if none). CE()/QUEUE()/MPQUEUE() are the xnu cast macros.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense: */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	if (old_queue != NULL && old_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", old_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	/* For efficiency, track the earliest soft deadline on the queue, so that
	 * fuzzy decisions can be made without lock acquisitions. */
	queue->earliest_soft_deadline = ((timer_call_t)queue_first(&queue->head))->soft_deadline;

	if (old_queue)
		old_queue->count--;
	queue->count++;

	return (old_queue);
}
// Shared GL state container: keeps references to the cloud list, layer list
// and GL context, sets the default selection colour (opaque blue), allocates
// a small colour-lookup vertex buffer (room for 4 floats, i.e. one RGBA
// entry), and subscribes to cloud-deletion notifications from the cloud
// list. CE() here is a GL error-check macro; IF_FAIL logs on a false result.
GLData::GLData(QGLContext * glcontext, CloudList *cl, LayerList *ll, QObject *parent)
    : QObject(parent) {
    ll_ = ll;
    cl_ = cl;
    glcontext_ = glcontext;
    // RGBA selection colour: opaque blue.
    selection_color_[0] = 0.0f;
    selection_color_[1] = 0.0f;
    selection_color_[2] = 1.0f;
    selection_color_[3] = 1.0f;

    //
    // Set up color lookup buffer
    //
    glcontext_->makeCurrent(); // buffer operations below need a current context
    color_lookup_buffer_.reset(new QGLBuffer(QGLBuffer::VertexBuffer)); CE();
    IF_FAIL("create failed") = color_lookup_buffer_->create(); CE();
    IF_FAIL("bind failed") = color_lookup_buffer_->bind(); CE();
    color_lookup_buffer_->allocate(sizeof(float)*4); CE();
    color_lookup_buffer_->release(); CE();

    CloudList * clp = cl_;
    connect(clp, SIGNAL(deletingCloud(boost::shared_ptr<PointCloud>)),
            this, SLOT(deleteCloud(boost::shared_ptr<PointCloud>)));
}
/*
 * Minimal variant: dequeue `entry` from its current queue and decrement
 * that queue's element count. Returns the queue the entry was on.
 * NOTE(review): old_queue is dereferenced without a NULL check, unlike the
 * guarded sibling timer_call_entry_dequeue_async — callers must guarantee
 * the entry is actually enqueued; confirm at call sites.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	call_entry_dequeue(CE(entry));
	old_queue->count--;

	return old_queue;
}
/*
 * Drain `queue` (e.g. when its CPU goes offline): migrate every timer on
 * it to whichever queue timer_queue_assign() currently selects for the
 * timer's deadline. Runs with interrupts blocked (splclock). Timers whose
 * per-call lock cannot be taken (lock-order inversion, case 2b) are
 * async-dequeued and skipped rather than migrated.
 */
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x2b, 0);
#endif
			/* Drop and re-take the queue lock (loop header) so the
			 * lock holder can make progress. */
			timer_queue_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_queue_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_queue_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
/**************************************************
Function: SwitchToTxMode();

Description: switch the nRF24 radio to Tx mode:
  flush the TX FIFO, drop the CE pin, clear
  CONFIG.PRIM_RX, then raise CE again.
**************************************************/
void SwitchToTxMode()
{
	UINT8 value;

	SPI_Write_Reg(FLUSH_TX,0);//flush Tx

	CE(0);	// chip-enable low while reconfiguring
	value=SPI_Read_Reg(CONFIG);	// read register CONFIG's value
	//PTX
	value=value&0xfe;// clear bit 0 (PRIM_RX = 0 -> primary transmitter)
	SPI_Write_Reg(WRITE_REG | CONFIG, value); // keep PWR_UP/CRC bits, write back with PRIM_RX cleared
	CE(1);	// chip-enable high to activate TX mode
}
void GlCube::render() { if(!initalised()) initGL(); QOpenGLFunctions * f = QOpenGLContext::currentContext()->functions(); m_vao_constraints.bind(); CE(); f->glDrawElements(GL_TRIANGLES, m_indicies.size(), GL_UNSIGNED_INT, (void*)(0)); CE(); m_vao_constraints.release(); CE(); // Unbind }
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 *
 * Enqueue `call` on `queue` ordered by `deadline`, taking the per-call
 * lock and the relevant queue lock(s). Handles the call already being on
 * another queue — including a pending async dequeue (collision case 1c)
 * and the long-term queue — while respecting lock ordering: the old queue
 * lock is dropped before the new queue lock is taken.
 * Returns the queue the call was previously on (NULL if none).
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): timer already dequeued, clear flag */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x1c, 0);
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else if (old_queue != queue) {
			/* Physically remove from the old queue before re-queueing. */
			timer_call_entry_dequeue(call);
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		if (old_queue != queue) {
			/* Swap queue locks in order: release old, then take new. */
			timer_queue_unlock(old_queue);
			timer_queue_lock_spin(queue);
		}
	} else {
		timer_queue_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_queue_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
// One-time GL initialization for the flat (2D) scan view: initializes GLEW
// on Windows, sets the clear colour and multisampling, compiles and links
// the flatview shader program (vertex + fragment + geometry stages),
// resolves its uniform locations, and creates the texture and VAO used for
// rendering. CE() checks GL errors; RC() checks a resolved location.
void FlatView::initializeGL() {
#if defined(Q_OS_WIN32)
    glewExperimental = true;
    GLenum GlewInitResult = glewInit();
    if (GlewInitResult != GLEW_OK) {
        const GLubyte* errorStr = glewGetErrorString(GlewInitResult);
        size_t size = strlen(reinterpret_cast<const char*>(errorStr));
        qDebug() << "Glew error " << QString::fromUtf8(
                        reinterpret_cast<const char*>(errorStr), size);
    }
#endif
    //glClearColor(0.0, 0.0, 0.0, 1.0); CE();
    glClearColor(1.0, 1.0, 1.0, 1.0); CE();
    //glEnable(GL_CULL_FACE);
    glEnable(GL_MULTISAMPLE); CE();

    //
    // Load shader program
    //
    bool succ = program_.addShaderFromSourceFile(
                QGLShader::Vertex, ":/flatview.vs.glsl"); CE();
    if (!succ) qWarning() << "Shader compile log:" << program_.log();
    succ = program_.addShaderFromSourceFile(
                QGLShader::Fragment, ":/flatview.fs.glsl"); CE();
    if (!succ) qWarning() << "Shader compile log:" << program_.log();
    succ = program_.addShaderFromSourceFile(
                QGLShader::Geometry, ":/flatview.gs.glsl"); CE();
    if (!succ) qWarning() << "Shader compile log:" << program_.log();
    succ = program_.link(); CE();
    if (!succ) {
        // Link failure is fatal: the view cannot render without the program.
        qWarning() << "Could not link shader program_:" << program_.log();
        qWarning() << "Exiting...";
        abort();
    }

    //
    // Resolve uniforms
    //
    program_.bind(); CE();
    uni_sampler_ = program_.uniformLocation("sampler"); RC(uni_sampler_);
    uni_camera_ = program_.uniformLocation("camera"); RC(uni_camera_);
    program_.release();

    //
    // Set up textures & point size
    //
    glGenTextures(1, &texture_id_); CE();

    //
    // Generate vao
    //
    glGenVertexArrays(1, &vao_);

    gl_init_ = true;
}
/*
 * Dequeue `call` from whatever queue it is on, taking the per-call lock
 * and the queue lock. If an async dequeue already pulled it off the queue
 * (collision case 1c), just clear the flag; either way the entry's queue
 * back-pointer is reset to NULL before the locks are dropped.
 * Returns the queue the call was on (NULL if none).
 */
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t 		call)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async1++;
#endif
		} else {
			(void)remque(qe(entry));
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async2++;
#endif
		}
		entry->queue = NULL;
		timer_call_unlock(old_queue);
	}
	simple_unlock(&call->lock);
	return (old_queue);
}
void GLTerrainRect::render() { if(!initalised()) initGL(); QOpenGLFunctions * f = QOpenGLContext::currentContext()->functions(); f->glEnable(GL_BLEND);CE(); f->glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);CE(); m_vao_constraints.bind();CE(); f->glDrawElements(GL_TRIANGLE_STRIP, m_indicies.size(), GL_UNSIGNED_INT, (void*)(0)); CE();CE(); m_vao_constraints.release();CE(); f->glDisable(GL_BLEND);CE(); }
/*
 * Thin wrapper variant: enqueue `entry` on `queue` ordered by `deadline`
 * via the generic call_entry primitive. Returns the queue the entry was
 * previously on (NULL if none). No lock assertions or count/soft-deadline
 * bookkeeping in this variant.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	return MPQUEUE(call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline));
}
/**************************************************
Function: SwitchToRxMode();

Description: switch the nRF24 radio to Rx mode:
  flush the RX FIFO, clear any pending interrupt
  flags, drop the CE pin, set CONFIG.PRIM_RX,
  then raise CE again.
**************************************************/
void SwitchToRxMode()
{
	UINT8 value;

	SPI_Write_Reg(FLUSH_RX,0);//flush Rx

	value=SPI_Read_Reg(STATUS);	// read register STATUS's value
	SPI_Write_Reg(WRITE_REG|STATUS,value);// clear RX_DR or TX_DS or MAX_RT interrupt flag

	CE(0);	// chip-enable low while reconfiguring
	value=SPI_Read_Reg(CONFIG);	// read register CONFIG's value
	//PRX
	value=value|0x01;// set bit 0 (PRIM_RX = 1 -> primary receiver)
	SPI_Write_Reg(WRITE_REG | CONFIG, value); // keep PWR_UP/CRC bits, write back with PRIM_RX set
	CE(1);	// chip-enable high to activate RX mode
}
/*
 * Write a single byte to the comm channel fd `s_fd`; D() is debug logging,
 * CE() reports the failure.
 * NOTE(review): only r == -1 is treated as an error; a short write (r == 0)
 * passes silently — confirm that is acceptable for this transport.
 */
void sc_put(uint8_t c)
{
	D("writing to %d: %#02hhx ('%c')", s_fd, c, c);
	ssize_t r = write(s_fd, &c, 1);
	D("write: %zd", r);
	if (r == -1)
		CE("serial comm failed on write");
}
/*
 * Append `entry` at the tail of `queue` (no deadline ordering) and bump
 * the queue's element count.
 */
static __inline__ void
timer_call_entry_enqueue_tail(
	timer_call_t		entry,
	mpqueue_head_t		*queue)
{
	call_entry_enqueue_tail(CE(entry), QUEUE(queue));
	queue->count++;
}
/*
 * Open and configure a serial device for raw 8N1 communication at `baud`.
 * Returns the fd (also stored in the module-global s_fd); CE() reports
 * fatal errors. Registers sc_cleanup to run at process exit.
 * NOTE(review): O_DIRECT on a tty is unusual (it targets regular-file I/O);
 * O_NOCTTY is the typical flag here — confirm O_DIRECT is intended.
 */
int sc_init(const char *device, int baud)
{
	struct termios cntrl;
	int fd;

	fd = open(device, O_RDWR|O_DIRECT);
	if (fd == -1)
		CE("open '%s'", device);
	atexit(sc_cleanup);

	//if (flock(fd, LOCK_EX|LOCK_NB) != 0)
		//err(1, "flock");
	//if (ioctl(fd, TIOCEXCL, 0) != 0)
		//err(1, "ioctl TIOEXCL");

	if (tcgetattr(fd, &cntrl) != 0)
		CE("tcgetattr");
	if (cfsetospeed(&cntrl, baud) != 0)
		CE("cfsetospeed");
	if (cfsetispeed(&cntrl, baud) != 0)
		CE("cfsetispeed");

	/* Raw mode: 8 data bits, no parity, ignore modem-control lines,
	 * no input/output post-processing, no canonical line editing. */
	cntrl.c_cflag &= ~(CSIZE|PARENB);
	cntrl.c_cflag |= CS8;
	cntrl.c_cflag |= CLOCAL;
	cntrl.c_iflag &= ~(ISTRIP|ICRNL);
	cntrl.c_oflag &= ~OPOST;
	cntrl.c_lflag &= ~(ICANON|ISIG|IEXTEN|ECHO);
	/* Blocking reads: return after at least 1 byte, no inter-byte timeout. */
	cntrl.c_cc[VMIN] = 1;
	cntrl.c_cc[VTIME] = 0;
	if (tcsetattr(fd, TCSADRAIN, &cntrl) != 0)
		CE("tcsetattr");

	return s_fd = fd;
}
/*
 * Initialize a timer_call: bind its callout function and first parameter,
 * initialize its spin lock, and clear the async-dequeue flag.
 */
void
timer_call_setup(
	timer_call_t		call,
	timer_call_func_t	func,
	timer_call_param_t	param0)
{
	DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);

	call->async_dequeue = FALSE;
	simple_lock_init(&(call)->lock, 0);
	call_entry_setup(CE(call), func, param0);
}
/*
 * Remove timer entry from its queue but don't change the queue pointer
 * and set the async_dequeue flag. This is locking case 2b.
 * The stale queue back-pointer plus async_dequeue lets the enqueue/dequeue
 * paths detect the collision later (see timer_call_enqueue_deadline_unlocked).
 */
static __inline__ void
timer_call_entry_dequeue_async(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);
	if (old_queue) {
		old_queue->count--;
		(void) remque(qe(entry));
		entry->async_dequeue = TRUE;
	}
	return;
}
/*
 * Bookkeeping variant without lock assertions: enqueue `entry` on `queue`
 * ordered by `deadline`, maintain the queue's cached earliest soft deadline
 * and the per-queue element counts. Returns the queue the entry was
 * previously on (NULL if none).
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t			entry,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	/* For efficiency, track the earliest soft deadline on the queue,
	 * so that fuzzy decisions can be made without lock acquisitions. */
	queue->earliest_soft_deadline = ((timer_call_t)queue_first(&queue->head))->soft_deadline;

	if (old_queue)
		old_queue->count--;
	queue->count++;

	return old_queue;
}
/*
 * Connect to host:port over TCP and use the socket as the comm channel.
 * Returns the fd (also stored in the module-global s_fd); CE() reports
 * connection failure. Registers sc_cleanup to run at process exit.
 */
int sc_init_tcp(const char *host, uint16_t port)
{
	int fd = addr_connect_socket_p(host, port, NULL, NULL, 0, 0);
	if (fd == -1)
		CE("addr_connect_socket_p");
	atexit(sc_cleanup);
	return s_fd = fd;
}
/*
 * Cancel a pending timer call (variant with KDEBUG tracing and cached
 * earliest-soft-deadline maintenance). Runs with interrupts blocked.
 * Returns TRUE if the call was actually armed (found on a queue).
 */
boolean_t
timer_call_cancel(
	timer_call_t		call)
{
	mpqueue_head_t		*old_queue;
	spl_t			s;

	s = splclock();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_CANCEL | DBG_FUNC_START,
		call,
		CE(call)->deadline, call->soft_deadline, call->flags, 0);

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (!queue_empty(&old_queue->head)) {
			/* Tell the platform layer the new head deadline and
			 * refresh the cached earliest soft deadline. */
			timer_queue_cancel(old_queue, CE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
			old_queue->earliest_soft_deadline = ((timer_call_t)queue_first(&old_queue->head))->soft_deadline;
		}
		else {
			/* Queue now empty: no next deadline. */
			timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
			old_queue->earliest_soft_deadline = UINT64_MAX;
		}
		timer_queue_unlock(old_queue);
	}

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_CANCEL | DBG_FUNC_END,
		call,
		old_queue,
		CE(call)->deadline - mach_absolute_time(),
		CE(call)->deadline - CE(call)->entry_time, 0);

	splx(s);

#if CONFIG_DTRACE
	DTRACE_TMR6(callout__cancel, timer_call_func_t, CE(call)->func,
	    timer_call_param_t, CE(call)->param0, uint32_t, call->flags, 0,
	    (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

	return (old_queue != NULL);
}
/*
 * Debug (lock-asserting) variant: dequeue `entry` from its queue,
 * panicking unless the caller holds both the entry lock and the queue's
 * lock. Returns the queue the entry was on.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_dequeue() "
			"entry %p is not locked\n", entry);
	/*
	 * XXX The queue lock is actually a mutex in spin mode
	 *     but there's no way to test for it being held
	 *     so we pretend it's a spinlock!
	 */
	if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
		panic("_call_entry_dequeue() "
			"queue %p is not locked\n", old_queue);

	call_entry_dequeue(CE(entry));

	return (old_queue);
}
/*
 * Read a single byte from the comm channel fd `s_fd` (blocking read).
 * NOTE(review): on failure this returns (uint8_t)-1 == 0xFF, which is
 * indistinguishable from a legitimate 0xFF data byte, so callers cannot
 * detect errors from the return value alone. If CE() terminates the
 * process the return is unreachable — confirm CE()'s semantics.
 */
uint8_t sc_get(void)
{
	uint8_t c = 0;
	D("reading from %d...", s_fd);
	ssize_t r = read(s_fd, &c, 1);
	D("read: %zd: %#02hhx ('%c')", r, c, c);
	if (r != 1) {
		CE("serial comm failed on read (%zd)", r);
		return -1;
	}
	return c;
}
/*
 * Debug (lock-asserting) variant without count/soft-deadline bookkeeping:
 * enqueue `entry` on `queue` ordered by `deadline`, panicking unless the
 * caller holds both the entry lock and the queue lock and the entry is not
 * on some other queue. Returns the queue the entry was previously on
 * (NULL if none).
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense: */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	if (old_queue != NULL && old_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", old_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	return (old_queue);
}
/*
 * Cancel a pending timer call (minimal variant: no tracing, no cached
 * earliest-soft-deadline maintenance). Runs with interrupts blocked.
 * Returns TRUE if the call was actually armed (found on a queue).
 */
boolean_t
timer_call_cancel(
	timer_call_t		call)
{
	mpqueue_head_t		*old_queue;
	spl_t			s;

	s = splclock();

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (!queue_empty(&old_queue->head))
			/* Tell the platform layer the new head deadline. */
			timer_queue_cancel(old_queue, CE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
		else
			/* Queue now empty: no next deadline. */
			timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
		timer_call_unlock(old_queue);
	}

	splx(s);

	return (old_queue != NULL);
}
/*
 * Arm `call` to fire at `deadline` with `param1`, applying timer
 * coalescing slop when permitted and clamping deadlines already in the
 * past. Runs with interrupts blocked. Returns TRUE if the call was
 * already armed (i.e. found on a queue) when re-entered.
 */
static boolean_t
timer_call_enter_internal(
	timer_call_t 		call,
	timer_call_param_t	param1,
	uint64_t 		deadline,
	uint32_t 		flags)
{
	mpqueue_head_t		*queue;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t 		slop = 0;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	/* Non-critical timers may be coalesced: push the hard deadline out
	 * by a slop window so nearby timers can share a wakeup. */
	if ((flags & TIMER_CALL_CRITICAL) == 0 &&
	     mach_timer_coalescing_enabled) {
		slop = timer_call_slop(deadline);
		deadline += slop;
	}

#if defined(__i386__) || defined(__x86_64__)
	uint64_t ctime = mach_absolute_time();
	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		/* Debug accounting for timers armed in the past. */
		past_deadline_timers++;
		past_deadline_deltas += delta;
		if (delta > past_deadline_longest)
			/* Fix: record the delta (matching the comparison and the
			 * `shortest` branch below); previously this stored the
			 * absolute `deadline`, which is not a delta at all. */
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		/* Nudge the deadline slightly into the future so it still fires. */
		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}
#endif
	queue = timer_queue_assign(deadline);

	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);

	CE(call)->param1 = param1;

	splx(s);

	return (old_queue != NULL);
}
// One-time GL resource setup: creates the VAO/VBO/IBO triple, binds them so
// the index-buffer binding and attribute-pointer state are captured by the
// VAO, configures attribute 0 as tightly-packed 3-float positions, then
// unbinds everything and uploads the geometry via fillBuffers().
void GLTerrainRect::initGL()
{
    m_vao_constraints.create();
    m_vbo_constraints.create();
    m_ibo_constraints.create();

    // Bind the VAO first so the IBO binding below is recorded in it.
    m_vao_constraints.bind(); CE();

    m_vbo_constraints.bind(); CE();
    m_vbo_constraints.setUsagePattern(QOpenGLBuffer::UsagePattern::StaticDraw); CE();

    m_ibo_constraints.bind(); CE();
    m_ibo_constraints.setUsagePattern(QOpenGLBuffer::UsagePattern::StaticDraw); CE();

    QOpenGLFunctions * f = QOpenGLContext::currentContext()->functions();
    // Attribute 0: 3 floats per vertex, tightly packed, offset 0.
    f->glEnableVertexAttribArray(0); CE();
    f->glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)(0)); CE();

    m_vao_constraints.release(); CE();
    m_vbo_constraints.release(); CE();
    m_ibo_constraints.release(); CE();

    fillBuffers();
}
void FlatView::resizeGL(int width, int height) { glViewport(0, 0, width, qMax(height, 1)); if(pc_.expired()) return; auto pc = pc_.lock(); float war = width/float(height); float sar = pc->scan_width()/float(pc->scan_height()); float cfx = sar/war; float cfy = (1/sar)/(1/war); // screen if wider than scan if(war < sar){ aspect_ = Eigen::Vector2f(2.0f/pc->scan_width(), 2.0/(cfx*pc->scan_height())); } else { aspect_ = Eigen::Vector2f(2.0/(cfy*pc->scan_width()), 2.0f/pc->scan_height()); } program_.bind(); CE(); glUniformMatrix3fv(uni_camera_, 1, GL_FALSE, getCamera().data()); CE(); program_.release(); CE(); }
/*
 * _call_dequeue:
 *
 * Remove an entry from a queue.
 *
 * Returns TRUE if the entry was on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
	thread_call_t		call,
	thread_call_group_t	group)
{
	queue_head_t *old_queue = call_entry_dequeue(CE(call));

	if (old_queue == NULL)
		return (FALSE);

	/* The entry was armed: account for its completion, and drop the
	 * pending count if it came off the pending queue. */
	call->tc_finish_count++;
	if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (TRUE);
}
/*
 * _delayed_call_enqueue:
 *
 * Place an entry on the delayed queue,
 * after existing entries with an earlier
 * (or identical) deadline.
 *
 * Returns TRUE if the entry was already
 * on a queue.
 *
 * Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
	thread_call_t		call,
	thread_call_group_t	group,
	uint64_t		deadline)
{
	queue_head_t *old_queue =
	    call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);

	/* The two cases are mutually exclusive: a fresh submission counts
	 * toward tc_submit_count; a move off the pending queue drops the
	 * pending count. */
	if (old_queue == NULL)
		call->tc_submit_count++;
	else if (old_queue == &group->pending_queue)
		group->pending_count--;

	return (old_queue != NULL);
}