static enum ymsglooper_state get_state(struct ymsglooper *ml) { enum ymsglooper_state st; lock_state(ml); st = get_state_locked(ml); unlock_state(ml); return st; }
void GC_locker::stall_until_clear() { assert(!JavaThread::current()->in_critical(), "Would deadlock"); MutexLocker ml(JNICritical_lock); // Wait for _needs_gc to be cleared jlong current_state = lock_state(); while (GC_locker::needs_gc(current_state)) { JNICritical_lock.wait(); } }
// Start an asynchronous accept on a fiber acceptor: queue a pending accept
// operation and return the handler's async_result.
//
// @param impl           acceptor implementation (demux, id, op queue)
// @param peer           socket that will receive the accepted fiber
// @param peer_endpoint  accepted peer endpoint slot (not used in this body)
// @param handler        completion handler invoked with an error_code
// The trailing enable_if parameter restricts overload resolution to
// protocols convertible to this service's Protocol.
async_accept(implementation_type& impl,
             basic_socket<Protocol1, SocketService>& peer,
             endpoint_type* peer_endpoint,
             BOOST_ASIO_MOVE_ARG(AcceptHandler) handler,
             typename enable_if<
                 is_convertible<Protocol, Protocol1>::value>::type* = 0) {
  boost::asio::detail::async_result_init<AcceptHandler,
                                         void(boost::system::error_code)>
      init(BOOST_ASIO_MOVE_CAST(AcceptHandler)(handler));
  {
    // Fail fast if the acceptor is already closed: post (never invoke
    // inline) a not_connected error to the handler.
    boost::recursive_mutex::scoped_lock lock_state(impl->state_mutex);
    if (impl->closed) {
      auto handler_to_post = [init]() mutable {
        init.handler(
            boost::system::error_code(ssf::error::not_connected,
                                      ssf::error::get_ssf_category()));
      };
      this->get_io_service().post(handler_to_post);
      return init.result.get();
    }
  }
  boost::system::error_code ec;  // NOTE(review): never set below -- appears unused
  // Bind the peer socket to this acceptor's demultiplexer and local port so
  // the demux can route an incoming fiber to it.
  auto fiber_impl = peer.native_handle();
  fiber_impl->p_fib_demux = impl->p_fib_demux;
  fiber_impl->id.set_local_port(impl->id.local_port());
  BOOST_LOG_TRIVIAL(debug) << "fiber acceptor: local port set "
                           << impl->id.local_port();
  // Allocate the pending accept operation through the handler's allocator
  // (asio handler-allocation protocol); op::ptr frees the raw memory if an
  // exception fires before ownership is handed to the queue.
  typedef detail::pending_accept_operation<
      AcceptHandler, typename Protocol::socket_type> op;
  typename op::ptr p = {
      boost::asio::detail::addressof(init.handler),
      boost_asio_handler_alloc_helpers::allocate(sizeof(op), init.handler),
      0};
  p.p = new (p.v) op(peer.native_handle(), &(peer.native_handle()->id),
                     init.handler);
  {
    boost::recursive_mutex::scoped_lock lock(impl->accept_op_queue_mutex);
    impl->accept_op_queue.push(p.p);
  }
  // Queue now owns the operation; disarm the auto-release pointer.
  p.v = p.p = 0;
  // Kick the queue handler so a waiting connect can pair with this accept
  // (may complete the operation immediately).
  impl->a_queues_handler();
  return init.result.get();
}
void GC_locker::unlock_concurrent_gc(){ MutexLocker mu(JNICritical_lock); while (1) { jlong old_state = lock_state(); jlong new_state = clear_doing_gc(old_state); if ((old_state = Atomic::cmpxchg(new_state, state_addr(), old_state))) { // clear successful break; } // clear failed, loop around and try again. } JNICritical_lock.notify_all(); }
/**
 * Handle one newly received async byte: push it into the rx fifo (or latch
 * an overrun error if the fifo is full), then recompute the irq line.
 *
 * @param state uart state (caller must NOT already hold the state lock)
 * @param byte  data byte received
 */
void receive_byte(uart_state_t *state, uint8_t byte) {
    int next_head;

    lock_state(state);

    next_head = (state->head_buffer_pos + 1) % UART_MAX_BUFFER;
    if(next_head == state->tail_buffer_pos) {
        /* fifo full: the incoming byte is lost -- latch overrun error */
        state->LSR |= LSR_OE;
        WARN("just dropped byte");
    } else {
        /* enqueue the byte, advance head, and flag data-ready */
        state->buffer[state->head_buffer_pos] = byte;
        state->head_buffer_pos = next_head;
        state->LSR |= LSR_DR;
    }

    recalculate_irq(state);
    unlock_state(state);
}
int ymsglooper_stop(struct ymsglooper *ml) { int r __unused; lock_state(ml); switch (get_state_locked(ml)) { case YMSGLOOPER_LOOP: r = ymsgq_en(ml->mq, create_dummy_msg()); if (unlikely(r)) { unlock_state(ml); return r; } /* missing break in intention */ /* @suppress("No break at end of case") */ case YMSGLOOPER_READY: set_state_locked(ml, YMSGLOOPER_STOPPING); /* @suppress("No break at end of case") */ default: /* do nothing */ ; } unlock_state(ml); return 0; }
/*
 * Run the calling thread's message loop until the looper is stopped.
 *
 * Dequeues messages and dispatches each to its handler. Returns -EPERM if
 * the calling thread has no looper attached; otherwise 0 after the looper
 * is marked TERMINATED.
 */
int ymsglooper_loop(void) {
	int r __unused;
	struct ymsglooper *ml = ymsglooper_get();
	if (unlikely(!ml))
		return -EPERM;  /* no looper bound to this thread */
	dfpr("start LOOP!");
	/* Only a READY looper may enter the loop; atomically transition
	 * READY -> LOOP under the state lock. */
	lock_state(ml);
	if (YMSGLOOPER_READY != get_state_locked(ml)) {
		unlock_state(ml);
		goto skip_loop;
	}
	set_state_locked(ml, YMSGLOOPER_LOOP);
	unlock_state(ml);
	while (TRUE) {
		/* blocking dequeue; a dummy message is used to wake us on stop */
		struct ymsg *m = ymsgq_de(ml->mq);
		struct ymsg_ *m_ = msg_mutate(m);
		/* Operation is NOT locked intentionally.
		 * See comment for 'struct ymsglooper'. */
		if (YMSGLOOPER_STOPPING == get_state(ml)) {
			if (likely(m))
				ymsg_destroy(m);
			dfpr("break!!!! from loop!");
			break;
		}
		if (unlikely(!m))
			continue;  /* spurious/empty dequeue */
		/* Handler code here! Dispatch, then release the message. */
		yassert(m_->handler && m_->handler->handle);
		(*m_->handler->handle)(m);
		ymsg_destroy(m);
	}
 skip_loop:
	/* Detach the looper from this thread's TLS slot before terminating. */
	r = pthread_setspecific(_tkey, NULL);
	yassert(!r);
	set_state(ml, YMSGLOOPER_TERMINATED);
	return 0;
}
// Slow path of entering a JNI critical region: taken when the fast path
// cannot simply bump the lock count (a GC is needed or in progress).
// Blocks until it is safe to enter, then atomically increments the count.
void GC_locker::jni_lock_slow() {
  ThreadBlockInVM tbinvm(JavaThread::current(),"JNICritical_lock");
  MutexLocker mu(JNICritical_lock);
  // Block entering threads if we know at least one thread is in a
  // JNI critical region and we need a GC.
  // We check that at least one thread is in a critical region before
  // blocking because blocked threads are woken up by a thread exiting
  // a JNI critical region.
  while (1) {
    jlong old_state = lock_state();
    if ( (is_jni_active(old_state) && needs_gc(old_state)) || doing_gc(old_state) ) {
      // Unsafe to enter now; wait for notify_all() from an exiting
      // thread or a finishing GC, then re-evaluate from a fresh state.
      JNICritical_lock.wait();
    } else {
      jlong new_state = increment_lock_count(old_state);
      // cmpxchg returns the previous value at state_addr(); equality with
      // old_state means our new_state was installed.
      if ( old_state == Atomic::cmpxchg(new_state, state_addr(), old_state) ) {
        // lock successful
        break;
      }
      // lock failed, loop around and try again.
    }
  }
}
// Shorthand static inline bool is_active_and_needs_gc() { return is_active() && needs_gc(lock_state());}
static bool is_jni_active() { return jni_lock_count(lock_state()) > 0; }
//----------------------------DRAW---------------------------
// Render one frame: clear the window, draw one up/down triangle per
// instrument in a grid layout (brightness decays with draw_state), and
// optionally overlay instrument name and price text, then swap buffers.
void draw(Display * dpy, Window win, int s_width, int s_height) {
    // Pull the latest shared state into the local copy before rendering.
    copy_state(state, state_buffer, 1);
    lock_state(state);
    glViewport(0, 0, s_width, s_height);
    glClearColor(0.0, 0.0, 0.0, 0.0);
    glClear(GL_COLOR_BUFFER_BIT);
    // Grid dimensions (d.x columns by d.y rows) for the instrument tiles.
    Dimension d = get_grid_for_num_instruments(state->num_instruments, s_width, s_height);
    int i;
    for (i = 0; i < state->num_instruments; ++i) {
        // Tile origin/size; NOTE(review): if d.x/d.y are ints this is
        // integer division before the float assignment -- confirm intended.
        float left = s_width / d.x * (i % d.x);
        float bottom = s_height / d.y * (d.y - 1 - i / d.x);
        float width = s_width / d.x;
        float height = s_height / d.y;
        glViewport(left, bottom, width, height);
        Instrument_State * is = &state->instruments[i];
        // Brightness follows a semicircle over draw_state in
        // [0, MAX_DRAW_STATE]: 0 at the ends, 1 at the midpoint.
        GLfloat brightness = pow(1 - pow((float)is->draw_state / MAX_DRAW_STATE * 2 - 1, 2), 0.5);
        if (is->draw_state) --is->draw_state;  // decay toward idle each frame
        // Interleaved vertex data: 2 position floats + 4 color floats per
        // vertex, 3 vertices (stride 6 floats, matching the attrib setup).
        GLfloat up[] = {
            -T_BOUND, -T_BOUND, 0.0, T_BOTTOM_BRIGHTNESS, 0.0, brightness,
            0.0, T_BOUND, 0.0, 1.0, 0.0, brightness,
            T_BOUND, -T_BOUND, 0.0, T_BOTTOM_BRIGHTNESS, 0.0, brightness };
        GLfloat down[] = {
            -T_BOUND, T_BOUND, T_BOTTOM_BRIGHTNESS, 0.0, 0.0, brightness,
            0.0, -T_BOUND, 1.0, 0.0, 0.0, brightness,
            T_BOUND, T_BOUND, T_BOTTOM_BRIGHTNESS, 0.0, 0.0, brightness };
        glBindBuffer(GL_ARRAY_BUFFER, gla.array_buffer);
        // 'u' direction draws the upward (green-tinted) triangle, anything
        // else the downward (red-tinted) one.
        if (is->direction == 'u') {
            glBufferData(GL_ARRAY_BUFFER, sizeof(up), up, GL_STATIC_DRAW);
        } else {
            glBufferData(GL_ARRAY_BUFFER, sizeof(down), down, GL_STATIC_DRAW);
        }
        glEnableVertexAttribArray(gla.position);
        glVertexAttribPointer(gla.position, 2, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), 0);
        glEnableVertexAttribArray(gla.color);
        glVertexAttribPointer(gla.color, 4, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void *)(2 * sizeof(GLfloat)));
        glDrawArrays(GL_TRIANGLES, 0, 3);
#ifdef SHOW_TEXT
        // Bitmap-font overlay: instrument name near the top, price near the
        // bottom, both horizontally centered via the negative-left offset.
        glListBase(gla.font_base);
        glColor4f(1.0, 1.0, 1.0, brightness / 2 + 0.5);
        float i_length = strlen(is->instrument);
        GLfloat i_left = -i_length * gla.font_width / width;
        GLfloat i_bottom = 0.9 - gla.font_height / height * 2;
        glRasterPos2f(i_left, i_bottom);
        glCallLists(i_length, GL_UNSIGNED_BYTE, (unsigned char *)is->instrument);
        char price[16] = {0};
        sprintf(price, "%f", is->price);
        float p_length = strlen(price);
        GLfloat p_left = -p_length * gla.font_width / width;
        GLfloat p_bottom = -0.9;
        glRasterPos2f(p_left, p_bottom);
        glCallLists(p_length, GL_UNSIGNED_BYTE, (unsigned char *)price);
#endif
    }
    unlock_state(state);
    glXSwapBuffers(dpy, win);
}
/**
 * Perform a memory operation on a registered memory address (16550-style
 * UART register map). Register meaning at offsets 0 and 1 depends on the
 * DLAB bit in LCR (divisor latch vs data/IER access).
 *
 * @param hw    hardware registration record (holds uart state and mapping)
 * @param addr  address of memory operation
 * @param memop a memop constant (@see MEMOP_READ, @see MEMOP_WRITE)
 * @param data  byte to write (on MEMOP_WRITE)
 * @return data item (on MEMOP_READ); 0 for writes and unhandled offsets
 */
uint8_t uart_memop(hw_reg_t *hw, uint16_t addr, uint8_t memop, uint8_t data) {
    uart_state_t *state = (uart_state_t*)(hw->state);
    /* register offset within this device's mapped window */
    uint16_t addr_offset = addr - hw->remap[0].mem_start;
    int read = (memop == MEMOP_READ);
    int dlab = (state->LCR & LCR_DLAB);  /* divisor-latch access bit */
    uint8_t retval;
    switch(addr_offset) {
    case 0: /* RBR, THR, DLL */
        if(dlab) {
            /* divisor latch low byte */
            if(read)
                return state->DLL;
            state->DLL = data;
        } else {
            if(read) {
                /* pull something out of the receive buffer */
                lock_state(state);
                if(state->head_buffer_pos == state->tail_buffer_pos) {
                    /* nothing in the buffer */
                    retval = 0;
                    DEBUG("read on empty fifo");
                } else {
                    retval = state->buffer[state->tail_buffer_pos];
                    state->tail_buffer_pos = (state->tail_buffer_pos + 1) % UART_MAX_BUFFER;
                    if(state->tail_buffer_pos == state->head_buffer_pos) {
                        /* fifo is empty: drop data-ready and re-derive irq */
                        state->LSR &= ~LSR_DR;
                        recalculate_irq(state);
                    }
                }
                unlock_state(state);
                return retval;
            }
            /* write to THR -- drop the byte out the pts */
            DEBUG("Writing byte %02X to pty", data);
            write(state->pty, &data, 1);  /* NOTE(review): return value ignored */
        }
        break;
    case REG_IER: /* IER, DLM */
        if(dlab) {
            /* divisor latch high byte */
            if(read)
                return state->DLM;
            state->DLM = data;
        } else {
            /* interrupt enable register */
            if(read)
                return state->IER;
            state->IER = data;
        }
        break;
    case REG_IIR: /* IIR, FCR */
        /* read -> interrupt id register; write -> fifo control register */
        if(read)
            return state->IIR;
        state->FCR = data;
        break;
    case REG_LCR:  /* line control register */
        if(read)
            return state->LCR;
        state->LCR = data;
        break;
    case REG_MCR:  /* modem control register */
        if(read)
            return state->MCR;
        state->MCR = data;
        break;
    case REG_LSR:  /* line status register */
        if(read) {
            /* error flags reset on read */
            lock_state(state);
            retval = state->LSR;
            state->LSR &= ~(LSR_OE | LSR_PE | LSR_FE | LSR_BI | LSR_ERF);
            unlock_state(state);
            return retval;
        }
        /* can't write LSR */
        break;
    case 6:  /* modem status register */
        if(read) {
            lock_state(state);
            retval = state->MSR;
            /* we clear the delta states on msr read */
            state->MSR &= ~(MSR_DCTS | MSR_DDSR | MSR_DDCD | MSR_TERI);
            unlock_state(state);
            return retval;
        }
        state->MSR = data;
        break;
    case 7:  /* scratch register */
        if(read)
            return state->SCR;
        state->SCR = data;
        break;
    }
    /* if(memop == MEMOP_READ) { */
    /*     return mem_state->mem[addr - hw->remap[0].mem_start]; */
    /* } */
    /* if(memop == MEMOP_WRITE) { */
    /*     mem_state->mem[addr - hw->remap[0].mem_start] = data; */
    /*     return 1; */
    /* } */
    return 0;
}
/* Store a new looper state, serialized by the state lock. */
static void set_state(struct ymsglooper *ml, enum ymsglooper_state state) {
	lock_state(ml);
	set_state_locked(ml, state);
	unlock_state(ml);
}
/**
 * Perform a memory operation on a registered memory address (6551-style
 * ACIA register map: data, status/reset, command, control).
 *
 * @param hw    hardware registration record (holds uart state and mapping)
 * @param addr  address of memory operation
 * @param memop a memop constant (@see MEMOP_READ, @see MEMOP_WRITE)
 * @param data  byte to write (on MEMOP_WRITE)
 * @return data item (on MEMOP_READ); 0 for writes and unhandled offsets
 */
uint8_t uart_memop(hw_reg_t *hw, uint16_t addr, uint8_t memop, uint8_t data) {
    uart_state_t *state = (uart_state_t*)(hw->state);
    /* register offset within this device's mapped window */
    uint16_t addr_offset = addr - hw->remap[0].mem_start;
    int read = (memop == MEMOP_READ);
    uint8_t retval;
    switch(addr_offset) {
    case 0: /* tx/rx register */
        if(read) {
            /* dequeue one byte from the rx fifo under the state lock */
            lock_state(state);
            if(state->head_buffer_pos == state->tail_buffer_pos) {
                /* nothing in the buffer */
                retval = 0;
            } else {
                retval = state->buffer[state->tail_buffer_pos];
                state->tail_buffer_pos = (state->tail_buffer_pos + 1) % UART_MAX_BUFFER;
                if(state->tail_buffer_pos == state->head_buffer_pos) {
                    /* fifo is empty: drop rx-data-ready and re-derive irq */
                    state->SR &= ~SR_RDRF;
                    recalculate_irq(state);
                }
            }
            unlock_state(state);
            return retval;
        } else {
            /* tx: push the byte straight out the pty */
            write(state->pty, &data, 1);  /* NOTE(review): return value ignored */
        }
        break;
    case 1: /* SR, PROGRAM RESET */
        if(read) {
            return state->SR;
        } else {
            /* program reset */
            /* reset registers */
            state->CMD &= 0x70;
            state->CMD |= 0x02;
            state->SR &= ~SR_OR;  /* clear overrun */
            /* reset the rx/tx fifos */
            state->head_buffer_pos = 0;
            state->tail_buffer_pos = 0;
        }
        break;
    case 2: /* CMD */
        if(read)
            return state->CMD;
        state->CMD = data;
        break;
    case 3: /* CTL */
        if(read)
            return state->CTL;
        state->CTL = data;
        break;
    }
    return 0;
}
/// Determine whether the fiber is open. bool is_open(const implementation_type& impl) const { boost::recursive_mutex::scoped_lock lock_state(impl->state_mutex); return !impl->closed; }
void GC_locker::jni_unlock_slow() { // There isn't a slow path jni_unlock with GPGC or PGC. assert0((!UseGenPauselessGC)); MutexLocker mu(JNICritical_lock); jlong old_state; jlong new_state; bool do_a_gc; bool do_a_notify; while (1) { do_a_gc = false; do_a_notify = false; old_state = lock_state(); new_state = decrement_lock_count(old_state); if ( needs_gc(new_state) && !is_jni_active(new_state) ) { do_a_notify = true; // GC will also check is_active, so this check is not // strictly needed. It's added here to make it clear that // the GC will NOT be performed if any other caller // of GC_locker::lock() still needs GC locked. if ( (!doing_gc(new_state)) && (!is_active()) ) { do_a_gc = true; new_state = set_doing_gc(new_state); } else { new_state = clear_needs_gc(new_state); } } if ((old_state = Atomic::cmpxchg(new_state, state_addr(), old_state))) { // unlocked successful break; } // unlock failed, loop around and try again. } if ( do_a_gc ) { { // Must give up the lock while at a safepoint MutexUnlocker munlock(JNICritical_lock); Universe::heap()->collect(GCCause::_gc_locker); } // Now that the lock is reaquired, unset _doing_gc and _needs_gc: while (1) { old_state = lock_state(); new_state = clear_needs_gc(clear_doing_gc(old_state)); if ((old_state = Atomic::cmpxchg(new_state, state_addr(), old_state))) { // clear successful break; } // clear failed, loop around and try again. } } if ( do_a_notify ) { JNICritical_lock.notify_all(); } }