// Overview: These functions convert a JSString from holding a string in rope form
// down to a simple String representation. The string is built up backwards because
// we want to avoid recursion and we expect the tree structure representing the rope
// to be imbalanced, with more nodes down the left side (since appending to the string
// is likely more common) - so resolving in this fashion should minimize work queue
// size. (If we built the queue forwards we would likely have to place all of the
// constituent StringImpls into the Vector before performing any concatenation, but by
// working backwards we likely only fill the queue with the number of substrings at
// any given level in a rope-of-ropes.)
void JSRopeString::resolveRopeSlowCase8(LChar* buffer) const
{
    LChar* position = buffer + length(); // We will be working backwards over the rope.
    Vector<JSString*, 32, UnsafeVectorOverflow> workQueue; // Putting strings into a Vector is only OK because there are no GC points in this method.

    for (size_t i = 0; i < s_maxInternalRopeLength && fiber(i); ++i)
        workQueue.append(fiber(i).get());

    while (!workQueue.isEmpty()) {
        JSString* currentFiber = workQueue.last();
        workQueue.removeLast();

        const LChar* characters;

        if (currentFiber->isRope()) {
            JSRopeString* currentFiberAsRope = static_cast<JSRopeString*>(currentFiber);
            if (!currentFiberAsRope->isSubstring()) {
                for (size_t i = 0; i < s_maxInternalRopeLength && currentFiberAsRope->fiber(i); ++i)
                    workQueue.append(currentFiberAsRope->fiber(i).get());
                continue;
            }
            ASSERT(!currentFiberAsRope->substringBase()->isRope());
            characters = currentFiberAsRope->substringBase()->m_value.characters8() + currentFiberAsRope->substringOffset();
        } else
            characters = currentFiber->m_value.characters8();

        unsigned length = currentFiber->length();
        position -= length;
        StringImpl::copyChars(position, characters, length);
    }

    ASSERT(buffer == position);
}
void JSRopeString::visitFibers(SlotVisitor& visitor)
{
    if (isSubstring()) {
        visitor.append(substringBase());
        return;
    }
    for (size_t i = 0; i < s_maxInternalRopeLength && fiber(i); ++i)
        visitor.append(fiber(i));
}
static int lbox_tuple_to_string(struct lua_State *L)
{
    struct tuple *tuple = lua_checktuple(L, 1);
    size_t used = region_used(&fiber()->gc);
    char *res = tuple_to_yaml(tuple);
    if (res == NULL) {
        region_truncate(&fiber()->gc, used);
        return luaT_error(L);
    }
    lua_pushstring(L, res);
    region_truncate(&fiber()->gc, used);
    return 1;
}
void JSRopeString::resolveRopeInternal8NoSubstring(LChar* buffer) const
{
    for (size_t i = 0; i < s_maxInternalRopeLength && fiber(i); ++i) {
        if (fiber(i)->isRope()) {
            resolveRopeSlowCase8(buffer);
            return;
        }
    }

    LChar* position = buffer;
    for (size_t i = 0; i < s_maxInternalRopeLength && fiber(i); ++i) {
        const StringImpl& fiberString = *fiber(i)->m_value.impl();
        unsigned length = fiberString.length();
        StringImpl::copyChars(position, fiberString.characters8(), length);
        position += length;
    }
    ASSERT((buffer + length()) == position);
}
void JSRopeString::resolveRopeSlowCase(UChar* buffer) const
{
    UChar* position = buffer + length(); // We will be working backwards over the rope.
    Vector<JSString*, 32, UnsafeVectorOverflow> workQueue; // These strings are kept alive by the parent rope, so using a Vector is OK.

    for (size_t i = 0; i < s_maxInternalRopeLength && fiber(i); ++i)
        workQueue.append(fiber(i).get());

    while (!workQueue.isEmpty()) {
        JSString* currentFiber = workQueue.last();
        workQueue.removeLast();

        if (currentFiber->isRope()) {
            JSRopeString* currentFiberAsRope = static_cast<JSRopeString*>(currentFiber);
            if (currentFiberAsRope->isSubstring()) {
                ASSERT(!currentFiberAsRope->substringBase()->isRope());
                StringImpl* string = static_cast<StringImpl*>(
                    currentFiberAsRope->substringBase()->m_value.impl());
                unsigned offset = currentFiberAsRope->substringOffset();
                unsigned length = currentFiberAsRope->length();
                position -= length;
                if (string->is8Bit())
                    StringImpl::copyChars(position, string->characters8() + offset, length);
                else
                    StringImpl::copyChars(position, string->characters16() + offset, length);
                continue;
            }
            for (size_t i = 0; i < s_maxInternalRopeLength && currentFiberAsRope->fiber(i); ++i)
                workQueue.append(currentFiberAsRope->fiber(i).get());
            continue;
        }

        StringImpl* string = static_cast<StringImpl*>(currentFiber->m_value.impl());
        unsigned length = string->length();
        position -= length;
        if (string->is8Bit())
            StringImpl::copyChars(position, string->characters8(), length);
        else
            StringImpl::copyChars(position, string->characters16(), length);
    }

    ASSERT(buffer == position);
}
int lbox_error(lua_State *L)
{
    struct error *e = diag_last_error(&fiber()->diag);
    assert(e != NULL);
    luaL_pusherror(L, e);
    lua_error(L);
    assert(0); /* unreachable */
    return 0;
}
//------------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
//------------------------------------------------------------------------------
void Server::open()
{
    assert( acceptFiber_ == NULL );
    // Create the accept fiber, bind it to every configured address, and start listening.
    AcceptFiber * acceptFiber = newObject<AcceptFiber>();
    ksys::AutoPtr<ksys::Fiber> fiber(acceptFiber);
    acceptFiber->open();
    for( intptr_t i = bindAddrs_.count() - 1; i >= 0; i-- )
        acceptFiber->bind(bindAddrs_[i]);
    acceptFiber->listen();
    attachFiber(fiber);
    //acceptFiber->thread()->maxFibersPerThread(1);
    acceptFiber_ = acceptFiber;
}
static inline int lbox_catch(lua_State *L)
{
    struct error *e = luaL_iserror(L, -1);
    if (e != NULL) {
        /* Re-throw original error */
        diag_add_error(&fiber()->diag, e);
    } else {
        /* Convert Lua error to a Tarantool exception. */
        diag_set(LuajitError, lua_tostring(L, -1));
    }
    return 1;
}
void start()
{
    watchdog_.reset(new fiber(fiber::attributes(fiber::attributes::stick_with_parent),
                              &server_engine::watchdog, this));
    boost::system::error_code ec;
    // Accept connections until accept() reports an error (e.g. the acceptor is closed).
    while (true) {
        connection_type sc(host_, read_timeout_, write_timeout_, arg_);
        ec = accept(sc);
        if (ec)
            break;
        sc.read_timeout_ = read_timeout_;
        sc.write_timeout_ = write_timeout_;
        // Hand the accepted connection off to a detached servant fiber.
        fiber(&server_engine::servant, this, std::move(sc)).detach();
    }
    watchdog_->join();
}
//---------------------------------------------------------------------------
void AcceptFiber::fiberExecute()
{
    Server * server = dynamic_cast<Server *>(ksys::Fiber::thread()->server());
    assert( server != NULL );
    try {
        // Accept connections forever, attaching a new ServerFiber to the server for each one.
        for(;;){
            ServerFiber * serverFiber = dynamic_cast<ServerFiber *>(server->newFiber());
            assert( serverFiber != NULL );
            ksys::AutoPtr<ksys::Fiber> fiber(serverFiber);
            accept(*serverFiber);
            server->attachFiber(fiber);
        }
    }
    catch( ksys::ExceptionSP & e ){
        // Cancellation errors raised when the accept operation is aborted are swallowed;
        // anything else is rethrown.
#if defined(__WIN32__) || defined(__WIN64__)
        if( e->code() != ERROR_OPERATION_ABORTED + ksys::errorOffset &&
            e->code() != WSAENOTSOCK + ksys::errorOffset )
            throw;
#else
        if( !e->searchCode(ECANCELED) )
            throw;
#endif
    }
}
uv_stream_t* stream() { return reinterpret_cast<uv_stream_t*>(fiber()->scheduler().tty()); }
/**
 * Tuple transforming function.
 *
 * Remove the fields designated by 'offset' and 'len' from a tuple,
 * and replace them with the elements of the supplied data fields,
 * if any.
 *
 * The function returns a newly allocated tuple.
 * It does not change any parent tuple data.
 */
static int lbox_tuple_transform(struct lua_State *L)
{
    struct tuple *tuple = lua_checktuple(L, 1);
    int argc = lua_gettop(L);
    if (argc < 3)
        luaL_error(L, "tuple.transform(): bad arguments");
    lua_Integer offset = lua_tointeger(L, 2); /* Can be negative and can be > INT_MAX */
    lua_Integer len = lua_tointeger(L, 3);

    lua_Integer field_count = box_tuple_field_count(tuple);
    /* Validate offset and len. */
    if (offset == 0) {
        luaL_error(L, "tuple.transform(): offset is out of bound");
    } else if (offset < 0) {
        if (-offset > field_count)
            luaL_error(L, "tuple.transform(): offset is out of bound");
        offset += field_count + 1;
    } else if (offset > field_count) {
        offset = field_count + 1;
    }
    if (len < 0)
        luaL_error(L, "tuple.transform(): len is negative");
    if (len > field_count + 1 - offset)
        len = field_count + 1 - offset;

    assert(offset + len <= field_count + 1);

    /*
     * Calculate the number of operations and length of UPDATE expression.
     */
    uint32_t op_cnt = 0;
    if (offset < field_count + 1 && len > 0)
        op_cnt++;
    if (argc > 3)
        op_cnt += argc - 3;

    if (op_cnt == 0) {
        /* tuple_update() does not accept an empty operation list. */
        luaT_pushtuple(L, tuple);
        return 1;
    }

    struct ibuf *buf = tarantool_lua_ibuf;
    ibuf_reset(buf);
    struct mpstream stream;
    mpstream_init(&stream, buf, ibuf_reserve_cb, ibuf_alloc_cb,
                  luamp_error, L);

    /*
     * Prepare UPDATE expression.
     */
    mpstream_encode_array(&stream, op_cnt);
    if (len > 0) {
        mpstream_encode_array(&stream, 3);
        mpstream_encode_str(&stream, "#");
        mpstream_encode_uint(&stream, offset);
        mpstream_encode_uint(&stream, len);
    }
    for (int i = argc; i > 3; i--) {
        mpstream_encode_array(&stream, 3);
        mpstream_encode_str(&stream, "!");
        mpstream_encode_uint(&stream, offset);
        luamp_encode(L, luaL_msgpack_default, &stream, i);
    }
    mpstream_flush(&stream);

    uint32_t new_size = 0, bsize;
    const char *old_data = tuple_data_range(tuple, &bsize);
    struct region *region = &fiber()->gc;
    size_t used = region_used(region);
    struct tuple *new_tuple = NULL;
    /*
     * Can't use box_tuple_update() since transform must reset
     * the tuple format to default. The new tuple most likely
     * won't coerce into the original space format, so we have
     * to use the default one with no restrictions on field
     * count or types.
     */
    const char *new_data =
        tuple_update_execute(region_aligned_alloc_cb, region,
                             buf->buf, buf->buf + ibuf_used(buf),
                             old_data, old_data + bsize,
                             &new_size, 1, NULL);
    if (new_data != NULL)
        new_tuple = tuple_new(box_tuple_format_default(),
                              new_data, new_data + new_size);
    region_truncate(region, used);

    if (new_tuple == NULL)
        luaT_error(L);

    luaT_pushtuple(L, new_tuple);
    ibuf_reset(buf);
    return 1;
}
int fiber_channel_get_msg_timeout(struct fiber_channel *ch,
                                  struct ipc_msg **msg, ev_tstamp timeout)
{
    /** Ensure delivery fairness in case of prolonged wait. */
    bool first_try = true;
    ev_tstamp start_time = ev_monotonic_now(loop());

    while (true) {
        struct fiber *f;
        /*
         * Buffered messages take priority over waiting
         * fibers, if any, since they arrived earlier.
         * Try to take a message from the buffer first.
         */
        if (ch->count > 0) {
            /**
             * There can't be any buffered stuff in
             * a closed channel - everything is
             * destroyed at close.
             */
            assert(ch->is_closed == false);
            *msg = fiber_channel_buffer_pop(ch);

            if (fiber_channel_has_writers(ch)) {
                /*
                 * Move a waiting writer, if any,
                 * from the wait list to the tail of
                 * the buffer, to preserve fairness
                 * in message delivery order.
                 */
                f = rlist_first_entry(&ch->waiters, struct fiber, state);
                fiber_channel_buffer_push(ch, f->wait_pad->msg);
                fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_DONE);
            }
            return 0;
        }
        if (fiber_channel_has_writers(ch)) {
            /**
             * There are no buffered messages, *but*
             * there is a writer. This is only
             * possible when the channel is
             * unbuffered.
             * Take the message directly from the
             * writer and be done with it.
             */
            assert(ch->size == 0);
            f = rlist_first_entry(&ch->waiters, struct fiber, state);
            *msg = f->wait_pad->msg;
            fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_DONE);
            return 0;
        }
        if (fiber_channel_check_wait(ch, start_time, timeout))
            return -1;
        f = fiber();
        /**
         * No buffered messages and no waiting writers.
         * Have to wait.
         */
        struct ipc_wait_pad pad;
        pad.status = FIBER_CHANNEL_WAIT_READER;
        f->wait_pad = &pad;
        if (first_try) {
            rlist_add_tail_entry(&ch->waiters, f, state);
            first_try = false;
        } else {
            rlist_add_entry(&ch->waiters, f, state);
        }
        fiber_yield_timeout(timeout);
        /*
         * In case of yield timeout, fiber->state
         * is in the ch->waiters list, remove.
         * rlist_del_entry() is a no-op if already done.
         */
        rlist_del_entry(f, state);
        f->wait_pad = NULL;
        if (pad.status == FIBER_CHANNEL_WAIT_CLOSED) {
            diag_set(ChannelIsClosed);
            return -1;
        }
        if (pad.status == FIBER_CHANNEL_WAIT_DONE) {
            *msg = pad.msg;
            return 0;
        }
        timeout -= ev_monotonic_now(loop()) - start_time;
    }
}
int fiber_channel_put_msg_timeout(struct fiber_channel *ch,
                                  struct ipc_msg *msg, ev_tstamp timeout)
{
    /** Ensure delivery fairness in case of prolonged wait. */
    bool first_try = true;
    ev_tstamp start_time = ev_monotonic_now(loop());

    while (true) {
        /*
         * Check if there is a ready reader first, and
         * only if there is no reader try to put a message
         * into the channel buffer.
         */
        if (fiber_channel_has_readers(ch)) {
            /**
             * There is a reader, push the message
             * immediately.
             */
            /*
             * There can be no reader if there is
             * a buffered message or the channel is
             * closed.
             */
            assert(ch->count == 0);
            assert(ch->is_closed == false);
            struct fiber *f = rlist_first_entry(&ch->waiters, struct fiber, state);
            /* Place the message on the pad. */
            f->wait_pad->msg = msg;
            fiber_channel_waiter_wakeup(f, FIBER_CHANNEL_WAIT_DONE);
            return 0;
        }
        if (ch->count < ch->size) {
            /*
             * No reader, but the channel is buffered.
             * Nice, drop the message in the buffer.
             */
            /*
             * Closed channels are, well, closed,
             * even if there is space in the buffer.
             */
            if (ch->is_closed) {
                diag_set(ChannelIsClosed);
                return -1;
            }
            fiber_channel_buffer_push(ch, msg);
            return 0;
        }
        /**
         * No reader and no space in the buffer.
         * Have to wait.
         */
        struct fiber *f = fiber();
        if (fiber_channel_check_wait(ch, start_time, timeout))
            return -1;
        /* Prepare a wait pad. */
        struct ipc_wait_pad pad;
        pad.status = FIBER_CHANNEL_WAIT_WRITER;
        pad.msg = msg;
        f->wait_pad = &pad;
        if (first_try) {
            rlist_add_tail_entry(&ch->waiters, f, state);
            first_try = false;
        } else {
            rlist_add_entry(&ch->waiters, f, state);
        }
        fiber_yield_timeout(timeout);
        /*
         * In case of yield timeout, fiber->state
         * is in the ch->waiters list, remove.
         * rlist_del_entry() is a no-op if already done.
         */
        rlist_del_entry(f, state);
        f->wait_pad = NULL;
        if (pad.status == FIBER_CHANNEL_WAIT_CLOSED) {
            /*
             * The channel is closed. Do not touch
             * the channel object. It might be gone
             * already.
             */
            diag_set(ChannelIsClosed);
            return -1;
        }
        if (pad.status == FIBER_CHANNEL_WAIT_DONE)
            return 0; /* OK, someone took the message. */
        timeout -= ev_monotonic_now(loop()) - start_time;
    }
}
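/*
 * Minimal usage sketch for the two timeout functions above. This is
 * illustrative only: it assumes the channel was created elsewhere, and that
 * the caller's message type embeds struct ipc_msg as its first member so the
 * pointer stored on the wait pad can be cast back. The demo_* names and the
 * one-second timeout are made up for this example and are not part of the
 * API shown above.
 */
struct demo_msg {
    struct ipc_msg base; /* assumed layout: channel payloads are struct ipc_msg * */
    int value;
};

static int demo_send(struct fiber_channel *ch, struct demo_msg *m)
{
    /* Wait up to one second for a reader or for free buffer space. */
    return fiber_channel_put_msg_timeout(ch, &m->base, 1.0);
}

static int demo_recv(struct fiber_channel *ch, struct demo_msg **out)
{
    struct ipc_msg *raw;
    /* Wait up to one second for a buffered message or a waiting writer. */
    if (fiber_channel_get_msg_timeout(ch, &raw, 1.0) != 0)
        return -1; /* timeout or closed channel; the diag holds the error */
    *out = (struct demo_msg *)raw;
    return 0;
}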