void hpc_rpc_session::do_read(int sz)
{
    add_ref();

    _read_event.callback = [this](int err, uint32_t length, uintptr_t lolp)
    {
        //dinfo("WSARecv completed, err = %d, size = %u", err, length);
        dassert((LPOVERLAPPED)lolp == &_read_event.olp, "must be exact this overlapped");

        if (err != ERROR_SUCCESS)
        {
            dwarn("WSARecv failed, err = %d", err);
            on_failure();
        }
        else
        {
            int read_next;
            message_ex* msg = _parser->get_message_on_receive((int)length, read_next);
            while (msg != nullptr)
            {
                this->on_read_completed(msg);
                msg = _parser->get_message_on_receive(0, read_next);
            }
            do_read(read_next);
        }

        release_ref();
    };

    memset(&_read_event.olp, 0, sizeof(_read_event.olp));

    WSABUF buf[1];
    void* ptr = _parser->read_buffer_ptr((int)sz);
    int remaining = _parser->read_buffer_capacity();
    buf[0].buf = (char*)ptr;
    buf[0].len = remaining;

    DWORD bytes = 0;
    DWORD flag = 0;
    int rt = WSARecv(
        _socket,
        buf,
        1,
        &bytes,
        &flag,
        &_read_event.olp,
        NULL
        );

    if (SOCKET_ERROR == rt && (WSAGetLastError() != ERROR_IO_PENDING))
    {
        dwarn("WSARecv failed, err = %d", ::WSAGetLastError());
        release_ref();
        on_failure();
    }

    //dinfo("WSARecv called, err = %d", rt);
}
void asio_rpc_session::connect()
{
    if (try_connecting())
    {
        boost::asio::ip::tcp::endpoint ep(
            boost::asio::ip::address_v4(_remote_addr.ip()), _remote_addr.port());

        add_ref();
        _socket->async_connect(ep, [this](boost::system::error_code ec)
        {
            if (!ec)
            {
                dinfo("client session %s connected",
                    _remote_addr.to_string()
                    );

                set_options();
                set_connected();
                on_send_completed();
                do_read();
            }
            else
            {
                derror("client session connect to %s failed, error = %s",
                    _remote_addr.to_string(),
                    ec.message().c_str()
                    );
                on_failure();
            }
            release_ref();
        });
    }
}
void asio_rpc_session::do_read(size_t sz)
{
    add_ref();

    void* ptr = _parser->read_buffer_ptr((int)sz);
    int remaining = _parser->read_buffer_capacity();

    _socket->async_read_some(boost::asio::buffer(ptr, remaining),
        [this](boost::system::error_code ec, std::size_t length)
        {
            if (!!ec)
            {
                derror("asio read failed: %s", ec.message().c_str());
                on_failure();
            }
            else
            {
                int read_next;
                message_ex* msg = _parser->get_message_on_receive((int)length, read_next);
                while (msg != nullptr)
                {
                    this->on_message_read(msg);
                    msg = _parser->get_message_on_receive(0, read_next);
                }
                do_read(read_next);
            }

            release_ref();
        });
}
void task_poll_for_work()
{
    Task* t;
    while ((t = task_get())) {
        t->task(t->user);
        release_ref(t, kmem_free);
    }
}
void asio_rpc_session::write(uint64_t signature)
{
    std::vector<boost::asio::const_buffer> buffers2;
    int bcount = (int)_sending_buffers.size();

    // prepare buffers
    buffers2.resize(bcount);
    for (int i = 0; i < bcount; i++)
    {
        buffers2[i] = boost::asio::const_buffer(_sending_buffers[i].buf, _sending_buffers[i].sz);
    }

    add_ref();
    boost::asio::async_write(*_socket, buffers2,
        [this, signature](boost::system::error_code ec, std::size_t length)
        {
            if (!!ec)
            {
                derror("asio write failed: %s", ec.message().c_str());
                on_failure();
            }
            else
            {
                on_send_completed(signature);
            }

            release_ref();
        });
}
/// enumerate over all top-level windows.
// @param callback a function to receive each window object
// @function enum_windows
def enum_windows(Value callback) {
    Ref ref;
    sL = L;
    ref = make_ref(L, callback);
    EnumWindows(&enum_callback, ref);
    release_ref(L, ref);
    return 0;
}
/// enumerate all child windows.
// @param callback a function to receive each window object
// @function enum_children
def enum_children(Value callback) {
    Ref ref;
    sL = L;
    ref = make_ref(L, callback);
    EnumChildWindows(this->hwnd, &enum_callback, ref);
    release_ref(L, ref);
    return 0;
}
void * handlemap_release(struct handlemap *m, handleid id)
{
    if (release_ref(m, id)) {
        return try_delete(m, id);
    } else {
        return NULL;
    }
}
void arc_remove(arc_t *cache, const void *key, size_t len)
{
    arc_object_t *obj = ht_get_deep_copy(cache->hash, (void *)key, len, NULL, retain_obj_cb, cache);
    if (obj) {
        arc_move(cache, obj, NULL);
        release_ref(cache->refcnt, obj->node);
    }
}
void arc_drop_resource(arc_t *cache, arc_resource_t res)
{
    arc_object_t *obj = (arc_object_t *)res;
    if (obj) {
        arc_move(cache, obj, NULL);
        release_ref(cache->refcnt, obj->node);
    }
}
DSN_API void dsn_perf_counter_remove(dsn_handle_t handle)
{
    auto sptr = reinterpret_cast<dsn::perf_counter*>(handle);
    if (dsn::perf_counters::instance().remove_counter(sptr->full_name()))
        sptr->release_ref();
    else
    {
        dwarn("cannot remove counter %s as it is not found in our repo", sptr->full_name());
    }
}
CL_Surface_Generic *CL_Surface_Generic::copy_on_write()
{
    if (ref_count == 1)
        return this;

    CL_Surface_Generic *copy = new CL_Surface_Generic(0);
    *copy = *this;
    copy->add_ref();
    release_ref();
    return copy;
}
int arc_load(arc_t *cache, const void *key, size_t klen, void *valuep, size_t vlen)
{
    arc_object_t *obj = ht_get_deep_copy(cache->hash, (void *)key, klen, NULL, update_obj_cb, cache);

    if (obj) {
        //release_ref(cache->refcnt, obj->node);
        return 1;
    }

    obj = arc_object_create(cache, key, klen);
    if (!obj)
        return -1;

    // let our cache user initialize the underlying object
    cache->ops->init(key, klen, 0, (arc_resource_t)obj, obj->ptr, cache->ops->priv);

    cache->ops->store(obj->ptr, valuep, vlen, cache->ops->priv);

    retain_ref(cache->refcnt, obj->node);
    // NOTE: atomicity here is ensured by the hashtable implementation
    int rc = ht_set_if_not_exists(cache->hash, (void *)key, klen, obj, sizeof(arc_object_t));
    switch(rc) {
        case -1:
            fprintf(stderr, "Can't set the new value in the internal hashtable\n");
            release_ref(cache->refcnt, obj->node);
            break;
        case 1:
            // the object has been created in the meanwhile
            release_ref(cache->refcnt, obj->node);
            // XXX - yes, we have to release it twice
            release_ref(cache->refcnt, obj->node);
            return arc_load(cache, key, klen, valuep, vlen);
        case 0:
            break;
        default:
            fprintf(stderr, "Unknown return code from ht_set_if_not_exists() : %d\n", rc);
            release_ref(cache->refcnt, obj->node);
            rc = -1;
    }
    release_ref(cache->refcnt, obj->node);
    return rc;
}
static inline void arc_list_destroy(arc_t *cache, arc_list_t *head)
{
    arc_list_t *pos = (head)->next;
    while (pos && pos != (head)) {
        arc_list_t *tmp = pos;
        arc_object_t *obj = arc_list_entry(pos, arc_object_t, head);
        pos = pos->next;
        tmp->prev = tmp->next = NULL;
        release_ref(cache->refcnt, obj->node);
    }
}
void lcb_free(void *data)
{
    LuaCallback *lcb = (LuaCallback*)data;
    if (! lcb)
        return;
    if (lcb->buf) {
        free(lcb->buf);
        lcb->buf = NULL;
    }
    if (lcb->handle) {
        CloseHandle(lcb->handle);
        lcb->handle = NULL;
    }
    release_ref(lcb->L, lcb->callback);
}
DSN_API void* dsn_rpc_unregiser_handler(dsn_task_code_t code, dsn_gpid gpid)
{
    auto h = ::dsn::task::get_current_node()->rpc_unregister_handler(code, gpid);
    void* param = nullptr;

    if (nullptr != h)
    {
        param = h->parameter;
        if (1 == h->release_ref())
            delete h;
    }

    return param;
}
ref_t slfe_profile(ref_t args, ref_t assoc)
{
    if (trace_fl) {
        ref_t result, block_name, block;
        uint64_t start, end;

        block_name = car(args);
        block = cdr(args);

        start = rdtsc();
        result = slfe_do(block, assoc);
        end = rdtsc();

        trace(TRACE_NONE, "Timing for block %r: %Ld", block_name, end - start);

        release_ref(&block);
        release_ref(&block_name);
        return result;
    }
    else
        return slfe_do(args, assoc);
}
ref_t slfe_dump_stack(ref_t args, ref_t assoc)
{
    ref_t arg, arge;

    if (trace_fl == 0)
        return nil();

    arg = car(args);
    if (arg.type != NIL) {
        arge = eval(arg, assoc);
        if (arge.type == stack_type)
            stack_debug_print((stack_t*)arge.data.object, trace_fl);
        else
            LOG_WARNING("dump-stack called with an invalid argument.");
        release_ref(&arge);
    }
    else
        stack_debug_print((stack_t*)assoc.data.object, trace_fl);

    release_ref(&arg);
    return nil();
}
void CDA_CellMLElementEventAdaptor::considerDestruction() throw(std::exception&)
{
    // See if we have any events...
    uint32_t i;
    for (i = 0; i < sizeof(kSupportedEvents)/sizeof(kSupportedEvents[0]); i++)
        if (mGotEvent[i])
            return;

    // The typemap is empty, so we need to remove ourselves from our parent and
    // self-destruct...
    mParent->mListenerToAdaptor.erase(mCellMLListener);
    release_ref();
}
static void arp_request(struct netdevice * device, uint32_t targetIp)
{
    sbuff * buf = ethernet_sbuff_alloc(sizeof(struct arp_packet));
    add_ref(buf);
    struct arp_packet * arp = (struct arp_packet*)buf->head;
    arp->htype = ntos(1);
    arp->ptype = ntos(0x0800);
    arp->hlen = 6;
    arp->plen = 4;
    arp->operation = ntos(1);
    memcpy(arp->senderMac, device->mac, 6);
    arp->senderIp = ntol(device->ip);
    bzero(arp->targetMac, 6);
    arp->targetIp = ntol(targetIp);
    ethernet_send(buf, 0x0806, BROADCAST_MAC, device);
    release_ref(buf, sbuff_free);
}
static void reply(struct netdevice* dev, mac target, uint32_t ip, int broadcast)
{
    sbuff * buf = raw_sbuff_alloc(sizeof(struct ethernet_frame) + sizeof(struct arp_packet));
    add_ref(buf);
    sbuff_push(buf, sizeof(struct ethernet_frame));
    struct arp_packet * arp = (struct arp_packet*)buf->head;
    arp->htype = ntos(1);
    arp->ptype = ntos(0x0800);
    arp->hlen = 6;
    arp->plen = 4;
    arp->operation = ntos(2);
    memcpy(arp->senderMac, dev->mac, 6);
    arp->senderIp = ntol(dev->ip);
    memcpy(arp->targetMac, target, 6);
    arp->targetIp = ntol(ip);
    ethernet_send(buf, 0x0806, broadcast ? BROADCAST_MAC : target, dev);
    release_ref(buf, sbuff_free);
}
BOOL call_lua_direct(lua_State *L, Ref ref, int idx, const char *text, int discard)
{
    BOOL res, ipush = 1;
    if (idx < 0)
        lua_pushvalue(L, idx);
    else if (idx > 0)
        lua_pushinteger(L, idx);
    else
        ipush = 0;
    push_ref(L, ref);
    if (idx != 0)
        lua_pushvalue(L, -2);
    if (text != NULL) {
        lua_pushstring(L, text);
        ++ipush;
    }
    lua_call(L, ipush, 1);
    res = lua_toboolean(L, -1);
    if (discard) {
        release_ref(L, ref);
    }
    return res;
}
int main(int argc, char *argv[])
{
    int result = 0;
    ref_t val, answer, assoc, name;
    FILE *input_fl = stdin, *output_fl = stdout;
    clock_t start_time, end_time;

#define FREE_AND_RETURN(x) {result = x; goto free_and_return;}

    if (parse_command_line(argc, argv, cmd_opt_decls) == -1) {
        printf("%s\n\n", get_error());
        print_usage();
        FREE_AND_RETURN(1);
    }

    if (help_flag) {
        print_usage();
        FREE_AND_RETURN(0);
    }

    if (input_fname) {
        input_fl = fopen(input_fname, "r");
        if (input_fl == 0) {
            printf("Could not open input file %s\n", input_fname);
            FREE_AND_RETURN(1);
        }
    }

    if (output_fname) {
        output_fl = fopen(output_fname, "w");
        if (output_fl == 0) {
            printf("Could not open output file %s\n", output_fname);
            FREE_AND_RETURN(1);
        }
    }

    if (trace_file_fname) {
        trace_fl = fopen(trace_file_fname, "w");
        if (trace_fl == 0) {
            printf("Could not open trace file %s\n", trace_file_fname);
            FREE_AND_RETURN(1);
        }
    }

    assoc = make_stack(nil());
    register_gc_root(assoc);
    stack_enter(assoc);

    name = make_symbol("t", 0);
    stack_let(assoc, name, name);
    release_ref(&name);

    register_core_lib(assoc);
    REG_FN(exit, assoc);
    REG_FN(trace, assoc);
    REG_FN(profile, assoc);
    REG_NAMED_FN("no-trace", slfe_no_trace, assoc);
    REG_NAMED_FN("dump-stack", slfe_dump_stack, assoc);

    if (trace_fl)
        set_trace_file(trace_fl);

    start_time = clock();

    finished = 0;
    while (! finished) {
        if (input_fl == stdin)
            printf("> ");
        val = read(input_fl);
        answer = eval(val, assoc);
        release_ref(&val);
        if (!quiet_flag) {
            println(answer, output_fl);
            fflush(output_fl);
        }
        release_ref(&answer);
        collect_garbage();
    }

    stack_enter(nil());
    unregister_gc_root(assoc);
    release_ref(&assoc);
    collect_garbage();

    end_time = clock();

    if (trace_fl)
        fprintf(trace_fl, "Total time taken: %f seconds\n",
                (float)(end_time - start_time) / (float)CLOCKS_PER_SEC);

#undef FREE_AND_RETURN

free_and_return:
    if (trace_fl && stats_flag)
        fprintf(trace_fl, "Total symbol evals: %d; total stack switches: %d\n",
                symbol_eval_count, stack_switch_count);

    if (input_fl != stdin)
        fclose(input_fl);
    if (output_fl != stdout)
        fclose(output_fl);
    if (trace_fl)
        fclose(trace_fl);

    if (input_fname)
        X_FREE(input_fname);
    if (output_fname)
        X_FREE(output_fname);
    if (trace_file_fname)
        X_FREE(trace_file_fname);

    return result;
}
void hpc_rpc_session::do_write(message_ex* msg)
{
    add_ref();

    _write_event.callback = [this](int err, uint32_t length, uintptr_t lolp)
    {
        dassert((LPOVERLAPPED)lolp == &_write_event.olp, "must be exact this overlapped");

        if (err != ERROR_SUCCESS)
        {
            dwarn("WSASend failed, err = %d", err);
            on_failure();
        }
        else
        {
            int len = (int)length;
            int buf_i = _sending_buffer_start_index;
            while (len > 0)
            {
                auto& buf = _sending_buffers[buf_i];
                if (len >= (int)buf.sz)
                {
                    buf_i++;
                    len -= (int)buf.sz;
                }
                else
                {
                    buf.buf = (char*)buf.buf + len;
                    buf.sz -= (uint32_t)len;
                    break;
                }
            }
            _sending_buffer_start_index = buf_i;

            // message completed, continue next message
            if (_sending_buffer_start_index == (int)_sending_buffers.size())
            {
                dassert(len == 0, "buffer must be sent completely");
                auto lmsg = _sending_msg;
                _sending_msg = nullptr;
                on_send_completed(lmsg);
            }
            else
                do_write(_sending_msg);
        }

        release_ref();
    };

    memset(&_write_event.olp, 0, sizeof(_write_event.olp));

    // new msg
    if (_sending_msg != msg)
    {
        dassert(_sending_msg == nullptr, "only one sending msg is possible");
        _sending_msg = msg;
        _sending_buffer_start_index = 0;
    }
    // continue old msg
    else
    {
        // nothing to do
    }

    int buffer_count = (int)_sending_buffers.size() - _sending_buffer_start_index;
    static_assert(sizeof(dsn_message_parser::send_buf) == sizeof(WSABUF),
        "make sure they are compatible");

    DWORD bytes = 0;
    int rt = WSASend(
        _socket,
        (LPWSABUF)&_sending_buffers[_sending_buffer_start_index],
        (DWORD)buffer_count,
        &bytes,
        0,
        &_write_event.olp,
        NULL
        );

    if (SOCKET_ERROR == rt && (WSAGetLastError() != ERROR_IO_PENDING))
    {
        dwarn("WSASend failed, err = %d", ::WSAGetLastError());
        release_ref();
        on_failure();
    }

    //dinfo("WSASend called, err = %d", rt);
}
/* Release a resource previously obtained from the cache (drops the reference
 * held on its node). */
void arc_release_resource(arc_t *cache, arc_resource_t res)
{
    arc_object_t *obj = (arc_object_t *)res;
    release_ref(cache->refcnt, obj->node);
}
// the returned object is retained, the caller must call arc_release_resource(obj) to release it
arc_resource_t arc_lookup(arc_t *cache, const void *key, size_t len, void **valuep, int async)
{
    // NOTE: this is an atomic operation ensured by the hashtable implementation,
    //       we don't do any real copy in our callback but we just increase the refcount
    //       of the object (if found)
    arc_object_t *obj = ht_get_deep_copy(cache->hash, (void *)key, len, NULL, retain_obj_cb, cache);

    if (obj) {
        if (!ATOMIC_READ(cache->mode) || UNLIKELY(ATOMIC_READ(obj->state) != &cache->mfu)) {
            if (UNLIKELY(arc_move(cache, obj, &cache->mfu) == -1)) {
                fprintf(stderr, "Can't move the object into the cache\n");
                return NULL;
            }
            arc_balance(cache);
        }

        if (valuep)
            *valuep = obj->ptr;

        return obj;
    }

    obj = arc_object_create(cache, key, len);
    if (!obj)
        return NULL;

    // let our cache user initialize the underlying object
    cache->ops->init(key, len, async, (arc_resource_t)obj, obj->ptr, cache->ops->priv);
    obj->async = async;

    retain_ref(cache->refcnt, obj->node);
    // NOTE: atomicity here is ensured by the hashtable implementation
    int rc = ht_set_if_not_exists(cache->hash, (void *)key, len, obj, sizeof(arc_object_t));
    switch(rc) {
        case -1:
            fprintf(stderr, "Can't set the new value in the internal hashtable\n");
            release_ref(cache->refcnt, obj->node);
            break;
        case 1:
            // the object has been created in the meanwhile
            release_ref(cache->refcnt, obj->node);
            // XXX - yes, we have to release it twice
            release_ref(cache->refcnt, obj->node);
            return arc_lookup(cache, key, len, valuep, async);
        case 0:
            /* New objects are always moved to the MRU list. */
            rc = arc_move(cache, obj, &cache->mru);
            if (rc >= 0) {
                arc_balance(cache);
                *valuep = obj->ptr;
                return obj;
            }
            break;
        default:
            fprintf(stderr, "Unknown return code from ht_set_if_not_exists() : %d\n", rc);
            release_ref(cache->refcnt, obj->node);
            break;
    }
    release_ref(cache->refcnt, obj->node);
    return NULL;
}
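/*
 * Minimal usage sketch (not part of the original sources) illustrating the
 * retain/release contract documented above: arc_lookup() returns a retained
 * resource, so the caller pairs it with arc_release_resource() once it is
 * done with the value. The helper name and the string key are hypothetical.
 */
static void example_lookup_and_release(arc_t *cache, const char *key)
{
    void *value = NULL;
    arc_resource_t res = arc_lookup(cache, key, strlen(key), &value, 0);
    if (res) {
        /* ... use 'value' while the reference is held ... */
        arc_release_resource(cache, res); /* drop the reference taken by arc_lookup() */
    }
}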
error_code distributed_lock_service_zookeeper::finalize()
{
    release_ref();
    return ERR_OK;
}
void rpc_session::clear_send_queue(bool resend_msgs)
{
    //
    // - in concurrent case, resending _sending_msgs and _messages
    //   may not maintain the original sending order
    // - can optimize by batch sending instead of sending one by one
    //
    // however, our threading model cannot ensure in-order processing
    // of incoming messages either, so this guarantee is not necessary
    // and the upper applications should not always rely on this (but can
    // rely on this with a high probability).
    //

    std::vector<message_ex*> swapped_sending_msgs;
    {
        // protect _sending_msgs and _sending_buffers in lock
        utils::auto_lock<utils::ex_lock_nr> l(_lock);
        _sending_msgs.swap(swapped_sending_msgs);
        _sending_buffers.clear();
    }

    // resend pending messages if needed
    for (auto& msg : swapped_sending_msgs)
    {
        if (resend_msgs)
        {
            _net.send_message(msg);
        }

        // if not resend, the message's callback will not be invoked until timeout,
        // it's too slow - let's try to mimic the failure by receiving an empty reply
        else if (msg->header->context.u.is_request && !msg->header->context.u.is_forwarded)
        {
            _net.on_recv_reply(msg->header->id, nullptr, 0);
        }

        // added in rpc_engine::reply (for server) or rpc_session::send_message (for client)
        msg->release_ref();
    }

    while (true)
    {
        dlink* msg;
        {
            utils::auto_lock<utils::ex_lock_nr> l(_lock);
            msg = _messages.next();
            if (msg == &_messages)
                break;

            msg->remove();
            _message_count.fetch_sub(1, std::memory_order_relaxed);
        }

        auto rmsg = CONTAINING_RECORD(msg, message_ex, dl);
        rmsg->io_session = nullptr;

        if (resend_msgs)
        {
            _net.send_message(rmsg);
        }

        // if not resend, the message's callback will not be invoked until timeout,
        // it's too slow - let's try to mimic the failure by receiving an empty reply
        else if (rmsg->header->context.u.is_request && !rmsg->header->context.u.is_forwarded)
        {
            _net.on_recv_reply(rmsg->header->id, nullptr, 0);
        }

        // added in rpc_engine::reply (for server) or rpc_session::send_message (for client)
        rmsg->release_ref();
    }
}
/* Move the object to the given state. If the state transition requires it,
 * fetch, evict or destroy the object. */
static inline int arc_move(arc_t *cache, arc_object_t *obj, arc_state_t *state)
{
    // In the first conditional we check if the object is locked,
    // which means someone is fetching its value and we don't want
    // to mess with it. Whoever is fetching will also take care of moving it
    // to one of the lists (or dropping it)
    // NOTE: while the object is being fetched it doesn't belong
    //       to any list, so there is no point in going ahead;
    //       also arc_balance() should never go through this object
    //       (since it's in none of the lists) so it won't be affected.
    //       The only call which would silently fail is arc_remove(),
    //       but whether an object being fetched needs to be removed
    //       will be determined by whoever is fetching it or by the
    //       next call to arc_balance() (which will happen anyway if
    //       the object is put into the cache by the fetcher)
    //
    // In the second conditional we handle a specific corner case which
    // happens when concurrent threads access an item which has just been fetched
    // but also dropped (so its state is NULL).
    // If a thread entering arc_lookup() manages to get the object out of the hashtable
    // before it is deleted, it will try to put the object on the mfu list without first
    // checking whether it was already in a list (new objects should first be moved to
    // the mru list, not the mfu one)
    if (UNLIKELY(obj->locked || (state == &cache->mfu && ATOMIC_READ(obj->state) == NULL)))
        return 0;

    MUTEX_LOCK(&cache->lock);

    arc_state_t *obj_state = ATOMIC_READ(obj->state);

    if (LIKELY(obj_state != NULL)) {

        if (LIKELY(obj_state == state)) {
            // short path for recurring keys
            // (those in the mfu list being hit again)
            if (LIKELY(state->head.next != &obj->head))
                arc_list_move_to_head(&obj->head, &state->head);
            MUTEX_UNLOCK(&cache->lock);
            return 0;
        }

        // if the state is not NULL
        // (and the object is not going to be removed)
        // move the ^ (p) marker
        if (LIKELY(state != NULL)) {
            if (obj_state == &cache->mrug) {
                size_t csize = cache->mrug.size
                             ? (cache->mfug.size / cache->mrug.size)
                             : cache->mfug.size / 2;
                cache->p = MIN(cache->c, cache->p + MAX(csize, 1));
            } else if (obj_state == &cache->mfug) {
                size_t csize = cache->mfug.size
                             ? (cache->mrug.size / cache->mfug.size)
                             : cache->mrug.size / 2;
                cache->p = MAX(0, cache->p - MAX(csize, 1));
            }
        }

        ATOMIC_DECREASE(obj_state->size, obj->size);
        arc_list_remove(&obj->head);
        ATOMIC_DECREMENT(obj_state->count);
        ATOMIC_SET(obj->state, NULL);
    }

    if (state == NULL) {
        if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
            release_ref(cache->refcnt, obj->node);
    } else if (state == &cache->mrug || state == &cache->mfug) {
        obj->async = 0;
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    } else if (obj_state == NULL) {

        obj->locked = 1;

        // unlock the cache while the backend is fetching the data
        // (the object has been locked while being fetched so nobody
        // will change its state)
        MUTEX_UNLOCK(&cache->lock);

        size_t size = 0;
        int rc = cache->ops->fetch(obj->ptr, &size, cache->ops->priv);
        switch (rc) {
            case 1:
            case -1:
            {
                if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                    release_ref(cache->refcnt, obj->node);
                return rc;
            }
            default:
            {
                if (size >= cache->c) {
                    // the (single) object doesn't fit in the cache, let's return it
                    // to the getter without (re)adding it to the cache
                    if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                        release_ref(cache->refcnt, obj->node);
                    return 1;
                }

                MUTEX_LOCK(&cache->lock);

                obj->size = ARC_OBJ_BASE_SIZE(obj) + cache->cos + size;
                arc_list_prepend(&obj->head, &state->head);
                ATOMIC_INCREMENT(state->count);
                ATOMIC_SET(obj->state, state);
                ATOMIC_INCREASE(state->size, obj->size);
                ATOMIC_INCREMENT(cache->needs_balance);
                break;
            }
        }

        // since this object is going to be put back into the cache,
        // we need to unmark it so that it won't be ignored next time
        // it's going to be moved to another list
        obj->locked = 0;

    } else {
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    }

    MUTEX_UNLOCK(&cache->lock);
    return 0;
}