template <typename T> int TypeMap<T>::mediator_mp_ass_subscript(PyObject *self, PyObject *key, PyObject *value) {
    // Mediator for the mp_ass_subscript map-protocol slot: dispatches to
    // TypeMap<T>::assign when a value is supplied (obj[key] = value) and to
    // TypeMap<T>::del when value is NULL (del obj[key]).
    // Two Tuples data descriptors are built lazily on first call and kept
    // alive by the auto-release wrapper; both are registered under this
    // mediator's address.
    static tuples_data_auto_release_t data_assign(TuplesData::factory(v_protof, reinterpret_cast<size_t>(mediator_mp_ass_subscript))) , data_del(TuplesData::factory(v_protof, reinterpret_cast<size_t>(mediator_mp_ass_subscript))) ;
    // Pack (key, value) into an argument tuple; NULL result with a non-NULL
    // key signals a packing failure (-1 per the CPython slot convention).
    PyObject *args_packed = pack_tuple(key, value);
    if (key && !args_packed) { return -1; }
    if (value) {
        // Assignment path: obj[key] = value.
        tuples_auto_release_t tuples(Tuples::factory(*data_assign));
        mp_call_change<&TypeMap<T>::assign> c(*tuples, self, __PRETTY_FUNCTION__);
        tuples->ubiquitous_caller(c, args_packed, NULL, !self);
    } else {
        // Deletion path: del obj[key].
        tuples_auto_release_t tuples(Tuples::factory(*data_del));
        mp_call_change<&TypeMap<T>::del> c(*tuples, self, __PRETTY_FUNCTION__);
        tuples->ubiquitous_caller(c, args_packed, NULL, !self);
    }
    Py_DecRef(args_packed); // NULL-safe release of the packed tuple
    return is_error_occured() ? -1 : 0;
}
int main(int argc, char **argv) {
    // Initialize the Triolet runtime before any computation.
    Triolet_init(&argc, &argv);

    // Evaluate the same (121, 77) pair with both selector values.
    const int first = tuples(121, 77, 0);
    const int second = tuples(121, 77, 1);

    printf("(%d)(%d)", first, second);
    return 0;
}
void FillUnmapPageCandidates(collections::List<BufferPageTimeTuple>& pages, vint expectCount)override
{
    // Collect unlocked mapped pages, order them by last access time
    // (oldest first), and append up to expectCount of them to `pages`
    // as candidates for unmapping.
    vint mappedCount = mappedPages.Count();
    if (mappedCount == 0) return;

    Array<BufferPageTimeTuple> tuples(mappedCount);
    vint usedCount = 0;
    for (vint i = 0; i < mappedCount; i++)
    {
        auto key = mappedPages.Keys()[i];
        auto value = mappedPages.Values()[i];
        if (!value->locked)
        {
            BufferPage page{key};
            tuples[usedCount++] = BufferPageTimeTuple(source, page, value->lastAccessTime);
        }
    }

    // BUGFIX: guard on usedCount, not tuples.Count().  tuples.Count() equals
    // mappedCount, which is always > 0 here, so the old check never caught
    // the all-pages-locked case; with usedCount == 0 we would still take
    // &tuples[0] and run the sort/copy over an empty prefix.
    if (usedCount > 0)
    {
        // Sort only the filled prefix, ascending by last access time (f2).
        SortLambda(&tuples[0], usedCount, [](const BufferPageTimeTuple& t1, const BufferPageTimeTuple& t2)
        {
            if (t1.f2 < t2.f2) return -1;
            else if (t1.f2 > t2.f2) return 1;
            else return 0;
        });

        vint copyCount = usedCount < expectCount ? usedCount : expectCount;
        for (vint i = 0; i < copyCount; i++)
        {
            pages.Add(tuples[i]);
        }
    }
}
template <typename T> template <void (*F)(Tuples &)> PyObject *TypeSpecial<T>::m_static_method2(PyObject *self, PyObject *args) {
    // Mediator for a Python static method backed by the free function F.
    // The Tuples descriptor is built once per template instantiation and
    // cached for the process lifetime, keyed by this mediator's address.
    static tuples_data_auto_release_t data(TuplesData::factory(v_static_method, reinterpret_cast<size_t>(m_static_method2<F>)));
    tuples_auto_release_t tuples(Tuples::factory(*data));
    // NOTE(review): the ternary here is inverted relative to m_class_method3
    // (there: self ? __PRETTY_FUNCTION__ : NULL), and the last argument to
    // ubiquitous_caller passes `self` where sibling mediators pass `!self`
    // — confirm both are intended for the static-method calling convention.
    call_static<F> c(*tuples, self ? NULL : __PRETTY_FUNCTION__);
    return tuples->ubiquitous_caller(c, args, NULL, self);
}
template <typename T> template <void (*F)(const PyTypeObject &, Tuples &)> PyObject *TypeSpecial<T>::m_class_method3(PyObject *self, PyObject *args, PyObject *kwds) {
    // Mediator for a Python class method (with keyword support) backed by F,
    // which receives the class's type object alongside the argument tuples.
    // The descriptor is created once per instantiation and cached.
    static tuples_data_auto_release_t data(TuplesData::factory(v_method_kw, reinterpret_cast<size_t>(m_class_method3<F>)));
    tuples_auto_release_t tuples(Tuples::factory(*data));
    // Pass the current function signature only when self is present; the
    // final flag (!self) marks the no-receiver case for the caller.
    call_class<F> c(*tuples, self ? __PRETTY_FUNCTION__ : NULL, self);
    return tuples->ubiquitous_caller(c, args, kwds, !self);
}
template <class T> PyObject *TypeBase<T>::mediator_constructor(PyTypeObject *type, PyObject *args, PyObject *kwds) {
    // Constructor mediator: routes Python-side object construction through
    // the Tuples machinery.  The descriptor is created once and cached,
    // keyed by this mediator's address.
    static tuples_data_auto_release_t data(TuplesData::factory(v_ctor, reinterpret_cast<size_t>(mediator_constructor)));

    tuples_auto_release_t tuples(Tuples::factory(*data));
    call_constructor ctor_call(*tuples, type, __PRETTY_FUNCTION__, !type);
    tuples->ubiquitous_caller(ctor_call, args, kwds, !type);

    // Return the constructed object, or NULL after clearing the error state.
    return clear_on_error(ctor_call.retval);
}
TEST(BitCompressedTests, empty_size_does_not_change_with_reserve) {
    // reserve() must grow capacity without ever changing the logical size
    // of an empty vector; capacity grows in 64-slot steps (64 -> 128).
    BitCompressedVector<value_id_t> tuples(1, 1, {1});
    ASSERT_EQ(0u, tuples.size());
    ASSERT_EQ(64u, tuples.capacity());
    // Reserving less than the current capacity leaves both values alone.
    tuples.reserve(3);
    ASSERT_EQ(0u, tuples.size());
    ASSERT_EQ(64u, tuples.capacity());
    // Reserving past 64 bumps capacity to the next step; size stays 0.
    tuples.reserve(65);
    ASSERT_EQ(0u, tuples.size());
    ASSERT_EQ(128u, tuples.capacity());
}
TEST(FixedLengthVectorTest, increment_test) {
    // One column, three rows; both the plain and the atomic increment must
    // return the previous cell value and store the incremented one.
    size_t cols = 1;
    size_t rows = 3;
    FixedLengthVector<value_id_t> vec(cols, rows);
    vec.resize(rows);

    EXPECT_EQ(0u, vec.get(0, 0));        // freshly resized cell reads zero
    EXPECT_EQ(0u, vec.inc(0, 0));        // inc returns the old value
    EXPECT_EQ(1u, vec.get(0, 0));
    EXPECT_EQ(1u, vec.atomic_inc(0, 0)); // atomic_inc returns the old value
    EXPECT_EQ(2u, vec.get(0, 0));
}
TEST(BitCompressedTests, set_retrieve_bits) {
    // For each column bit-width, store the extremes (0 and the largest
    // representable value) and verify they read back unchanged.
    std::vector<uint64_t> bits {1, 2, 4, 8, 13};
    BitCompressedVector<value_id_t> tuples(5, 2, bits);
    tuples.resize(2);
    auto col = 0;
    for (auto bit: bits) {
        auto maxval = (1 << bit) - 1; // Maximum value that can be stored is 2^bit-1
        tuples.set(col, 0, 0);
        tuples.set(col, 1, maxval);
        EXPECT_EQ(0u, tuples.get(col, 0));
        EXPECT_EQ(maxval, tuples.get(col, 1));
        col++;
    }
}
template <typename T> PyObject *TypeSequence<T>::mediator_sq_inplace_concat(PyObject *self, PyObject *args) {
    // Mediator for the sq_inplace_concat sequence slot (obj += other).
    static tuples_data_auto_release_t data(TuplesData::factory(v_protof, reinterpret_cast<size_t>(mediator_sq_inplace_concat)));
    // Wrap the single argument in a tuple; a NULL result for a non-NULL
    // input signals a packing failure (NULL per the CPython convention).
    PyObject *args_packed = pack_tuple(args);
    if (args && !args_packed) { return NULL; }
    tuples_auto_release_t tuples(Tuples::factory(*data));
    call_concat_inplace c(*tuples, self, __PRETTY_FUNCTION__);
    tuples->ubiquitous_caller(c, args_packed, NULL, !self);
    Py_DecRef(args_packed); // NULL-safe release of the packed tuple
    // In-place concat yields the receiver on success, NULL on error.
    return clear_on_error(c.self);
}
template <typename T> int TypeSequence<T>::mediator_sq_contains(PyObject *self, PyObject *args) {
    // Mediator for the sq_contains sequence slot ("x in obj").
    // Returns the contains call's 0/1 result, or -1 on error.
    static tuples_data_auto_release_t data(TuplesData::factory(v_protof, reinterpret_cast<size_t>(mediator_sq_contains)));

    // Wrap the candidate element in a one-item argument tuple.
    PyObject *packed = pack_tuple(args);
    if (args && !packed) {
        return -1;
    }

    tuples_auto_release_t tuples(Tuples::factory(*data));
    call_contains contains_call(*tuples, self, __PRETTY_FUNCTION__);
    tuples->ubiquitous_caller(contains_call, packed, NULL, !self);
    Py_DecRef(packed);

    if (is_error_occured()) {
        return -1;
    }
    return contains_call.retval;
}
template <typename T> PyObject *TypeMap<T>::mediator_mp_subscript(PyObject *self, PyObject *key) {
    // Mediator for the mp_subscript map-protocol slot (obj[key] lookup).
    static tuples_data_auto_release_t data(TuplesData::factory(v_protof_retval, reinterpret_cast<size_t>(mediator_mp_subscript)));
    // Wrap the key in a one-item tuple; a NULL result for a non-NULL key
    // means packing failed (NULL per the CPython convention).
    PyObject *key_packed = pack_tuple(key);
    if (key && !key_packed) { return NULL; }
    tuples_auto_release_t tuples(Tuples::factory(*data));
    mp_call_get c(*tuples, self, __PRETTY_FUNCTION__);
    // The caller produces the looked-up value directly.
    PyObject *retval = tuples->ubiquitous_caller(c, key_packed, NULL, !self);
    Py_DecRef(key_packed); // NULL-safe
    return retval;
}
void BasicRemoteOperation::consume(const IMC::RemoteActions* msg) {
    // Handle an incoming remote-actions message: refresh connection
    // liveness, honor the "Exit" action, then forward the actions.
    updateConnectionState();
    m_last_action = DUNE::Time::Clock::get();

    if (!isActive())
        return;

    // An "Exit" action ends the teleoperation session.
    Utils::TupleList actions(msg->actions);
    if (actions.get("Exit", 0)) {
        IMC::TeleoperationDone done;
        dispatch(done);
    }

    onRemoteActions(msg);
}
void BasicRemoteOperation::consume(const IMC::RemoteActions* msg) {
    // Handle an incoming remote-actions message: refresh connection
    // liveness, honor the "Exit" action, and forward actions only when they
    // come from the bound teleoperation source (0 means "accept any").
    updateConnectionState();
    m_last_action = DUNE::Time::Clock::get();
    if (isActive()) {
        Utils::TupleList tuples(msg->actions);
        // An "Exit" action ends the teleoperation session.
        if (tuples.get("Exit", 0)) {
            IMC::TeleoperationDone top;
            dispatch(top);
        }
        // Source filter: unbound (0) or matching source gets through.
        if (m_teleop_src == 0 || msg->getSource() == m_teleop_src)
            onRemoteActions(msg);
        else
            debug("ignoring remote control from %s", m_ctx.resolver.resolve(msg->getSource()));
    }
}
VF_A LsmBasisSystem::multiPathBasisSystem(Size dim, Size order, PolynomType polyType) {
    // Build the multi-factor basis system: for each total order 1..`order`,
    // every product of single-factor basis functions whose per-factor
    // orders sum to that total, plus the constant 0-th order term.
    QL_REQUIRE(dim>0, "zero dimension");
    // get single factor basis
    VF_R pathBasis = pathBasisSystem(order, polyType);
    VF_A ret;
    // 0-th order term
    VF_R term(dim, pathBasis[0]);
    ret.push_back(MultiDimFct(term));
    // start with all 0 tuple
    VV tuples(1, std::vector<Size>(dim));
    // add multi-factor terms
    for(Size i=1; i<=order; ++i) {
        tuples = next_order_tuples(tuples);
        // now we have all tuples of order i
        // for each tuple add the corresponding term
        for(Size j=0; j<tuples.size(); ++j) {
            // term[k] is the basis function of order tuples[j][k] in factor k.
            for(Size k=0; k<dim; ++k)
                term[k] = pathBasis[tuples[j][k]];
            ret.push_back(MultiDimFct(term));
        }
    }
    return ret;
}
template <typename T> int TypeSequence<T>::mediator_sq_ass_slice(PyObject *self, Py_ssize_t begin, Py_ssize_t end, PyObject *args) {
    // Mediator for the sq_ass_slice slot: assigns to obj[begin:end] when
    // args is non-NULL, deletes the slice when args is NULL.
    int retval;
    if (self && !args) {
        // Deletion path (del obj[begin:end]) — no argument tuple needed;
        // the exception handler maps success/failure to 0/-1.
        call_del c(self, begin, end, __PRETTY_FUNCTION__);
        retval = exceptionHandler::call(c, reinterpret_cast<size_t>(mediator_sq_ass_slice)) ? 0 : -1;
    } else {
        // Assignment path: route the replacement sequence through Tuples.
        static tuples_data_auto_release_t data(TuplesData::factory(v_protof, reinterpret_cast<size_t>(mediator_sq_ass_slice)));
        // Wrap the replacement value; NULL for non-NULL input means
        // packing failed (-1 per the CPython slot convention).
        PyObject *args_packed = pack_tuple(args);
        if (args && !args_packed) { return -1; }
        tuples_auto_release_t tuples(Tuples::factory(*data));
        call_assign c(*tuples, self, begin, end, __PRETTY_FUNCTION__);
        tuples->ubiquitous_caller(c, args_packed, NULL, !self);
        Py_DecRef(args_packed); // NULL-safe
        retval = is_error_occured() ? -1 : 0;
    }
    return retval;
}
void handle_request(
    std::string request_line, SocketCache& pushers,
    std::vector<Address>& routing_addresses,
    std::unordered_map<Key, std::unordered_set<Address>>& key_address_cache,
    unsigned& seed, std::shared_ptr<spdlog::logger> logger, UserThread& ut,
    zmq::socket_t& response_puller, zmq::socket_t& key_address_puller,
    Address& ip, unsigned& thread_id, unsigned& rid, unsigned& trial) {
  // Parse and execute one "GET <key>" or "PUT <key> <value>" request:
  // resolve the responsible worker (via the local cache or the routing
  // tier), send the request, print the result, and retry recursively when
  // the cache is stale (tuple error 2) or the worker times out.
  // `trial` counts attempts for this same request across recursions.
  std::vector<std::string> v;
  split(request_line, ' ', v);

  Key key;
  std::string value;

  if (!((v.size() == 2 && v[0] == "GET") || (v.size() == 3 && v[0] == "PUT"))) {
    std::cerr << "Usage: GET <key> | PUT <key> <value>" << std::endl;
    return;
  } else {
    if (v[0] == "GET") {
      key = v[1];
      value = "";  // empty value marks a GET below
    } else {
      key = v[1];
      value = v[2];
    }
  }

  // Back off after repeated failures for the same request.
  if (trial > 5) {
    logger->info("Trial #{} for request for key {}.", trial, key);
    logger->info("Waiting 5 seconds.");
    std::chrono::seconds dura(5);
    std::this_thread::sleep_for(dura);
    logger->info("Waited 5s.");
  }

  // get worker address
  Address worker_address;
  if (key_address_cache.find(key) == key_address_cache.end()) {
    // query the routing and update the cache
    Address target_routing_address =
        kHashRingUtil
            ->get_random_routing_thread(routing_addresses, seed,
                                        kRoutingThreadCount)
            .get_key_address_connect_addr();
    bool succeed;
    std::vector<Address> addresses = kHashRingUtil->get_address_from_routing(
        ut, key, pushers[target_routing_address], key_address_puller, succeed,
        ip, thread_id, rid);

    if (succeed) {
      for (const std::string& address : addresses) {
        key_address_cache[key].insert(address);
      }
      // Pick one of the returned worker addresses at random.
      worker_address = addresses[rand_r(&seed) % addresses.size()];
    } else {
      logger->error( "Request timed out when querying routing. 
This should never happen!");
      return;
    }
  } else {
    if (key_address_cache[key].size() == 0) {
      logger->error("Address cache for key " + key + " has size 0.");
      return;
    }
    // Pick a random cached worker address for this key.
    worker_address = *(next(begin(key_address_cache[key]),
                            rand_r(&seed) % key_address_cache[key].size()));
  }

  // Build the request: a single tuple carrying the key (plus value and
  // timestamp for PUT), tagged with a per-thread monotonic request id.
  KeyRequest req;
  req.set_response_address(ut.get_request_pulling_connect_addr());
  std::string req_id =
      ip + ":" + std::to_string(thread_id) + "_" + std::to_string(rid);
  req.set_request_id(req_id);
  rid += 1;
  KeyTuple* tp = req.add_tuples();
  tp->set_key(key);
  tp->set_address_cache_size(key_address_cache[key].size());

  if (value == "") {
    // get request
    req.set_type(get_request_type("GET"));
  } else {
    // put request
    req.set_type(get_request_type("PUT"));
    tp->set_value(value);
    tp->set_timestamp(0);
  }

  bool succeed;
  auto res = send_request<KeyRequest, KeyResponse>(req, pushers[worker_address],
                                                   response_puller, succeed);

  if (succeed) {
    KeyTuple tuple = res.tuples(0);

    // initialize the respond string
    if (tuple.error() == 2) {
      // Error 2: our cached addresses are stale — log (when persistent),
      // drop the cache entry, and retry the whole request.
      trial += 1;
      if (trial > 5) {
        for (const auto& address : res.tuples(0).addresses()) {
          logger->info("Server's return address for key {} is {}.", key,
                       address);
        }

        for (const std::string& address : key_address_cache[key]) {
          logger->info("My cached address for key {} is {}", key, address);
        }
      }

      // update cache and retry
      key_address_cache.erase(key);
      handle_request(request_line, pushers, routing_addresses,
                     key_address_cache, seed, logger, ut, response_puller,
                     key_address_puller, ip, thread_id, rid, trial);
    } else {
      // succeeded
      if (tuple.has_invalidate() && tuple.invalidate()) {
        // update cache
        key_address_cache.erase(key);
      }

      // Report the outcome (error 0 = ok, 1 = key not found).
      if (value == "" && tuple.error() == 0) {
        std::cout << "value of key " + tuple.key() + " is " + tuple.value() +
                         "\n";
      } else if (value == "" && tuple.error() == 1) {
        std::cout << "key " + tuple.key() + " does not exist\n";
      } else if (value != "") {
        std::cout << "successfully put key " + tuple.key() + "\n";
      }
    }
  } else {
    logger->info( "Request timed out when querying worker: 
clearing cache due to " "possible node membership changes.");

    // likely the node has departed. We clear the entries relavant to the
    // worker_address
    std::vector<std::string> tokens;
    split(worker_address, ':', tokens);
    std::string signature = tokens[1];
    std::unordered_set<Key> remove_set;

    // Any cached address sharing the departed worker's signature (the
    // second ':'-separated token) is considered dead.
    for (const auto& key_pair : key_address_cache) {
      for (const std::string& address : key_pair.second) {
        std::vector<std::string> v;
        split(address, ':', v);

        if (v[1] == signature) {
          remove_set.insert(key_pair.first);
        }
      }
    }

    for (const std::string& key : remove_set) {
      key_address_cache.erase(key);
    }

    trial += 1;
    handle_request(request_line, pushers, routing_addresses, key_address_cache,
                   seed, logger, ut, response_puller, key_address_puller, ip,
                   thread_id, rid, trial);
  }
}
void ShowCounts::execute()
{
    // State machine advanced each time this is re-entered on query
    // completion:
    //   0   -> issue the estimated-counts query
    //   1   -> print users/mailboxes; without -f print estimates and stop,
    //          otherwise issue exact message counts
    //   2   -> print message counts, issue bodyparts counts
    //   3   -> print bodyparts counts, issue addresses count
    //   4   -> print addresses count
    //   666 -> finished
    if ( d->state == 0 ) {
        parseOptions();
        end();
        database();
        d->state = 1;
        EString s( Configuration::text( Configuration::DbSchema ) );
        // Exact counts for the small tables (users, mailboxes); the
        // tuples() helper supplies estimates for the large ones — these
        // are labelled "(estimated)" when printed below.
        d->query = new Query( "select "
                              "(select count(*) from users)::int as users,"
                              "(select count(*) from mailboxes where deleted='f')::int"
                              " as mailboxes,"
                              "(" + tuples( "messages" ) + ")::int as messages,"
                              "(" + tuples( "bodyparts" ) + ")::int as bodyparts,"
                              "(" + tuples( "addresses" ) + ")::int as addresses,"
                              "(" + tuples( "deleted_messages" ) + ")::int as dm",
                              this );
        d->query->bind( 1, s );
        d->query->execute();
    }
    if ( d->state == 1 ) {
        if ( !d->query->done() )
            return; // wait for the next callback
        Row * r = d->query->nextRow();
        if ( d->query->failed() || !r )
            error( "Couldn't fetch estimates." );
        printf( "Users: %d\n", r->getInt( "users" ) );
        printf( "Mailboxes: %d\n", r->getInt( "mailboxes" ) );
        if ( opt( 'f' ) == 0 ) {
            // Fast mode (no -f): print the estimates and finish.
            printf( "Messages: %d", r->getInt( "messages" ) );
            if ( r->getInt( "dm" ) != 0 )
                printf( " (%d deleted)", r->getInt( "dm" ) );
            printf( " (estimated)\n" );
            printf( "Bodyparts: %d (estimated)\n", r->getInt( "bodyparts" ) );
            printf( "Addresses: %d (estimated)\n", r->getInt( "addresses" ) );
            d->state = 666;
            finish();
            return;
        }
        // -f given: run exact (full-scan) message counts instead.
        d->query = new Query( "select count(*)::int as messages, "
                              "coalesce(sum(rfc822size)::bigint,0) as totalsize, "
                              "(select count(*) from mailbox_messages)::int "
                              "as mm, "
                              "(select count(*) from deleted_messages)::int "
                              "as dm from messages", this );
        d->query->execute();
        d->state = 2;
    }
    if ( d->state == 2 ) {
        if ( !d->query->done() )
            return;
        Row * r = d->query->nextRow();
        if ( d->query->failed() || !r )
            error( "Couldn't fetch messages/deleted_messages counts." );
        int um = r->getInt( "messages" );
        int mm = r->getInt( "mm" );
        int dm = r->getInt( "dm" );
        printf( "Messages: %d unique", um );
        printf( " (%d in mailboxes", mm );
        if ( dm != 0 )
            printf( ", %d deleted", dm );
        printf( ", total size: %s",
                EString::humanNumber( r->getBigint( "totalsize" ) ).cstr() );
        printf( ")\n" );
        d->query = new Query( "select count(*)::int as bodyparts,"
                              "coalesce(sum(length(text))::bigint,0) as textsize,"
                              "coalesce(sum(length(data))::bigint,0) as datasize "
                              "from bodyparts", this );
        d->query->execute();
        d->state = 3;
    }
    if ( d->state == 3 ) {
        if ( !d->query->done() )
            return;
        Row * r = d->query->nextRow();
        if ( d->query->failed() || !r )
            error( "Couldn't fetch bodyparts counts." );
        printf( "Bodyparts: %d (text size: %s, data size: %s)\n",
                r->getInt( "bodyparts" ),
                EString::humanNumber( r->getBigint( "textsize" ) ).cstr(),
                EString::humanNumber( r->getBigint( "datasize" ) ).cstr() );
        d->query = new Query( "select count(*)::int as addresses "
                              "from addresses", this );
        d->query->execute();
        d->state = 4;
    }
    if ( d->state == 4 ) {
        if ( !d->query->done() )
            return;
        Row * r = d->query->nextRow();
        if ( d->query->failed() || !r )
            error( "Couldn't fetch addresses counts." );
        printf( "Addresses: %d\n", r->getInt( "addresses" ) );
        d->state = 666;
    }
    finish();
}