// tell playdar to start resolving this query void playdar_dispatch( rq_ptr rq ) { json_spirit::Object o; o.push_back( json_spirit::Pair("_msgtype", "query") ); o.push_back( json_spirit::Pair("query", rq->get_json()) ); playdar_write( o ); }
// Begin resolving a query. Registers the caller's callback (and any comet
// session callback), queues the request for the resolver pipeline, and arms
// a deadline timer that auto-cancels the query once it goes stale.
// Returns the query uid; if the query is already being resolved, returns
// immediately without dispatching it a second time.
query_uid Resolver::dispatch(rq_ptr rq, rq_callback_t cb)
{
    boost::mutex::scoped_lock lk(m_mutex);
    if(!add_new_query(rq))
    {
        // already running
        return rq->id();
    }
    if(cb) rq->register_callback(cb);

    // setup comet callback if the request has a valid comet session id
    const string& cometId(rq->comet_session_id());
    if (cometId.length())
    {
        boost::mutex::scoped_lock cometlock(m_comets_mutex);
        std::map< std::string, rq_callback_t >::const_iterator it = m_comets.find(cometId);
        if (it != m_comets.end())
        {
            rq->register_callback(it->second);
        }
    }

    // 999 acts as a sentinel "last weight" so the pipeline starts at the
    // highest-weighted resolver service.
    // NOTE(review): assumes no resolver service has weight >= 999 — confirm.
    m_pending.push_front( pair<rq_ptr, unsigned short>(rq, 999) );
    m_cond.notify_one();

    // set up timer to auto-cancel this query after a while:
    boost::asio::deadline_timer * t = new boost::asio::deadline_timer(*m_io_service);
    // give 5 mins additional time to allow setup/results, otherwise it would never be stale
    // at max_query_lifetime, because the first result updates the atime:
    t->expires_from_now(boost::posix_time::seconds(max_query_lifetime()+300));
    t->async_wait(boost::bind(&Resolver::cancel_query_timeout, this, rq->id()));
    // Raw owning pointer stored in the map; presumably deleted by
    // cancel_query_timeout — TODO confirm, otherwise one timer leaks per query.
    m_qidtimers[rq->id()] = t;
    return rq->id();
}
// is this a valid / well formed query? static bool valid( rq_ptr rq ) { return rq->param_exists( "artist" ) && rq->param_type( "artist" ) == json_spirit::str_type && rq->param( "artist").get_str().length() && rq->param_exists( "track" ) && rq->param_type( "track" ) == json_spirit::str_type && rq->param( "track" ).get_str().length(); }
// Enqueue a query for the external resolver script's worker thread.
// Dead scripts are skipped; cancelled queries are not queued, but the
// worker is woken either way.
void rs_script::start_resolving(rq_ptr rq)
{
    if(m_dead)
    {
        // The script process is gone; there is nothing to dispatch to.
        cerr << "Not dispatching to script: " << m_scriptpath << endl;
        return;
    }
    //cout << "gateway dispatch enqueue: " << rq->str() << endl;
    if(!rq->cancelled())
    {
        boost::mutex::scoped_lock guard(m_mutex);
        m_pending.push_front( rq );
    }
    // Notify unconditionally (outside the lock), matching original behavior.
    m_cond.notify_one();
}
/// go thru list of resolversservices and dispatch in order /// lastweight is the weight of the last resolver we dispatched to. void Resolver::run_pipeline( rq_ptr rq, unsigned short lastweight ) { unsigned short atweight = 0; unsigned int mintime = 0; bool started = false; BOOST_FOREACH( pa_ptr pap, m_resolvers ) { if(pap->weight() >= lastweight) continue; if(!started) { atweight = pap->weight(); mintime = pap->targettime(); started = true; //cout << "Pipeline at weight: " << atweight << endl; } if(pap->weight() != atweight) { // we've dispatched to everything of weight "atweight" // and the shortest targettime at that weight is "mintime" // so schedule a callaback after mintime to carry on down the // chain and dispatch to the next lowest weighted resolver services. //cout << "Will continue pipeline after " << mintime << "ms." << endl; boost::shared_ptr<boost::asio::deadline_timer> t(new boost::asio::deadline_timer( m_work->get_io_service() )); t->expires_from_now(boost::posix_time::milliseconds(mintime)); // pass the timer pointer to the handler so it doesnt autodestruct: t->async_wait(boost::bind(&Resolver::run_pipeline_cont, this, rq, atweight, t)); break; } if(pap->targettime() < mintime) mintime = pap->targettime(); if( pap->localonly() && !rq->origin_local() ) { // Not dispatching (remote query, to local-only plugin) } else { // dispatch to this resolver: //cout << "Pipeline dispatching to " << pap->rs()->name() // << " (lastweight: " << lastweight << ")" << endl; pap->rs()->start_resolving(rq); } } }
// Timer callback that resumes the resolver pipeline at the next weight tier.
// If the query is already solved we stop; otherwise it is re-queued for the
// dispatch thread with the weight of the tier just completed. The timer
// shared_ptr is carried only to keep the timer alive until this fires.
void Resolver::run_pipeline_cont( rq_ptr rq,
                                  unsigned short lastweight,
                                  boost::shared_ptr<boost::asio::deadline_timer> oldtimer)
{
    cout << "Pipeline continues.." << endl;
    if( !rq->solved() )
    {
        boost::mutex::scoped_lock guard(m_mutex);
        m_pending.push_front( pair<rq_ptr, unsigned short>(rq, lastweight) );
        m_cond.notify_one();
    }
    else
    {
        cout << "Bailing from pipeline: SOLVED @ lastweight: " << lastweight << endl;
    }
}
/// this is some what fugly atm, but gets the job done for now. /// it does the fuzzy library search using the ngram table from the db: void local::process( rq_ptr rq ) { //Ignore this if it's missing artist+track fields if( !rq->param_exists( "artist" ) || rq->param("artist").type() != str_type || !rq->param_exists( "track" ) || rq->param("track").type() != str_type ) { return; } vector< json_spirit::Object > final_results; // check if this is a special "random" query if( rq->param("artist").get_str() == "*" && rq->param("track").get_str() == "*" ) { int fid = m_library->get_random_fid(); if( fid == -1 ) return; json_spirit::Object js; ResolvedItemBuilder::createFromFid( *m_library, fid, js ); js.push_back( json_spirit::Pair( "sid", m_pap->gen_uuid()) ); js.push_back( json_spirit::Pair( "source", m_pap->hostname()) ); js.push_back( json_spirit::Pair( "score", 0.99) ); // dangerous, will propagate forever? muhahah final_results.push_back( js ); } else // end special random query check { // get candidates (rough potential matches): vector<scorepair> candidates = find_candidates(rq, 10); // now do the "real" scoring of candidate results: string reason; // for scoring debug. BOOST_FOREACH(scorepair &sp, candidates) { // multiple files in our collection may have matching metadata. // add them all to the results. vector<int> fids = m_library->get_fids_for_tid(sp.id); BOOST_FOREACH(int fid, fids) { json_spirit::Object js; js.reserve(12); ResolvedItemBuilder::createFromFid( *m_library, fid, js ); js.push_back( json_spirit::Pair( "sid", m_pap->gen_uuid()) ); js.push_back( json_spirit::Pair( "source", m_pap->hostname()) ); final_results.push_back( js ); } }