Ejemplo n.º 1
0
// Periodic update checker. Wires its own result signals into the reporting
// slots, then -- only if checking is enabled and enough days have elapsed
// since the last check -- fires an HTTP GET against the version-info URL.
UpdateChecker::UpdateChecker(QWidget * parent, UpdateCheckerData * data) : QObject(parent)
	, net_manager(0)
	, d(0)
{
	d = data;

	check_url = "http://updates.smplayer.info/version_info";
	user_agent = "SMPlayer";

	// Route our own signals into the corresponding report slots.
	connect(this, SIGNAL(newVersionFound(const QString &)),
            this, SLOT(reportNewVersionAvailable(const QString &)));

	connect(this, SIGNAL(noNewVersionFound(const QString &)),
            this, SLOT(reportNoNewVersionFound(const QString &)));

	connect(this, SIGNAL(errorOcurred(int, QString)), this, SLOT(reportError(int, QString)));

	net_manager = new QNetworkAccessManager(this);

	// How many days have passed since the previous check?
	QDate today = QDate::currentDate();
	//today = today.addDays(27); // (debug aid: pretend time has passed)
	int elapsed_days = QDateTime(d->last_checked).daysTo(QDateTime(today));

	qDebug("UpdateChecker::UpdateChecker: enabled: %d", d->enabled);
	qDebug("UpdateChecker::UpdateChecker: days_to_check: %d", d->days_to_check);
	qDebug("UpdateChecker::UpdateChecker: days since last check: %d", elapsed_days);

	// Bail out early unless checking is on and the interval has elapsed.
	if (!d->enabled) return;
	if (elapsed_days < d->days_to_check) return;

	QNetworkRequest request(check_url);
	request.setRawHeader("User-Agent", user_agent);
	QNetworkReply *reply = net_manager->get(request);
	connect(reply, SIGNAL(finished()), this, SLOT(gotReply()));
}
Ejemplo n.º 2
0
// Dispatch a decoded JSON message received from the server.
// Messages lacking a "seq" field are rejected via invalidMessage();
// an "error" action is surfaced through gotError(); anything else is
// treated as a normal reply and forwarded through gotReply().
void Kommunikator::handleIncoming(QVariant json) {
	const QVariantMap msg = json.toMap();

	// TODO: in the future we will also need to handle the server
	// initiating communications on its own.

	// Every message we accept must carry a sequence number.
	if (!msg.contains("seq")) {
		emit invalidMessage(json);
		return;
	}

	const int seq = msg["seq"].toInt();

	if (msg["action"].toString() == "error") {
		emit gotError(seq, msg["code"].toInt(), msg["name"].toString());
		return;
	}

	emit gotReply(seq, json);
}
Ejemplo n.º 3
0
// returns true and sets g_errno on error, otherwise, blocks and returns false
// . resolves the shard holding the requested document, picks a live
//   candidate host biased by docid region (for urldb page-cache locality),
//   serializes the request and multicasts it to that host
bool Msg20::getSummary ( Msg20Request *req ) {
	// reset ourselves in case recycled
	reset();

	// consider it "launched"
	m_launched = true;

	// save the request fields we still need after the reply arrives
	m_requestDocId = req->m_docId;
	m_state        = req->m_state;
	m_callback     = req->m_callback;
	m_callback2    = NULL;

	// does this ever happen?
	if ( g_hostdb.getNumHosts() <= 0 ) {
		log("build: hosts2.conf is not in working directory, or "
		    "contains no valid hosts.");
		g_errno = EBADENGINEER;
		return true;
	}

	// we need at least one of: a valid docid or a url
	if ( req->m_docId < 0 && ! req->ptr_ubuf ) {
		log("msg20: docid<0 and no url for msg20::getsummary");
		g_errno = EBADREQUEST;
		return true;
	}

	// get groupId from docId, if positive; otherwise derive a probable
	// docid from the url first
	uint32_t shardNum;
	if ( req->m_docId >= 0 ) 
		shardNum = g_hostdb.getShardNumFromDocId(req->m_docId);
	else {
		int64_t pdocId = Titledb::getProbableDocId(req->ptr_ubuf);
		shardNum = getShardNumFromDocId(pdocId);
	}

	// we might be getting inlinks for a spider request
	// so make sure timeout is infinite for that...
	const int32_t timeout = (req->m_niceness==0)
	                      ? multicast_msg20_summary_timeout
	                      : multicast_infinite_send_timeout;

	// get our group
	int32_t  allNumHosts = g_hostdb.getNumHostsPerShard();
	Host *allHosts    = g_hostdb.getShard ( shardNum );

	// put all alive hosts in this array. never exceed the fixed capacity
	// of cand[] -- an oversized shard must not overflow the stack buffer.
	const int32_t maxCand = 32;
	Host *cand[maxCand];
	int64_t  nc = 0;
	for ( int32_t i = 0 ; i < allNumHosts && nc < maxCand ; i++ ) {
		// get that host
		Host *hh = &allHosts[i];
		// skip if dead
		if ( g_hostdb.isDead(hh) ) continue;

		// Respect no-spider, no-query directives from hosts.conf 
		if ( !req->m_getLinkInfo && ! hh->m_queryEnabled ) continue;
		if ( req->m_getLinkInfo && ! hh->m_spiderEnabled ) continue;
		// add it if alive
		cand[nc++] = hh;
	}
	// if none alive, make them all candidates then
	bool allDead = (nc == 0);
	for ( int32_t i = 0 ; allDead && i < allNumHosts && nc < maxCand ; i++ ) {
		// NEVER add a noquery host to the candidate list, even
		// if the query host is dead
		if ( ! allHosts[i].m_queryEnabled ) continue;
		cand[nc++] = &allHosts[i];
	}

	if ( nc == 0 ) {
		log("msg20: error sending mcast: no queryable hosts "
		    "available to handle summary generation");
		g_errno = EBADENGINEER;
		m_gotReply = true;
		return true;
	}

	// route based on docid region, not parity, because we want to hit
	// the urldb page cache as much as possible
	int64_t sectionWidth =((128LL*1024*1024)/nc)+1;
	int64_t probDocId    = req->m_docId;
	// i think reference pages just pass in a url to get the summary
	if ( probDocId < 0 && req->size_ubuf ) 
		probDocId = Titledb::getProbableDocId ( req->ptr_ubuf );
	if ( probDocId < 0        ) {
		log("query: Got bad docid/url combo.");
		probDocId = 0;
	}
	// we mod by 1MB since tied scores resort to sorting by docid
	// so we don't want to overload the host responsible for the lowest
	// range of docids. CAUTION: do this for msg22 too!
	// in this way we should still ensure a pretty good biased urldb
	// cache... 
	// . TODO: fix the urldb cache preload logic
	int32_t hostNum = (probDocId % (128LL*1024*1024)) / sectionWidth;
	if ( hostNum < 0 ) hostNum = 0; // watch out for negative docids
	if ( hostNum >= nc ) { g_process.shutdownAbort(true); }
	int32_t firstHostId = cand [ hostNum ]->m_hostId ;

	m_requestSize = 0;
	m_request = req->serialize ( &m_requestSize );
	// . it sets g_errno on error and returns NULL
	// . we MUST call gotReply() here to set m_gotReply
	//   otherwise Msg40.cpp can end up looping forever
	//   calling Msg40::launchMsg20s()
	if ( ! m_request ) { gotReply(NULL); return true; }

	// . otherwise, multicast to a host in group "groupId"
	// . returns false and sets g_errno on error
	// . use a pre-allocated buffer to hold the reply
	// . TMPBUFSIZE is how much a UdpSlot can hold w/o allocating
	if (!m_mcast.send(m_request, m_requestSize, msg_type_20, false, shardNum, false, probDocId, this, NULL, gotReplyWrapper20, timeout, req->m_niceness, firstHostId, false)) {
		// sendto() sometimes returns "Network is down" so i guess
		// we just had an "error reply".
		log("msg20: error sending mcast %s",mstrerror(g_errno));
		m_gotReply = true;
		return true;
	}

	// we are officially "in progress"
	m_inProgress = true;

	// we blocked
	return false;
}
Ejemplo n.º 4
0
// returns true and sets g_errno on error, otherwise, blocks and returns false
// . resolves the shard holding the requested document, picks a live
//   candidate host biased by docid region (to exploit the urldb page
//   cache), serializes the request and multicasts it to that host
// . NOTE(review): older variant using long/long long types and a fully
//   spelled-out multicast argument list
bool Msg20::getSummary ( Msg20Request *req ) {

	// reset ourselves in case recycled
	reset();

	// consider it "launched"
	m_launched = true;

	// save it -- copied out of the request so the reply path can still
	// use these after the caller's request object is gone
	m_requestDocId = req->m_docId;
	m_state        = req->m_state;
	m_callback     = req->m_callback;
	m_callback2    = req->m_callback2;
	m_expected     = req->m_expected;
	m_eventId      = req->m_eventId;

	// clear this
	//m_eventIdBits.clear();
	// set this
	//if ( req->m_eventId ) m_eventIdBits.addEventId(req->m_eventId);

	// the request may target a different (external) network's hostdb;
	// fall back to the global one when none is given
	Hostdb *hostdb = req->m_hostdb;
	// ensure hostdb has a host in it
	if ( ! hostdb ) hostdb = &g_hostdb;
	// does this ever happen?
	if ( hostdb->getNumHosts() <= 0 ) {
		log("build: hosts2.conf is not in working directory, or "
		    "contains no valid hosts.");
		g_errno = EBADENGINEER;
		return true;
	}

	// do not re-route to twins if accessing an external network
	if ( hostdb != &g_hostdb ) req->m_expected = false;

	// get groupId from docId, if positive; otherwise derive a probable
	// docid from the url first
	unsigned long shardNum;
	if ( req->m_docId >= 0 ) 
		shardNum = hostdb->getShardNumFromDocId(req->m_docId);
	else {
		long long pdocId = g_titledb.getProbableDocId(req->ptr_ubuf);
		shardNum = getShardNumFromDocId(pdocId);
	}

	// we might be getting inlinks for a spider request
	// so make sure timeout is inifinite for that...
	long timeout = 9999999; // 10 million seconds, basically inf.
	// niceness 0 is the interactive query path: use a short timeout
	if ( req->m_niceness == 0 ) timeout = 20;

	// get our group
	long  allNumHosts = hostdb->getNumHostsPerShard();
	Host *allHosts    = hostdb->getShard ( shardNum );//getGroup(groupId );

	// put all alive hosts in this array
	// (NOTE(review): cand[] holds at most 32 entries -- presumably
	// shards never exceed that; confirm against hosts.conf limits)
	Host *cand[32];
	long long  nc = 0;
	for ( long i = 0 ; i < allNumHosts ; i++ ) {
		// get that host
		Host *hh = &allHosts[i];
		// skip if dead
		if ( g_hostdb.isDead(hh) ) continue;
		// add it if alive
		cand[nc++] = hh;
	}
	// if none alive, make them all candidates then
	bool allDead = (nc == 0);
	for ( long i = 0 ; allDead && i < allNumHosts ; i++ ) 
		cand[nc++] = &allHosts[i];

	// route based on docid region, not parity, because we want to hit
	// the urldb page cache as much as possible
	long long sectionWidth =((128LL*1024*1024)/nc)+1;//(DOCID_MASK/nc)+1LL;
	long long probDocId    = req->m_docId;
	// i think reference pages just pass in a url to get the summary
	if ( probDocId < 0 && req->size_ubuf ) 
		probDocId = g_titledb.getProbableDocId ( req->ptr_ubuf );
	if ( probDocId < 0        ) {
		log("query: Got bad docid/url combo.");
		probDocId = 0;
	}
	// we mod by 1MB since tied scores resort to sorting by docid
	// so we don't want to overload the host responsible for the lowest
	// range of docids. CAUTION: do this for msg22 too!
	// in this way we should still ensure a pretty good biased urldb
	// cache... 
	// . TODO: fix the urldb cache preload logic
	long hostNum = (probDocId % (128LL*1024*1024)) / sectionWidth;
	if ( hostNum < 0 ) hostNum = 0; // watch out for negative docids
	// impossible by construction -- deliberate null write to crash hard
	if ( hostNum >= nc ) { char *xx = NULL; *xx = 0; }
	long firstHostId = cand [ hostNum ]->m_hostId ;

	// . make buffer m_request to hold the request
	// . tries to use m_requestBuf[] if it is big enough to hold it
	// . allocs a new buf if MAX_MSG20_REQUEST_SIZE is too small
	// . serializes the request into m_request
	// . sets m_requestSize to the size of the serialized request
	m_requestSize = 0;
	m_request = req->serialize ( &m_requestSize, m_requestBuf ,
				     MAX_MSG20_REQUEST_SIZE );
	// . it sets g_errno on error and returns NULL
	// . we MUST call gotReply() here to set m_gotReply
	//   otherwise Msg40.cpp can end up looping forever
	//   calling Msg40::launchMsg20s()
	if ( ! m_request ) { gotReply(NULL); return true; }

        // . otherwise, multicast to a host in group "groupId"
	// . returns false and sets g_errno on error
	// . use a pre-allocated buffer to hold the reply
	// . TMPBUFSIZE is how much a UdpSlot can hold w/o allocating
	// . all arguments are positional -- keep the per-line comments in
	//   sync if any parameter is ever added or reordered
        if ( ! m_mcast.send ( m_request         ,
			      m_requestSize     , 
			      0x20              , // msgType 0x20
			      false             , // m_mcast own m_request?
			      shardNum          , // send to group (groupKey)
			      false             , // send to whole group?
			      probDocId         , // key is lower bits of docId
			      this              , // state data
			      NULL              , // state data
			      gotReplyWrapper20 ,
			      timeout           , // 60 second time out
			      req->m_niceness   ,
			      false             , // real time?
			      firstHostId       , // first hostid
			      NULL,//m_replyBuf        ,
			      0,//MSG20_MAX_REPLY_SIZE,//m_replyMaxSize
			      false             , // free reply buf?
			      false             , // do disk load balancing?
			      -1                , // max cache age
			      0                 , // cacheKey
			      0                 , // bogus rdbId
			      -1                , // minRecSizes(unknownRDsize)
			      true              , // sendToSelf
			      true              , // retry forever
			      hostdb            )) {
		// sendto() sometimes returns "Network is down" so i guess
		// we just had an "error reply".
		log("msg20: error sending mcast %s",mstrerror(g_errno));
		m_gotReply = true;
		return true;
	}

	// we are officially "in progress"
	m_inProgress = true;

	// we blocked
	return false;
}