void flush() { auto returnedJSON = m_jsExecutor->flush(); m_callback(parseMethodCalls(returnedJSON), true /* = isEndOfBatch */); }
void Msg39::estimateHits ( ) {
    // no longer in use
    m_inUse = false;
    // now this for the query loop on the QueryLogEntries.
    m_topDocId50 = 0LL;
    m_topScore50 = 0.0;
    // a little hack for the seo pipeline in xmldoc.cpp
    m_topDocId  = 0LL;
    m_topScore  = 0.0;
    m_topDocId2 = 0LL;
    m_topScore2 = 0.0;
    long ti = m_tt.getHighNode();
    if ( ti >= 0 ) {
        TopNode *t = &m_tt.m_nodes[ti];
        m_topDocId = t->m_docId;
        m_topScore = t->m_score;
    }
    // try the 2nd one too
    long ti2 = -1;
    if ( ti >= 0 ) ti2 = m_tt.getNext ( ti );
    if ( ti2 >= 0 ) {
        TopNode *t2 = &m_tt.m_nodes[ti2];
        m_topDocId2 = t2->m_docId;
        m_topScore2 = t2->m_score;
    }
    // convenience ptrs. we will store the docids/scores into these arrays
    long long *topDocIds;
    float     *topScores;
    key_t     *topRecs;
    // numDocIds counts docs in all tiers when using toptree.
    long numDocIds = m_tt.m_numUsedNodes;
    // the msg39 reply we send back
    long  replySize;
    char *reply;
    //m_numTotalHits = m_posdbTable.m_docIdVoteBuf.length() / 6;
    // make the reply?
    Msg39Reply mr;
    if ( ! m_callback ) {
        // if we got clusterdb recs in here, use 'em
        if ( m_gotClusterRecs ) numDocIds = m_numVisible;
        // don't send more than the docs that are asked for
        if ( numDocIds > m_r->m_docsToGet ) numDocIds = m_r->m_docsToGet;
        // # of QueryTerms in query
        long nqt = m_tmpq.m_numTerms;
        // start setting the stuff
        mr.m_numDocIds = numDocIds;
        // copy # estimated hits into 8 bytes of reply
        //long long est = m_posdbTable.m_estimatedTotalHits;
        // ensure it has at least as many results as we got
        //if ( est < numDocIds ) est = numDocIds;
        // or if too big...
        //if ( numDocIds < m_r->m_docsToGet ) est = numDocIds;
        // . total estimated hits
        // . this is now an EXACT count!
        mr.m_estimatedHits = m_numTotalHits;
        // sanity check
        mr.m_nqt = nqt;
        // the m_errno if any
        mr.m_errno = m_errno;
        // shortcut
        PosdbTable *pt = &m_posdbTable;
        // the score info, in no particular order right now
        mr.ptr_scoreInfo  = pt->m_scoreInfoBuf.getBufStart();
        mr.size_scoreInfo = pt->m_scoreInfoBuf.length();
        // that has offset references into posdbtable::m_pairScoreBuf
        // and m_singleScoreBuf, so we need those too now
        mr.ptr_pairScoreBuf    = pt->m_pairScoreBuf.getBufStart();
        mr.size_pairScoreBuf   = pt->m_pairScoreBuf.length();
        mr.ptr_singleScoreBuf  = pt->m_singleScoreBuf.getBufStart();
        mr.size_singleScoreBuf = pt->m_singleScoreBuf.length();
        // save some time since seo.cpp gets from posdbtable directly,
        // so we can avoid serializing/copying this stuff at least
        if ( ! m_r->m_makeReply ) {
            mr.size_scoreInfo      = 0;
            mr.size_pairScoreBuf   = 0;
            mr.size_singleScoreBuf = 0;
        }
        // and now the sitehash list if it exists
        mr.ptr_siteHashList  = pt->m_siteHashList.getBufStart();
        mr.size_siteHashList = pt->m_siteHashList.length();
        mr.m_sectionStats    = pt->m_sectionStats;
        // reserve space for these guys, we fill them in below
        mr.ptr_docIds      = NULL;
        mr.ptr_scores      = NULL;
        mr.ptr_clusterRecs = NULL;
        // this is how much space to reserve
        mr.size_docIds = 8 * numDocIds; // long long
        mr.size_scores = 4 * numDocIds; // float
        // if not doing site clustering, we won't have these perhaps...
        if ( m_gotClusterRecs )
            mr.size_clusterRecs = sizeof(key_t) * numDocIds;
        else
            mr.size_clusterRecs = 0;
        // . that is pretty much it, so serialize it into buffer, "reply"
        // . mr.ptr_docIds, etc., will point into the buffer so we can
        //   re-serialize into it below from the tree
        // . returns NULL and sets g_errno on error
        // . "true" means we should make mr.ptr_* reference into the
        //   newly serialized buffer.
        reply = serializeMsg ( sizeof(Msg39Reply),   // baseSize
                               &mr.size_docIds,      // firstSizeParm
                               &mr.size_clusterRecs, // lastSizePrm
                               &mr.ptr_docIds,       // firstStrPtr
                               &mr,                  // thisPtr
                               &replySize,
                               NULL,
                               0,
                               true );
        if ( ! reply ) {
            log("query: Could not allocate memory "
                "to hold reply of docids to send back.");
            sendReply(m_slot,this,NULL,0,0,true);
            return;
        }
        topDocIds = (long long *) mr.ptr_docIds;
        topScores = (float     *) mr.ptr_scores;
        topRecs   = (key_t     *) mr.ptr_clusterRecs;
    }
    long docCount = 0;
    // loop over all results in the TopTree
    for ( long ti = m_tt.getHighNode() ; ti >= 0 ; ti = m_tt.getPrev(ti) ) {
        // get the guy
        TopNode *t = &m_tt.m_nodes[ti];
        // skip if clusterLevel is bad!
        if ( m_gotClusterRecs && t->m_clusterLevel != CR_OK ) continue;
        // if not sending back a reply... we were called from seo.cpp
        // State3f logic to evaluate a QueryLogEntry, etc.
        if ( m_callback ) {
            // skip results past #50
            if ( docCount > 50 ) continue;
            // set this
            m_topScore50 = t->m_score;
            m_topDocId50 = t->m_docId;
            // that's it
            continue;
        }
        // get the docid ptr
        //char *diptr = t->m_docIdPtr;
        //long long docId = getDocIdFromPtr(diptr);
        // sanity check
        if ( t->m_docId < 0 ) { char *xx=NULL; *xx=0; }
        // add it to the reply
        topDocIds [docCount] = t->m_docId;
        topScores [docCount] = t->m_score;
        // supply clusterdb rec? only for full splits
        if ( m_gotClusterRecs )
            topRecs [docCount] = t->m_clusterRec;
        //topExplicits [docCount] =
        //    getNumBitsOn(t->m_explicits)
        docCount++;
        // 50th score? set this for seo.cpp. if less than 50 results
        // we want the score of the last doc then.
        if ( docCount <= 50 ) m_topScore50 = t->m_score;
        if ( m_debug ) {
            log(LOG_DEBUG,"query: msg39: [%lu] "
                "%03li) docId=%012llu sum=%.02f",
                (long)this, docCount,
                t->m_docId, t->m_score);
        }
        // don't send more than the docs that are wanted
        if ( docCount >= numDocIds ) break;
    }
    if ( docCount > 300 && m_debug )
        log("query: Had %li nodes in top tree",docCount);
    // this is sensitive info
    if ( m_debug ) {
        log(LOG_DEBUG,
            "query: msg39: [%li] Intersected lists took %lli (%lli) ms "
            "docIdsToGet=%li docIdsGot=%li "
            "q=%s",
            (long)this,
            m_posdbTable.m_addListsTime,
            gettimeofdayInMilliseconds() - m_startTime,
            m_r->m_docsToGet,
            numDocIds,
            m_tmpq.getQuery() );
    }
    // if we blocked because we used a thread then call callback if
    // summoned from a msg3f handler and not a msg39 handler
    if ( m_callback ) {
        // if we blocked call user callback
        if ( m_blocked ) m_callback ( m_state );
        // if not sending back a udp reply, return now
        return;
    }
    // now send back the reply
    sendReply(m_slot,this,reply,replySize,replySize,false);
    return;
}
void CBaseSocket::OnClose() { m_state = SOCKET_STATE_CLOSING; m_callback(m_callback_data, NETLIB_MSG_CLOSE, (net_handle_t)m_socket, NULL); }
void FinishedCallback::invoke() { disconnect(m_connection); m_callback(); }
void invoke(const std::string& message) { reset_interval_timer(); m_callback(message); }
bool PolygonElementParser::parseElementEndTag(const NodeType::XMLNode&, const std::string&) { m_callback(m_model); return true; }
//============================================================================
void BuildIdRequestTrans::Post () {
    m_callback(m_result, m_param, m_buildId);
}
void WsConnection::onError() { m_callback(shared_from_this(), Ws_ErrorEvent, 0); }
void CPHScriptObjectActionN::run() { m_callback(); b_obsolete=true; }
void HttpSniffer::got_packet(const struct pcap_pkthdr *header, const u_char *packet)
{
    /* declare pointers to packet headers */
    const struct sniff_ethernet *ethernet; /* The ethernet header [1] */
    const struct sniff_ip *ip;             /* The IP header */
    const struct sniff_ip6 *ip6;           /* The IPv6 header */
    const struct sniff_tcp *tcp;           /* The TCP header */
    const char *payload;                   /* Packet payload */

    int size_ip;
    int size_tcp;
    int size_payload;
    int ip_len;

    string from;
    string to;

    /* define ethernet header */
    ethernet = (struct sniff_ethernet*)(packet);

    /* define/compute ip header offset */
    u_short ether_type = ntohs(ethernet->ether_type);
    switch (ether_type) {
    case ETHERTYPE_IP:
        ip = (struct sniff_ip*)(packet + SIZE_ETHERNET);
        size_ip = IP_HL(ip)*4;
        if (size_ip < 20)
            throw runtime_error(str(boost::format("Invalid IPv4 header length: %u bytes") % size_ip));
        ip_len = ntohs(ip->ip_len);
        break;
    case ETHERTYPE_IPV6:
        // FIXME: Support IPv6 extension headers?
        ip6 = (struct sniff_ip6*)(packet + SIZE_ETHERNET);
        size_ip = 40;
        /* ip6_plen excludes the fixed 40-byte header; normalize to a total
           length so the payload math below works for both families */
        ip_len = ntohs(ip6->ip6_plen) + size_ip;
        break;
    default:
        cout << (boost::format("Ignoring unknown ethernet packet with type %x") % ether_type) << endl;
        return;
    }

    /* ignore non tcp packets; only inspect the header that was actually set
       above -- the other pointer is uninitialized */
    if (ether_type == ETHERTYPE_IP) {
        if (ip->ip_p != IPPROTO_TCP)
            return;
    } else {
        if (ip6->ip6_nxt != IPPROTO_TCP)
            return;
    }

    /* define tcp header */
    tcp = (struct sniff_tcp*)(packet + SIZE_ETHERNET + size_ip);
    size_tcp = TH_OFF(tcp)*4;
    if (size_tcp < 20)
        throw runtime_error(str(boost::format("Invalid TCP header length: %u bytes") % size_tcp));

    /* get source/dest */
    if (ether_type == ETHERTYPE_IP) {
        from = str(boost::format("%s:%d") % inet_ntoa(ip->ip_src) % ntohs(tcp->th_sport));
        to   = str(boost::format("%s:%d") % inet_ntoa(ip->ip_dst) % ntohs(tcp->th_dport));
    } else {
        char src_addr_buf[INET6_ADDRSTRLEN];
        inet_ntop(AF_INET6, &ip6->ip6_src, src_addr_buf, sizeof(src_addr_buf));
        char dst_addr_buf[INET6_ADDRSTRLEN];
        inet_ntop(AF_INET6, &ip6->ip6_dst, dst_addr_buf, sizeof(dst_addr_buf));
        from = str(boost::format("[%s]:%d") % string(src_addr_buf) % ntohs(tcp->th_sport));
        to   = str(boost::format("[%s]:%d") % string(dst_addr_buf) % ntohs(tcp->th_dport));
    }

    /* define/compute tcp payload (segment) offset */
    payload = (const char *)(packet + SIZE_ETHERNET + size_ip + size_tcp);

    /* compute tcp payload (segment) size */
    size_payload = ip_len - (size_ip + size_tcp);

    string key;
    key.append(from);
    key.append("-");
    key.append(to);

    HttpPacket *http_packet = 0;
    PacketCacheMap::iterator iter = m_pending_packets.find(key);
    if (iter == m_pending_packets.end())
        http_packet = new HttpPacket(from, to);
    else {
        http_packet = iter->second;
        m_pending_packets.erase(iter);
    }

    if (http_packet->parse(payload, size_payload)) {
        if (http_packet->isComplete()) {
            m_callback(http_packet);
            delete http_packet;
        } else {
            m_pending_packets[key] = http_packet;
        }
    } else {
        delete http_packet;
    }
}
void WsConnection::onOpen(const void* context) { m_status = FrameStart; m_callback(shared_from_this(), Ws_OpenEvent, context); }
thread_ret_t UVCVisionCam::RunThread()
{
    uint32_t index = 0xFFFFFFFF;
    void *buffer = NULL;
    DVP_PRINT(DVP_ZONE_CAM, "UVC Dequeue Thread Running!\n");
    // queue up all buffers
    for (uint32_t i = 0; i < m_numImages; i++)
    {
        buffer = v4l2_acquire(m_dev, &index);
        if (buffer)
        {
            if (v4l2_queue(m_dev, index, V4L2_BUF_TYPE_VIDEO_CAPTURE))
            {
                DVP_PRINT(DVP_ZONE_CAM, "UVC: Queued %p[%u] to UVC\n", buffer, index);
            }
            else
            {
                DVP_PRINT(DVP_ZONE_ERROR, "Failed to queue %p[%u]\n", buffer, index);
            }
        }
        else
        {
            DVP_PRINT(DVP_ZONE_ERROR, "Failed to acquire buffer num %u\n", i);
        }
    }
    v4l2_start(m_dev, V4L2_BUF_TYPE_VIDEO_CAPTURE);
    index = 0xFFFFFFFF;
    buffer = NULL;
    while (m_running)
    {
        //if (v4l2_wait(m_dev))
        {
            if (v4l2_dequeue(m_dev, &index, V4L2_BUF_TYPE_VIDEO_CAPTURE) == true_e)
            {
                buffer = v4l2_search_index(m_dev, index);
                if (buffer)
                {
                    DVP_U08 *ptr = (DVP_U08 *)buffer;
                    // copy meta fields, pointers will be recalculated
                    DVP_Image_t image = *m_images[m_curImage];
                    // copy image pointer over
                    image.pData[0] = ptr;
                    image.color = FOURCC_YUY2;
                    /// @todo reconstruct multi-plane formats....
                    if (image.color == FOURCC_NV12)
                    {
                        image.pData[1] = &ptr[image.bufHeight * image.y_stride];
                    }
                    if (DVP_Image_Copy(m_images[m_curImage], &image) == DVP_TRUE)
                    {
                        m_frame.mFrameBuff = m_images[m_curImage];
                        m_frame.mTimestamp = rtimer_now();
                        if (m_callback)
                            m_callback(&m_frame);
                        m_curImage = (m_curImage + 1) % m_numImages;
                    }
                }
                else
                {
                    DVP_PRINT(DVP_ZONE_CAM, "Failed to find buffer with index %u\n", index);
                }
                // requeue the buffer...
                v4l2_queue(m_dev, index, V4L2_BUF_TYPE_VIDEO_CAPTURE);
            }
        }
    }
    DVP_PRINT(DVP_ZONE_CAM, "UVC Dequeue Thread Exiting!\n");
    thread_exit(0);
}
void invokeCallback(const double callbackId, const folly::dynamic& arguments) { auto returnedJSON = m_jsExecutor->invokeCallback(callbackId, arguments); m_callback(parseMethodCalls(returnedJSON), true /* = isEndOfBatch */); }
void callFunction(const double moduleId, const double methodId, const folly::dynamic& arguments) { auto returnedJSON = m_jsExecutor->callFunction(moduleId, methodId, arguments); m_callback(parseMethodCalls(returnedJSON), true /* = isEndOfBatch */); }
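// flush(), invokeCallback(), and callFunction() above all funnel the JS
// executor's output through the same m_callback(parseMethodCalls(...), true)
// call. A minimal sketch of the wiring those call sites imply, assuming
// hypothetical names (MethodCall, BatchCallback, JsToNativeBridgeSketch) --
// not the real declarations from the bridge headers:
#include <functional>
#include <string>
#include <vector>

struct MethodCall { int moduleId; int methodId; };   // assumed shape

// stub: the real parser turns the executor's returned JSON into native calls
static std::vector<MethodCall> parseMethodCalls(const std::string& /*json*/) {
    return {};
}

class JsToNativeBridgeSketch {
public:
    // the bool mirrors the `true /* = isEndOfBatch */` argument above
    using BatchCallback =
        std::function<void(std::vector<MethodCall>, bool /*isEndOfBatch*/)>;

    explicit JsToNativeBridgeSketch(BatchCallback cb)
        : m_callback(std::move(cb)) {}

    // same shape as flush() above, minus the executor
    void deliver(const std::string& returnedJSON) {
        m_callback(parseMethodCalls(returnedJSON), true /* = isEndOfBatch */);
    }

private:
    BatchCallback m_callback;
};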
void PoolTask::Run() { if (m_callback){ m_callback(m_callback_data); } }
bool CPHScriptObjectConditionN::is_true() { return m_callback(); }
void performCallback(WebKitURISchemeRequest* request) { ASSERT(m_callback); m_callback(request, m_userData); }
bool is_complete() { return m_callback(); }
//============================================================================
void DownloadRequestTrans::Post () {
    m_callback(m_result, m_param, m_filename, m_writer);
}
bool is_complete() { return m_callback(m_result); }
//============================================================================
void ManifestRequestTrans::Post () {
    m_callback(m_result, m_param, m_group, m_manifest.Ptr(), m_manifest.Count());
}
void NotificationCallback::call(const Notification &notification) const { if (m_callback) m_callback(notification); }
void RunLoopObserver::runLoopObserverFired() { ASSERT(m_runLoopObserver); m_callback(); }
//xie 2016-06-02 start//////////////////////////////
void CBaseSocket::OnUDPClose()
{
    log("basesocket onudpclose \n");
    m_state = SOCKET_STATE_CLOSING;
    m_callback(m_callback_data, NETLIB_MSG_CLOSE, (net_handle_t)m_socket, NULL);
}
void CompletionCallbackType::componentComplete() { if (m_callback) m_callback(this, m_data); }
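// Several of the snippets above (PoolTask::Run, NotificationCallback::call,
// CompletionCallbackType::componentComplete) guard the invocation with
// `if (m_callback)`. A minimal standalone sketch of that idiom with
// std::function -- illustrative only, not taken from any of these codebases:
#include <functional>
#include <iostream>

class Completion {
public:
    using Callback = std::function<void(int /*status*/)>;

    void setCallback(Callback cb) { m_callback = std::move(cb); }

    void complete(int status) {
        // an empty std::function is falsy; invoking it would throw
        // std::bad_function_call, hence the guard
        if (m_callback) m_callback(status);
    }

private:
    Callback m_callback;
};

int main() {
    Completion c;
    c.complete(0);                                  // no-op: no callback set
    c.setCallback([](int s) { std::cout << "done: " << s << "\n"; });
    c.complete(1);                                  // prints "done: 1"
}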
void CBaseSocket::OnUDPWrite() { m_callback(m_callback_data, NETLIB_MSG_WRITE_UDP, (net_handle_t)m_socket, NULL); }
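// CBaseSocket's OnClose/OnUDPClose/OnUDPWrite above all pass an opaque user
// pointer first, then a NETLIB_MSG_* code, the socket handle, and a payload
// pointer (NULL here). A plausible reconstruction of that C-style callback
// type from the call sites alone -- the library's actual typedef may differ:
#include <cstdint>

using net_handle_t = uint32_t;                   // assumed handle type

using callback_t = void (*)(void* callback_data, // opaque user pointer
                            uint8_t msg,         // NETLIB_MSG_* code
                            net_handle_t handle, // which socket fired
                            void* param);        // message-specific payload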
bool EventListener::executeCallback(param_list_t params) {
    // forward the handler's result: a bool-returning function must return a value
    return m_callback(params[0].Event.data);
}
void UIEngine::handleUICommand(int command, Control* control) { ASSERT(m_callback); m_callback(command, control); }
void Msg22::gotReply ( ) {
    // save g_errno
    m_errno = g_errno;
    // shortcut
    Msg22Request *r = m_r;
    // back
    m_outstanding = false;
    r->m_inUse = 0;
    // bail on error, multicast will free the reply buffer if it should
    if ( g_errno ) {
        if ( r->m_url[0] )
            log("db: Had error getting title record for %s : %s.",
                r->m_url,mstrerror(g_errno));
        else
            log("db: Had error getting title record for docId of "
                "%lli: %s.",r->m_docId,mstrerror(g_errno));
        // free reply buf right away
        m_mcast.reset();
        m_callback ( m_state );
        return;
    }
    // breathe
    QUICKPOLL ( r->m_niceness );
    // get the reply
    long  replySize = -1;
    long  maxSize;
    bool  freeIt;
    char *reply = m_mcast.getBestReply (&replySize, &maxSize, &freeIt);
    relabel( reply, maxSize, "Msg22-mcastGBR" );
    // breathe
    QUICKPOLL ( r->m_niceness );
    // a NULL reply happens when not found at one host and the other host
    // is dead... we need to fix Multicast to return a g_errno for this
    if ( ! reply ) {
        // set g_errno for callback
        m_errno = g_errno = EBADENGINEER;
        log("db: Had problem getting title record. Reply is empty.");
        m_callback ( m_state );
        return;
    }
    // if replySize is only 8 bytes that means a not found
    if ( replySize == 8 ) {
        // we did not find it
        m_found = false;
        // get docid provided
        long long d = *(long long *)reply;
        // this is -1 or 0 if none available
        m_availDocId = d;
        // nuke the reply
        mfree ( reply , maxSize , "Msg22");
        // store error code
        m_errno = ENOTFOUND;
        // debug msg
        //if ( m_availDocId != m_probableDocId && m_url )
        //    log(LOG_DEBUG,"build: Avail docid %lli != probable "
        //        "of %lli for %s.",
        //        m_availDocId, m_probableDocId , m_urlPtr );
        // this is having problems in Msg23::gotTitleRec()
        m_callback ( m_state );
        return;
    }
    // sanity check. must either be an empty reply indicating nothing
    // available or an 8 byte reply above!
    if ( m_r->m_getAvailDocIdOnly ) { char *xx=NULL;*xx=0; }
    // otherwise, it was found
    m_found = true;
    // if just checking tfndb, do not set this, reply will be empty!
    if ( ! r->m_justCheckTfndb ) { // && ! r->m_getAvailDocIdOnly ) {
        *m_titleRecPtrPtr  = reply;
        *m_titleRecSizePtr = replySize;
    }
    // if they don't want the title rec, nuke it!
    else {
        // nuke the reply
        mfree ( reply , maxSize , "Msg22");
    }
    // all done
    m_callback ( m_state );
}
typename base_type::reference dereference() const { if (m_callback) m_callback(); return *(this->base()); }
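// The dereference() above fires a side-effect callback before yielding the
// element, in the style of boost::iterator_adaptor's dereference() hook
// (`this->base()` and `base_type::reference` match that protocol). A
// self-contained sketch of the same idea without Boost -- the name
// observed_iterator is invented for illustration:
#include <functional>
#include <iostream>
#include <iterator>
#include <vector>

template <typename It>
class observed_iterator {
public:
    observed_iterator(It base, std::function<void()> cb)
        : m_base(base), m_callback(std::move(cb)) {}

    typename std::iterator_traits<It>::reference operator*() const {
        if (m_callback) m_callback();   // notify on every dereference
        return *m_base;
    }
    observed_iterator& operator++() { ++m_base; return *this; }
    bool operator!=(const observed_iterator& o) const { return m_base != o.m_base; }

private:
    It m_base;
    std::function<void()> m_callback;
};

int main() {
    std::vector<int> v{1, 2, 3};
    int touches = 0;
    observed_iterator<std::vector<int>::iterator>
        it(v.begin(), [&] { ++touches; }),
        end(v.end(), nullptr);
    for (; it != end; ++it) std::cout << *it << ' ';
    std::cout << "\nderefs: " << touches << "\n";   // prints "derefs: 3"
}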