/** \brief callback notified by \ref socket_full_t to provide event
 */
bool	http_sresp_cnx_t::neoip_socket_full_event_cb(void *userptr, socket_full_t &cb_socket_full
						, const socket_event_t &socket_event)	throw()
{
	// log to debug
	KLOG_DBG("enter event=" << socket_event);
	// sanity check - the event MUST be full_ok
	DBG_ASSERT( socket_event.is_full_ok() );

	// handle the fatal events
	if( socket_event.is_fatal() )	return autodelete();

	// handle each possible event from its type
	switch( socket_event.get_value() ){
	case socket_event_t::RECVED_DATA:
			// data MUST NOT be received if the request is not a POST
			if( !m_http_reqhd.method().is_post() )
				return autodelete("recved data on non POST");
			// forward to the proper handler
			return handle_recved_data(*socket_event.get_recved_data());
	case socket_event_t::MAYSEND_ON:
			// if MAYSEND_ON occurs while the maysend threshold is equal to the
			// sendbuf maxlen, this is the end of the connection, so autodelete
			// - this is a kludge to implement a weird linger... to fix
			if( m_socket_full->maysend_tshold() == m_socket_full->xmitbuf_maxlen() )
				return autodelete();
			// try to fill the socket_full sendbuf
			try_fill_xmitbuf();
			break;
	default:	DBG_ASSERT(0);
	}
	// return tokeep
	return true;
}
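/* A note on the MAYSEND_ON linger kludge above: a minimal sketch assuming the
 * getter/setter overloading convention used elsewhere in this section
 * (start_lingering_close() is a hypothetical helper; only maysend_tshold() and
 * xmitbuf_maxlen() appear in the code above). Raising the maysend threshold to
 * the sendbuf maximum length presumably makes MAYSEND_ON fire only once the
 * sendbuf has fully drained, which the callback above then treats as the end
 * of the connection and autodelete()s.
 */
void	http_sresp_cnx_t::start_lingering_close()	throw()
{
	// set the maysend threshold equal to the sendbuf maxlen - from now on,
	// MAYSEND_ON means "everything queued has been sent"
	m_socket_full->maysend_tshold( m_socket_full->xmitbuf_maxlen() );
}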
/** \brief Handle RECVED_DATA from the socket_full_t
 */
bool	http_sresp_cnx_t::handle_recved_data(const pkt_t &pkt)	throw()
{
	// sanity check - data MUST be received IIF the method is http_method_t::POST
	DBG_ASSERT( m_http_reqhd.method().is_post() );
	// queue the received data to the one already received
	m_recved_data.append( pkt.to_datum(datum_t::NOCOPY) );
	// some logging
	KLOG_ERR("recved data " << m_recved_data.length() << "-byte");

	// if the received data are too long, close the connection
	if( m_recved_data.length() > profile().recv_post_maxlen() )
		return autodelete("recved POST too large");
	// http_reqhd.content_length() MUST be size_t_ok
	if( !m_http_reqhd.content_length().is_size_t_ok() )
		return autodelete("recved Content-Length is not size_t ok");
	// get the http_reqhd.content_length()
	size_t	content_length	= m_http_reqhd.content_length().to_size_t();
#if 0
	// if the recved_data is larger than content_length, return an error
	if( m_recved_data.length() > content_length )
		return autodelete("recved_data larger than http_reqhd_t Content-Length");
#else
	// if recved_data is larger than content_length, strip m_recved_data to content_length
	// - NOTE: needed as IE (tested on IE6sp2/IE7) sends 2 additional bytes "\r\n"
	if( m_recved_data.length() > content_length )
		m_recved_data.tail_free(m_recved_data.length() - content_length);
#endif
	// if the recved_data length is less than content_length, return now
	if( m_recved_data.length() < content_length )	return true;

	// now the http_method_t::POST is fully received, notify the http_sresp_ctx_t
	return notify_ctx();
}
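/* The accumulate-and-truncate logic above, reduced to a standalone sketch
 * (std::string instead of datum_t; post_body_complete is a hypothetical name):
 * append each chunk, trim anything past Content-Length - e.g. the spurious
 * trailing "\r\n" that IE6sp2/IE7 send - and report completion once the body
 * reaches Content-Length.
 */
#include <cstddef>
#include <string>

// returns true once the POST body is complete; body never grows past content_length
static bool post_body_complete(std::string &body, const std::string &chunk, size_t content_length)
{
	// queue the received chunk to the data already received
	body.append(chunk);
	// strip anything beyond Content-Length (e.g. IE's extra "\r\n")
	if( body.size() > content_length )	body.resize(content_length);
	// the body is complete iff it reached Content-Length exactly
	return body.size() == content_length;
}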
/** \brief parse a bt_cmdtype_t::BLOCK_REP command
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_block_rep(pkt_t &pkt)	throw()
{
	bt_swarm_stats_t &	swarm_stats	= bt_swarm->swarm_stats();
	const bt_mfile_t &	bt_mfile	= bt_swarm->get_mfile();
	uint32_t		piece_idx;
	uint32_t		data_offset;
	bt_err_t		bt_err;
	// log to debug
	KLOG_DBG("enter pkt=" << pkt);
	try {
		pkt >> piece_idx;
		pkt >> data_offset;
		// NOTE: the replied data are the remainder of pkt
	}catch(serial_except_t &e){
		// if the unserialization failed, it is a bug in the protocol, autodelete
		return autodelete();
	}
	// if the piece_idx is greater than or equal to the number of pieces, autodelete
	if( piece_idx >= bt_mfile.nb_piece() )			return autodelete();
	// if the block_rep goes across several pieces, autodelete
	if( data_offset + pkt.size() > bt_mfile.piecelen() )	return autodelete();
	// if the block_rep has a size of 0, autodelete
	if( pkt.size() == 0 )					return autodelete();

	// update the recv_rate
	m_recv_rate.update(pkt.size());

	// update the dloaded_datalen in bt_swarm_stats_t
	// TODO should it be updated here ? and not in the scheduler ?
	// - do i update it properly in the ecnx case too ?
	// - where should i update it ? in the block notify complete ?
	// - this is not updated for ecnx
	swarm_stats.dloaded_datalen( swarm_stats.dloaded_datalen() + pkt.size() );

	// if no pending bt_swarm_sched_request_t matches this block_rep, discard it
	// - NOTE: it may happen in a race such as (i) local peer requests the block from
	//   peers alice and bob, (ii) receives it from bob, (iii) sends a BLOCK_DEL to alice,
	//   (iv) alice already replied the data before getting the BLOCK_DEL, (v) local peer
	//   receives a BLOCK_REP without a matching bt_swarm_sched_request_t.
	bt_prange_t	bt_prange	= bt_prange_t(piece_idx, data_offset, pkt.size());
	file_range_t	totfile_range	= bt_prange.to_totfile_range(bt_mfile);
	if( !full_sched()->has_matching_request(totfile_range) ){
		// update the bt_swarm_stats_t
		swarm_stats.dup_rep_nb ( swarm_stats.dup_rep_nb()  + 1 );
		swarm_stats.dup_rep_len( swarm_stats.dup_rep_len() + pkt.size() );
		// return tokeep
		return true;
	}

	// notify the callback of this event - up to the scheduler to write the data
	bt_cmd_t bt_cmd	= bt_cmd_t::build_block_rep(bt_prange);
	bool	tokeep	= notify_callback( bt_swarm_full_event_t::build_block_rep(bt_cmd, &pkt) );
	if( !tokeep )	return false;
	// return tokeep
	return true;
}
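/* The three bounds checks above, in isolation: a minimal standalone sketch with
 * plain integers (block_is_valid and its parameters are hypothetical names, not
 * part of this codebase). A block is acceptable only if the piece index exists,
 * the block stays within a single piece, and the block is non-empty; the size_t
 * cast avoids a uint32_t overflow on the addition.
 */
#include <cstddef>
#include <cstdint>

static bool block_is_valid(uint32_t piece_idx, uint32_t data_offset, uint32_t data_len
					, size_t nb_piece, size_t piece_len)
{
	if( piece_idx >= nb_piece )				return false;	// piece out of range
	if( size_t(data_offset) + data_len > piece_len )	return false;	// crosses a piece boundary
	if( data_len == 0 )					return false;	// empty block
	return true;
}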
/** \brief callback notified by \ref socket_full_t to provide event
 */
bool	bt_swarm_full_t::neoip_socket_full_event_cb(void *userptr
			, socket_full_t &cb_socket_full, const socket_event_t &socket_event)	throw()
{
	// log to debug
	KLOG_DBG("enter event=" << socket_event);
	// sanity check - the event MUST be full_ok
	DBG_ASSERT( socket_event.is_full_ok() );

	// if the socket_event_t is fatal, autodelete this bt_swarm_full_t
	if( socket_event.is_fatal() )	return autodelete();

	// handle each possible event from its type
	switch( socket_event.get_value() ){
	case socket_event_t::RECVED_DATA:{
			pkt_t *	pkt	= socket_event.get_recved_data();
			// log to debug
			KLOG_DBG("data len=" << pkt->length());
			// queue the received data to the one already received
			recved_data.append(pkt->void_ptr(), pkt->length());
			// parse the recved command
			return parse_recved_data();}
	case socket_event_t::MAYSEND_ON:
			// warn the sendq that it can send
			sendq->notify_maysend();
			return true;
	default:	DBG_ASSERT(0);
	}
	// return tokeep
	return true;
}
/** \brief callback notified by \ref bt_swarm_t to notify an event
 */
bool	bt_oload0_swarm_t::neoip_bt_swarm_cb(void *cb_userptr, bt_swarm_t &cb_bt_swarm
					, const bt_swarm_event_t &swarm_event)	throw()
{
	// log to debug
	KLOG_WARN("enter event=" << swarm_event);

	// handle the fatal bt_swarm_event_t
	if( swarm_event.is_fatal() ){
		// log the event
		KLOG_INFO("Closing the bt_swarm_t for " << nested_uri << " due to " << swarm_event);
		// autodelete
		return autodelete();
	}

	// handle the bt_swarm_event_t according to its value
	switch(swarm_event.get_value()){
	case bt_swarm_event_t::PIECE_NEWLY_AVAIL:{
			std::list<bt_httpo_full_t *>::iterator	iter;
			// notify this new data to each bt_httpo_full_t within the httpo_full_db
			for(iter = httpo_full_db.begin(); iter != httpo_full_db.end(); iter++){
				bt_httpo_full_t *	httpo_full	= *iter;
				// notify this bt_httpo_full_t of the new data
				httpo_full->notify_newly_avail_piece();
			}
			break;}
	default:	DBG_ASSERT( 0 );
	}
	// return tokeep
	return true;
}
/** \brief to be called when a piece_idx is newly marked unavailable
 *
 * - WARNING: this function MAY delete the bt_swarm_full_t. take this into account.
 */
void	bt_swarm_full_t::declare_piece_nomore_avail(size_t piece_idx)	throw()
{
	// autodelete the connection if there is a pending BLOCK_REP for this piece_idx in the sendq
	// - NOTE: autodelete is the only possible option as it is impossible to notify the
	//   remote peer that a particular request has been canceled (limitation of the
	//   bt protocol)
	// - maybe some stuff about the 'fast' extension (not implemented)
	const std::list<bt_cmd_t> &		cmd_queue = sendq->cmd_queue;
	std::list<bt_cmd_t>::const_iterator	iter;
	for(iter = cmd_queue.begin(); iter != cmd_queue.end(); iter++){
		const bt_cmd_t &	bt_cmd	= *iter;
		// if this bt_cmd_t is NOT a BLOCK_REP, skip it
		if( bt_cmd.cmdtype() != bt_cmdtype_t::BLOCK_REP )	continue;
		// if this bt_cmd_t is NOT for this piece_idx, skip it
		if( bt_cmd.prange().piece_idx() != piece_idx )		continue;
		// else autodelete the bt_swarm_full_t and return immediately
		autodelete();
		return;
	}

	// if the remote peer supports bt_utmsgtype_t::PUNAVAIL, notify it
	if( full_utmsg()->do_support(bt_utmsgtype_t::PUNAVAIL) ){
		// TODO maybe put a helper to get it directly without putting the dynamic_cast here
		// - see what has been done in the bt_swarm_utmsg_t::utmsg_piecewish()
		bt_utmsg_cnx_vapi_t *	cnx_vapi	= full_utmsg()->cnx_vapi(bt_utmsgtype_t::PUNAVAIL);
		bt_utmsg_punavail_cnx_t*punavail_cnx	= dynamic_cast<bt_utmsg_punavail_cnx_t*>(cnx_vapi);
		DBG_ASSERT( punavail_cnx );
		punavail_cnx->xmit_punavail(piece_idx);
	}
}
/** \brief callback called when the timeout_t expire
 */
bool	bt_swarm_full_t::neoip_timeout_expire_cb(void *userptr, timeout_t &cb_timeout)	throw()
{
	// log to debug
	KLOG_WARN("enter");
	// autodelete
	return autodelete();
}
bool	bt_oload0_swarm_t::neoip_bt_httpo_full_cb(void *cb_userptr, bt_httpo_full_t &cb_bt_httpo_full
					, const bt_httpo_event_t &httpo_event)	throw()
{
	bt_httpo_full_t*httpo_full	= &cb_bt_httpo_full;
	// log to debug
	KLOG_WARN("enter httpo_event=" << httpo_event);
	// sanity check - bt_httpo_event_t MUST be is_full_ok()
	DBG_ASSERT( httpo_event.is_full_ok() );

	// handle each possible event from its type
	switch( httpo_event.get_value() ){
	case bt_httpo_event_t::CNX_CLOSED:
			// remove it from the httpo_full_db
			httpo_full_db.remove(httpo_full);
			// delete the bt_httpo_full_t object itself
			nipmem_zdelete	httpo_full;
			// if httpo_full_db is now empty, autodelete
			if( httpo_full_db.empty() )	return autodelete();
			// return dontkeep - as the httpo_full has just been deleted
			return false;
	default:	DBG_ASSERT(0);
	}
	// return tokeep
	return true;
}
/** \brief function to notify a piece_unavail(pieceidx) from the bt_utmsgtype_t::PUNAVAIL
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::notify_utmsg_punavail(size_t pieceidx)	throw()
{
	// log to debug
	KLOG_DBG("pieceidx=" << pieceidx);
	// sanity check - bt_utmsgtype_t::PUNAVAIL MUST be supported
	DBG_ASSERT( full_utmsg()->do_support(bt_utmsgtype_t::PUNAVAIL) );

	// if pieceidx is not in the proper range, autodelete
	if( pieceidx >= remote_pavail().nb_piece() )	return autodelete();
	// if the pieceidx piece is already marked as unavail, do nothing
	if( remote_pavail().is_unavail(pieceidx) )	return true;

	// mark the piece as unavailable in the remote_pavail
	m_remote_pavail.mark_unavail(pieceidx);

	// notify the callback of this event
	bool	tokeep	= notify_callback( bt_swarm_full_event_t::build_piece_unavail(pieceidx) );
	if( !tokeep )	return false;

	// if this bt_swarm_full_t DOES NOT support bt_utmsgtype_t::PIECEWISH, update piecewish
	if( full_utmsg()->no_support(bt_utmsgtype_t::PIECEWISH) ){
		bool	tokeep	= notify_utmsg_dowish_index(pieceidx);
		if( !tokeep )	return false;
	}
	// return tokeep
	return true;
}
/** \brief callback called when the timeout_t expire
 */
bool	http_client_pool_cnx_t::neoip_timeout_expire_cb(void *userptr, timeout_t &cb_timeout)	throw()
{
	// update the http_client_pool_stat_t
	http_client_pool_stat_t & pool_stat	= client_pool->pool_stat;
	pool_stat.nb_cnx_died_internal( pool_stat.nb_cnx_died_internal() + 1 );
	// autodelete
	return autodelete();
}
/** \brief parse a bt_cmdtype_t::PIECE_ISAVAIL command
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_piece_isavail_cmd(pkt_t &pkt)	throw()
{
	uint32_t	piece_idx;
	try {
		pkt >> piece_idx;
	}catch(serial_except_t &e){
		// if the unserialization failed, it is a bug in the protocol, autodelete
		return autodelete();
	}
	// log to debug
	KLOG_DBG("piece_idx=" << piece_idx);

	// if the piece_idx is not in the proper range, autodelete
	if( piece_idx >= remote_pavail().nb_piece() )	return autodelete();
	// if the piece_idx piece is already marked as avail, do nothing
	if( remote_pavail().is_avail(piece_idx) )	return true;

	// if the local peer was not yet interested and this piece_idx is not locally
	// available, declare the local peer interested
	if( bt_swarm->local_pavail().is_unavail(piece_idx) && !local_dowant_req() ){
		m_local_dowant_req	= true;
		sendq->queue_cmd( bt_cmd_t::build_dowant_req() );
	}

	// if this bt_swarm_full_t DOES NOT support bt_utmsgtype_t::PIECEWISH, update piecewish
	if( full_utmsg()->no_support(bt_utmsgtype_t::PIECEWISH) ){
		bool	tokeep	= notify_utmsg_nowish_index(piece_idx);
		if( !tokeep )	return false;
	}

	// mark the piece as available in the remote_pavail
	m_remote_pavail.mark_isavail(piece_idx);

	// notify the callback of this event
	bool	tokeep	= notify_callback( bt_swarm_full_event_t::build_piece_isavail(piece_idx) );
	if( !tokeep )	return false;

	// if the local peer is a seed and the remote one just became one too, close the connection
	// - NOTE: it MUST be done after the notification to have the full and sched in sync when deleting
	if( bt_swarm->is_seed() && remote_pavail().is_fully_avail() )	return autodelete();
	// return tokeep
	return true;
}
/** \brief parse a bt_cmdtype_t::BLOCK_REQ command
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_block_req(pkt_t &pkt)	throw()
{
	const bt_swarm_profile_t &	swarm_profile	= bt_swarm->profile();
	const bt_mfile_t &		bt_mfile	= bt_swarm->get_mfile();
	uint32_t	piece_idx;
	uint32_t	data_offset;
	uint32_t	data_len;
	// log to debug
	KLOG_DBG("enter pkt=" << pkt);
	try {
		pkt >> piece_idx;
		pkt >> data_offset;
		pkt >> data_len;
	}catch(serial_except_t &e){
		// if the unserialization failed, it is a bug in the protocol, autodelete
		return autodelete();
	}
	// if the piece_idx is greater than or equal to the number of pieces, autodelete
	if( piece_idx >= bt_mfile.nb_piece() )			return autodelete();
	// if the block_req goes across several pieces, autodelete
	if( data_offset + data_len > bt_mfile.piecelen() )	return autodelete();
	// if the data_len is larger than the swarm_profile.recv_req_maxlen, autodelete
	if( data_len > swarm_profile.recv_req_maxlen() )	return autodelete();
	// if the piece_idx is not locally available, autodelete
	// - NOTE: this is actually important for bt_io_vapi_t to http
	if( bt_swarm->local_pavail().is_unavail(piece_idx) ){
		KLOG_ERR("received a request from peerid " << remote_peerid().peerid_progfull()
				<< " for piece_idx " << piece_idx << " so autodelete");
		return autodelete();
	}
	// if BLOCK_REQ are not authorized, discard it
	if( !local_doauth_req() )	return true;

	// queue the BLOCK_REP command
	bt_prange_t	bt_prange = bt_prange_t(piece_idx, data_offset, data_len);
	sendq->queue_cmd( bt_cmd_t::build_block_rep(bt_prange) );
	// return tokeep
	return true;
}
/** \brief callback notified by \ref socket_full_t to provide event
 */
bool	http_client_pool_cnx_t::neoip_socket_full_event_cb(void *userptr, socket_full_t &cb_socket_full
						, const socket_event_t &socket_event)	throw()
{
	// log to debug
	KLOG_DBG("enter event=" << socket_event);
	// sanity check - the event MUST be full_ok
	DBG_ASSERT( socket_event.is_full_ok() );
	// update the http_client_pool_stat_t
	http_client_pool_stat_t & pool_stat	= client_pool->pool_stat;
	pool_stat.nb_cnx_died_external( pool_stat.nb_cnx_died_external() + 1 );
	// delete the connection on ALL socket_event_t
	return autodelete();
}
/** \brief parse a bt_cmdtype_t::PIECE_BFIELD command
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_piece_bfield_cmd(pkt_t &pkt)	throw()
{
	const bt_mfile_t &	bt_mfile	= bt_swarm->bt_mfile;
	// log to debug
	KLOG_DBG("enter pkt=" << pkt);
	// try to parse it
	try {
		m_remote_pavail	= bt_pieceavail_t::btformat_from_pkt(pkt, bt_mfile.nb_piece());
	}catch(serial_except_t &e){
		// if the unserialization failed, it is a bug in the protocol, autodelete
		return autodelete();
	}

	// if the local peer was not interested and this pieceavail contains pieces not locally
	// available, switch to interested
	if( !local_dowant_req() && !bt_swarm->local_pavail().fully_contain(remote_pavail()) ){
		m_local_dowant_req	= true;
		sendq->queue_cmd( bt_cmd_t::build_dowant_req() );
	}

	// notify the callback of this event
	bool	tokeep	= notify_callback(bt_swarm_full_event_t::build_piece_bfield());
	if( !tokeep )	return false;

	// if this bt_swarm_full_t DOES NOT support bt_utmsgtype_t::PIECEWISH, update piecewish
	if( full_utmsg()->no_support(bt_utmsgtype_t::PIECEWISH) ){
		bool	tokeep	= notify_utmsg_dowish_field(~remote_pavail());
		if( !tokeep )	return false;
	}

	// if the local peer is a seed and the remote one too, close the connection
	// - NOTE: it MUST be done after the notification to have the full and sched in sync when deleting
	if( bt_swarm->is_seed() && remote_pavail().is_fully_avail() )	return autodelete();
	// return tokeep
	return true;
}
/** \brief parse a received command
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_recved_cmd(pkt_t &pkt)	throw()
{
	const bt_swarm_profile_t &	profile	= bt_swarm->profile();
	// log to debug
	KLOG_DBG("enter pkt=" << pkt);

	// restart the idle_timeout at each received command
	idle_timeout.start(profile.full_idle_timeout(), this, NULL);

	// handle the KEEP_ALIVE 'command' - the idle_timeout is restarted above for each command
	// - it has a length of 0 with no packet type
	if( pkt.size() == 0 )	return true;

	// parse the packet type
	bt_cmdtype_t	cmdtype;
	try {
		pkt >> cmdtype;
	}catch(serial_except_t &e){
		// if the unserialization failed, it is a bug in the protocol, autodelete
		return autodelete();
	}
	// log to debug
	KLOG_DBG("cmdtype=" << cmdtype << " pkt.size=" << pkt.size());

	// parse the command depending on the cmdtype
	switch(cmdtype.get_value()){
	case bt_cmdtype_t::DOAUTH_REQ:		return parse_doauth_req_cmd(pkt);
	case bt_cmdtype_t::UNAUTH_REQ:		return parse_unauth_req_cmd(pkt);
	case bt_cmdtype_t::DOWANT_REQ:		return parse_dowant_req_cmd(pkt);
	case bt_cmdtype_t::UNWANT_REQ:		return parse_unwant_req_cmd(pkt);
	case bt_cmdtype_t::PIECE_ISAVAIL:	return parse_piece_isavail_cmd(pkt);
	case bt_cmdtype_t::PIECE_BFIELD:	return parse_piece_bfield_cmd(pkt);
	case bt_cmdtype_t::BLOCK_REQ:		return parse_block_req(pkt);
	case bt_cmdtype_t::BLOCK_REP:		return parse_block_rep(pkt);
	case bt_cmdtype_t::BLOCK_DEL:		return parse_block_del(pkt);
	case bt_cmdtype_t::UTMSG_PAYL:		return parse_utmsg_payl(pkt);
	default:	// unknown command types are simply ignored
			KLOG_ERR("recved unknown command " << cmdtype.get_value() << " from "
					<< remote_peerid().peerid_progfull());
			return true;
	}
	// return tokeep
	return true;
}
/** \brief function to notify a dowish_field(bitfield_t) from the bt_utmsgtype_t::PIECEWISH
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::notify_utmsg_dowish_field(const bitfield_t &new_remote_pwish)	throw()
{
	// sanity check - a pieceidx MUST NOT simultaneously be in remote_pavail and remote_pwish
	DBGNET_ASSERT( (remote_pavail() & new_remote_pwish).is_none_set() );
	// if a piece is currently available and wished, it is a bug in the remote peer, autodelete
	if( (remote_pavail() & new_remote_pwish).is_any_set() )	return autodelete();

	// backup the old remote_pwish
	bitfield_t	old_pwish	= remote_pwish();
	// update the remote_pwish
	m_remote_pwish	= new_remote_pwish;

	// notify the callback of this event
	bool	tokeep	= notify_callback( bt_swarm_full_event_t::build_pwish_dofield(&old_pwish
							, &m_remote_pwish) );
	if( !tokeep )	return false;
	// return tokeep
	return true;
}
/** \brief parse the received commands
 *
 * - it reads a whole command out of this->recved_data and then passes it to parse_recved_cmd()
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_recved_data()	throw()
{
	const bt_swarm_profile_t &	profile	= bt_swarm->profile();
	uint32_t			cmd_len;
	// if bt_swarm_full_t is not yet notified_as_open, notify it now
	if( !notified_as_open ){
		bool	tokeep	= notify_cnx_open();
		if( !tokeep )	return false;
	}
	// parse as many commands as possible
	while( recved_data.size() ){
		// read a command length
		try {
			recved_data.unserial_peek( cmd_len );
		}catch(serial_except_t &e){
			// if unserialization of the cmd_len failed, it is not yet fully received, return tokeep
			return true;
		}
		// if the cmd_len is greater than the profile.recv_cmd_maxlen, autodelete
		if( cmd_len > profile.recv_cmd_maxlen() )	return autodelete();
		// if recved_data are not long enough to contain the whole command, return tokeep
		if( recved_data.size() < sizeof(uint32_t) + cmd_len )	return true;
		// extract the whole command from the recved_data WITHOUT copy
		pkt_t	cmd_pkt;
		cmd_pkt.work_on_data_nocopy(recved_data.char_ptr() + sizeof(uint32_t), cmd_len);
		// update the dloaded_fulllen in bt_swarm_stats_t
		bt_swarm_stats_t &	swarm_stats	= bt_swarm->swarm_stats();
		swarm_stats.dloaded_fulllen(swarm_stats.dloaded_fulllen() + cmd_pkt.size());
		// parse this command and propagate its tokeep
		bool	tokeep	= parse_recved_cmd(cmd_pkt);
		if( !tokeep )	return false;
		// free the just-parsed command from the recved_data
		recved_data.head_free(sizeof(uint32_t) + cmd_len);
	}
	// return tokeep
	return true;
}
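/* The loop above implements the usual BitTorrent framing: a 4-byte big-endian
 * length prefix followed by <length> bytes of command payload, a zero length
 * being the KEEP_ALIVE. The same peek-then-consume loop over a raw buffer, as
 * a standalone sketch (parse_frames and cmd_maxlen are hypothetical names):
 */
#include <cstddef>
#include <cstdint>
#include <vector>

// consume every complete command in buf; return false to signal an oversized command
static bool parse_frames(std::vector<uint8_t> &buf, size_t cmd_maxlen)
{
	while( buf.size() >= 4 ){
		// peek the big-endian length prefix without consuming it
		uint32_t cmd_len = (uint32_t(buf[0]) << 24) | (uint32_t(buf[1]) << 16)
				 | (uint32_t(buf[2]) <<  8) |  uint32_t(buf[3]);
		// refuse oversized commands - the equivalent of recv_cmd_maxlen above
		if( cmd_len > cmd_maxlen )		return false;
		// if the command is not yet fully received, wait for more data
		if( buf.size() < 4 + size_t(cmd_len) )	break;
		// a real parser would hand (buf.data() + 4, cmd_len) to a command handler here
		// free the just-parsed command from the buffer head
		buf.erase(buf.begin(), buf.begin() + 4 + cmd_len);
	}
	return true;
}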
/** \brief parse a bt_cmdtype_t::BLOCK_DEL command
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_block_del(pkt_t &pkt)	throw()
{
	uint32_t	piece_idx;
	uint32_t	data_offset;
	uint32_t	data_len;
	// log to debug
	KLOG_DBG("enter pkt=" << pkt);
	try {
		pkt >> piece_idx;
		pkt >> data_offset;
		pkt >> data_len;
	}catch(serial_except_t &e){
		// if the unserialization failed, it is a bug in the protocol, autodelete
		return autodelete();
	}
	// ask the sendq to remove this BLOCK_REP
	bt_prange_t	bt_prange = bt_prange_t(piece_idx, data_offset, data_len);
	sendq->remove_one_block_rep( bt_cmd_t::build_block_rep(bt_prange) );
	// return tokeep
	return true;
}
/** \brief function to notify a dowish_index(pieceidx) from the bt_utmsgtype_t::PIECEWISH
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::notify_utmsg_dowish_index(size_t pieceidx)	throw()
{
	// sanity check - pieceidx MUST NOT simultaneously be in remote_pavail and remote_pwish
	DBGNET_ASSERT( remote_pavail().is_unavail(pieceidx) );
	// if pieceidx is currently available, it is a bug in the remote peer, autodelete the cnx
	if( remote_pavail().is_avail(pieceidx) )	return autodelete();

	// TODO what if the piece is already dowish ?
	// - may that happen in normal operation ?
	// - is that a bug ?
	// - if it is a bug, do the usual dbgnet_assert + autodelete
	// - if it can happen in normal operation, just ignore the operation

	// update the m_remote_pwish
	m_remote_pwish.set(pieceidx);

	// notify the callback of this event
	bool	tokeep	= notify_callback( bt_swarm_full_event_t::build_pwish_doindex(pieceidx) );
	if( !tokeep )	return false;
	// return tokeep
	return true;
}
bool SkTIJPEGImageDecoderEntry::WatchdogCallback(void* __item)
{
    bool restart = false;
    android::List<SkTIJPEGImageDecoderList_Item*>::iterator iter;
    SkTIJPEGImageDecoderList_Item* item = reinterpret_cast<SkTIJPEGImageDecoderList_Item*>(__item);

    android::Mutex::Autolock autolock(SkTIJPEGImageDecoderListLock);

    SkDebugf("SkTIJPEGImageDecoderEntry::WatchdogCallback() item=0x%x", item);

    if(item->Decoder->GetLoad() == 0 || item->Decoder->GetDeleteAttempts() >= MAX_DEL_ATTEMPTS)
    {
        if(item->Decoder->GetDeleteAttempts() > MAX_DEL_ATTEMPTS)
            SkDebugf(" Restart attempt limit reached. deleting...");

        // watchdog has expired, delete our reference to the Decoder object
        SkAutoTDelete<SkTIJPEGImageDecoder> autodelete(item->Decoder);

        // reset the strong pointer, okay to do here since its lifetime will last longer than the reset
        item->WatchdogTimer.clear();

        // delete item from the Decoder list
        for(iter = SkTIJPEGImageDecoderList.list.begin(); iter != SkTIJPEGImageDecoderList.list.end(); iter++)
        {
            if(((SkTIJPEGImageDecoderList_Item*)*iter) == item)
            {
                SkTIJPEGImageDecoderList.list.erase(iter);
                break;
            }
        }
    }else{
        // Decoder is still doing something, increment deletion attempts, restart watchdog
        SkDebugf(" Decoder is still doing something, restart watchdog");
        item->Decoder->IncDeleteAttempts();
        restart = true;
    }

    return restart;
    // return DeleteFromListIfDecoderNotWorking<SkTIJPEGImageDecoderList_Item*, SkTIJPEGImageDecoder>(__item, SkTIJPEGImageDecoderList);
}
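/* The restart-or-delete pattern above, in isolation: a minimal standalone sketch
 * (worker_t, watchdog_cb and max_attempts are hypothetical names, not Skia/Android
 * types). A busy worker gets up to max_attempts extra watchdog periods; an idle
 * one - or one that exhausted its attempts - is reclaimed immediately.
 */
#include <cstddef>

struct worker_t {
	size_t	load		= 0;	// pending work items
	size_t	attempts	= 0;	// delete attempts so far
};

// return true to restart the watchdog, false once the worker has been reclaimed
static bool watchdog_cb(worker_t *&worker, size_t max_attempts)
{
	if( worker->load == 0 || worker->attempts >= max_attempts ){
		// idle, or out of attempts: reclaim the worker
		delete worker;
		worker = nullptr;
		return false;
	}
	// still busy: burn one attempt and ask for a watchdog restart
	worker->attempts++;
	return true;
}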
bool	autodelete(const bt_err_t &bt_err)	throw()
{
	return autodelete(bt_err.to_string());
}
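/* For context, the tokeep/autodelete idiom every callback in this section relies
 * on: a hypothetical sketch of the string-taking overload that the bt_err_t one
 * above forwards to (the real body is not shown here, and the class qualifier is
 * assumed). It logs the reason, deletes the object, and returns false - the
 * 'dontkeep' value - so the caller stops touching the deleted object at once.
 */
bool	bt_swarm_full_t::autodelete(const std::string &reason)	throw()
{
	// log the reason of the deletion, if any
	if( !reason.empty() )	KLOG_ERR("autodelete due to " << reason);
	// delete the object itself
	nipmem_delete	this;
	// return dontkeep - callers MUST propagate it without touching this object again
	return false;
}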
/** \brief callback called when the timeout_t expire
 */
bool	http_sresp_cnx_t::neoip_timeout_expire_cb(void *userptr, timeout_t &cb_timeout)	throw()
{
	return autodelete("expired after " + expire_timeout.get_period().to_string());
}