/** \brief callback notifying the result of xmlrpc_client_t
 *
 * @return a tokeep for the xmlrpc_client_t
 */
bool	bt_cast_mdata_unpublish_t::neoip_xmlrpc_client_cb(void *cb_userptr, xmlrpc_client_t &cb_xmlrpc_client
					, const xmlrpc_err_t &xmlrpc_err
					, const datum_t &xmlrpc_resp)		throw()
{
	// convert the xmlrpc response into a bt_err_t and forward it to the caller
	// - the callback tokeep is propagated directly
	return notify_callback( handle_xmlrpc_resp(xmlrpc_err, xmlrpc_resp) );
}
/** \brief callback notified by \ref bt_io_write_t when to notify the result of the operation
 */
bool	bt_io_cache_blkwr_t::neoip_bt_io_write_cb(void *cb_userptr, bt_io_write_t &cb_bt_io_write
						, const bt_err_t &bt_err)	throw()
{
	// log to debug
	KLOG_ERR("enter");

	// if subio_write return an error, notify the callback 
	if( bt_err.failed() ){
		// TODO pass all the block in block_db into dirty ?
		// - TODO where to i handle the delayed_write error in bt_io_cache_t
		return notify_callback(bt_err);
	} 

	// delete the subio_write
	nipmem_zdelete	m_subio_write;

// pass all the blocks cleaning in blkwr_range in clean now
	// get all the blocks within blkwr_range
	std::list<bt_io_cache_block_t *>	block_db;
	block_db	= io_cache()->block_fully_included_in(blkwr_range());
	// notify the cleaned to all the blocks of block_db 
	std::list<bt_io_cache_block_t *>::iterator	iter;
	for(iter = block_db.begin(); iter != block_db.end(); iter++){
		bt_io_cache_block_t *	cache_block	= *iter;
		// if this cache_block is no more in cleaning, goto the next
		// NOTE: it is possible that this block has been modified DURING the 
		//       cleaning and so is now back in dirty 
		if( cache_block->state().is_not_cleaning() ){
			DBG_ASSERT( cache_block->state().is_dirty() );
			continue;
		}
		// notify cleaned for this block
		cache_block->notify_cleaned();
	}
	
	// notify the caller - NOTE: the cache_block is now owned by the caller
	bool tokeep	= notify_callback(bt_err_t::OK);
	if( !tokeep )	return false;
	
	// return dontkeep
	return false;
}
Ejemplo n.º 3
0
/** \brief callback notified when a kad_nsearch_t has an event to notify
 *
 * @return a tokeep for the kad_nsearch_t
 */
bool 	kad_closestnode_t::neoip_kad_nsearch_cb(void *cb_userptr, kad_nsearch_t &cb_kad_nsearch
					, const kad_event_t &kad_event)		throw()
{
	// log to debug
	KLOG_DBG("kad_event=" << kad_event);
	// sanity check - only nsearch_findnode_ok() events are expected here
	DBG_ASSERT( kad_event.is_nsearch_findnode_ok() );

	// forward the kad_event_t as-is and propagate the callback tokeep
	return notify_callback(kad_event);
}
Ejemplo n.º 4
0
/** \brief callback called when the neoip_timeout expire
 */
bool nslan_query_t::expire_timeout_cb()	throw()
{
	// log to debug
	KLOG_DBG("nslan_query_t timedout");
	// notify the event
	nslan_event_t	nslan_event = nslan_event_t::build_timedout("");
	// stop the timeout
	expire_timeout.stop();
	// notify the caller
	return notify_callback(nslan_event);
}
Ejemplo n.º 5
0
/** \brief parse a bt_cmdtype_t::UNWANT_REQ command
 * 
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_unwant_req_cmd(pkt_t &pkt)			throw()
{
	// the remote peer no longer wants to send requests - record it locally
	m_other_dowant_req	= false;

	// notify the callback of this event and propagate its tokeep directly
	return notify_callback( bt_swarm_full_event_t::build_unwant_req() );
}
Ejemplo n.º 6
0
/** \brief callback called when the \ref zerotimer_t expire
 *
 * @return a tokeep for the zerotimer_t
 */
bool	udp_itor_t::neoip_zerotimer_expire_cb(zerotimer_t &cb_zerotimer, void *userptr)	throw()
{
	// hand the udp_full ownership over to the notified event
	udp_event_t	notified_event	= udp_event_t::build_cnx_established(udp_full);
	udp_full	= NULL;
	// notify the caller and propagate its tokeep
	return notify_callback(notified_event);
}
Ejemplo n.º 7
0
/** \brief parse a bt_cmdtype_t::UNAUTH_REQ command
 * 
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_unauth_req_cmd(pkt_t &pkt)			throw()
{
	// the remote peer no longer authorizes requests - record it locally
	m_other_doauth_req	= false;

	// notify the callback of this event and propagate its tokeep directly
	// - it is up to the scheduler to delete all pending sched_request
	return notify_callback( bt_swarm_full_event_t::build_unauth_req() );
}
/** \brief callback notified by \ref ndiag_aview_t to notify an event
 */
bool 	casti_inetreach_httpd_t::neoip_ndiag_aview_cb(void *cb_userptr, ndiag_aview_t &cb_ndiag_aview
						, const ipport_addr_t &new_listen_pview)	throw()
{
	// log to debug
	KLOG_ERR("enter new_listen_pview=" << new_listen_pview);
	
	// notify the caller that ndiag_aview_t may have changed
	bool	tokeep	= notify_callback(new_listen_pview);
	if( !tokeep )	return false;
	
	// return tokeep
	return true;
}
Ejemplo n.º 9
0
/** \brief function to notify a nowish_index(pieceidx) from the bt_utmsgtype_t::PIECEWISH
 * 
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::notify_utmsg_nowish_index(size_t pieceidx)		throw()
{
	// TODO what if the piece is already nowish

	// clear this pieceidx from the remote piece-wish bitfield
	m_remote_pwish.clear(pieceidx);
	// notify a pwish_noindex event and propagate the callback tokeep directly
	return notify_callback( bt_swarm_full_event_t::build_pwish_noindex(pieceidx) );
}
Ejemplo n.º 10
0
/** \brief callback notified by \ref upnp_call_t on completion
 *
 * @return a tokeep for the upnp_call_t
 */
bool	upnp_call_delport_t::neoip_upnp_call_cb(void *cb_userptr, upnp_call_t &cb_call
        , const upnp_err_t &upnp_err, const strvar_db_t &strvar_db)	throw()
{
	// log to debug
	KLOG_DBG("enter upnp_err=" << upnp_err << " strvar_db=" << strvar_db
			<< " callback=" << callback);

	// a NULL callback means nobody to notify - see the file header for details
	if( !callback ){
		// nobody will ever see this error in a callback, so log it here
		if( upnp_err.failed() )	KLOG_ERR("cant delete port due to " << upnp_err);
		// autodelete and return dontkeep
		nipmem_delete this;
		return false;
	}

	// notify the caller: forward the upnp_err_t on failure, upnp_err_t::OK on success
	if( upnp_err.failed() )	return notify_callback(upnp_err);
	return notify_callback(upnp_err_t::OK);
}
/** \brief callback notified by \ref upnp_call_addport_t when completed
 */
bool 	upnp_getportendian_test_t::neoip_upnp_call_addport_cb(void *cb_userptr, upnp_call_addport_t &cb_call_addport
						, const upnp_err_t &cb_upnp_err)	throw()
{
	upnp_err_t	upnp_err	= cb_upnp_err;
	// log to debug
	KLOG_DBG("enter upnp_err=" << upnp_err);
	// delete the upnp_call_addport_t
	nipmem_zdelete	call_addport;

	// if the upnp_call_addport_t failed, forward the error to the caller
	if( upnp_err.failed() )	return notify_callback(upnp_err);
	
	// set the is_bound to true
	is_bound	= true;
	
	// launch the full_getport
	upnp_err	= full_getport_launch();
	if( upnp_err.failed() )	return notify_callback(upnp_err);

	// return dontkeep
	return false;
}
Ejemplo n.º 12
0
/** \brief callback to notify when the fdwatch_t has events to report
 *
 * - the fdwatch_t watches the connecting socket: an output condition means
 *   the connection has been established, an error condition means it failed
 *
 * @return a tokeep for the fdwatch_t
 */
bool	tcp_itor_t::neoip_fdwatch_cb( void *cb_userptr, const fdwatch_t &cb_fdwatch
						, const fdwatch_cond_t &cb_fdwatch_cond )	throw()
{
	// log to debug
	KLOG_DBG("enter fdwatch_cond=" << cb_fdwatch_cond);
	// if fdwatch.is_output() then the connection has been established
	if( cb_fdwatch_cond.is_output() ){
		// create the tcp_full_t
		tcp_full_t *	tcp_full	= nipmem_new tcp_full_t(local_addr, remote_addr, fdwatch);
		// mark the fdwatch as unused - DO NOT close it as it now belongs to neoip_tcp_full_t
		fdwatch		= NULL;
		// backup the object_slotid of the tcp_full_t - to be able to return its tokeep value
		// - in fact this should be a fdwatch_t object_slotid_t but as fdwatch_t ownership
		//   is transfered to tcp_full_t and as tcp_full_t dont delete fdwatch_t except if it is
		//   deleted, it works and avoid to have a object_slotid_t for every fdwatch_t
		slot_id_t	tcp_full_slotid	= tcp_full->get_object_slotid();
		// notify the caller
		notify_callback( tcp_event_t::build_cnx_established(tcp_full) );
		// if the fdwatch_t has not been deleted, so 'tokeep' else return 'dontkeep'
		return object_slotid_tokeep(tcp_full_slotid);
	}
	
	// if fdwatch.is_error() then the connection attempt failed
	if( cb_fdwatch_cond.is_error() ){
		std::string	reason	 = "undetermined";
		int		sockopt_val;
		socklen_t	sockopt_len = sizeof(sockopt_val);
		// get the error code via SO_ERROR to build a human-readable reason
		if(getsockopt(cb_fdwatch.get_fd(), SOL_SOCKET, SO_ERROR, &sockopt_val, &sockopt_len)==0)
			reason	= neoip_strerror(sockopt_val);
		// notify the caller of the refused connection and propagate the tokeep
		tcp_event_t tcp_event = tcp_event_t::build_cnx_refused(reason);
		return notify_callback(tcp_event);
	}

	// keep the fdwatch running
	return true;
}
Ejemplo n.º 13
0
/** \brief callback notified by \ref file_aread_t when to notify the result of the operation
 */
bool	bt_piece_cpuhash_t::neoip_bt_io_read_cb(void *cb_userptr, bt_io_read_t &cb_bt_io_read
				, const bt_err_t &io_read_err, const datum_t &read_data)	throw()
{
	bt_id_t	piecehash;
	// log to debug
	KLOG_DBG("enter io_read_err=" << io_read_err << " read_data.size()=" << read_data.size());

	// if the bt_io_read_t failed, notify the error to the caller
	if( io_read_err.failed() )	return notify_callback(io_read_err, bt_id_t());	

	// compute the piecehash over the read_data
	skey_auth_t	skey_auth("sha1/nokey/20");
	skey_auth.init_key(NULL, 0);
	skey_auth.update(read_data);
	piecehash	= bt_id_t(skey_auth.get_output());
	DBG_ASSERT( bt_id_t::size() == skey_auth.get_output().size() );

	// delete the bt_io_read_t
	nipmem_zdelete	bt_io_read;

	// notify the result to the caller
	return notify_callback(bt_err_t::OK, piecehash);
}
/** \brief callback notified by \ref upnp_call_getport_t when completed
 *
 * @return a tokeep for the upnp_call_getport_t which notified this callback
 */
bool 	upnp_getportendian_test_t::full_getport_cb(void *cb_userptr, upnp_call_getport_t &cb_call_getport
						, const upnp_err_t &cb_upnp_err)	throw()
{
	upnp_call_getport_t *	call_getport	= &cb_call_getport;
	const upnp_portdesc_t &	portdesc	= call_getport->portdesc();
	// NOTE: fixed a stray double semicolon on the next line
	upnp_err_t		upnp_err	= cb_upnp_err;
	// log to debug
	KLOG_DBG("enter upnp_err=" << upnp_err);
	
	// if call_getport succeed, then it show the norendian/revendian
	// - NOTE: the description_str is tested to ensure it has been allocated by this apps
	//   and not another apps which won a race and bind it between the itor_getport and 
	//   the full_getport
	if( upnp_err.succeed() && portdesc.desc_str() == "upnp revendian testing"
				&& portdesc.ipport_lview().port() == port_lview ){
		// set the m_is_revendian depending on which upnp_call_getport_t succeeded
		if(full_getport_norendian == call_getport)	m_is_revendian	= false;
		if(full_getport_revendian == call_getport)	m_is_revendian	= true;
		// log to debug
		KLOG_DBG("is_revendian()=" << is_revendian() );
		// notify the caller, that the result is known
		return notify_callback(upnp_err_t::OK);
	}

	// delete the upnp_call_getport_t which notified this callback
	if(full_getport_norendian == &cb_call_getport)	nipmem_zdelete	full_getport_norendian;
	if(full_getport_revendian == &cb_call_getport)	nipmem_zdelete	full_getport_revendian;

	// if both full_getport are completed with non succeeding, assume it is normal endian
	if( !full_getport_norendian && !full_getport_revendian ){
		std::string	reason	= "Impossible to complete the full_getport.. upnp_call_getport_t not supported ?";
		return notify_callback(upnp_err_t(upnp_err_t::ERROR, reason));
	}
	
	// return dontkeep
	return false;
}
Ejemplo n.º 15
0
/** \brief Autodelete this object - aka notify CNX_CLOSED to the scheduler and then delete the object
 *
 * @return always false - to 'simulate' a dontkeep for the caller
 */
bool	bt_swarm_full_t::autodelete()	throw()
{
#if 0	// NOTE: move to dtor - but afraid of the nested notification
	// - so i leave it there 
	// if notified_as_open, notify the event bt_swarm_full_event_t::CNX_CLOSED
	if( notified_as_open ){
		bool	tokeep	= notify_callback(bt_swarm_full_event_t::build_cnx_closed());
		if( !tokeep )	return false;
	}
#endif
	// autodelete itself - no member may be touched after this line
	nipmem_delete	this;
	// return false - to 'simulate' a dontkeep
	return false;
}
Ejemplo n.º 16
0
/** \brief callback notified by \ref file_aread_t when to notify the result of the operation
 *
 * - accumulates the read data and either notifies the caller (when all the
 *   bt_prange_t are done) or launches the next bt_prange_t
 *
 * @return a tokeep for the file_aread_t (always false - it is deleted here)
 */
bool	bt_io_pfile_read_t::neoip_file_aread_cb(void *cb_userptr, file_aread_t &cb_file_aread
				, const file_err_t &file_err, const datum_t &aread_data)	throw()
{
	// log to debug
	KLOG_DBG("enter file_err=" << file_err << " aread_data.size()=" << aread_data.size());

	// if the file_aread_t failed, notify the error to the caller
	if( file_err.failed() )	return notify_callback(bt_err_from_file(file_err), datum_t());	

#if 1	// TODO sketching for preallocation
	// sanity check - the recved_data is supposed to be preallocated
	DBG_ASSERT( data_queue.tail_freelen() >= aread_data.length());
#endif
	// add the just read data to the bytearray_t
	data_queue.append(aread_data);
	
	// delete the file_aread_t
	nipmem_zdelete	file_aread;
	// delete the file_aio_t
	nipmem_zdelete	file_aio;
	
	// update the prange_idx
	prange_idx++;

	// if all the bt_prange_t has been successfully performed, notify the caller
	if( prange_idx == prange_arr.size() )
		return notify_callback(bt_err_t::OK, data_queue.to_datum(datum_t::NOCOPY));	
	
	// else launch the next bt_prange_t
	bt_err_t	bt_err;
	bt_err		= launch_next_prange();
	if( bt_err.failed() )		return notify_callback( bt_err, datum_t() );

	// return dontkeep - the next bt_prange_t has been launched, the result
	// will be notified to the caller when it completes
	return false;
}
Ejemplo n.º 17
0
/** \brief callback notified when a kad_store_t has an event to notify
 *
 * @return a tokeep for the kad_store_t (always false - it is deleted here)
 */
bool 	kad_publish_t::neoip_kad_store_cb(void *cb_userptr, kad_store_t &cb_kad_store
						, const kad_event_t &kad_event)		throw()
{
	// log to debug
	KLOG_DBG("kad_event=" << kad_event);
	// sanity check - only store_ok() events are expected here
	DBG_ASSERT( kad_event.is_store_ok() );

	// the kad_store_t is completed - delete it and mark it unused
	nipmem_zdelete kad_store;

	// forward the kad_event_t to the caller
	// - the callback tokeep is ignored as the kad_store_t is already deleted
	notify_callback( kad_event );
	// return dontkeep in all cases
	return false;
}
/** \brief Handle fatal udp_event_t received by the udp_client_t
 * 
 * - this function deletes the current udp_client_t and recreates one bound to
 *   the same local_ipport/remote_ipport pair.
 * - it is made to ignore udp_client_t fatal errors while sending
 *   ESTARELAY_CNX_I2R_ACK.
 * 
 * Motivation
 * - the ESTARELAY_CNX_I2R_ACK is sent immediatly after receiving
 *   ESTARELAY_CNX_REQUEST and replying a ESTARELAY_CNX_REPLY (containing the
 *   local_addr_pview)
 * - so if the ESTARELAY_CNX_I2R_ACK reaches the nat box of the itor before it
 *   received the ESTARELAY_CNX_REPLY and opened its own local_addr_pview, the
 *   nat box may return an icmp error which would cause udp_client_t to report
 *   a fatal udp_event_t
 * - so the udp_client_t is restarted at each fatal udp_event_t during the
 *   whole life of its ntudp_resp_estarelay_t
 * 
 * @return a 'tokeep' for the udp_client_t which generated this fatal event
 */
bool ntudp_resp_estarelay_t::handle_udp_fatal_event()	throw()
{
	// save the addresses of the failing udp_client_t before deleting it
	ipport_addr_t	local_ipport	= udp_client->get_local_addr();
	ipport_addr_t	remote_ipport	= udp_client->get_remote_addr();
	// delete the failing udp_client_t
	nipmem_delete	udp_client;
	// recreate a udp_client_t bound to the same local/remote addresses
	udp_client	= nipmem_new udp_client_t();
	inet_err_t	inet_err	= udp_client->set_local_addr(local_ipport);
	if( inet_err.succeed() )	inet_err = udp_client->start(remote_ipport, this, NULL);
	// if the restart itself failed, notify the caller of the failure
	if( inet_err.failed() )	notify_callback(NULL, pkt_t(), pkt_t());

	// return dontkeep as the udp_client_t which produced this event has been deleted
	return false;
}
Ejemplo n.º 19
0
/* Wake every throttle state whose wait-queue entry expired, then rearm the
 * queue timer for the remaining entries. */
void li_throttle_waitqueue_cb(liWaitQueue *wq, gpointer data) {
	liWaitQueueElem *elem;
	UNUSED(data); /* should contain worker */

	throttle_debug("li_throttle_waitqueue_cb\n");

	for (elem = li_waitqueue_pop(wq); NULL != elem; elem = li_waitqueue_pop(wq)) {
		liThrottleState *state = LI_CONTAINER_OF(elem, liThrottleState, wqueue_elem);
		liThrottleNotifyCB cb = state->notify_callback;
		gpointer cb_data = elem->data;

		/* skip entries with no data, no callback, or nobody interested */
		if (NULL != cb_data && NULL != cb && 0 != state->interested) {
			cb(state, cb_data);
		}
	}
	li_waitqueue_update(wq);
}
Ejemplo n.º 20
0
/** \brief Launch the next bt_piece_cpuhash_t
 * 
 * - if some pieces remain to be checked, launch a bt_piece_cpuhash_t on the
 *   next one; otherwise build the resulting bt_swarm_resumedata_t and notify
 *   the caller
 * 
 * @return a tokeep for the bt_check_t
 */
bool	bt_check_t::launch_piece_cpuhash()	throw()
{
	// sanity check - the piece_cpuhash MUST be null
	DBG_ASSERT( !piece_cpuhash );
	
	// if cur_pieceidx has not reach the end, launch the bt_piece_cpuhash_t for cur_pieceidx
	if( cur_pieceidx != tocheck_bfield.size() ){
		piece_cpuhash	= nipmem_new bt_piece_cpuhash_t(cur_pieceidx, bt_mfile, bt_io_vapi, this, NULL);
		// return tokeep
		return true;
	}
	
	// NOTE: here all the pieces to be checked have been checked, notify the result to the caller 
	
	bt_err_t		bt_err	= bt_err_t::OK;
	bt_swarm_resumedata_t	result_resumedata;
	// a non-ok bt_err_t is returned IIF a piece_idx was supposed to be available but is not
	// - if check_policy is EVERY_PIECE
	// - if a swarm_resumedata has been provided by the caller
	// - if this caller resumedata declare piece available but are not
	// NOTE: the xor flags any mismatch between the declared and the computed availability
	if( check_policy == bt_check_policy_t::EVERY_PIECE && !caller_resumedata.is_null()
			&& (caller_resumedata.pieceavail_local() ^ isavail_bfield).is_any_set() ){
		std::string	reason	= "piece declared available in swarm_resumdata but is not in practice";
		bt_err	= bt_err_t(bt_err_t::ERROR, reason);
	}
	// if the caller_resumedata is not null, copy it from the caller, else build a minimal one
	if( !caller_resumedata.is_null() )	result_resumedata	= caller_resumedata;
	else					result_resumedata.bt_mfile(bt_mfile);
	// set the pieceavail_local in the result_resumedata
	if( check_policy == bt_check_policy_t::EVERY_PIECE ){
		// replace the result_resumedata.pieceavail_local by the isavail_bfield
		result_resumedata.pieceavail_local( bt_pieceavail_t(isavail_bfield) );
	}else{
		// sanity check - a PARTIAL_PIECE check requires a preexisting pieceavail_local
		DBG_ASSERT( !result_resumedata.pieceavail_local().is_null() );
		DBG_ASSERT( check_policy == bt_check_policy_t::PARTIAL_PIECE );
		// set the result_resumedata.pieceavail_local by the boolean or of the previous one and 
		// the isavail_bfield
		bitfield_t	pieceavail_sum	= isavail_bfield | result_resumedata.pieceavail_local();
		result_resumedata.pieceavail_local( bt_pieceavail_t(pieceavail_sum) );
	}

	// notify the caller
	return notify_callback(bt_err, result_resumedata);
}
Ejemplo n.º 21
0
/** \brief callback notified with fdwatch_t has an condition to notify
 *
 * - reads the child stdout into stdout_barray; an error (or EOF) condition
 *   reaps the child and notifies the accumulated output with its exit status
 *
 * @return a tokeep for the fdwatch_t
 */
bool	asyncexe_t::stdout_fdwatch_cb( void *cb_userptr, const fdwatch_t &cb_fdwatch
						, const fdwatch_cond_t &fdwatch_cond )	throw()
{
	int	readlen		= 0;	
	// log the event
	KLOG_DBG("enter fdwatch_cond=" << fdwatch_cond);

	// if the condition is input
	if( fdwatch_cond.is_input() ){
		size_t	recv_max_len	= 16*1024;
		void *	buffer		= nipmem_alloca(recv_max_len);
		// read data in the socket
		readlen = read(stdout_fdwatch->get_fd(), buffer, recv_max_len);
		// if readlen < 0, treat it as error
		// - due to a weird bug in glib|linux, G_IO_ERR/HUP isnt received
		//   so there is a test if (cond.is_input() && readlen==0) fallthru
		//   and treat it as error
		if( readlen < 0 )	readlen = 0;
		// if some data have been read, add them to the stdout_barray
		if( readlen > 0 )	stdout_barray.append(buffer, readlen);
		// log to debug
		KLOG_DBG("readlen=" << readlen);
	}

	// handle a connection error (or EOF, treated as error - see NOTE above)
	if( fdwatch_cond.is_error() || (fdwatch_cond.is_input() && readlen == 0) ){
		// wait for the pid to get the result
		int	status	= 0;
#ifndef _WIN32
		int	ret	= waitpid(childpid, &status, 0);
		if( ret == -1 && errno == ECHILD )	status = 0;
		else if( ret != childpid )		status = -1;
		// extract the return status
		status		= WEXITSTATUS(status);
#endif
		// log the child exit status - NOTE(review): KLOG_ERR on this
		// non-error path looks like a debugging leftover; consider KLOG_DBG
		KLOG_ERR("childpid=" << childpid << " return status=" << status );
		KLOG_ERR("received error. now recved_data=" << stdout_barray.to_datum() );
		// else notify the caller with a success
		return notify_callback(libsess_err_t::OK, stdout_barray, status);
	}

	// return tokeep
	return true;
}
Ejemplo n.º 22
0
/** \brief handle the timeout expiration (in c++, the c part is handled by external_callback)
 */
void timeout_t::cpp_expired_cb()				throw()
{
	// logging for debug
	KLOG_DBG("enter");

	// notify the expiration to the callback
	// - the callback may stop this timeout_t (via timeout_t::stop()) or even
	//   delete it. a false tokeep means it has been deleted, in which case
	//   no field of this object may be touched anymore.
	if( !notify_callback() )	return;

	// relaunch the external timer IIF the timeout_t is still started and the
	// external timer is not
	// - "still started" = "has not been stopped by the callback"
	// - the external timer may have been changed during the callback if
	//   timeout_t::start() has been called
	if( is_started() && !external_is_started() )	launch_timerext(period);
}
Ejemplo n.º 23
0
/** \brief notify bt_swarm_full_event_t::cnx_opened and initial commands send
 * 
 * - it MUST be called before doing any other bt_swarm_full_event_t::notification
 *
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::notify_cnx_open()	throw()
{
	// queue a bt_cmd_t::PIECE_BFIELD
	sendq->queue_cmd( bt_cmd_t::build_piece_bfield() );

	// send an utmsg handshake IIF the remote peer has bt_protoflag_t::UT_MSGPROTO
	if( remote_protoflag.fully_include(bt_protoflag_t::UT_MSGPROTO) )
		full_utmsg()->send_handshake();

	// remember that the cnx_opened event has been notified
	notified_as_open	= true;

	// notify the cnx_opened event and propagate the callback tokeep directly
	return notify_callback(bt_swarm_full_event_t::build_cnx_opened());
}
Ejemplo n.º 24
0
/** \brief function to notify a nowish_field(bitfield_t) from the bt_utmsgtype_t::PIECEWISH
 * 
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::notify_utmsg_dowish_field(const bitfield_t &new_remote_pwish)	throw()
{
	// sanity check - a pieceidx MUST NOT simultaneously be in remote_pavail and remote_pwish
	DBGNET_ASSERT( (remote_pavail() & new_remote_pwish).is_none_set() );
	// a piece both available and wished is a bug in the remote peer - autodelete the cnx
	if( (remote_pavail() & new_remote_pwish).is_any_set() )	return autodelete();

	// replace the remote_pwish, keeping a copy of the old one for the event
	bitfield_t	old_pwish	= remote_pwish();
	m_remote_pwish	= new_remote_pwish;

	// notify a pwish_dofield event and propagate the callback tokeep directly
	return notify_callback( bt_swarm_full_event_t::build_pwish_dofield(&old_pwish, &m_remote_pwish) );
}
Ejemplo n.º 25
0
/** \brief notify a fatal event
 * 
 * - TODO what the f**k is this function ?
 *   - it seems in relation with socket_full_udp_close
 *
 * @return always true - the connection is kept in anycase (see NOTE below)
 */
bool	socket_full_udp_t::notify_fatal_event(socket_event_t socket_event)	throw()
{
	// sanity check - here the socket_event MUST be fatal
	DBG_ASSERT( socket_event.is_fatal() );
	// sanity check - only one fatal event MUST be reported
	// - except when the callback is a socket_full_close_udp_t handling the closure
	if( typeid(*callback) != typeid(socket_full_close_udp_t))
		DBG_ASSERT( reported_fatal_event.get_value() == socket_event_t::NONE );

	// keep the reported fatal event for socket_full_close_udp_t 
	reported_fatal_event	= socket_event;

	// notify the event
	notify_callback(socket_event);
	// NOTE: here the callback 'tokeep' is ignored as the event is fatal
	// so the connection will still be used internally to complete the closure.
	// so the connection is kept in anycase. even if the caller will likely
	// destroy it
	return true;	
}
Ejemplo n.º 26
0
/** \brief parse a bt_cmdtype_t::PIECE_ISAVAIL command
 * 
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::parse_piece_isavail_cmd(pkt_t &pkt)			throw()
{
	uint32_t	piece_idx;
	try {
		pkt >> piece_idx;
	}catch(serial_except_t &e){
		// if the unserialization failed, it is a bug in the protocol, autodelete
		return autodelete();
	}
	// log to debug
	KLOG_DBG("piece_idx=" << piece_idx);
	// if the piece_idx is not in the proper range, autodelete
	if( piece_idx >= remote_pavail().nb_piece() )	return autodelete();	

	// if piece_idx piece is already marked as avail, do nothing 
	if( remote_pavail().is_avail(piece_idx) )	return true;

	// if local peer was not interested and this piece_idx is not locally available, make it interested
	if( bt_swarm->local_pavail().is_unavail(piece_idx) && !local_dowant_req() ){
		m_local_dowant_req	= true;
		sendq->queue_cmd( bt_cmd_t::build_dowant_req() );
	}

	// if bt_swarm_full_t DO NOT support bt_utmsgtype_t::PIECEWISH, update piecewish
	// - an available piece can no longer be wished, so notify nowish for it
	if( full_utmsg()->no_support(bt_utmsgtype_t::PIECEWISH) ){
		bool	tokeep	= notify_utmsg_nowish_index(piece_idx);
		if( !tokeep )	return false;
	}

	// mark the piece as available in the remote_pavail
	m_remote_pavail.mark_isavail(piece_idx);
	// notify the callback of this event
	bool	tokeep	= notify_callback( bt_swarm_full_event_t::build_piece_isavail(piece_idx) );
	if( !tokeep )	return false;

	// if the local peer is seed and the remote one too, close the connection
	// - NOTE: it MUST be done after the notification to have the full and sched in sync when deleting
	if( bt_swarm->is_seed() && remote_pavail().is_fully_avail() )	return autodelete();	

	// return tokeep
	return true;
}
Ejemplo n.º 27
0
/** \brief callback notified by \ref tcp_full_t when to notify an event
 *
 * @return a tokeep for the tcp_full_t
 */
bool	tcp_client_t::neoip_tcp_full_event_cb(void *userptr, tcp_full_t &cb_tcp_full
					, const tcp_event_t &tcp_event)	throw()
{
	// log to debug
	KLOG_DBG("enter event=" << tcp_event);
	// sanity check - the event MUST be full_ok
	DBG_ASSERT( tcp_event.is_full_ok() );
	// sanity check - here the tcp_itor MUST be NULL
	DBG_ASSERT( tcp_itor == NULL );

 	// backup the object_slotid of the tcp_full_t - may be needed in case of stolen tcp_full_t
 	slot_id_t	tcp_full_slotid	= tcp_full->get_object_slotid();
 
	// simply forward the event whatever the event type
	bool tokeep = notify_callback(tcp_event);
	// if tokeep == 'dontkeep' compute the tcp_full_t tokeep as is may have stolen during the cb
	// - the slotid lookup tells whether the tcp_full_t still exists after the callback
	if( !tokeep )	return object_slotid_tokeep(tcp_full_slotid);
	// return 'tokeep'
	return true;
}
Ejemplo n.º 28
0
/** \brief callback to notify when the fdwatch_t has events to report
 *
 * - accept the incoming tcp connection and notify a cnx_established tcp_event_t
 *   carrying a newly created tcp_full_t to the caller
 *
 * @return a tokeep for the fdwatch_t
 */
bool	tcp_resp_t::neoip_fdwatch_cb( void *cb_userptr, const fdwatch_t &cb_fdwatch
						, const fdwatch_cond_t &fdwatch_cond )	throw()
{
	struct 	sockaddr_in	addr_in;
	ipport_addr_t		local_addr, remote_addr;
	// log to debug
	KLOG_DBG("enter fdwatch_cond=" << fdwatch_cond);
	// sanity check
	DBG_ASSERT( fdwatch_cond.is_input() );	

	// accept the socket
	socklen_t 	addrlen = sizeof(addr_in);
	int 		full_fd	= accept(fdwatch->get_fd(),(struct sockaddr*)&addr_in, &addrlen);
	DBG_ASSERT( full_fd >= 0 );
	// if accept() failed (e.g. ECONNABORTED), keep the fdwatch_t running
	if( full_fd < 0 )	return true;
	
	// get the remote addr
	remote_addr	= ipport_addr_t(addr_in);

	// get the local address
	// - It is impossible to simply read the listen_addr as it may be ANY addr
	addrlen		= sizeof(addr_in);
	if( getsockname(full_fd, (struct sockaddr *)&addr_in, &addrlen) ){
		KLOG_ERR("cant getsockname() socket");
		// close the accepted fd - it was leaked before this fix
		close(full_fd);
		return true;
	}
	local_addr	= ipport_addr_t(addr_in);

	// log to debug
	KLOG_DBG("received tcp connection. local_addr=" << local_addr << " remote_addr=" << remote_addr );	

	// create the fdwatch_t for tcp_full_t - it takes ownership of full_fd
	fdwatch_t *	full_fdwatch	= nipmem_new fdwatch_t();
	full_fdwatch->start(full_fd, fdwatch_t::NONE, NULL, NULL);
	// create the tcp_full_t
	tcp_full_t *	tcp_full;
	tcp_full	= nipmem_new tcp_full_t(local_addr, remote_addr, full_fdwatch);
	tcp_full->profile(profile().full());
	// notify the caller and propagate its tokeep
	return notify_callback( tcp_event_t::build_cnx_established(tcp_full) );
}
Ejemplo n.º 29
0
/** \brief function to notify a dowish_index(pieceidx) from the bt_utmsgtype_t::PIECEWISH
 * 
 * @return a tokeep for the whole bt_swarm_full_t
 */
bool	bt_swarm_full_t::notify_utmsg_dowish_index(size_t pieceidx)		throw()
{
	// sanity check - pieceidx MUST NOT simultaneously be in remote_pavail and remote_pwish
	DBGNET_ASSERT( remote_pavail().is_unavail(pieceidx) );
	// a wished piece which is already available is a bug in the remote peer - autodelete
	if( remote_pavail().is_avail(pieceidx) )	return autodelete();
	
	// TODO what if the piece is already dowish
	// - may that happen in normal operation ?
	// - is that a bug ?
	// - if it is a bug, do the usual dbgnet_assert + autodelete
	// - if it can happen in normal operation, just ignore the operation  
	
	// mark the pieceidx as wished in the remote piece-wish bitfield
	m_remote_pwish.set(pieceidx);
	// notify a pwish_doindex event and propagate the callback tokeep directly
	return notify_callback( bt_swarm_full_event_t::build_pwish_doindex(pieceidx) );
}
Ejemplo n.º 30
0
/** \brief callback called when the \ref zerotimer_t expire
 * 
 * - this zerotimer_t decouples the incoming packet delivery from the callback
 *   notification: the notified callback may delete any object (nslan_peer_t
 *   and nslan_listener_t included), which would create 'tokeep' issues if
 *   notified directly from the delivery path.
 *
 * @return a tokeep for the zerotimer_t
 */
bool	nslan_query_t::neoip_zerotimer_expire_cb(zerotimer_t &cb_zerotimer, void *userptr) throw()
{
	// log to debug
	KLOG_DBG("enter");
	// notify one got_record event per queued nslan_rec_t
	while( !recved_record_queue.empty() ){
		// copy then unqueue the front element
		nslan_rec_t	nslan_rec	= recved_record_queue.front().first;
		ipport_addr_t	src_addr	= recved_record_queue.front().second;
		recved_record_queue.pop_front();
		// notify a got_record event - stop at once if the callback returned dontkeep
		bool	tokeep	= notify_callback( nslan_event_t::build_got_record(nslan_rec, src_addr) );
		if( !tokeep )	return false;
	}
	// return tokeep
	return true;
}