PDU& PacketSenderGeneric::send_recv(PDU& spdu, SharedSender& shared_sender, const NetworkInterface& iface, bool promisc, double* rdelay, double* edelay)
	{	
		//wait for the previous packet's response before reusing the slot
		//(TODO: busy-waiting here is not ideal; rework in a future change)
		while (sent_pdu) {
			std::this_thread::sleep_for(std::chrono::milliseconds(1000));
		}
		sent_pdu = spdu.clone();

		//start sniff task
		SnifferConfiguration config;
		config.set_promisc_mode(promisc);
		config.set_snap_len(65535);
		config.set_timeout(10);

		//Critical section: guard sniffer creation, releasing the lock even if the constructor throws
		std::unique_lock<std::mutex> sniffer_lock(SHARED_SNIFFER_MUTEX);
		Sniffer sniffer{ iface.name(), config };
		sniffer_lock.unlock();

		//compute the response delay only if the caller asked for it
		const bool compute_delay = (rdelay != nullptr);
		std::future<void> fresp(std::async(std::launch::async, &PacketSenderGeneric::sniff_task, this, &sniffer, compute_delay));

		//send packet; use a steady clock so the effective delay measures wall time
		//(std::clock() measures CPU time, which undercounts while blocked)
		const auto effective_sent_time = std::chrono::steady_clock::now();
		std::cout << "Registering packet to send!" << std::endl;
		shared_sender.register_packet(sent_pdu, NetworkInterface(iface));

		//std::cout << "waiting for max " << timeout << "..." << std::endl;
		std::future_status status = fresp.wait_for(std::chrono::seconds(timeout));

		//raise exception in case of timeout
		if (status == std::future_status::timeout)
		{
			sniffer.stop_sniff();
			sent_pdu = nullptr; //TODO: clarify ownership of the cloned PDU; if shared_sender does not free it, this leaks
			throw timeout_elapsed();
		}
		else if (status == std::future_status::deferred)
			std::cout << "DEBUG: packet sniffing deferred... shouldn't happen" << std::endl;

		//Handle the response packet
		if (edelay)
			*edelay = std::chrono::duration<double, std::milli>(
			              std::chrono::steady_clock::now() - effective_sent_time).count();
		if (rdelay) {
			*rdelay = response_delay;
		}

		PDU& response(*this->response_pdu);

		//Clean up state for the next call (see the ownership note above)
		sent_pdu = nullptr;
		response_delay = 0;
		//response_pdu = nullptr;

		return response;
	}
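
A hedged usage sketch for this sender: it assumes the example's own types (PacketSenderGeneric, SharedSender, the timeout_elapsed exception) plus libtins; the ICMP probe and all names below are illustrative, not part of the original code.

// Illustrative only: PacketSenderGeneric, SharedSender and timeout_elapsed
// are assumed from the example's codebase; the probe packet is hypothetical.
#include <iostream>
#include <tins/tins.h>

void probe_host(PacketSenderGeneric& sender, SharedSender& shared,
                const Tins::NetworkInterface& iface)
{
	// Build a minimal ICMP echo probe (destination address is a placeholder).
	Tins::IP ping = Tins::IP("192.0.2.1") / Tins::ICMP();
	double response_ms = 0, effective_ms = 0;
	try {
		sender.send_recv(ping, shared, iface, /*promisc=*/false,
		                 &response_ms, &effective_ms);
		std::cout << "reply received after " << response_ms << " ms" << std::endl;
	} catch (const timeout_elapsed&) {
		std::cout << "no response before the timeout" << std::endl;
	}
}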
Example #2
//Return the first dictionary-known word within edit distance 2 of `word`,
//abandoning the search once the timeout has elapsed.
inline std::string SpellCorrect::get_one_known_edit_of_d2(const std::string & word) const
{
	search_start_ = std::clock();

	const auto word_edits = get_word_edits_of_d1(word);

	for(const auto & edit : word_edits)
	{
		const auto known_edits = known_words(get_word_edits_of_d1(edit));

		if(!known_edits.empty()) return *known_edits.begin();

		if(timeout_elapsed()) return std::string();
	}

	return std::string();
}
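
Neither get_word_edits_of_d1 nor known_words is shown here. In Norvig-style correctors the distance-1 generator emits every string one edit away, and composing it with itself (as the loop above does) covers distance 2. A minimal standalone sketch of such a generator, restricted to deletions for brevity (the name and scope are stand-ins, not the example's actual implementation):

#include <string>
#include <vector>

// Hypothetical stand-in for get_word_edits_of_d1, deletions only.
// A full generator would also emit transpositions, replacements and insertions.
std::vector<std::string> edits_of_d1(const std::string& word)
{
	std::vector<std::string> edits;
	edits.reserve(word.size());
	for (std::size_t i = 0; i < word.size(); ++i)
		edits.push_back(word.substr(0, i) + word.substr(i + 1));
	return edits;
}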
Example #3
static void
iris_thread_worker_exclusive (IrisThread  *thread,
                              IrisQueue   *queue,
                              gboolean     leader)
{
	GTimeVal        tv_now      = {0,0};
	GTimeVal        tv_req      = {0,0};
	IrisThreadWork *thread_work = NULL;
	gint            per_quanta  = 0;     /* Completed items within the
	                                      * last quantum. */
	guint           queued      = 0;     /* Items left in the queue. */
	gboolean        has_resized = FALSE;

	iris_debug (IRIS_DEBUG_THREAD);

	g_get_current_time (&tv_now);
	g_get_current_time (&tv_req);
	queued = iris_queue_length (queue);

	/* Since our thread is in exclusive mode, we are responsible for
	 * asking the scheduler manager to add or remove threads based
	 * on the demand of our work queue.
	 *
	 * If the scheduler has maxed out the number of threads it is
	 * allowed, then we will not ask the scheduler to add more
	 * threads and rebalance.
	 */

get_next_item:

	if (G_LIKELY ((thread_work = iris_queue_pop (queue)) != NULL)) {
		if (!VERIFY_THREAD_WORK (thread_work))
			goto get_next_item;

		iris_thread_work_run (thread_work);
		iris_thread_work_free (thread_work);
		per_quanta++;
	}
	else {
#if 0
		g_warning ("Exclusive thread is done managing, received NULL");
#endif
		return;
	}

	if (G_UNLIKELY (!thread->scheduler->maxed && leader)) {
		g_get_current_time (&tv_now);

		if (G_UNLIKELY (timeout_elapsed (&tv_now, &tv_req))) {
			/* Check whether we have plenty more work left, or have hit
			 * the edge case where we process at about the same speed as
			 * the pusher but contention keeps us from speeding up (some
			 * schedulers round-robin or steal). In that case we still
			 * ask for another thread even though the queue is empty,
			 * because we know more work is coming.
			 */
			queued = iris_queue_length (queue);
			if (queued == 0 && !has_resized) {
				queued = per_quanta * 2;
				has_resized = TRUE;
			}

			if (per_quanta < queued) {
				/* make sure we are not maxed before asking */
				if (!g_atomic_int_get (&thread->scheduler->maxed))
					iris_scheduler_manager_request (thread->scheduler,
									per_quanta,
									queued);
			}

			per_quanta = 0;
			tv_req = tv_now;
			g_time_val_add (&tv_req, QUANTUM_USECS);
		}
	}

	goto get_next_item;
}
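
Note that timeout_elapsed() here is a two-argument GTimeVal comparison, unrelated to the C++ exception type thrown in the first example. The helper itself is not shown; below is a minimal reconstruction of what the worker appears to expect from it (an assumption, not the Iris source):

#include <glib.h>

// Hypothetical reconstruction: has `now` reached or passed the requested time?
static gboolean
timeout_elapsed (const GTimeVal *now, const GTimeVal *req)
{
	if (now->tv_sec != req->tv_sec)
		return now->tv_sec > req->tv_sec;
	return now->tv_usec >= req->tv_usec;
}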
Example #4
static void
iris_thread_worker_exclusive (IrisThread  *thread,
                              IrisQueue   *queue,
                              gboolean     leader)
{
	GTimeVal        tv_now      = {0,0};
	GTimeVal        tv_req      = {0,0};
	IrisThreadWork *thread_work = NULL;
	gint            per_quanta  = 0;     /* Completed items within the
	                                      * last quantum. */
	guint           queued      = 0;     /* Items left in the queue. */
	gboolean        has_resized = FALSE;
	gboolean        remove_work;

	iris_debug (IRIS_DEBUG_THREAD);

	g_get_current_time (&tv_now);
	g_get_current_time (&tv_req);
	queued = iris_queue_get_length (queue);

	/* Since our thread is in exclusive mode, we are responsible for
	 * asking the scheduler manager to add or remove threads based
	 * on the demand of our work queue.
	 *
	 * If the scheduler has maxed out the number of threads it is
	 * allowed, then we will not ask the scheduler to add more
	 * threads and rebalance.
	 */

get_next_item:

	if (G_LIKELY ((thread_work = iris_queue_pop (queue)) != NULL)) {
		if (!g_atomic_int_compare_and_exchange(&thread_work->taken, FALSE, TRUE)) {
			remove_work = g_atomic_int_get (&thread_work->remove);

			if (!remove_work)
			/* We lost a race with another thread (remember that a
				 * lock-free queue may pop the same item twice).
				 */
				goto get_next_item;
			/* else: We lost a race with iris_scheduler_unqueue() */
		} else
			/* We won the race; a pending 'remove' is still honoured below. */
			remove_work = g_atomic_int_get (&thread_work->remove);

		if (!remove_work) {
			iris_thread_work_run (thread_work);
			per_quanta++;
		}

		iris_thread_work_free (thread_work);
	}
	else {
		/* Queue is closed, so scheduler is finalizing. The scheduler will be
		 * waiting until we set thread->scheduler to NULL.
		 */
		g_atomic_pointer_set (&thread->scheduler, NULL);
		iris_scheduler_manager_yield (thread);
		return;
	}

	if (remove_work)
		goto get_next_item;

	if (G_UNLIKELY (!thread->scheduler->maxed && leader)) {
		g_get_current_time (&tv_now);

		if (G_UNLIKELY (timeout_elapsed (&tv_now, &tv_req))) {
			/* Check whether we have plenty more work left, or have hit
			 * the edge case where we process at about the same speed as
			 * the pusher but contention keeps us from speeding up (some
			 * schedulers round-robin or steal). In that case we still
			 * ask for another thread even though the queue is empty,
			 * because we know more work is coming.
			 */
			queued = iris_queue_get_length (queue);
			if (queued == 0 && !has_resized) {
				queued = per_quanta * 2;
				has_resized = TRUE;
			}

			if (per_quanta < queued) {
				/* make sure we are not maxed before asking */
				if (!g_atomic_int_get (&thread->scheduler->maxed))
					iris_scheduler_manager_request (thread->scheduler,
									per_quanta,
									queued);
			}

			per_quanta = 0;
			tv_req = tv_now;
			g_time_val_add (&tv_req, QUANTUM_USECS);
		}
	}

	goto get_next_item;
}
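
What Example #4 adds over Example #3 is the claim step: a lock-free queue may hand the same item to two threads, so each worker atomically flips `taken` before running the work, and a `remove` flag set by iris_scheduler_unqueue() vetoes execution. The same claim pattern in a standalone C++ sketch (types and names hypothetical):

#include <atomic>

struct WorkItem {
	std::atomic<bool> taken{false};   // set by the thread that wins the race
	std::atomic<bool> remove{false};  // set by an unqueue request
};

// Returns true only for the single thread that may run this item.
bool claim_for_execution(WorkItem& work)
{
	bool expected = false;
	if (!work.taken.compare_exchange_strong(expected, true))
		return false;                 // another thread already took it
	return !work.remove.load();       // honour a pending removal
}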