Example no. 1
int ms_ticker_detach(MSTicker *ticker,MSFilter *f){
	MSList *sources=NULL;
	MSList *filters=NULL;
	MSList *it;

	if (f->ticker==NULL) {
		ms_message("Filter %s is not scheduled; nothing to do.",f->desc->name);
		return 0;
	}

	ms_mutex_lock(&ticker->lock);

	filters=ms_filter_find_neighbours(f);	/* every filter connected to f */
	sources=get_sources(filters);	/* the filters in that graph with no inputs */
	if (sources==NULL){
		ms_fatal("No sources found around filter %s",f->desc->name);
		ms_list_free(filters);
		ms_mutex_unlock(&ticker->lock);
		return -1;
	}

	/* unschedule the graph: the ticker drives execution from the source
	 * filters, so removing them from the execution list detaches everything */
	for(it=sources;it!=NULL;it=ms_list_next(it)){
		ticker->execution_list=ms_list_remove(ticker->execution_list,it->data);
	}
	ms_mutex_unlock(&ticker->lock);
	ms_list_for_each(filters,(void (*)(void*))ms_filter_postprocess);
	ms_list_free(filters);
	ms_list_free(sources);
	return 0;
}
Example no. 2
int ms_ticker_attach(MSTicker *ticker,MSFilter *f)
{
	MSList *sources=NULL;
	MSList *filters=NULL;
	MSList *it;
	
	if (f->ticker!=NULL) {
		ms_message("Filter %s is already being scheduled; nothing to do.",f->desc->name);
		return 0;
	}

	find_filters(&filters,f);
	sources=get_sources(filters);
	if (sources==NULL){
		ms_fatal("No sources found around filter %s",f->desc->name);
		ms_list_free(filters);
		return -1;
	}
	/*run preprocess on each filter: */
	for(it=filters;it!=NULL;it=it->next)
		ms_filter_preprocess((MSFilter*)it->data,ticker);
	/* schedule the graph: appending its sources to the execution list is
	 * what makes the ticker start running these filters */
	ms_mutex_lock(&ticker->lock);
	ticker->execution_list=ms_list_concat(ticker->execution_list,sources);
	ms_mutex_unlock(&ticker->lock);
	ms_list_free(filters);
	return 0;
}
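
For orientation, here is a minimal usage sketch of the attach/detach pair above. It assumes the classic mediastreamer2 API (ms_ticker_new(), ms_filter_new(), ms_filter_link() and their unlink/destroy counterparts); the two filter IDs are placeholders, and any registered source/sink pair would do:

#include "mediastreamer2/msticker.h"
#include "mediastreamer2/msfilter.h"
#include "mediastreamer2/allfilters.h"

static void example_run_graph(void)
{
	MSTicker *ticker = ms_ticker_new();
	MSFilter *src  = ms_filter_new(MS_FILE_PLAYER_ID); /* placeholder source filter */
	MSFilter *sink = ms_filter_new(MS_FILE_REC_ID);    /* placeholder sink filter */

	ms_filter_link(src, 0, sink, 0);  /* output pin 0 of src -> input pin 0 of sink */
	ms_ticker_attach(ticker, src);    /* preprocesses the graph, schedules its sources */
	/* ... media flows on the ticker thread ... */
	ms_ticker_detach(ticker, src);    /* unschedules the sources, runs postprocess */
	ms_filter_unlink(src, 0, sink, 0);

	ms_filter_destroy(src);
	ms_filter_destroy(sink);
	ms_ticker_destroy(ticker);
}

Note that attaching through any one filter is enough: both functions discover the whole connected graph before scheduling or unscheduling it.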
Example no. 3
int ms_ticker_attach_multiple(MSTicker *ticker,MSFilter *f,...)
{
	MSList *sources=NULL;
	MSList *filters=NULL;
	MSList *it;
	MSList *total_sources=NULL;
	va_list l;

	va_start(l,f);

	do{
		if (f->ticker==NULL) {
			filters=ms_filter_find_neighbours(f);
			sources=get_sources(filters);
			if (sources==NULL){
				ms_fatal("No sources found around filter %s",f->desc->name);
				ms_list_free(filters);
				break;
			}
			/*run preprocess on each filter: */
			for(it=filters;it!=NULL;it=it->next)
				ms_filter_preprocess((MSFilter*)it->data,ticker);
			ms_list_free(filters);
			total_sources=ms_list_concat(total_sources,sources);			
		}else ms_message("Filter %s is already being scheduled; nothing to do.",f->desc->name);
	}while ((f=va_arg(l,MSFilter*))!=NULL);
	va_end(l);
	if (total_sources){
		ms_mutex_lock(&ticker->lock);
		ticker->execution_list=ms_list_concat(ticker->execution_list,total_sources);
		ms_mutex_unlock(&ticker->lock);
	}
	return 0;
}
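
Note that the variadic list must be NULL-terminated, since the loop stops at the first NULL va_arg. A hypothetical call (audio_source and video_source being already-constructed MSFilter chains):

	ms_ticker_attach_multiple(ticker, audio_source, video_source, NULL);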
Example no. 4
/***************************************************************************
 * This thread spews packets as fast as it can
 *
 *      THIS IS WHERE ALL THE EXCITEMENT HAPPENS!!!!
 *      90% of CPU cycles are spent in this function.
 *
 ***************************************************************************/
static void
transmit_thread(void *v) /*aka. scanning_thread() */
{
    struct ThreadPair *parms = (struct ThreadPair *)v;
    uint64_t i;
    uint64_t start;
    uint64_t end;
    const struct Masscan *masscan = parms->masscan;
    unsigned retries = masscan->retries;
    unsigned rate = (unsigned)masscan->max_rate;
    unsigned r = retries + 1;
    uint64_t range;
    struct BlackRock blackrock;
    uint64_t count_ips = rangelist_count(&masscan->targets);
    struct Throttler *throttler = parms->throttler;
    struct TemplateSet pkt_template = templ_copy(parms->tmplset);
    unsigned *picker = parms->picker;
    struct Adapter *adapter = parms->adapter;
    uint64_t packets_sent = 0;
    unsigned increment = (masscan->shard.of-1) + masscan->nic_count;
    unsigned src_ip;
    unsigned src_ip_mask;
    unsigned src_port;
    unsigned src_port_mask;

    get_sources(masscan, parms->nic_index, 
                &src_ip, &src_ip_mask, 
                &src_port, &src_port_mask);

    LOG(1, "xmit: starting transmit thread #%u\n", parms->nic_index);


    /* Create the shuffler/randomizer. This creates the 'range' variable,
     * which is simply the number of IP addresses times the number of
     * ports */
    range = rangelist_count(&masscan->targets) 
            * rangelist_count(&masscan->ports);
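    /* e.g. (hypothetical numbers): 10 target IPs x 4 ports -> range == 40 probes per pass */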
    blackrock_init(&blackrock, range, masscan->seed);

    /* Calculate the 'start' and 'end' of a scan. One reason to do this is
     * to support --shard, so that multiple machines can co-operate on
     * the same scan. Another reason to do this is so that we can bleed
     * a little bit past the end when we have --retries. Yet another
     * thing to do here is deal with multiple network adapters, which
     * is essentially the same logic as shards. */
    start = masscan->resume.index + (masscan->shard.one-1) + parms->nic_index;
    end = range;
    if (masscan->resume.count && end > start + masscan->resume.count)
        end = start + masscan->resume.count;
    end += retries * rate;

    

    /* "THROTTLER" rate-limits how fast we transmit, set with the
     * --max-rate parameter */
    throttler_start(throttler, masscan->max_rate/masscan->nic_count);

    /* -----------------
     * the main loop
     * -----------------*/
    LOG(3, "xmit: starting main loop: [%llu..%llu]\n", start, end);
    for (i=start; i<end; ) {
        uint64_t batch_size;

        /*
         * Do a batch of many packets at a time. That's because per-packet
         * throttling is expensive at 10 million pps, so we reduce the
         * per-packet cost by batching. At slower rates, the batch
         * size will always be one. (--max-rate)
         */
        batch_size = throttler_next_batch(throttler, packets_sent);

        /*
         * Transmit packets from the other thread, when doing --banners. This
         * takes priority over sending SYN packets. If there is so much
         * activity grabbing banners that we cannot transmit more SYN packets,
         * then "batch_size" will get decremented to zero, and we won't be
         * able to transmit SYN packets.
         */
        flush_packets(adapter, parms->packet_buffers, parms->transmit_queue, 
                        &packets_sent, &batch_size);


        /*
         * Transmit a bunch of packets. At any rate slower than 100,000
         * packets/second, the 'batch_size' is likely to be 1.
         */
        while (batch_size && i < end) {
            uint64_t xXx;
            unsigned ip_them;
            unsigned port_them;
            unsigned ip_me;
            unsigned port_me;
            uint64_t cookie;


            /*
             * RANDOMIZE THE TARGET:
             *  This is kinda a tricky bit that picks a random IP and port
             *  number in order to scan. We monotonically increment the
             *  index 'i' from [0..range]. We then shuffle (randomly transmog)
             *  that index into some other, but unique/1-to-1, number in the
             *  same range. That way we visit all targets, but in a random
             *  order. Then, once we've shuffled the index, we "pick" the
             *  IP address and port that the index refers to.
             */
            xXx = (i + (r--) * rate);
            while (xXx >= range)
                xXx -= range;
            xXx = blackrock_shuffle(&blackrock,  xXx);
            ip_them = rangelist_pick2(&masscan->targets, xXx % count_ips, picker);
            port_them = rangelist_pick(&masscan->ports, xXx / count_ips);
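
            /*
             * Worked example (hypothetical numbers): with count_ips == 10
             * and 4 ports, range == 40. If the shuffled index xXx == 27,
             * then ip_them comes from target index 27 % 10 == 7 and
             * port_them from port index 27 / 10 == 2. Because
             * blackrock_shuffle() is a 1:1 permutation of [0..range),
             * every (IP,port) combination is produced exactly once per
             * pass over the range.
             */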

            /*
             * SYN-COOKIE LOGIC
             */
            ip_me = src_ip + (i & src_ip_mask);
            port_me = src_port + (xXx & src_port_mask);
            cookie = syn_cookie(ip_them, port_them, ip_me, port_me);
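
            /* The cookie is used as the TCP sequence number of the probe;
             * the receive thread recomputes it from each response's
             * addresses and ports and ignores packets that don't match,
             * so no per-probe state needs to be stored. */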
            
            /*
             * SEND THE PROBE
             *  This is sorta the entire point of the program, but little
             *  that's exciting happens here. The thing to note is that this
             *  may be a "raw" transmit that bypasses the kernel, meaning
             *  we can call this function millions of times a second.
             */
            rawsock_send_probe(
                    adapter,
                    ip_them, port_them,
                    ip_me, port_me,
                    (unsigned)cookie,
                    !batch_size, /* flush queue on last packet in batch */
                    &pkt_template
                    );
            batch_size--;
            packets_sent++;
            foo_count++; /* TODO: debug thing, will be removed */

            /*
             * SEQUENTIALLY INCREMENT THROUGH THE RANGE
             *  Yea, I know this is a puny 'i++' here, but it's a core feature
             *  of the system that it linearly increments through the range,
             *  but produces from that a shuffled sequence of targets (as
             *  described above). Because we are linearly incrementing this
             *  number, we can do lots of creative stuff, like doing clever
             *  retransmits and sharding.
             */
            if (r == 0) {
                i += increment; /* <------ increment by 1 normally, more with shards/nics */
                r = retries + 1;
            }
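
            /*
             * Note how this interacts with 'xXx = i + r*rate' above: each
             * target index is transmitted once per value of r, at values
             * of 'i' spaced 'rate' apart, so retransmits of a given target
             * are spread out over the scan rather than sent back-to-back.
             * That spacing is also why 'end' is padded by retries*rate.
             */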

        } /* end of batch */


        /* If the user pressed <ctrl-c>, then we need to exit. But, in case
         * the user wants to --resume the scan later, we save the current
         * state in a file */
        if (control_c_pressed) {
            break;
        }

        /* save our current location for resuming, if the user pressed
         * <ctrl-c> to exit early */
        parms->my_index = i;
    }


    /*
     * We are done transmitting. However, response packets will take several
     * seconds to arrive. Therefore, sit in short loop waiting for those
     * packets to arrive. Pressing <ctrl-c> a second time will exit this
     * prematurely.
     */
    while (!control_c_pressed_again) {
        unsigned k;
        uint64_t batch_size;

        for (k=0; k<1000; k++) {
            /*
             * Only send a few packets at a time, throttled according to the
             * --max-rate set by the user.
             */
            batch_size = throttler_next_batch(throttler, packets_sent);


            /* Transmit packets from the receive thread */
            flush_packets(  adapter,
                            parms->packet_buffers,
                            parms->transmit_queue,
                            &packets_sent,
                            &batch_size);

            pixie_usleep(1000);
        }
    }

    /* Thread is about to exit */
    parms->done_transmitting = 1;
    LOG(1, "xmit: stopping transmit thread #%u\n", parms->nic_index);
}
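
The shuffle-then-pick pattern in the main loop above can be shown in isolation. Below is a toy sketch; the modular-multiplication permutation is an assumption chosen for brevity (masscan's actual BlackRock shuffler is a Feistel-style construction), but the visit-every-pair-exactly-once property it demonstrates is the same:

#include <stdio.h>
#include <stdint.h>

/* Toy 1:1 permutation of [0..range): multiplication by a constant
 * coprime with range. NOT the real BlackRock algorithm; it only
 * demonstrates that a bijective shuffle visits each index once. */
static uint64_t toy_shuffle(uint64_t i, uint64_t range)
{
    const uint64_t a = 7; /* must be coprime with range (here 7 vs 10) */
    return (i * a) % range;
}

int main(void)
{
    const uint64_t n_ips = 5, n_ports = 2;
    const uint64_t range = n_ips * n_ports; /* as in transmit_thread() */
    uint64_t i;

    /* linear index in, randomized-looking (ip, port) pair out */
    for (i = 0; i < range; i++) {
        uint64_t x = toy_shuffle(i, range);
        printf("i=%2llu -> ip#%llu port#%llu\n",
               (unsigned long long)i,
               (unsigned long long)(x % n_ips),  /* same split as the scan loop */
               (unsigned long long)(x / n_ips));
    }
    return 0;
}

Running this prints each of the ten (ip, port) pairs exactly once, in a scrambled order, even though i increments linearly. That is the property that lets masscan resume, shard, and retransmit by manipulating nothing more than the linear index.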