/* Rebase every queued interrupt event so the queue stays consistent after
 * the Count register is rewritten to `base`: each event keeps its distance
 * from the current Count.  COMPARE/SPECIAL are dropped and re-added with
 * freshly computed trigger times. */
void translate_event_queue(unsigned int base)
{
    struct node *cursor;

    remove_event(COMPARE_INT);
    remove_event(SPECIAL_INT);

    cursor = q.first;
    while (cursor != NULL) {
        cursor->data.count = cursor->data.count - g_cp0_regs[CP0_COUNT_REG] + base;
        cursor = cursor->next;
    }

    add_interupt_event_count(COMPARE_INT, g_cp0_regs[CP0_COMPARE_REG]);
    add_interupt_event_count(SPECIAL_INT, 0);
}
/* Shift every pending interrupt event's trigger time so it keeps its
 * offset from the current Count after Count is rebased to `base`.
 * COMPARE/SPECIAL are removed first and re-added afterwards. */
void translate_event_queue(unsigned long base)
{
    interupt_queue *node;

    remove_event(COMPARE_INT);
    remove_event(SPECIAL_INT);

    for (node = q; node != NULL; node = node->next)
        node->count = node->count - Count + base;

    add_interupt_event_count(COMPARE_INT, Compare);
    add_interupt_event_count(SPECIAL_INT, 0);
}
/* Rebase the event queue in `state` around the new Count value `base`:
 * every queued event keeps its distance from the current CP0 Count.
 * COMPARE/SPECIAL events are re-added with recomputed times. */
void translate_event_queue(usf_state_t * state, unsigned int base)
{
    struct node *cursor;

    remove_event(state, COMPARE_INT);
    remove_event(state, SPECIAL_INT);

    cursor = state->q.first;
    while (cursor != NULL) {
        cursor->data.count = cursor->data.count - state->g_cp0_regs[CP0_COUNT_REG] + base;
        cursor = cursor->next;
    }

    add_interupt_event_count(state, COMPARE_INT, state->g_cp0_regs[CP0_COMPARE_REG]);
    add_interupt_event_count(state, SPECIAL_INT, 0);
}
/* Tear down a control connection: log it, cancel its read event,
 * close the descriptor and release the bookkeeping struct. */
static void drop_sock( struct ctl_sock *sock )
{
	spook_log( SL_DEBUG, "closed control connection" );
	/* cancel the event before closing the fd it watches */
	remove_event( sock->read_event );
	close( sock->fd );
	free( sock );
}
/* add an event to list ordered by execution time */
/* Inserts `event` into squeue `sq`.  If the event already carries an
 * sq_event handle it is first removed (and an error is logged), so an
 * event is never scheduled twice.  Priority events use squeue_add_usec
 * with (priority - 1) as the usec tiebreaker; others use squeue_add. */
void add_event(squeue_t *sq, timed_event *event)
{
	log_debug_info(DEBUGL_FUNCTIONS, 0, "add_event()\n");

	/* double-scheduling guard: unschedule the stale entry first */
	if (event->sq_event) {
		logit(NSLOG_RUNTIME_ERROR, TRUE,
		      "Error: Adding %s event that seems to already be scheduled\n",
		      EVENT_TYPE_STR(event->event_type));
		remove_event(sq, event);
	}

	if (event->priority) {
		event->sq_event = squeue_add_usec(sq, event->run_time, event->priority - 1, event);
	}
	else {
		event->sq_event = squeue_add(sq, event->run_time, event);
	}
	if (!event->sq_event) {
		logit(NSLOG_RUNTIME_ERROR, TRUE,
		      "Error: Failed to add event to squeue '%p' with prio %u: %s\n",
		      sq, event->priority, strerror(errno));
	}

	/* main-queue events are counted; with the broker compiled in, the
	 * `else` below attaches to this `if`, so only non-main-queue
	 * additions are broadcast to broker modules */
	if (sq == nagios_squeue)
		track_events(event->event_type, +1);
#ifdef USE_EVENT_BROKER
	else {
		/* send event data to broker */
		broker_timed_event(NEBTYPE_TIMEDEVENT_ADD, NEBFLAG_NONE, NEBATTR_NONE, event, NULL);
	}
#endif
	return;
}
/* Schedule a jump at time T.  Any pending jump is cancelled first so
 * that at most one jump event is ever queued. */
void jump (game_time t)
{
  assert (state->has_ground);

  remove_event (jump_handler);        /* cancel any jump in progress */
  add_event (t, jump_handler, sz_jump);
}
static void kill_map (struct tcp_map *map) { if (map->fd >= 0) close (map->fd); if (map->e_fd_write) remove_event (map->e_fd_write); if (map->e_fd_read) remove_event (map->e_fd_read); if (map->next) map->next->prev = map->prev; if (map->prev) map->prev->next = map->next; if (tlist == map) tlist = map->next; fprintf (stderr, "number of TCP maps: %d\n", --num_tcp_maps); FREE (map); }
void resize_laser (void)
/* Clear all laser beams from the screen. */
{
  int idx;

  remove_event (beam_handler);          /* stop beam animation */
  for (idx = 0; idx < beam_table.used; ++idx)
    free (beam_table.data[idx]);        /* release each beam record */
  DA_CLEAR (beam_table);
}
/* Remove an LSA from the link-state database: detach it from its
 * type-specific list, cancel its aging event, and free it. */
void remove_lsa(struct ospf_lsa *lsa)
{
	struct replay_nlist *entry;
	struct ospf_event *aging_event;

	/* detach the LSA from its per-type database list */
	entry = find_in_nlist(ospf0->lsdb->lsa_list[lsa->header->type], (void *)lsa);
	ospf0->lsdb->lsa_list[lsa->header->type] =
		remove_from_nlist(ospf0->lsdb->lsa_list[lsa->header->type], entry);
	ospf0->lsdb->count--;

	/* cancel the aging timer that was tracking this LSA */
	aging_event = find_event((void *)lsa, OSPF_EVENT_LSA_AGING);
	remove_event(aging_event);

	free(lsa->header);
	free(lsa);
}
/* Stop timer `n`: mark it inactive and remove its queued event, if any.
 * Fix: the original assigned timer.active[n] = -1 both before and after
 * the loop; the duplicate assignment is removed. */
void timerstop(int n)
{
    int i;

    timer.active[n] = -1;

    /* a timer has at most one queued event, so stop at the first match */
    for (i = 0; i < timer.numevents; i++) {
        if (timer.events[i] == n) {
            remove_event(i);
            break;
        }
    }
}
void run_simulation(double end_time) { struct Event *e; // Main scheduler loop while ((e=remove_event()) != NULL) { now_time = e->timestamp; if (now_time > end_time) break; event_handle(e); } }
/* (Re)start timer `n` with the given interval: drop any event already
 * queued for it, record it as the running timer, and program the
 * hardware timer. */
void timerstart(int n, long interval)
{
    int i;

    /* cancel a previously queued event for this timer, if present */
    for (i = 0; i < timer.numevents; i++) {
        if (timer.events[i] == n) {
            remove_event(i);
            break;
        }
    }

    timer.running = n;
    timer.active[n] = (int)interval;
    sethardwaretimer(interval);
}
void translate_event_queue(unsigned int base) { interupt_queue *aux; remove_event(COMPARE_INT); remove_event(SPECIAL_INT); aux=q; while (aux != NULL) { #ifdef NEW_COUNT aux->count = ((aux->count - Count)+base)& 0x7FFFFFFF; #else aux->count = (aux->count - Count)+base; #endif aux = aux->next; } #ifdef USE_COMPARE add_interupt_event_count(COMPARE_INT, Compare); #endif #ifdef USE_SPECIAL add_interupt_event_count(SPECIAL_INT, 0); #endif }
/* Accept one connection on the listening AF_UNIX socket `fd` (one-shot:
 * the listener is de-registered after a successful accept).  The new
 * descriptor is stored in the global `conn` and marked close-on-exec.
 * Fix: the original fell through on accept() failure and called fcntl()
 * on the negative descriptor; we now return early instead, leaving the
 * listener registered so a later attempt can still be served. */
static void af_unix_accept(int fd)
{
	int cmd;

	do {
		conn = accept(fd, NULL, NULL);
	} while (conn < 0 && errno == EINTR);

	/* accept() failed for a real reason: don't touch an invalid fd */
	if (conn < 0)
		return;

	// De-register since this is intended to be one listener
	remove_event(fd);

	cmd = fcntl(conn, F_GETFD);
	fcntl(conn, F_SETFD, cmd|FD_CLOEXEC);
}
int crash_check (void)
/* Return true, if the car crashed. */
{
  if (! state->has_ground)
    return 0;

  /* both wheel positions on solid ground: no crash */
  if (ground2[car_x+1] != ' ' && ground2[car_x+5] != ' ')
    return 0;

  remove_event (jump_handler);
  state = sz_crash;
  print_buggy ();
  start_wheel ();
  return 1;
}
/* Block until an event is available on `queue` and dequeue it.
 *
 * Classic sleep/wakeup loop: the queue is inspected under
 * xenbus_req_lock; if empty, the caller registers on the queue's wait
 * queue, drops the lock while sleeping, and re-takes it before
 * re-checking.  Re-testing in the while condition guards against
 * spurious wakeups. */
static struct xenbus_event *await_event(struct xenbus_event_queue *queue)
{
    struct xenbus_event *event;
    DEFINE_WAIT(w);
    spin_lock(&xenbus_req_lock);
    while (!(event = remove_event(queue))) {
        minios_add_waiter(w, queue->waitq);
        spin_unlock(&xenbus_req_lock);   /* must not sleep holding the lock */
        minios_wait(w);
        spin_lock(&xenbus_req_lock);
    }
    minios_remove_waiter(w, queue->waitq);
    spin_unlock(&xenbus_req_lock);
    return event;
}
/* Timeout callback: show the greeting in red markup, and after the
 * first invocation cancel the repeating timeout identified by `id`.
 * Fix: the original passed g_strdup_printf() directly to
 * gtk_label_set_markup(), leaking the allocated string on every call;
 * it is now held in a local and freed. */
static void warning_show (gpointer id)
{
  gchar *markup;

  markup = g_strdup_printf ("<b><span foreground=\"#FF0000\">%s</span></b>", SAUDACAO);
  gtk_label_set_markup (GTK_LABEL (warning_label1), markup);
  g_free (markup);

  gtk_widget_show ((GtkWidget *) warning_label1);
  printf ("%d", count);

  /* after the first call, remove the timeout so it stops repeating */
  if (count > 0)
    {
      remove_event ((guint *) id);
    }
  count++;
}
/* Tick every pending event: decrement its countdown; when it reaches
 * zero, fire the callback and unlink the entry.  The successor pointer
 * is read only after the callback runs, matching the original order. */
void process_events(void)
{
    struct event_info *cur = event_list;

    while (cur) {
        if (--(cur->time_remaining) != 0) {
            cur = cur->next;
            continue;
        }
        cur->func(cur->info);
        {
            struct event_info *fired = cur;
            cur = cur->next;       /* advance before removing */
            remove_event(fired);
        }
    }
}
int car_meteor_hit (int x)
/* Return true, if the car is down and occupies position X.
 * Then the car crashes immediately. */
{
  /* no hit unless the car is on the ground row and X is within its span */
  if (car_y != 5 || x < car_x || x >= car_x+7)
    return 0;

  remove_event (jump_handler);
  add_event (current_time (), jump_handler, sz_ram);
  print_buggy ();
  start_wheel ();
  crash_detected = 1;
  return 1;
}
void extinguish_laser (void)
/* Clear all laser beams from the screen. */
{
  int idx;

  remove_event (beam_handler);          /* stop beam animation */
  for (idx = 0; idx < beam_table.used; ++idx)
    {
      struct beam *b = beam_table.data[idx];
      int col;

      /* blank every cell this beam occupied */
      for (col = b->left; col < b->right; ++col)
        mvwaddch (moon, LINES-b->y, col, ' ');
      free (b);
    }
  DA_CLEAR (beam_table);
  wnoutrefresh (moon);                  /* queue the refresh */
}
/* Remove every queued event associated with `individual`.
 *
 * remove_event() mutates self->priv->events, invalidating the GList
 * iterator, so after each removal the scan restarts from the head.
 * Fix: the original returned after the first match, which contradicts
 * the "remove_all" contract when an individual has several pending
 * events. */
static void
remove_all_individual_event (EmpathyRosterView *self,
    FolksIndividual *individual)
{
  gboolean removed;

  do
    {
      GList *l;

      removed = FALSE;

      for (l = g_queue_peek_head_link (self->priv->events); l != NULL;
          l = g_list_next (l))
        {
          Event *event = l->data;

          if (event->individual == individual)
            {
              remove_event (self, event);
              /* the queue changed; restart iteration from the head */
              removed = TRUE;
              break;
            }
        }
    }
  while (removed);
}
/* Remove the queued event whose id matches `event_id`, if present.
 * Ids identify a single event, so the scan stops at the first match. */
void
empathy_roster_view_remove_event (EmpathyRosterView *self,
    guint event_id)
{
  GList *l = g_queue_peek_head_link (self->priv->events);

  while (l != NULL)
    {
      Event *event = l->data;

      if (event->id == event_id)
        {
          remove_event (self, event);
          return;
        }

      l = g_list_next (l);
    }
}
/* Tick all pending wait-events: decrement each event's countdown and
 * run those that reach zero, unlinking them afterwards.  Processing is
 * time-boxed so a burst of expiring triggers cannot stall the main
 * loop; leftovers fire on the next tick. */
void process_events(void)
{
	struct event_info *e = event_list;
	struct event_info *del;
	struct timeval start, stop, result;
	int trig_vnum;

	gettimeofday(&start, NULL);
	while (e)
	{
		if (--(e->time_remaining) == 0)
		{
			/* capture the trigger vnum before the callback may free it */
			trig_vnum = GET_TRIG_VNUM(((struct wait_event_data *)(e->info))->trigger);
			e->func(e->info);
			del = e;
			e = e->next;       /* successor read after the callback, on purpose */
			remove_event(del);
			/* We allot only ~50 ms per call for deferred triggers.
			 * Once the budget is exhausted, defer the rest to the next
			 * tick, spreading CPU time more evenly. */
			gettimeofday(&stop, NULL);
			timediff(&result, &stop, &start);
			if (result.tv_sec > 0 || result.tv_usec >= MAX_TRIG_USEC)
			{
				/* log the vnum of the trigger that blew the time budget */
				sprintf(buf, "[TrigVNum: %d]: process_events overflow %ld sec. %ld us.", trig_vnum, result.tv_sec, result.tv_usec);
				mudlog(buf, BRF, -1, ERRLOG, TRUE);
				break;
			}
		}
		else
			e = e->next;
	}
}
/* this is the main event handler loop */
/* Repeatedly: peek the next scheduled event, poll the I/O broker until
 * the event is due (or input arrives), then run, reschedule or free it.
 * Returns OK when the queue empties or a shutdown/restart signal is
 * seen.  `current_event` is a global that I/O handlers may NULL out to
 * cancel the peeked event while we are polling. */
int event_execution_loop(void) {
	timed_event *temp_event, *last_event = NULL;
	time_t last_time = 0L;
	time_t current_time = 0L;
	time_t last_status_update = 0L;
	int poll_time_ms;

	log_debug_info(DEBUGL_FUNCTIONS, 0, "event_execution_loop() start\n");

	time(&last_time);

	while (1) {
		struct timeval now;
		const struct timeval *event_runtime;
		int inputs;

		/* super-priority (hardcoded) events come first */

		/* see if we should exit or restart (a signal was encountered) */
		if (sigshutdown == TRUE || sigrestart == TRUE)
			break;

		/* get the current time */
		time(&current_time);

		if (sigrotate == TRUE) {
			rotate_log_file(current_time);
			update_program_status(FALSE);
		}

		/* hey, wait a second... we traveled back in time! */
		if (current_time < last_time)
			compensate_for_system_time_change((unsigned long)last_time, (unsigned long)current_time);

		/* else if the time advanced over the specified threshold, try and compensate... */
		else if ((current_time - last_time) >= time_change_threshold)
			compensate_for_system_time_change((unsigned long)last_time, (unsigned long)current_time);

		/* get next scheduled event */
		current_event = temp_event = (timed_event *)squeue_peek(nagios_squeue);

		/* if we don't have any events to handle, exit */
		if (!temp_event) {
			log_debug_info(DEBUGL_EVENTS, 0, "There aren't any events that need to be handled! Exiting...\n");
			break;
		}

		/* keep track of the last time */
		last_time = current_time;

		/* update status information occasionally - NagVis watches the NDOUtils DB to see if Nagios is alive */
		if ((unsigned long)(current_time - last_status_update) > 5) {
			last_status_update = current_time;
			update_program_status(FALSE);
		}

		event_runtime = squeue_event_runtime(temp_event->sq_event);
		/* only log when the head of the queue changed since last pass */
		if (temp_event != last_event) {
			log_debug_info(DEBUGL_EVENTS, 1, "** Event Check Loop\n");
			log_debug_info(DEBUGL_EVENTS, 1, "Next Event Time: %s", ctime(&temp_event->run_time));
			log_debug_info(DEBUGL_EVENTS, 1, "Current/Max Service Checks: %d/%d (%.3lf%% saturation)\n",
			               currently_running_service_checks, max_parallel_service_checks,
			               ((float)currently_running_service_checks / (float)max_parallel_service_checks) * 100);
		}
		last_event = temp_event;

		/* clamp the poll timeout to [0, 1500] ms so signals and fresh
		 * I/O are still noticed while waiting for a far-future event */
		gettimeofday(&now, NULL);
		poll_time_ms = tv_delta_msec(&now, event_runtime);
		if (poll_time_ms < 0)
			poll_time_ms = 0;
		else if (poll_time_ms >= 1500)
			poll_time_ms = 1500;

		log_debug_info(DEBUGL_SCHEDULING, 2, "## Polling %dms; sockets=%d; events=%u; iobs=%p\n",
		               poll_time_ms, iobroker_get_num_fds(nagios_iobs),
		               squeue_size(nagios_squeue), nagios_iobs);
		inputs = iobroker_poll(nagios_iobs, poll_time_ms);
		if (inputs < 0 && errno != EINTR) {
			logit(NSLOG_RUNTIME_ERROR, TRUE, "Error: Polling for input on %p failed: %s", nagios_iobs, iobroker_strerror(inputs));
			break;
		}

		log_debug_info(DEBUGL_IPC, 2, "## %d descriptors had input\n", inputs);

		/*
		 * if the event we peeked was removed from the queue from
		 * one of the I/O operations, we must take care not to
		 * try to run it, as we're (almost) sure to access free'd
		 * or invalid memory if we do.
		 */
		if (!current_event) {
			log_debug_info(DEBUGL_EVENTS, 0, "Event was cancelled by iobroker input\n");
			continue;
		}

		/* event not yet due: loop around and poll again */
		gettimeofday(&now, NULL);
		if (tv_delta_msec(&now, event_runtime) >= 0)
			continue;

		/* move on if we shouldn't run this event */
		if (should_run_event(temp_event) == FALSE)
			continue;

		/* handle the event */
		handle_timed_event(temp_event);

		/*
		 * we must remove the entry we've peeked, or
		 * we'll keep getting the same one over and over.
		 * This also maintains sync with broker modules.
		 */
		remove_event(nagios_squeue, temp_event);

		/* reschedule the event if necessary */
		if (temp_event->recurring == TRUE)
			reschedule_event(nagios_squeue, temp_event);

		/* else free memory associated with the event */
		else
			my_free(temp_event);
	}

	log_debug_info(DEBUGL_FUNCTIONS, 0, "event_execution_loop() end\n");

	return OK;
}
/* Decide whether the peeked event should execute now.
 * Only host/service check events are gated; all other event types
 * always run.  When a check cannot run (worker saturation, active
 * checks disabled, no spawn slots) the event is pulled from the queue
 * and rescheduled for a later time. */
static int should_run_event(timed_event *temp_event) {
	int run_event = TRUE;   /* default action is to execute the event */
	int nudge_seconds = 0;

	/* we only care about jobs that cause processes to run */
	if (temp_event->event_type != EVENT_HOST_CHECK &&
	    temp_event->event_type != EVENT_SERVICE_CHECK) {
		return TRUE;
	}

	/* if we can't spawn any more jobs, don't bother */
	if (!wproc_can_spawn(&loadctl)) {
		wproc_reap(100, 3000);
		return FALSE;
	}

	/* run a few checks before executing a service check... */
	if (temp_event->event_type == EVENT_SERVICE_CHECK) {
		service *temp_service = (service *)temp_event->event_data;

		/* forced checks override normal check logic */
		if ((temp_service->check_options & CHECK_OPTION_FORCE_EXECUTION))
			return TRUE;

		/* don't run a service check if we're already maxed out on the number of parallel service checks... */
		if (max_parallel_service_checks != 0 && (currently_running_service_checks >= max_parallel_service_checks)) {
			nudge_seconds = ranged_urand(5, 17);
			logit(NSLOG_RUNTIME_WARNING, TRUE, "\tMax concurrent service checks (%d) has been reached. Nudging %s:%s by %d seconds...\n", max_parallel_service_checks, temp_service->host_name, temp_service->description, nudge_seconds);
			run_event = FALSE;
		}

		/* don't run a service check if active checks are disabled */
		if (execute_service_checks == FALSE) {
			log_debug_info(DEBUGL_EVENTS | DEBUGL_CHECKS, 1, "We're not executing service checks right now, so we'll skip check event for service '%s;%s'.\n", temp_service->host_name, temp_service->description);
			run_event = FALSE;
		}

		/* reschedule the check if we can't run it now */
		if (run_event == FALSE) {
			remove_event(nagios_squeue, temp_event);

			if (nudge_seconds) {
				/* We nudge the next check time when it is due to too many concurrent service checks */
				temp_service->next_check = (time_t)(temp_service->next_check + nudge_seconds);
			}
			else {
				/* otherwise push it into the next regular check window */
				temp_service->next_check += check_window(temp_service);
			}

			temp_event->run_time = temp_service->next_check;
			reschedule_event(nagios_squeue, temp_event);
			update_service_status(temp_service, FALSE);
			run_event = FALSE;
		}
	}

	/* run a few checks before executing a host check... */
	else if (temp_event->event_type == EVENT_HOST_CHECK) {
		host *temp_host = (host *)temp_event->event_data;

		/* forced checks override normal check logic */
		if ((temp_host->check_options & CHECK_OPTION_FORCE_EXECUTION))
			return TRUE;

		/* don't run a host check if active checks are disabled */
		if (execute_host_checks == FALSE) {
			log_debug_info(DEBUGL_EVENTS | DEBUGL_CHECKS, 1, "We're not executing host checks right now, so we'll skip host check event for host '%s'.\n", temp_host->name);
			run_event = FALSE;
		}

		/* reschedule the host check if we can't run it right now */
		if (run_event == FALSE) {
			remove_event(nagios_squeue, temp_event);
			temp_host->next_check += check_window(temp_host);
			temp_event->run_time = temp_host->next_check;
			reschedule_event(nagios_squeue, temp_event);
			update_host_status(temp_host, FALSE);
			run_event = FALSE;
		}
	}

	return run_event;
}
/* Emulates the MIPS MTC0 instruction: move the GPR value `rrt` into the
 * coprocessor-0 register selected by the instruction's rd field.
 * Several registers mask or ignore writes; writes to Count and Compare
 * also resynchronise the interrupt/event queue. */
void MTC0(void)
{
   switch(PC->f.r.nrd)
   {
      case 0:    // Index
        Index = rrt & 0x8000003F;
        if ((Index & 0x3F) > 31)
        {
            DebugMessage(M64MSG_ERROR, "MTC0 instruction writing Index register with TLB index > 31");
            stop=1;
        }
        break;
      case 1:    // Random — write ignored
        break;
      case 2:    // EntryLo0
        EntryLo0 = rrt & 0x3FFFFFFF;
        break;
      case 3:    // EntryLo1
        EntryLo1 = rrt & 0x3FFFFFFF;
        break;
      case 4:    // Context — only the PTEBase field (upper bits) is writable
        Context = (rrt & 0xFF800000) | (Context & 0x007FFFF0);
        break;
      case 5:    // PageMask
        PageMask = rrt & 0x01FFE000;
        break;
      case 6:    // Wired — writing also resets Random to its maximum (31)
        Wired = rrt;
        Random = 31;
        break;
      case 8:    // BadVAddr — write ignored
        break;
      case 9:    // Count — rebase the pending event queue around the new value
        update_count();
        if (next_interupt <= Count) gen_interupt();
        debug_count += Count;
        translate_event_queue(rrt & 0xFFFFFFFF);
        Count = rrt & 0xFFFFFFFF;
        debug_count -= Count;
        break;
      case 10:   // EntryHi
        EntryHi = rrt & 0xFFFFE0FF;
        break;
      case 11:   // Compare — reschedule the timer interrupt and clear the pending bit
        update_count();
        remove_event(COMPARE_INT);
        add_interupt_event_count(COMPARE_INT, (unsigned int)rrt);
        Compare = rrt;
        Cause = Cause & 0xFFFF7FFF; //Timer interupt is clear
        break;
      case 12:   // Status — a change of the FR bit re-shuffles the FPR layout
        if((rrt & 0x04000000) != (Status & 0x04000000))
        {
            shuffle_fpr_data(Status, rrt);
            set_fpr_pointers(rrt);
        }
        Status = rrt;
        /* re-evaluate pending interrupts as if at the next instruction,
         * then restore PC (it is advanced once at the end) */
        PC++;
        check_interupt();
        update_count();
        if (next_interupt <= Count) gen_interupt();
        PC--;
        break;
      case 13:   // Cause — only a zero write is accepted here
        if (rrt!=0)
        {
            DebugMessage(M64MSG_ERROR, "MTC0 instruction trying to write Cause register with non-0 value");
            stop = 1;
        }
        else Cause = rrt;
        break;
      case 14:   // EPC
        EPC = rrt;
        break;
      case 15:   // PRevID — write ignored
        break;
      case 16:   // Config
        Config = rrt;
        break;
      case 18:   // WatchLo
        WatchLo = rrt & 0xFFFFFFFF;
        break;
      case 19:   // WatchHi
        WatchHi = rrt & 0xFFFFFFFF;
        break;
      case 27:   // CacheErr — write ignored
        break;
      case 28:   // TagLo
        TagLo = rrt & 0x0FFFFFC0;
        break;
      case 29:   // TagHi — always forced to zero
        TagHi =0;
        break;
      default:
        DebugMessage(M64MSG_ERROR, "Unknown MTC0 write: %d", PC->f.r.nrd);
        stop=1;
   }
   PC++;
}
/* unschedules a host or service downtime */
/* Cancels a scheduled downtime entry: reverses its side effects
 * (pending-flex counters, downtime depth, status updates and
 * notifications if it was in effect), removes the matching
 * EVENT_SCHEDULED_DOWNTIME entry from the high-priority event list,
 * deletes the downtime record, and recursively unschedules any
 * downtimes that were triggered by this one.
 * Returns OK on success, ERROR if the downtime or its host/service
 * cannot be found. */
int unschedule_downtime(int type, unsigned long downtime_id) {
	scheduled_downtime *temp_downtime = NULL;
	scheduled_downtime *next_downtime = NULL;
	host *hst = NULL;
	service *svc = NULL;
	timed_event *temp_event = NULL;
#ifdef USE_EVENT_BROKER
	int attr = 0;
#endif

	log_debug_info(DEBUGL_FUNCTIONS, 0, "unschedule_downtime()\n");

	/* find the downtime entry in the list in memory */
	if ((temp_downtime = find_downtime(type, downtime_id)) == NULL)
		return ERROR;

	/* find the host or service associated with this downtime */
	if (temp_downtime->type == HOST_DOWNTIME) {
		if ((hst = find_host(temp_downtime->host_name)) == NULL)
			return ERROR;
	}
	else {
		if ((svc = find_service(temp_downtime->host_name, temp_downtime->service_description)) == NULL)
			return ERROR;
	}

	/* decrement pending flex downtime if necessary ... */
	if (temp_downtime->fixed == FALSE && temp_downtime->incremented_pending_downtime == TRUE) {
		if (temp_downtime->type == HOST_DOWNTIME)
			hst->pending_flex_downtime--;
		else
			svc->pending_flex_downtime--;
	}

	log_debug_info(DEBUGL_DOWNTIME, 0, "Cancelling %s downtime (id=%lu)\n",
	               temp_downtime->type == HOST_DOWNTIME ? "host" : "service",
	               temp_downtime->downtime_id);

	/* decrement the downtime depth variable and update status data if necessary */
	if (temp_downtime->is_in_effect == TRUE) {

#ifdef USE_EVENT_BROKER
		/* send data to event broker */
		attr = NEBATTR_DOWNTIME_STOP_CANCELLED;
		broker_downtime_data(NEBTYPE_DOWNTIME_STOP, NEBFLAG_NONE, attr, temp_downtime->type, temp_downtime->host_name, temp_downtime->service_description, temp_downtime->entry_time, temp_downtime->author, temp_downtime->comment, temp_downtime->start_time, temp_downtime->end_time, temp_downtime->fixed, temp_downtime->triggered_by, temp_downtime->duration, temp_downtime->downtime_id, NULL, temp_downtime->is_in_effect, temp_downtime->trigger_time);
#endif

		if (temp_downtime->type == HOST_DOWNTIME) {
			hst->scheduled_downtime_depth--;
			update_host_status(hst, FALSE);

			/* log a notice - this is parsed by the history CGI */
			if (hst->scheduled_downtime_depth == 0) {

				logit(NSLOG_INFO_MESSAGE, FALSE, "HOST DOWNTIME ALERT: %s;CANCELLED; Scheduled downtime for host has been cancelled.\n", hst->name);

				/* send a notification */
				host_notification(hst, NOTIFICATION_DOWNTIMECANCELLED, NULL, NULL, NOTIFICATION_OPTION_NONE);
			}
		}
		else {
			svc->scheduled_downtime_depth--;
			update_service_status(svc, FALSE);

			/* log a notice - this is parsed by the history CGI */
			if (svc->scheduled_downtime_depth == 0) {

				logit(NSLOG_INFO_MESSAGE, FALSE, "SERVICE DOWNTIME ALERT: %s;%s;CANCELLED; Scheduled downtime for service has been cancelled.\n", svc->host_name, svc->description);

				/* send a notification */
				service_notification(svc, NOTIFICATION_DOWNTIMECANCELLED, NULL, NULL, NOTIFICATION_OPTION_NONE);
			}
		}
	}

	/* remove scheduled entry from event queue */
	/* the queue stores the downtime id (not a pointer) as event_data */
	for (temp_event = event_list_high; temp_event != NULL; temp_event = temp_event->next) {
		if (temp_event->event_type != EVENT_SCHEDULED_DOWNTIME)
			continue;
		if (((unsigned long)temp_event->event_data) == downtime_id)
			break;
	}
	if (temp_event != NULL) {
		remove_event(temp_event, &event_list_high, &event_list_high_tail);
		my_free(temp_event->event_data);
		my_free(temp_event);
	}

	/* delete downtime entry */
	if (temp_downtime->type == HOST_DOWNTIME)
		delete_host_downtime(downtime_id);
	else
		delete_service_downtime(downtime_id);

	/* unschedule all downtime entries that were triggered by this one */
	/* restart the scan after every removal: the recursive call mutates
	 * the list, so the saved next pointer may be stale */
	while (1) {
		for (temp_downtime = scheduled_downtime_list; temp_downtime != NULL; temp_downtime = next_downtime) {
			next_downtime = temp_downtime->next;
			if (temp_downtime->triggered_by == downtime_id) {
				unschedule_downtime(ANY_DOWNTIME, temp_downtime->downtime_id);
				break;
			}
		}

		if (temp_downtime == NULL)
			break;
	}

	return OK;
}
/* save the zone's triggers to internal memory and to disk */
/* OLC commit handler: if the edited trigger already exists, its
 * prototype command list is rebuilt from the editor buffer and every
 * live copy in the world is updated in place (pending wait events are
 * cancelled).  NOTE(review): this excerpt is truncated — the function
 * continues past the final `else` (new-trigger path) beyond this view. */
void trigedit_save(struct descriptor_data *d)
{
  int trig_rnum, i;
  int found = 0;
  char *s;
  trig_data *proto;
  trig_data *trig = OLC_TRIG(d);
  trig_data *live_trig;
  struct cmdlist_element *cmd, *next_cmd;
  struct index_data **new_index;
  struct descriptor_data *dsc;
  FILE *trig_file;
  int zone, top;
  char buf[MAX_CMD_LENGTH];
  char bitBuf[MAX_INPUT_LENGTH];
  char fname[MAX_INPUT_LENGTH];
  char logbuf[MAX_INPUT_LENGTH];

  if ((trig_rnum = real_trigger(OLC_NUM(d))) != -1) {
    /* existing trigger: free the old prototype's command list and args */
    proto = trig_index[trig_rnum]->proto;
    for (cmd = proto->cmdlist; cmd; cmd = next_cmd) {
      next_cmd = cmd->next;
      if (cmd->cmd)
        free(cmd->cmd);
      free(cmd);
    }
    free(proto->arglist);
    free(proto->name);

    /* Recompile the command list from the new script */
    s = OLC_STORAGE(d);
    CREATE(trig->cmdlist, struct cmdlist_element, 1);
    trig->cmdlist->cmd = str_dup(strtok(s, "\n\r"));
    cmd = trig->cmdlist;
    while ((s = strtok(NULL, "\n\r"))) {
      CREATE(cmd->next, struct cmdlist_element, 1);
      cmd = cmd->next;
      cmd->cmd = str_dup(s);
    }

    /* make the prototype look like what we have */
    trig_data_copy(proto, trig);

    /* go through the mud and replace existing triggers */
    live_trig = trigger_list;
    while (live_trig) {
      if (GET_TRIG_RNUM(live_trig) == trig_rnum) {
        if (live_trig->arglist) {
          free(live_trig->arglist);
          live_trig->arglist = NULL;
        }
        if (live_trig->name) {
          free(live_trig->name);
          live_trig->name = NULL;
        }
        if (proto->arglist)
          live_trig->arglist = str_dup(proto->arglist);
        if (proto->name)
          live_trig->name = str_dup(proto->name);
        /* live copies share the prototype's freshly built command list */
        live_trig->cmdlist = proto->cmdlist;
        live_trig->curr_state = live_trig->cmdlist;
        live_trig->trigger_type = proto->trigger_type;
        live_trig->attach_type = proto->attach_type;
        live_trig->narg = proto->narg;
        live_trig->data_type = proto->data_type;
        live_trig->depth = 0;
        live_trig->wait_event = NULL;
        /* a script that was mid-wait must have its timer cancelled */
        if (GET_TRIG_WAIT(live_trig))
          remove_event(GET_TRIG_WAIT(live_trig));
        free_varlist(live_trig->var_list);
      }
      live_trig = live_trig->next_in_world;
    }
  } else {
/* Handle a client<->server CONNECTION_EVENT or DISCONNECTION_EVENT in the
 * download simulation:
 *  1. advance every client's received-data totals using the current flows
 *     over the interval since the last update (t - t_last);
 *  2. apply the (dis)connection to the connection matrix and per-side
 *     connection counters, with sanity checks;
 *  3. re-run the bandwidth-allocation algorithm (`alocacao`) to compute
 *     the new flow matrix;
 *  4. reschedule each active client's exit event from its remaining data
 *     and, when the perceived flow changed significantly, schedule a
 *     flow-change event for it;
 *  5. remove the processed event (`temp_event_events`) from the list.
 * Returns 0 on success, 1 on any consistency error (which is printed).
 * NOTE(review): `ant`/`pont` are malloc'd on every call and never freed —
 * this leaks; confirm and fix upstream. */
int process_connection_event(event *temp_event_events, event *events, event *exits, client clients_vector[NUMBER_CLIENTS], int connections[][NUMBER_CLIENTS], int servers_number_connections[NUMBER_SERVERS], int clients_number_connections[NUMBER_CLIENTS], float flow[][NUMBER_CLIENTS], long double client_flow[NUMBER_CLIENTS], long double server_flow[NUMBER_SERVERS], long double data[][NUMBER_CLIENTS], int clients_state[NUMBER_CLIENTS], int servers_state[NUMBER_SERVERS], int event_id, int server_id, int client_id, int *number_events, int clients_up )
{
    int i,j, k;
    int change_flow;
    long double flow_accumulator, data_accumulator, data_accumulator_old, cost_accumulator;
    float when, epsilon, exp_paramenter;
    struct event *temp_event;
    struct event **ant, **pont;

    ant = ( event **) malloc(sizeof ( event *));
    pont = ( event **) malloc(sizeof ( event *));

    if (DEBUG_LEVEL > 3)
    {
        if(event_id == CONNECTION_EVENT){ printf("%f CONNECTION_EVENT %d %d\n", t, server_id, client_id);}
        if(event_id == DISCONNECTION_EVENT){ printf("%f DISCONNECTION_EVENT %d %d\n", t, server_id, client_id);}
    }

    /* update the data received by every client using the current flow
     * and the elapsed interval (t - t_last) */
    for (j=0;j<NUMBER_CLIENTS;j++)
    {
        data_accumulator = 0;      /* total data received from all servers */
        data_accumulator_old = 0;
        for(i=0;i<NUMBER_SERVERS;i++)
        {
            data_accumulator_old += data[i][j];
            data[i][j] += (t- t_last)*flow[i][j];
            data_accumulator += data[i][j];
            /* consistency check: negative data, or more than FILE_SIZE
             * received while the client is still in the system */
            if( (data[i][j] < 0) || ((data_accumulator > FILE_SIZE) && (clients_vector[j].exit_event != NULL)) || ((data_accumulator_old > FILE_SIZE) && (clients_vector[j].exit_event != NULL)) )
            {
                printf("ERROR: %f ERROR_DATA_AMOUNT - %d %d %Lf %Lf %Lf \n", t, i, j, data[i][j], data_accumulator_old, data_accumulator);
                printf( "\n%f REMAINING EVENTS TO PROCESS %d\n", t, (*number_events));
                listar(exits);
                listar(events);
                printf("\n");
                printf( "CLIENT %d DATA_VECTOR:\n", j);
                for(k=0; k<NUMBER_SERVERS; k++)
                {
                    printf( "DATA_AMOUNT_FROM_SERVER %d %Lf\n", k, data[k][j]);
                }
                printf("\nCLIENT %d FLOW_VECTOR:\n", j);
                for(k=0; k<NUMBER_SERVERS; k++)
                {
                    printf( "FLOW_AMOUNT_FROM_SERVER %d %f\n", k, flow[k][j]);
                }
                //return 1;
            }
        }
    }

    /* update t_last: records when the server flows were last applied */
    t_last = t;

    /* update the servers' connection matrix */
    if(event_id == CONNECTION_EVENT){
        if(servers_state[server_id] == 0)
        {
            printf("ERROR: %f ERROR_CONNECTION_ATTEMPT - SERVER_DOWN %d %d\n", t, server_id, client_id);
            return 1;
        }
        if (connections[server_id][client_id] != 0)
        {
            printf("ERROR: %f ERROR_CONNECTION_ATTEMPT - ALREADY_CONNECTED %d %d\n", t, server_id, client_id);
            return 1;
        }
        connections[server_id][client_id] = 1;
        servers_number_connections[server_id]++;
        clients_number_connections[client_id]++;
    }
    else{
        if (connections[server_id][client_id] != 1)
        {
            printf("ERROR: %f ERROR_DISCONNECTION_ATTEMPT - NOT_CONNECTED %d %d\n", t, server_id, client_id);
            return 1;
        }
        connections[server_id][client_id] = 0;
        servers_number_connections[server_id]--;
        clients_number_connections[client_id]--;
    }

    if(DEBUG_LEVEL>1) printf("%f SERVER_NUMBER_CONNECTIONS %d %d\n", t, server_id, servers_number_connections[server_id]);
    if(DEBUG_LEVEL>1) printf("%f CLIENT_NUMBER_CONNECTIONS %d %d\n", t, client_id, clients_number_connections[client_id]);

    /* run the bandwidth-allocation algorithm to determine the new flow;
     * returns with `flow` already updated */
    i = alocacao(clients_vector, clients_state, servers_state, connections, flow, client_flow, server_flow, clients_number_connections, servers_number_connections, clients_up);
    if( i == 1) return 1;

    /* reschedule exit and flow events for the clients using the data
     * matrix and the current flow */
    for (j=0;j<NUMBER_CLIENTS;j++)
    {
        /* some clients may already have left but the server has not yet
         * been notified; those need no rescheduling.  Clients that have
         * left the system have exit_event and flow_event set to NULL
         * (their event structures were freed). */
        if(clients_vector[j].exit_event!=NULL)
        {
            flow_accumulator = 0;   /* total flow this client receives from all servers */
            data_accumulator = 0;   /* total data this client has received */
            cost_accumulator = 0;   /* total cost of its open connections */
            change_flow = 0;
            for(i=0;i<NUMBER_SERVERS;i++)
            {
                flow_accumulator += flow[i][j];
                data_accumulator += data[i][j];
                if(connections[i][j] == 1) cost_accumulator += Servers_connection_cost[j];
                /* if the server-allocated flow changed — i.e. flow[][]
                 * (effective flow) differs from own_flow[] (flow as seen
                 * by the client) — and the difference exceeds the
                 * VARIACAO threshold, mark a flow-change for this client */
                if( flow[i][j] != clients_vector[j].own_flow[i] )
                {
                    epsilon = flow[i][j]-clients_vector[j].own_flow[i];
                    if(epsilon < 0){ epsilon = -epsilon;}
                    if( epsilon > (((float)VARIACAO)/100)*(clients_vector[j].own_flow[i]) ){ change_flow = 1;}
                }
            }
            if( cost_accumulator < 0 )
            {
                printf("ERROR: %f ERROR_FLOW_ALLOCATION - NEGATIVE_COST %d %Lf\n", t, j, cost_accumulator);
                return 1;
            }
            if( (flow_accumulator < 0) || (flow_accumulator > (Clients_cap[j]-cost_accumulator)) )
            {
                printf("ERROR: %f ERROR_TOTAL_FLOW_ALLOCATION %d %Lf %Lf\n", t, j, flow_accumulator, (Clients_cap[j]-cost_accumulator));
                return 1;
            }
            if( (data_accumulator < 0) || (data_accumulator > FILE_SIZE) )
            {
                printf("ERROR: %f ERROR_DATA_AMOUNT %d %Lf\n", t, j, data_accumulator);
                printf( "\n%f REMAINING EVENTS TO PROCESS %d\n", t, (*number_events));
                listar(exits);
                listar(events);
                printf("\n");
                printf( "CLIENT %d DATA_VECTOR:\n", j);
                for(k=0; k<NUMBER_SERVERS; k++)
                {
                    printf( "DATA_AMOUNT_FROM_SERVER %d %Lf\n", k, data[k][j]);
                }
                printf("\nCLIENT %d FLOW_VECTOR:\n", j);
                for(k=0; k<NUMBER_SERVERS; k++)
                {
                    printf( "FLOW_AMOUNT_FROM_SERVER %d %f\n", k, flow[k][j]);
                }
                //return 1;
            }
            if(DEBUG_LEVEL>3){
                if(clients_vector[j].band_filled) printf("%f BAND_FILLED %d %Lf\n", t, j, flow_accumulator);
                if(!clients_vector[j].band_filled) printf("%f TOTAL_FLOW %d %Lf\n", t, j, flow_accumulator);
                printf("%f DATA_AMOUNT %d %Lf\n", t, j, data_accumulator);
            }

            /* RESCHEDULE THE EXIT EVENT from the current flow and the
             * data already received: estimated download-finish time */
            //when = ((FILE_SIZE-data_accumulator)/flow_accumulator)+t;
            if( flow_accumulator != 0)
            {
                when = ((((long double)FILE_SIZE)-data_accumulator)/flow_accumulator)+t;
                //printf("%f ESTIMATED_TIME_EXIT %d %Lf\n", t, j, when);
                //if( ( ((when - t)*flow_accumulator)+data_accumulator ) > FILE_SIZE )
                //{ printf("ERROR: %f ERROR_TIME_EVENT - EXIT_EVENT - TRUNKED %d exit: %f data_accumulator: %Lf flow_accumulator: %Lf \n", t, j, when, data_accumulator, flow_accumulator); return 1;}
            }
            else
            { when = INF;}
            if( when < t )
            {
                printf("ERROR: %f ERROR_TIME_EVENT - EXIT_EVENT %d %f\n", t, j, when);
                return 1;
            }
            temp_event = clients_vector[j].exit_event;
            if(get_time(temp_event) != when)
            {
                /* remove the old exit event */
                remove_event(temp_event);
                /* re-insert it at the new time */
                recoloca(exits,CLIENT_EXIT_EVENT,when,0,j,temp_event);
                if(DEBUG_LEVEL>3) printf("%f REPUSH CLIENT_EXIT_EVENT %d %f\n", t, j, when);
            }

            /* schedule a flow-change event for clients with a significant
             * change that do not already have one scheduled */
            if(change_flow && (clients_vector[j].flow_event_alloc==0))
            {
                clients_vector[j].flow_event_alloc=1;
                temp_event = clients_vector[j].flow_event;
                // while(true)
                // {
                /* workaround so the flow-change event never fires before
                 * pending connections/disconnections; can also bound how
                 * long flow perception takes.  Idea: model distances
                 * between servers and clients, with upper/lower bounds on
                 * the time needed to perceive a flow change. */
                while(true)
                {
                    /* given the distances of the servers whose flow changed significantly */
                    exp_paramenter = (float)(1/(float)DELTA2);
                    if(clients_vector[j].last_connection_scheduled > t) when = clients_vector[j].last_connection_scheduled+gera_aleatorio(exp_paramenter);
                    if(clients_vector[j].last_connection_scheduled <= t) when = t+gera_aleatorio(exp_paramenter);
                    //if( when > clients_vector[j].last_connection_scheduled ) break;
                    /* IDEA: the client perceives the flow change after at
                     * least FLOW_PERCEPTION times the current longest
                     * connection-setup time */
                    //if( (when-t) >= FLOW_PERCEPTION*(clients_vector[j].last_connection_scheduled - t) ) break;
                    //}
                    if( when < t )
                    {
                        printf("ERROR: %f ERROR_TIME_EVENT - FLOW_EVENT %d %f\n", t, j, when);
                        return 1;
                    }
                    busca_tempo (events, when, ant, pont);
                    if(*pont==NULL) break;
                }
                recoloca(events,FLOW_EVENT,when,0,j,temp_event);
                if(DEBUG_LEVEL>3) printf( "%f PUSH FLOW_EVENT %d %f\n", t, j, when);
                (*number_events)++;
            }
        }//end if client exit != null
    }

    /* remove the processed event (temp_event_events) from the list */
    i = erase(events,temp_event_events);
    if( i == 1 ) return 1;
    (*number_events)--;
    return 0;
}
// Convenience overload: remove the event matching `desc`/`day`/`month`
// in the current year (taken from the calendar's date provider `dp`),
// delegating to the four-argument overload.  Returns its result.
// NOTE(review): out-of-line template member — the `template <typename T>`
// header appears to be missing from this excerpt; confirm against the
// class definition.
bool Calendar<T>::remove_event(std::string desc, int day, int month) { return remove_event(desc, day, month, dp->year()); }