/*
 * Handle a "personal task" query: parse four query-string parameters
 * (salary comparator + value, year comparator + value) out of the URI,
 * run the DB query and send the JSON result back to the client.
 *
 * Fixes vs. original: strtok() results are NULL-checked before use
 * (malformed queries no longer crash), copies into the 10-byte buffers
 * are bounded, the worker array is worker_t* (not worker_t**), and the
 * Content-Length format specifier matches size_t.
 */
static void server_personalTask(http_request_t request, socket_t* client, db_t* base)
{
    char salaryReq[10], salaryVal[10], yearReq[10], yearVal[10], toSend[10000];
    char* params[4];

    /* Tokenize the URI in-place: skip the path, then collect the four
     * key/value tokens. Bail out on a malformed query string. */
    char* str = strtok(request.uri, "?&=");
    for (int i = 0; i < 4; i++) {
        str = strtok(NULL, "?&=");
        if (str == NULL) {
            server_notFound(client);
            return;
        }
        params[i] = str;
    }

    /* Bounded copies (original used strcpy into 10-byte buffers). */
    snprintf(salaryReq, sizeof salaryReq, "%s", params[0]); /* salary_m || salary_l */
    snprintf(salaryVal, sizeof salaryVal, "%s", params[1]); /* salary value */
    snprintf(yearReq, sizeof yearReq, "%s", params[2]);     /* year_m || year_l */
    snprintf(yearVal, sizeof yearVal, "%s", params[3]);     /* year value */

    int count = db_countWorkers(base);
    worker_t* workers[count];   /* array of worker pointers (was worker_t**) */
    for (int i = 0; i < count; i++)
        workers[i] = worker_new();

    str = db_personalTask(base, salaryReq, atoi(salaryVal), yearReq, atoi(yearVal), workers);

    /* %zu matches the size_t returned by strlen (was %i: UB). */
    snprintf(toSend, sizeof toSend,
             "HTTP/1.1 200 OK\n"
             "Content-Type: application/json\n"
             "Content-Length: %zu\r\n\r\n"
             "\n%s",
             strlen(str) + 1, str);

    for (int i = 0; i < count; i++)
        worker_free(workers[i]);

    socket_write_string(client, toSend);
    socket_close(client);
}
/*
 * Release the global worker pool and its protecting mutex at shutdown.
 * Pops and frees every queued worker, then closes hWorkersMutex and
 * marks it invalid so a second call becomes a no-op.
 */
void worker_cleanup(void)
{
    LPWORKER lpWorker = NULL;
    /* WARNING: we can have a race condition here, if while this code is
       executed another worker is waiting to access hWorkersMutex, he will
       never be able to get it... */
    if (hWorkersMutex != INVALID_HANDLE_VALUE) {
        WaitForSingleObject(hWorkersMutex, INFINITE);
        DEBUG_PRINT("Freeing global resource of workers");
        /* Empty the queue of worker worker */
        while (lpWorkers != NULL) {
            /* Drop the lock so worker_pop() can acquire it itself,
               then re-acquire before freeing the popped worker. */
            ReleaseMutex(hWorkersMutex);
            lpWorker = worker_pop();
            DEBUG_PRINT("Freeing worker %x", lpWorker);
            WaitForSingleObject(hWorkersMutex, INFINITE);
            worker_free(lpWorker);
        };
        ReleaseMutex(hWorkersMutex);
        /* Destroy associated mutex */
        CloseHandle(hWorkersMutex);
        /* Mark the handle invalid so the guard above fails next time. */
        hWorkersMutex = INVALID_HANDLE_VALUE;
    };
}
/*
 * Return a finished worker to the global pool.
 * If the pool holds fewer than THREAD_WORKERS_MAX workers, the worker is
 * prepended to lpWorkers for reuse; otherwise it is destroyed. Also
 * decrements the running-worker counter. All shared state is touched
 * under hWorkersMutex; the (optional) free happens outside the lock.
 */
void worker_push(LPWORKER lpWorker)
{
    BOOL bFreeWorker;
    bFreeWorker = TRUE;
    WaitForSingleObject(hWorkersMutex, INFINITE);
    DEBUG_PRINT("Testing if we are under the maximum number of running workers");
    if (list_length((LPLIST)lpWorkers) < THREAD_WORKERS_MAX) {
        DEBUG_PRINT("Saving this worker for future use");
        DEBUG_PRINT("Next: %x", ((LPLIST)lpWorker)->lpNext);
        /* Prepend this worker to the reuse list and keep it alive. */
        lpWorkers = (LPWORKER)list_concat((LPLIST)lpWorker, (LPLIST)lpWorkers);
        bFreeWorker = FALSE;
    };
    nWorkersCurrent--;
    DEBUG_PRINT("Workers running current/runnning max/waiting: %d/%d/%d", nWorkersCurrent, nWorkersMax, list_length((LPLIST)lpWorkers));
    ReleaseMutex(hWorkersMutex);
    /* Free outside the critical section to keep lock hold time short. */
    if (bFreeWorker) {
        DEBUG_PRINT("Freeing worker %x", lpWorker);
        worker_free(lpWorker);
    }
}
/*
 * Build an HTML listing of every worker in the database, each entry
 * linking to its detail / delete pages, followed by a "New worker" link.
 *
 * Returns a pointer to a static buffer valid until the next call
 * (NOT thread-safe / reentrant). The original returned the address of
 * a stack array, which is undefined behavior once the function returns.
 * Also fixes the worker array type (worker_t*, not worker_t**) and
 * bounds the per-entry formatting with snprintf.
 */
static char* server_getAllWorkersHTML(db_t* base)
{
    static char allOfTHem[10000];   /* static: must outlive the call */
    char one[1000];

    allOfTHem[0] = '\0';            /* reset between calls */

    int count = db_countWorkers(base);
    worker_t* workers[count];       /* array of worker pointers */
    for (int i = 0; i < count; i++)
        workers[i] = worker_new();
    db_fillWorkerArr(base, workers);

    for (int i = 0; i < count; i++) {
        snprintf(one, sizeof one,
                 " <p>Id: %i<br>"
                 " Name: <a href=\"/workers/%i\">%s</a><br>"
                 " <a href=\"/workers/delete/%i\">Free worker</a>"
                 " <br><br></p>",
                 worker_getId(workers[i]),
                 worker_getId(workers[i]), worker_getName(workers[i]),
                 worker_getId(workers[i]));
        strcat(allOfTHem, one);
    }
    strcat(allOfTHem, "<a href=\"/workers/new/\">New worker</a>");

    for (int i = 0; i < count; i++)
        worker_free(workers[i]);
    return allOfTHem;
}
/*
 * Serve an HTML detail page for the worker whose numeric id appears in
 * the request URI. Invalid / unknown ids get an error page; a URI with
 * no digits gets a 404 via server_notFound().
 *
 * Fixes vs. original: the client socket is closed on the "Wrong ID"
 * path too (it was leaked), the Content-Length specifier matches
 * size_t, and the response is built with a bounded snprintf.
 */
static void server_getByIdHTML(http_request_t request, socket_t* client, db_t* base)
{
    int id;
    char* getId = strpbrk(request.uri, "0123456789");
    if (getId) {
        id = atoi(getId);
        if (id <= 0 || !db_checkId(base, id)) {
            socket_write_string(client, "<h1>Wrong ID</h1><p><a href=\"/workers/\">All workers</a></p>");
            socket_close(client);   /* was leaked on this path */
            return;
        }
    } else {
        /* No digits in the URI at all: not found. */
        server_notFound(client);
        return;
    }

    char toSend[2000];
    char buffer[2000] = "<head><title>Worker</title></head><h1>Worker</h1><p><a href=\"/workers/\">All workers</a></p><p>";
    worker_t* worker = db_getWorkerById(base, id);
    strcat(buffer, server_getWorkerHTML(worker));
    strcat(buffer, "</p>");

    /* %zu matches the size_t returned by strlen (was %i: UB). */
    snprintf(toSend, sizeof toSend,
             "HTTP/1.1 200 OK\n"
             "Content-Type: text/html\n"
             "Content-Length: %zu\r\n\r\n"
             "\n%s",
             strlen(buffer), buffer);

    socket_write_string(client, toSend);
    worker_free(worker);
    socket_close(client);
}
/*
 * Release a db handle: its worker, both directory handles, the log
 * file descriptor, and finally the struct itself.
 * Tolerates a NULL argument (no-op), matching the free(NULL)-style
 * convention used by server_free() elsewhere in this file.
 */
void db_free(struct db *db)
{
    if (db == NULL)
        return;
    worker_free(db->worker);
    dir_free(db->log_dir);
    dir_free(db->index_dir);
    close(db->log_fd);
    free(db);
}
/**
 * Create new Worker.
 *
 * Allocates and zero/default-initializes a wkr_t, creates its control
 * object, connects to the head controller and sends the configuration
 * request. Returns the worker, or NULL when connect_to_head() or
 * send_config_req_msg() fails (the partially built worker is released
 * via worker_free() first).
 *
 * NOTE(review): allocation and wkr_ctl_new() failures are handled with
 * assert() only — in an NDEBUG build these would go unchecked.
 */
wkr_t* worker_new(struct ev_loop *loop, wkr_tmp_t *tmp) {
  LOG_FUNCTION
  wkr_t* w = wr_malloc(wkr_t);
  assert(w!=NULL);
  /* File descriptors start invalid; set up once we connect/listen. */
  w->req_fd = -1;
  w->listen_fd = -1;
  w->is_uds = tmp->is_uds;
  w->loop = loop;
  wr_string_null(w->sock_path);
  w->listen_port = 0;
  w->w_accept.active = 0;
  w->w_req.active = 0;
  w->http = NULL;
  w->tmp = tmp;
  assert(w->tmp!=NULL);
  w->env_var = NULL;
  w->ctl = wkr_ctl_new(w);
  assert(w->ctl!=NULL);
  if(connect_to_head(w) == FALSE){
    worker_free(&w);
    return NULL;
  }
  start_ctl_watcher(w);
/*  if(w->tmp->is_static){
    w->ctl->scgi = scgi_new();
    load_application(w);
  } else */
  if(send_config_req_msg(w) < 0){
    worker_free(&w);
    return NULL;
  }
  // Connect to head controller UDS socket before user previliges get lowered.
  return w;
}
/*
 * Tear down a Slave and everything it owns, in dependency order.
 * Returns -1 if any plugin reported errors during the run, 0 otherwise.
 */
gint slave_free(Slave* slave) {
    MAGIC_ASSERT(slave);
    gint returnCode = (slave->numPluginErrors > 0) ? -1 : 0;

    /* this launches delete on all the plugins and should be called before
     * the engine is marked "killed" and workers are destroyed. */
    g_hash_table_destroy(slave->hosts);

    /* we will never execute inside the plugin again */
    slave->forceShadowContext = TRUE;

    if(slave->topology) {
        topology_free(slave->topology);
    }
    if(slave->dns) {
        dns_free(slave->dns);
    }

    g_hash_table_destroy(slave->programs);

    g_mutex_clear(&(slave->lock));
    g_mutex_clear(&(slave->pluginInitLock));

    /* join and free spawned worker threads */
    //TODO

    /* per-run filesystem paths (g_free is NULL-safe, but guarded here) */
    if(slave->cwdPath) {
        g_free(slave->cwdPath);
    }
    if(slave->dataPath) {
        g_free(slave->dataPath);
    }
    if(slave->hostsPath) {
        g_free(slave->hostsPath);
    }

    /* free main worker */
    worker_free(slave->mainThreadWorker);

    /* invalidate the magic tag, then release the struct itself */
    MAGIC_CLEAR(slave);
    g_free(slave);
    return returnCode;
}
/**
 * The worker thread command loop.
 *
 * Blocks on kevent() waiting for either a command on the worker's
 * communication pipe or filesystem activity, and dispatches each event
 * until the peer closes the connection.
 *
 * @param[in] arg A pointer to the associated #worker.
 * @return NULL.
 **/
void* worker_thread (void *arg)
{
    assert (arg != NULL);
    worker* wrk = (worker *) arg;

    for (;;) {
        struct kevent received;

        int ret = kevent (wrk->kq, wrk->sets.events, wrk->sets.length, &received, 1, NULL);
        if (ret == -1) {
            /* transient failure: log and keep waiting */
            perror_msg ("kevent failed");
            continue;
        }

        if (received.ident == wrk->io[KQUEUE_FD]) {
            if (received.flags & EV_EOF) {
                /* Peer closed the control pipe: mark the worker closed
                 * and remove it from the global worker set. */
                wrk->closed = 1;
                wrk->io[INOTIFY_FD] = -1;
                worker_erase (wrk);

                /* Only free here if no inotify call currently holds the
                 * worker's mutex; otherwise ownership of the cleanup
                 * stays with that caller. */
                if (pthread_mutex_trylock (&wrk->mutex) == 0) {
                    worker_free (wrk);
                    pthread_mutex_unlock (&wrk->mutex);
                    free (wrk);
                }
                /* If we could not lock on a worker, it means that an inotify
                 * call (add_watch/rm_watch) has already locked it. In this
                 * case worker will be freed by a caller (caller checks the
                 * `closed' flag. */
                return NULL;
            } else {
                /* A command arrived on the control pipe. */
                process_command (wrk);
            }
        } else {
            /* A watched file/directory produced a kqueue event. */
            produce_notifications (wrk, &received);
        }
    }
    return NULL;
}
/*
 * Destroy a server_t: release every receiver and worker it owns, close
 * the connection, free the containing arrays, then the struct itself.
 * A NULL server is a no-op.
 */
void server_free(server_t *server)
{
    if (server == NULL) {
        return;
    }

    /* Free the elements first, then their containers. */
    for (int idx = 0; idx < server->receivers->size; idx++) {
        receiver_free(array_at(server->receivers, idx));
    }
    for (int idx = 0; idx < server->workers->size; idx++) {
        worker_free(array_at(server->workers, idx));
    }

    conn_free(server->conn);

    array_free(server->endpoints);
    array_free(server->receivers);
    array_free(server->workers);

    free(server);
}
/*
 * Shut down and free a server: stop every worker thread, tear down the
 * libevent signal handler and base, close the listening fd, and free
 * all owned resources.
 * Tolerates a NULL argument (no-op) for consistency with the other
 * server_free() in this file.
 */
void server_free(server *s)
{
    int i;

    if (s == NULL)
        return;

    /* shutdown worker threads */
    for (i = 0; i < s->cfg->workers; i++) {
        worker_free(s->w[i]);
    }

    /* free */
    event_del(s->signal);
    event_free(s->signal);
    event_base_free(s->base);
    close(s->fd);
    free(s->w);
    conf_free(s->cfg);
    log_free(s->log);
    free(s);
}
/*
 * Build a JSON listing of every worker in the database by concatenating
 * worker_makeWorkerJSON() output for each one.
 *
 * Returns a pointer to a static buffer valid until the next call
 * (NOT thread-safe / reentrant). The original returned the address of
 * a stack array, which is undefined behavior once the function returns.
 * Also fixes the worker array type (worker_t*, not worker_t**).
 */
static char* server_getAllWorkersJSON(db_t* base)
{
    static char allOfTHem[4000];   /* static: must outlive the call */

    allOfTHem[0] = '\0';           /* reset between calls */

    int count = db_countWorkers(base);
    worker_t* workers[count];      /* array of worker pointers */
    for (int i = 0; i < count; i++)
        workers[i] = worker_new();
    db_fillWorkerArr(base, workers);

    for (int i = 0; i < count; i++) {
        strcat(allOfTHem, worker_makeWorkerJSON(workers[i]));
    }

    for (int i = 0; i < count; i++)
        worker_free(workers[i]);
    return allOfTHem;
}
/*
 * Serve a JSON representation of the worker whose numeric id appears in
 * the request URI. Invalid / unknown ids get a "Wrong ID" reply; a URI
 * with no digits gets a 404 via server_notFound().
 *
 * Fixes vs. original: the worker is freed and the socket closed on the
 * error paths (both were leaked), the Content-Length specifier matches
 * size_t, and the response is built with a bounded snprintf.
 */
static void server_getByIdJSON(http_request_t request, socket_t* client, db_t* base)
{
    int id;
    char* getId = strpbrk(request.uri, "0123456789");
    if (getId) {
        id = atoi(getId);
        if (id <= 0 || !db_checkId(base, id)) {
            socket_write_string(client, "Wrong ID");
            socket_close(client);   /* was leaked on this path */
            return;
        }
    } else {
        /* No digits in the URI at all: not found. */
        server_notFound(client);
        return;
    }

    char buffer[1000] = "";
    worker_t* worker = db_getWorkerById(base, id);
    char* workerJSON = worker_makeWorkerJSON(worker);
    if (workerJSON == NULL) {
        socket_write_string(client, "Wrong ID");
        worker_free(worker);        /* was leaked on this path */
        socket_close(client);       /* was leaked on this path */
        return;
    }

    /* %zu matches the size_t returned by strlen (was %i: UB). */
    snprintf(buffer, sizeof buffer,
             "HTTP/1.1 200 OK\n"
             "Content-Type: application/json\n"
             "Content-Length: %zu\r\n\r\n"
             "\n%s",
             strlen(workerJSON) + 1, workerJSON);

    socket_write_string(client, buffer);
    worker_free(worker);
    socket_close(client);
}
/*
 * Handle a POST creating a new worker from form arguments
 * (name, surname, salary, year). Validates the input, inserts the
 * worker into the database, and reports success or the specific
 * validation failure to the client.
 *
 * Fixes vs. original: NULL results from http_request_getArg() are
 * rejected before strlen()/isdigit() touch them, the " " sentinel is
 * compared with strcmp() (the original compared pointers to a string
 * literal, which is always false), isdigit() gets an unsigned char,
 * and the unused 2000-byte buffer is gone.
 */
static void server_post(http_request_t request, socket_t* client, db_t* base)
{
    char* name = http_request_getArg(&request, "name");
    char* surname = http_request_getArg(&request, "surname");
    char* salary = http_request_getArg(&request, "salary");
    char* year = http_request_getArg(&request, "year");

    /* Missing or too-short name/surname. */
    if (name == NULL || surname == NULL ||
        strlen(name) <= 1 || strlen(surname) <= 1) {
        server_send(client, "Name/Surname wasn't filled in"
                            "<p><a href=\"/workers/new/\">Back to POST</a></p>");
        return;
    }
    /* Numeric fields must start with a digit. */
    if (salary == NULL || year == NULL ||
        isdigit((unsigned char)salary[0]) == 0 ||
        isdigit((unsigned char)year[0]) == 0) {
        server_send(client, "Wrong data!"
                            "<p><a href=\"/workers/new/\">Back to POST</a></p>");
        return;
    }

    /* Blank sentinel means "default to 0" (string compare, not pointer
     * compare as in the original). */
    if (strcmp(salary, " ") == 0)
        salary = "0";
    if (strcmp(year, " ") == 0)
        year = "0";

    worker_t* worker = worker_new();
    /* -1 id: let the database assign one on insert. */
    worker_fill(worker, -1, name, surname, atoi(salary), atoi(year));
    db_insertWorker(base, worker);
    worker_free(worker);

    server_send(client, "Success"
                        "<p><a href=\"/workers/\">All workers</a></p>");
}
/*
 * Main window procedure: builds the UI on WM_CREATE, shows a randomly
 * chosen worker's fields, and advances a counter label on WM_TIMER
 * while the "Timer" checkbox is checked.
 *
 * NOTE(review): `buffer` (10 bytes) is reused for every itoa/sprintf —
 * fine for the int values shown, but worth confirming if the tick can
 * grow large. Workers are freed on both BUTTON_EX and WM_CLOSE; these
 * paths appear mutually exclusive (DestroyWindow posts WM_DESTROY, not
 * WM_CLOSE), but verify no path frees twice.
 */
LRESULT CALLBACK WndProc(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
    static HINSTANCE hInst;
    static HWND hButtonEx, hLable;
    static HWND hStaticIndS, hStaticNameS, hStaticSurnameS, hStaticExpS, hStaticSalarS;
    static HWND hStaticName, hStaticSurname, hStaticExp, hStaticSalar;
    static worker_t * worker[WORKERS_COUNT];
    static int index;          /* which worker is displayed */
    static char buffer[10];    /* shared scratch for itoa/sprintf */
    static int tick;           /* counter shown in the index label */

    switch(msg) {
    case WM_CREATE:
        /* Checkbox that enables/disables the timer updates. */
        CreateWindowW( L"button", L"Timer",
            WS_CHILD|WS_VISIBLE|BS_CHECKBOX|BS_AUTOCHECKBOX,
            90, 150, 130, 23, hwnd, (HMENU)TIMER_CB, NULL, NULL);
        hButtonEx = CreateWindowEx(0, WC_BUTTON, "Exit",
            WS_CHILD|WS_VISIBLE|WS_TABSTOP|BS_DEFPUSHBUTTON,
            90, 190, 130, 23, hwnd, (HMENU)BUTTON_EX, hInst, NULL);
        /* Static label column (field names). */
        hStaticIndS = CreateWindowEx(0, WC_STATIC, "Index:",
            WS_CHILD|WS_VISIBLE,
            10, 20, 130, 23, hwnd, (HMENU)STATIC_INDEX_S, hInst, NULL);
        hStaticNameS = CreateWindowEx(0, WC_STATIC, "Name:",
            WS_CHILD|WS_VISIBLE,
            10, 44, 130, 23, hwnd, (HMENU)STATIC_NAME_S, hInst, NULL);
        hStaticSurnameS = CreateWindowEx(0, WC_STATIC, "Surname:",
            WS_CHILD|WS_VISIBLE,
            10, 68, 130, 23, hwnd, (HMENU)STATIC_SURNAME_S, hInst, NULL);
        hStaticExpS = CreateWindowEx(0, WC_STATIC, "Experience:",
            WS_CHILD|WS_VISIBLE,
            10, 92, 130, 23, hwnd, (HMENU)STATIC_EXP_S, hInst, NULL);
        hStaticSalarS = CreateWindowEx(0, WC_STATIC, "Salary:",
            WS_CHILD|WS_VISIBLE,
            10, 116, 130, 23, hwnd, (HMENU)STATIC_SALARY_S, hInst, NULL);
        /* Populate the fixed worker table and pick one at random. */
        index = rand() % WORKERS_COUNT;
        worker[0] = worker_new(0, "George", "Horn", 6, 6000);
        worker[1] = worker_new(1, "Emily", "Wiggins", 2, 2000);
        worker[2] = worker_new(2, "Claude", "Jefferson", 3, 3000);
        worker[3] = worker_new(3, "Ursula", "Miller", 1, 1000);
        worker[4] = worker_new(4, "Damian", "Fields", 4, 4500);
        /* Tick starts at the selected worker's index. */
        tick = worker_getIndex(worker[index]);
        hLable = CreateWindowEx(0, "STATIC",
            itoa(worker_getIndex(worker[index]), buffer, 10),
            WS_CHILD|WS_VISIBLE,
            150, 20, 130, 23, hwnd, (HMENU)LABLE_ID, hInst, NULL);
        int ret = SetTimer(hwnd, TIMER_CB, TIMER_TICK, NULL);
        if(ret == 0)
            MessageBox(hwnd, "Could not set timer", "ERROR", MB_OK|MB_ICONEXCLAMATION);
        /* Value column for the selected worker. */
        hStaticName = CreateWindowEx(0, WC_STATIC, worker_getName(worker[index]),
            WS_CHILD|WS_VISIBLE,
            150, 44, 130, 23, hwnd, (HMENU)STATIC_NAME, hInst, NULL);
        hStaticSurname = CreateWindowEx(0, WC_STATIC, worker_getSurname(worker[index]),
            WS_CHILD|WS_VISIBLE,
            150, 68, 130, 23, hwnd, (HMENU)STATIC_SURNAME, hInst, NULL);
        hStaticExp = CreateWindowEx(0, WC_STATIC,
            itoa(worker_getExp(worker[index]), buffer, 10),
            WS_CHILD|WS_VISIBLE,
            150, 92, 130, 23, hwnd, (HMENU)STATIC_EXP, hInst, NULL);
        hStaticSalar = CreateWindowEx(0, WC_STATIC,
            itoa(worker_getSalary(worker[index]), buffer, 10),
            WS_CHILD|WS_VISIBLE,
            150, 116, 130, 23, hwnd, (HMENU)STATIC_SALARY, hInst, NULL);
        break;
    case WM_TIMER: {
        /* Only advance the counter while the checkbox is checked. */
        int checked = IsDlgButtonChecked(hwnd, TIMER_CB);
        if(checked) {
            tick++;
            sprintf(buffer, "%i", tick);
            HWND hIndex = GetDlgItem(hwnd, LABLE_ID);
            SendMessage(hIndex, WM_SETTEXT, (WPARAM)256, (LPARAM)buffer);
        }
    }
        break;
    case WM_COMMAND: {
        switch(LOWORD(wParam)) {
        case BUTTON_EX: {
            /* Exit button: release workers then tear the window down. */
            for(int i = 0; i<WORKERS_COUNT; i++)
                worker_free(worker[i]);
            DestroyWindow(hwnd);
            break;
        }
        }
        break;
    }
    case WM_CLOSE:
        /* Window close (e.g. title-bar X): same cleanup as Exit. */
        for(int i = 0; i<WORKERS_COUNT; i++)
            worker_free(worker[i]);
        DestroyWindow(hwnd);
        break;
    case WM_DESTROY:
        PostQuitMessage(0);
        break;
    default:
        return DefWindowProc(hwnd, msg, wParam, lParam);
    }
    return 0;
}
// Function responsible for the workers on current TM node.
// Thread body: resolves the user plugin's spits_worker_* entry points
// via dlsym, then loops pulling tasks from the shared FIFO, executing
// them, and appending results to the shared result list until
// d->running goes false. Optionally journals each execution.
//
// NOTE(review): execute_pit is used without a NULL check, unlike
// worker_new/worker_free — confirm the plugin always exports
// spits_worker_run. The debug() calls below print pointers with %d;
// should be %p (left as-is here).
void *worker(void *ptr)
{
    int my_rank = COMM_get_rank_id();
    int task_id, j_id=0;                          // j_id = journal id for current thread.
    struct tm_thread_data *d = (struct tm_thread_data *) ptr;
    struct byte_array * task;
    struct result_node * result;
    uint64_t buffer;
    struct j_entry * entry;

    workerid = d->id;

    // Resolve the plugin's lifecycle hooks; new/free are optional.
    void* (*worker_new) (int, char **);
    worker_new = dlsym(d->handle, "spits_worker_new");
    void (*execute_pit) (void *, struct byte_array *, struct byte_array *);
    execute_pit = dlsym(d->handle, "spits_worker_run");
    void* (*worker_free) (void *);
    worker_free = dlsym(d->handle, "spits_worker_free");

    void *user_data = worker_new ? worker_new(d->argc, d->argv) : NULL;

    if(TM_KEEP_JOURNAL > 0) {
        j_id = JOURNAL_get_id(d->dia, 'W');
    }

    sem_wait (&d->tcount);                        // wait for the first task to arrive.
    while (d->running) {
        // Get a new task from the FIFO under the task lock.
        pthread_mutex_lock(&d->tlock);
        cfifo_pop(&d->f, &task);
        pthread_mutex_unlock(&d->tlock);

        // Warn the Task Manager about the new space available.
        sem_post(&d->sem);

        // First 64 bits of the task payload are its id.
        byte_array_unpack64(task, &buffer);
        task_id = (int) buffer;
        debug("[worker] Received TASK %d", task_id);

        //_byte_array_pack64(task, (uint64_t) task_id); // Put it back, might use in execute_pit.

        // Result is heap-allocated; ownership passes to the result list.
        result = (struct result_node *) malloc(sizeof(struct result_node));
        byte_array_init(&result->ba, 10);
        byte_array_pack64(&result->ba, task_id);  // Pack the ID in the result byte_array.
        byte_array_pack64(&result->ba, my_rank);

        if(TM_KEEP_JOURNAL > 0) {
            entry = JOURNAL_new_entry(d->dia, j_id);
            entry->action = 'P';
            gettimeofday(&entry->start, NULL);
        }

        debug("[--WORKER] task: %d", task);
        debug("[--WORKER] &result->ba: %d", &result->ba);

        execute_pit(user_data, task, &result->ba); // Do the computation.

        if(TM_KEEP_JOURNAL > 0) {
            gettimeofday(&entry->end, NULL);
        }

        byte_array_free(task);                    // Free memory used in task and pointer.
        free(task);                               // For now, each pointer is allocated in master thread.

        debug("Appending task %d.", task_id);

        // Pack the result to send it later: push onto the doubly linked
        // result list under the result lock.
        pthread_mutex_lock(&d->rlock);
        result->next = d->results;
        result->before = NULL;
        result->task_id = task_id;
        if(d->results != NULL) {
            d->results->before = result;
        }
        d->results = result;
        // If a blocking flush is in progress, signal progress to it.
        if(d->is_blocking_flush==1) {
            if(TM_NO_WAIT_FINAL_FLUSH > 0) {
                sem_post(&d->no_wait_sem);
            }
            else {
                d->bf_remaining_tasks--;
                if(d->bf_remaining_tasks==0) {
                    pthread_mutex_unlock(&d->bf_mutex);
                }
            }
        }
        pthread_mutex_unlock(&d->rlock);

        sem_wait (&d->tcount);                    // wait for the next task to arrive.
    }

    // Let the plugin release its per-worker state, if it provided a hook.
    if (worker_free) {
        worker_free(user_data);
    }
    //free(result);
    pthread_exit(NULL);
}
/*
 * Worker process entry point: attaches this worker's shared-memory
 * slot, sets up its connections and event handler, spawns the
 * keep-alive cleanup and heartbeat threads, then runs the event loop
 * until the master stops. Exits the process when done.
 *
 * NOTE(review): shmat() failure is compared against (char*)-1 while w
 * is a worker* — confirm the intended sentinel is (void*)-1. The
 * pthread_create return codes (rc_cleanup/rc_heartbeat) are captured
 * but never checked.
 */
static int worker_server(worker *info)
{
    /* server worker */
    int nfds, fd, i;
    worker *w;
    int num = info->num;
    master_server *master_srv = info->master_srv;

    /* attach the shared mem */
    int shmid;
    key_t key = master_srv->pid + num;   /* per-worker key derived from master pid */

    if ((shmid = shmget(key, sizeof(worker), 0666)) < 0) {
        perror ("ERROR shmget");
        exit (1);
    }

    /* attach it */
    if ((w = shmat(shmid, NULL, 0)) == (char*) -1) {
        perror ("ERROR shmat");
        exit (1);
    }

    /* process id */
    w->pid = getpid();

    /* worker process started */
    printf (" * Worker process #%d is started.\n", num+1);

    /* pre-setup worker connections */
    w->conns = connection_setup(master_srv);

    /* create a new event handler for this worker */
    event_handler *ev_handler = events_create(master_srv->config->max_clients);

    /* share the event fd */
    w->ev_handler.fd = ev_handler->fd;

    /* starting keep-alive clean-up thread */
    printf (" * Starting keep-alive clean-up thread for worker #%d.\n", num+1);
    pthread_t thread_keep_alive;
    int rc_cleanup = pthread_create(&thread_keep_alive, NULL, worker_keep_alive_cleanup, w);

    /* starting heartbeat thread */
    printf (" * Starting heartbeat thread for worker #%d.\n", num+1);
    pthread_t thread_heartbeat;
    int rc_heartbeat = pthread_create(&thread_heartbeat, NULL, worker_heartbeat, w);

    /* entering main loop...
       */
    while (master_srv->running) {
        /* check for new data */
        if ((nfds = events_wait(ev_handler, master_srv)) == -1) {
            perror ("ERROR epoll_pwait");
        }

        for (i = 0; i < nfds; ++i) {
            /* data received */
            fd = events_get_fd(ev_handler, i);
            connection *conn = w->conns[fd];

            if (events_closed(ev_handler, i)) {
                /* the other end closed the connection */
                conn->status = CONN_INACTIVE;
                printf (" * Other end closed the connection\n");
            }
            else if (conn->status == CONN_INACTIVE) {
                /* this connection is inactive, initiate a new connection */
                connection_start (master_srv, conn);
            }

            connection_handle (w, conn);

            /* closing */
            if (conn->status == CONN_INACTIVE) {
                if (events_del_event(ev_handler, fd) == -1) {
                    perror ("ERROR events_del_event");
                }
                close (fd);
            }
        }
    }

    printf (" * Shutting down worker process #%d...\n", num+1);

    /* free event handler */
    events_free (ev_handler, master_srv->config->max_clients);

    /* TODO: free all connections */
    free (w->conns);

    /* free this workers memory */
    worker_free (w);

    exit (0);
}