/*
** Walk the parsed token list and dispatch each command: run builtins
** in-process, otherwise resolve the executable path and fork/exec.
** Accumulated per-command error flags are reported through msh_error().
*/
void	process_entry(t_sh_token *token, t_envp *envp)
{
	t_int32			builtin_id;
	t_mysh_er		err;
	t_struct_linker	job_list;

	init_linker(&job_list);
	while (token)
	{
		err = 0;
		/* Only tokens that carry a command word and end a pipeline
		** segment (P_SEPARATOR_L) trigger execution. */
		if (token->up && (token->flag & P_SEPARATOR_L))
		{
			builtin_id = check_if_builtin(token->up->str);
			if (builtin_id >= 0)
			{
				if (test_job(&job_list, token->flag))
					err |= msh_builtin(&job_list, builtin_id, token, envp);
			}
			else
			{
				/* Not a builtin: locate the binary in PATH first. */
				builtin_id = get_exec_path(envp, token);
				if (builtin_id)
					err |= builtin_id;
				else if ((token->flag & P_SCOL_F)
					|| test_job(&job_list, token->flag))
					err |= msh_exec(&job_list, token, envp);
			}
		}
		msh_error(err, token->up);
		token = token->next;
	}
	free_jobs((t_job *)job_list.first);
}
disque::~disque()
{
	// Release the internal node list, the cached single job (deleting a
	// null pointer is a well-defined no-op, so no guard is needed), and
	// finally the accumulated jobs_ vector contents.
	free_nodes();
	delete job_;
	free_jobs();
}
/*
 * Release a NULL-terminated array of queue_info pointers.
 *
 * qarr          - array of queue_info*, terminated by a NULL entry;
 *                 may itself be NULL (no-op).
 * free_jobs_too - nonzero to also release each queue's job list
 *                 before freeing the queue_info struct.
 */
void free_queues(queue_info **qarr, char free_jobs_too)
  {
  queue_info **qp;

  if (qarr == NULL)
    return;

  for (qp = qarr; *qp != NULL; qp++)
    {
    if (free_jobs_too)
      free_jobs((*qp)->jobs);

    free_queue_info(*qp);
    }

  free(qarr);
  }
void xml_loadsave_jobs_read(const gchar *dirname, const gchar *basename) { #ifdef DEBUG printf("xml_loadsave_jobs_read\n"); #endif GMarkupParser parser = {xml_loadsave_jobs_start_element, xml_loadsave_jobs_end_element, xml_loadsave_jobs_text, NULL, NULL}; GMarkupParseContext *context; gchar *file_contents; gsize length; GError *error = NULL; gchar file[SMALL]; free_jobs(TRUE); sprintf(file, "%s%s%s___job_teams.xml", dirname, G_DIR_SEPARATOR_S, basename); xml_loadsave_teams_read(file, job_teams); sprintf(file, "%s%s%s___jobs.xml", dirname, G_DIR_SEPARATOR_S, basename); context = g_markup_parse_context_new(&parser, 0, NULL, NULL); if(!g_file_get_contents(file, &file_contents, &length, &error)) { debug_print_message("xml_loadsave_jobs_read: error reading file %s\n", file); misc_print_error(&error, TRUE); } if(g_markup_parse_context_parse(context, file_contents, length, &error)) { g_markup_parse_context_end_parse(context, NULL); g_markup_parse_context_free(context); g_free(file_contents); } else { debug_print_message("xml_loadsave_jobs_read: error parsing file %s\n", file); misc_print_error(&error, TRUE); } }
/** Free all memory allocated by the program.
    This mainly means we have to free a lot of strings
    and GArrays. */
void
free_memory(void)
{
#ifdef DEBUG
    printf("free_memory\n");
#endif

    /* Teardown order matters: per-subsystem data first, the live-games
       array last.  The FALSE argument on the per-subsystem calls means
       "free only, don't re-allocate fresh empty containers". */
    free_variables();
    free_names(FALSE);
    free_transfer_list();
    free_strategies();
    free_country(&country, FALSE);
    free_users(FALSE);
    free_bets(FALSE);
    free_lg_commentary(FALSE);
    free_news(FALSE);
    free_newspaper(FALSE);
    free_support_dirs();
    free_jobs(FALSE);
    free_g_array(&live_games);
}
/** Find out whether the user's application for the job
    is accepted and show the appropriate popups.
    @param job The job the user applied for.
    @param user The applying user.
    @return TRUE if accepted, FALSE otherwise.
    Side effects on success: moves the user to the offering team;
    for a national job the single offer is removed, otherwise
    (international job) the whole job list is cleared and the
    active country is switched. */
gboolean
misc2_callback_evaluate_job_application(Job *job, User *user)
{
#ifdef DEBUG
    printf("misc2_callback_evaluate_job_application\n");
#endif

    /* Rejected: just tell the user, nothing changes. */
    if(!query_job_application_successful(job, user))
    {
	game_gui_show_warning(
	    _("The owners of %s politely reject your application. You're not successful enough in their eyes."),
	    job_get_team(job)->name);
	return FALSE;
    }

    /* An international offer means leaving the current country:
       the user gets fired from the old team and the game world is
       switched to the new country before the team change below. */
    if(job->type != JOB_TYPE_NATIONAL)
    {
	game_gui_show_warning(
	    /* A lame duck is someone who will quit his job soon and thus doesn't have
	       a lot of influence/impact anymore, e.g. an American president during
	       the last 2 years of his second presidency. */
	    _("The owners of %s accept your application. Since %s don't want to get stuck with a lame duck, you get fired instantly and spend the rest of the current season tending your garden."),
	    job_get_team(job)->name, user->tm->name);
	job_change_country(job);
    }
    else
	game_gui_show_warning(
	    _("The owners of %s accept your application."),
	    job_get_team(job)->name);

    user_change_team(user, team_of_id(job->team_id));

    /* National job: remove just this offer.  International job: the
       old country's whole job list is obsolete, drop it entirely. */
    if(job->type == JOB_TYPE_NATIONAL)
	job_remove(job, TRUE);
    else
	free_jobs(TRUE);

    return TRUE;
}
/**
 * Parse the reply of a GETJOB-style command into disque_job objects.
 *
 * @param name when NULL, each reply entry is expected to be the
 *  3-element array [queue, id, body]; when non-NULL it is the queue
 *  name for every job and entries are expected to be [id, body].
 * @return pointer to the internal jobs_ vector (entries owned by this
 *  object and released by free_jobs()/the destructor), or NULL when
 *  the command failed or the reply had no entries.
 *
 * Bug fix vs. the previous version: with a non-NULL name the element
 * count was never checked before indexing fields[0]/fields[1], so a
 * short (malformed) reply entry caused an out-of-bounds read.  Both
 * branches now validate the entry length and skip bad entries.
 */
const std::vector<disque_job*>* disque::get_jobs(const char* name)
{
	const redis_result* result = run();
	if (result == NULL || result->get_type() != REDIS_RESULT_ARRAY)
		return NULL;

	size_t n;
	const redis_result** children = result->get_children(&n);
	if (children == NULL || n == 0)
		return NULL;

	// Discard jobs from any previous call before refilling jobs_.
	free_jobs();

	string buf;
	for (size_t i = 0; i < n; i++)
	{
		const redis_result* rr = children[i];
		if (rr->get_type() != REDIS_RESULT_ARRAY)
			continue;

		size_t k;
		const redis_result** fields = rr->get_children(&k);
		if (fields == NULL)
			continue;

		// [queue, id, body] without a name, [id, body] with one.
		size_t need = (name == NULL) ? 3 : 2;
		if (k < need)
			continue;

		disque_job* job = new disque_job;
		jobs_.push_back(job);

		size_t idx = 0;
		if (name == NULL)
		{
			fields[idx++]->argv_to_string(buf);
			job->set_queue(buf.c_str());
			buf.clear();
		}
		else
			job->set_queue(name);

		fields[idx++]->argv_to_string(buf);
		job->set_id(buf.c_str());
		buf.clear();

		fields[idx]->argv_to_string(buf);
		job->set_body(buf.c_str(), buf.length());
		buf.clear();
	}

	return &jobs_;
}