/* Scan a module definition (second pass): walk the module body and emit
 * definitions for every function and struct found inside it.
 * pos: in/out index into the token stream, advanced as tokens are consumed. */
void module_parse_def(int * pos)
{
  TokenInfo t_k;
  token_Get(pos,&t_k);
  /* Update the current module name (switch the symbol context to this module) */
  set_context(t_k.content);
  /* consume the module's opening brace */
  token_Get(pos,&t_k);
  /* brace depth: starts at -1 for the already-consumed opening brace;
   * each '{' decrements, each '}' increments, 0 means the module closed */
  int brace=-1;
  /* Second pass: scan all function and struct definitions */
  {
    do
    {
      token_Get(pos,&t_k);
      switch(t_k.type)
      {
        case TOKEN_TYPE_EOF:
          break;
        /* Function-definition keyword found: define the function */
        case TOKEN_TYPE_FUNC_DEF:
          func_ParseDef(pos);
          break;
        case TOKEN_TYPE_USING:
          module_parse_using(pos);
          break;
        /* Struct definition found */
        case TOKEN_TYPE_STRUCT:
          struct_ParseDefine(pos);/* parse the struct body */
          break;
        case TOKEN_TYPE_LEFT_BRACE:
          brace--;
          break;
        case TOKEN_TYPE_RIGHT_BRACE:
          brace++;
          if(brace==0)
          {
            /* matched the module's closing brace: force the loop to exit */
            t_k.type=TOKEN_TYPE_EOF;
          }
          break;
        default :
          /* unexpected token at module scope: report and abort */
          printf("the token %d is unknown %s\n",t_k.type,t_k.content);
          printf("error !!\n");
          exit(0);
          break;
      }
    } while(t_k.type!=TOKEN_TYPE_EOF);
  }
  /* Reset the current module context back to global */
  set_context("\0");
}
/* Decode one packet by delegating to the software H.264 decoder and, when a
 * frame is produced, expose the hardware-decoded CVPixelBuffer's planes
 * through the AVFrame data/linesize arrays.
 * Returns whatever the wrapped H.264 decoder returns. */
static int vdadec_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    VDADecoderContext *ctx = avctx->priv_data;
    AVFrame *pic = data;
    int ret;

    /* Temporarily swap in the wrapped decoder's private state for the call */
    set_context(avctx);
    ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
    restore_context(avctx);
    if (*got_frame) {
        AVBufferRef *buffer = pic->buf[0];
        VDABufferContext *context = av_buffer_get_opaque(buffer);
        /* The VDA path stores the CVPixelBufferRef in data[3] */
        CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];

        /* Retain and lock the pixel buffer so the base addresses stay valid;
         * the reference is parked in the buffer's opaque context (presumably
         * released when the AVBufferRef is freed — confirm in the free cb) */
        CVPixelBufferRetain(cv_buffer);
        CVPixelBufferLockBaseAddress(cv_buffer, 0);
        context->cv_buffer = cv_buffer;
        pic->format = ctx->pix_fmt;
        if (CVPixelBufferIsPlanar(cv_buffer)) {
            int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
            av_assert0(count < 4); /* AVFrame only carries 4 data pointers */
            for (i = 0; i < count; i++) {
                pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
                pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
            }
        } else {
            pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
            pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
        }
    }
    avctx->pix_fmt = ctx->pix_fmt;

    return ret;
}
int lib$show_timer (void * handle_address, int * code, int (*user_action_procedure)(), unsigned long user_argument_value) { long * context = handle_address; int status; long * mycontext = 0; if (*context == -1) mycontext = &timer_context[0]; if (*context == 0) mycontext = *context = malloc (5 * sizeof(long)); long new_context[5]; memset (&new_context[0], 0, 5 * sizeof(long)); set_context(&new_context[0]); char c[256]; sprintf(c, "ELAPSED: %d CPU: %d BUFIO: %d DIRIO: %d FAULTS: %d", new_context[0]-mycontext[0], new_context[0]-mycontext[0], new_context[1]-mycontext[1], new_context[2]-mycontext[2], new_context[3]-mycontext[3], new_context[4]-mycontext[4]); // pretty-print later struct dsc$descriptor d; struct dsc$descriptor * out_str = &d; out_str->dsc$a_pointer = c; out_str->dsc$w_length = strlen(c); if (user_action_procedure) user_action_procedure (out_str, user_argument_value); else printf("%s\n", out_str->dsc$a_pointer); return SS$_NORMAL; }
/* Scan a module (first pass): walk the module body and emit forward
 * declarations for every function and struct, skipping their bodies.
 * pos: in/out index into the token stream, advanced as tokens are consumed. */
void module_parse_declare(int * pos)
{
  TokenInfo t_k;
  token_Get(pos,&t_k);
  /* Update the current module name (switch the symbol context to this module) */
  set_context(t_k.content);
  /* Skip the module's opening brace */
  token_Get(pos,&t_k);
  /* brace depth: starts at -1 for the already-consumed opening brace;
   * each '{' decrements, each '}' increments, 0 means the module closed */
  int brace=-1;
  {
    /* Scan the declarations inside the module */
    do
    {
      token_Get(pos,&t_k);
      switch(t_k.type)
      {
        case TOKEN_TYPE_EOF:
          break;
        case TOKEN_TYPE_LEFT_BRACE:
          brace--;
          break;
        case TOKEN_TYPE_RIGHT_BRACE:
          brace++;
          if(brace==0)
          {
            /* matched the module's closing brace: force the loop to exit */
            t_k.type=TOKEN_TYPE_EOF;
          }
          break;
        /* Function-definition keyword found: declare the function */
        case TOKEN_TYPE_FUNC_DEF:
          func_ParseDeclare(pos);
          break;
        /* Struct definition found: declare the struct */
        case TOKEN_TYPE_STRUCT:
          struct_ParseDeclare(pos);
          token_SkipBlock(pos);/* skip over the struct definition block */
          break;
        default :
          break;
      }
    } while(t_k.type!=TOKEN_TYPE_EOF);
  }
  /* Reset the current module context back to global */
  set_context("\0");
}
/*!

\brief Constructor.
\param[in] Name String containing the name of the Plugin.
\param[in] DeleteMode PluginDeletModeEnum specifying the delete mode to be used
for the Plugin.
\param[in] context Pointer to the runtime context.
\param[in] lib Pointer to the DynamicLibrary used to load the Plugin.

*/
dmz::PluginInfo::PluginInfo (
      const String &Name,
      const PluginDeleteModeEnum DeleteMode,
      RuntimeContext *context,
      DynamicLibrary *lib) :
      // NOTE(review): the heap-allocated State is bound to the _state
      // reference; presumably the destructor deletes &_state — confirm.
      _state (*(new State (Name, DeleteMode, context))) {

   set_context (context);
   set_dynamic_library (lib);
}
/* Emulation of VMS LIB$INIT_TIMER: (re)initialize a timer context and record
 * the current statistics as the baseline for later LIB$SHOW/STAT_TIMER calls.
 * context: handle cell; -1 selects the static context, 0 allocates a fresh
 * heap context (pointer stored back into *context), any other value is an
 * existing heap context to re-initialize.
 * Returns SS$_NORMAL. */
int lib$init_timer (long * context)
{
    long * mycontext = 0;

    if (*context == -1)
        mycontext = &timer_context[0];          /* static default context */
    if (*context == 0)
        mycontext = *context = malloc (5 * sizeof(long)); /* first use */
    /* BUG FIX: re-initializing an existing heap context left mycontext NULL
     * and crashed in memset below; reuse the stored pointer instead.
     * NOTE(review): malloc is still unchecked, matching the sibling LIB$
     * routines — a failure would crash here as before. */
    if (mycontext == 0)
        mycontext = (long *) *context;

    memset (mycontext, 0, 5 * sizeof(long));
    set_context (mycontext);                    /* capture baseline counters */
    return SS$_NORMAL;
}
/* Application entry point: initialize SDL, then repeatedly create a video
 * context, run the Application on a scene (from argv[1] or a default file),
 * and tear everything down, until the app requests exit. */
int main (int argc, char * argv[])
{
    //#define CHECK_CDROM 1
#ifdef CHECK_CDROM
    /* Optional diagnostic: enumerate CD/DVD drives instead of plain init */
    SDL_Init(SDL_INIT_CDROM);
    std::cout << SDL_CDNumDrives() << " CD- / DVD-ROM drives available." << std::endl;
    for (int i = 0; i < SDL_CDNumDrives(); i++ )
    {
        std::cout << i << ". " << SDL_CDName(i) << std::endl;
    }
#else
    SDL_Init(0);
#endif

    /* Outer loop allows the application to restart (e.g. after a video-mode
     * change) without quitting SDL entirely */
    while (!app_exit)
    {
        SDL_InitSubSystem(SDL_INIT_VIDEO);
        SDL_Surface * screen;
        screen = NULL;
        // NOTE(review): screen is NULL here; set_context presumably takes it
        // by reference and creates the surface — confirm its signature.
        set_context(screen,
            context_config[0],  // w
            context_config[1],  // h
            context_config[2],  // fullscreen
            context_config[3],  // framebuffer size
            context_config[4],  // multisamples
            context_config[5],  // multisample buffer
            context_config[6],  // bpp
            context_config[7]   // depthbuffer size
        );

        Application app;
        app.initialize(context_config);
        /* Scene file from the command line, or a bundled default */
        if (argc > 1)
        {
            app.load_scene(scene_config, argv[1]);
        }
        else
        {
            app.load_scene(scene_config, "testFile.bullet");
        }
        app.run(&app_exit);
        app.close_scene();
        app.close();

        SDL_FreeSurface(screen);
        screen = NULL;
        SDL_QuitSubSystem(SDL_INIT_VIDEO);
    }
    SDL_Quit();
    std::cout << "main() quit" << std::endl;
    return 0;
}
/* Derive and apply an SELinux execution context for the service on fd:
 * take the daemon's current context, replace its MLS range with the range of
 * the connecting peer, compute the transition context against the server
 * executable's file context, and install it via set_context().
 * Returns 0/-1 per set_context(), or -1 on any libselinux failure.
 * All intermediate contexts are released through the single fail path. */
static int set_context_from_socket( const struct service_config *scp, int fd )
{
   security_context_t curr_context = NULL;
   security_context_t peer_context = NULL;
   security_context_t exec_context = NULL;
   context_t bcon = NULL;
   context_t pcon = NULL;
   security_context_t new_context = NULL;
   security_context_t new_exec_context = NULL;
   int retval = -1;
   const char *exepath = NULL;

   /* Our current context (the base whose range we will rewrite) */
   if (getcon(&curr_context) < 0)
      goto fail;

   /* The peer's context supplies the MLS/MCS range to propagate */
   if (getpeercon(fd, &peer_context) < 0)
      goto fail;

   /* File context of the server binary, needed for the transition query */
   exepath = SC_SERVER_ARGV( scp )[0];
   if (getfilecon(exepath, &exec_context) < 0)
      goto fail;

   if (!(bcon = context_new(curr_context)))
      goto fail;

   if (!(pcon = context_new(peer_context)))
      goto fail;

   /* Copy the peer's range onto our context */
   if (!context_range_get(pcon))
      goto fail;

   if (context_range_set(bcon, context_range_get(pcon)))
      goto fail;

   /* NOTE: context_str() returns a pointer owned by bcon; not freed here */
   if (!(new_context = context_str(bcon)))
      goto fail;

   /* Ask the policy what context a process exec'd from exepath should get */
   if (security_compute_create(new_context, exec_context, SECCLASS_PROCESS, &new_exec_context) < 0)
      goto fail;

   retval = set_context(new_exec_context);
   freecon(new_exec_context);

fail:
   /* context_free/freecon accept NULL, so partial initialization is safe */
   context_free(pcon);
   context_free(bcon);
   freecon(exec_context);
   freecon(peer_context);
   freecon(curr_context);
   return retval;
}
/* Exercise page caching with gzip content negotiation: the same page key must
 * be stored and served in both gzip and identity variants, selected by the
 * request's Accept-Encoding. set_context(bool) configures whether the fake
 * request advertises gzip; str() returns the accumulated response body. */
void test_gzip()
{
    std::cout << "- Page Gzip" << std::endl;
    /* 1) gzip client, cold cache: page is stored compressed */
    set_context(true);
    cache().clear();
    TEST(request().getenv("HTTP_ACCEPT_ENCODING") == "gzip, deflate");
    cache().fetch_page("test");
    response().out() << "gzip";
    cache().store_page("test");
    TEST(str().substr(0,2)=="\x1f\x8b"); /* gzip magic bytes */
    TEST(gzip_);
    /* 2) identity client: the gzip entry must not satisfy this request */
    set_context(false);
    TEST(request().getenv("HTTP_ACCEPT_ENCODING") == "");
    TEST(cache_size() == 1);
    TEST(cache().fetch_page("test") ==false);
    response().out() << "gzip";
    cache().store_page("test");
    TEST(str() == "gzip");
    TEST(!gzip_);
    /* 3) identity client again: now served uncompressed from cache */
    set_context(false);
    TEST(cache().fetch_page("test"));
    TEST(str()=="gzip");
    TEST(!gzip_);
    /* 4) gzip client again: served the compressed variant from cache */
    set_context(true);
    TEST(cache().fetch_page("test"));
    TEST(str().substr(0,2)=="\x1f\x8b");
    TEST(gzip_);
    /* Both variants coexist under one key */
    set_context(false);
    TEST(cache_size()==2);
    cache().clear();
    TEST(cache_size()==0);
    release_context();
}
/* Close the VDA decoder wrapper: tear down the hardware decoder, then close
 * the wrapped software H.264 decoder if it was successfully opened. */
static av_cold int vdadec_close(AVCodecContext *avctx)
{
    VDADecoderContext *ctx = avctx->priv_data;
    /* release buffers and decoder */
    ff_vda_destroy_decoder(&ctx->vda_ctx);
    /* close H.264 decoder */
    if (ctx->h264_initialized) {
        /* Swap in the wrapped decoder's private state for the call */
        set_context(avctx);
        ff_h264_decoder.close(avctx);
        restore_context(avctx);
    }
    return 0;
}
/* Initialise the ACL subsystem: record the X.509 credential path, listen
 * address and port, install the TLS context, and register cleanup at exit.
 * Returns 0 on success, -1 if the security context cannot be set.
 * Mirrors bridge_init(). */
int acl_init(char *x509, char *addr, uint16_t port)
{
	journal_ftrace(__func__);

	acl_x509 = strdup(x509);

	/* BUG FIX: set_context() failure was silently ignored here, while the
	 * sibling bridge_init() reports it and fails; handle it the same way. */
	if (set_context(acl_x509)) {
		journal_notice("acl]> set_context failed :: %s:%i\n", __FILE__, __LINE__);
		return -1;
	}

	acl_addr = strdup(addr);
	acl_port = port;

	/* Ensure acl_fini() runs on shutdown */
	event_register(EVENT_EXIT, "acl_fini", acl_fini, PRIO_AGNOSTIC);

	return 0;
}
/* Exercise basic page caching: store/fetch round-trip, trigger-based
 * invalidation via rise(), reset() of pending state, and timeout expiry.
 * set_context(false) starts a fresh identity-encoding request; str() returns
 * the accumulated response body. */
void test_basic()
{
    std::cout << "- Page Basic" << std::endl;
    /* 1) cold cache: miss, render, store with trigger "x" */
    set_context(false);
    TEST(cache().has_cache());
    TEST(!cache().fetch_page("test"));
    response().out() << "test";
    cache().add_trigger("x");
    cache().store_page("test");
    TEST(str()=="test");
    /* 2) warm cache: hit serves the stored body */
    set_context(false);
    TEST(cache_size()==1);
    TEST(cache().fetch_page("test"));
    TEST(str()=="test");
    /* 3) raising trigger "x" evicts the page */
    set_context(false);
    cache().rise("x");
    TEST(cache_size() == 0);
    TEST(!cache().fetch_page("test"));
    /* reset() drops the pending trigger, so the re-stored page (2s TTL)
     * is no longer tied to "x" */
    cache().add_trigger("x");
    response().out() << "test2";
    cache().reset(); // reset works
    cache().store_page("test",2);
    TEST(str()=="test2");
    /* 4) trigger no longer evicts; page still served */
    set_context(false);
    cache().rise("x");
    TEST(cache_size() == 1);
    TEST(cache().fetch_page("test"));
    TEST(str() == "test2");
    /* 5) after the 2s TTL has passed, the entry has expired */
    booster::ptime::millisleep(3000);
    set_context(false);
    TEST(cache().fetch_page("test")==false);
    cache().clear();
    release_context();
}
/* Build the planet's object tree: bind the worker as command context and
 * register the built-in application methods (/.inspect, /class, /rubyk and
 * its /rubyk/link and /rubyk/quit children). Adopted objects are owned by
 * their parent (adopt() transfers ownership). */
void Planet::init()
{
    set_context(&worker_);

    // build application methods

    // /.inspect
    adopt(new TMethod<Planet, &Planet::inspect>(this, Url(INSPECT_URL).name(), StringIO("url", "Returns some information on the state of a node.")));

    // /class — finder that resolves class objects from the objects library
    classes_ = adopt(new ClassFinder(Url(CLASS_URL).name(), DEFAULT_OBJECTS_LIB_PATH));

    // /rubyk — container for runtime control methods
    Object *rubyk = adopt(new Object(Url(RUBYK_URL).name()));

    // /rubyk/link [[["","source url"],["", "target url"]], "Create a link between two urls."]
    rubyk->adopt(new TMethod<Planet, &Planet::link>(this, Url(LINK_URL).name(), JsonValue("[['','', ''],'url','op','url','Update a link between the two provided urls. Operations are '=>' (link) '||' (unlink) or '?' (pending).']")));

    // /rubyk/quit
    rubyk->adopt(new TMethod<Planet, &Planet::quit>(this, Url(QUIT_URL).name(), NilIO("Stop all operations and quit.")));
}
/* Initialise the bridge subsystem: record the X.509 credential path, listen
 * address and port, install the TLS context, and register cleanup at exit.
 * Returns 0 on success, -1 if the security context cannot be set. */
int bridge_init(char *x509, char *addr, uint16_t port)
{
	journal_ftrace(__func__);

	/* Keep private copies of the caller-supplied parameters */
	bridge_x509 = strdup(x509);
	bridge_addr = strdup(addr);
	bridge_port = port;

	if (set_context(bridge_x509) != 0) {
		journal_notice("bridge]> set_context failed :: %s:%i\n", __FILE__, __LINE__);
		return -1;
	}

	/* Ensure bridge_fini() runs on shutdown */
	event_register(EVENT_EXIT, "bridge_fini", bridge_fini, PRIO_AGNOSTIC);

	return 0;
}
/* Ask the user which board object type a layer element should become.
 * Shows a small radio-button dialog (Custom Block / Custom Floor / Text).
 * Returns the chosen `thing`, or NO_ID if the dialog was cancelled. */
enum thing layer_to_board_object_type(struct world *mzx_world)
{
  int dialog_result;
  struct element *elements[3];
  struct dialog di;
  int object_type = 0;
  const char *radio_button_strings[] =
  {
    "Custom Block",
    "Custom Floor",
    "Text"
  };

  // Prevent previous keys from carrying through.
  force_release_all_keys();

  /* Push the help/context state for this dialog; popped below */
  set_context(CTX_BLOCK_TYPE);

  elements[0] = construct_radio_button(6, 4, radio_button_strings, 3, 12, &object_type);
  elements[1] = construct_button(5, 11, "OK", 0);
  elements[2] = construct_button(15, 11, "Cancel", -1);

  construct_dialog(&di, "Object type", 26, 4, 28, 14, elements, 3, 0);
  dialog_result = run_dialog(mzx_world, &di);
  destruct_dialog(&di);
  pop_context();

  // Prevent UI keys from carrying through.
  force_release_all_keys();

  /* Non-zero result means Cancel/Escape */
  if(dialog_result)
    return NO_ID;

  switch(object_type)
  {
    case 0: return CUSTOM_BLOCK;
    case 1: return CUSTOM_FLOOR;
    case 2: return __TEXT;
  }

  return NO_ID;
}
/* Emulation of VMS LIB$STAT_TIMER: return a single statistic accumulated
 * since LIB$INIT_TIMER on this context.
 * code: 1-based statistic selector (1 = elapsed, 2 = CPU, 3 = buffered I/O,
 *       4 = direct I/O, 5 = page faults — matching lib$show_timer's order).
 * value_argument: receives the selected statistic.
 * handle_address: timer context handle (-1 = static, 0 = allocate fresh).
 * Returns SS$_NORMAL. */
int lib$stat_timer (int * code, long * value_argument, void * handle_address)
{
    long * context = handle_address;
    long * mycontext = 0;

    if (*context == -1)
        mycontext = &timer_context[0];          /* static default context */
    if (*context == 0)
        mycontext = *context = malloc (5 * sizeof(long));
    /* Reuse an already-initialized heap context (original left this NULL) */
    if (mycontext == 0)
        mycontext = (long *) *context;

    /* Snapshot the current counters */
    long new_context[5];
    memset (&new_context[0], 0, 5 * sizeof(long));
    set_context(&new_context[0]);

    /* BUG FIX: the baseline in mycontext was computed but never used, so the
     * reported value was the absolute counter rather than the delta since
     * LIB$INIT_TIMER. lib$show_timer reports new - baseline for every
     * statistic; do the same here. */
    *value_argument = new_context[*code - 1] - mycontext[*code - 1];
    return SS$_NORMAL;
}
/* Open the default OpenAL device and create a context for it.
 * Throws std::runtime_error if the device cannot be opened; if context
 * creation or activation fails, already-acquired resources are released
 * before the exception propagates. */
Audio :: Audio()
{
    auto l = lock();
    //alutInit(0, NULL);
    /* We manage the device/context ourselves; ALUT only for buffers */
    alutInitWithoutContext(0, NULL);
    m_pDevice = alcOpenDevice(NULL);
    if(not m_pDevice)
        throw std::runtime_error("failed to open OpenAL audio device");
    m_pContext = alcCreateContext(m_pDevice, NULL);
    if(not m_pContext)
        alcCloseDevice(m_pDevice);
        // NOTE(review): no throw here — construction continues with a null
        // context until set_context() below fails; confirm intended.
    try {
        set_context();
    } catch(...) {
        /* Roll back: destroy the context, then the device */
        alcDestroyContext(m_pContext);
        alcCloseDevice(m_pDevice);
        throw;
    }
}
/* Switch the MMU to the address space of `next`, with interrupts disabled
 * around the context/pgd update so the fast TLB handler never observes a
 * half-switched state. */
void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);

	/* If the process context we are swapping in has a different context
	 * generation than the global one, it should get a new generation/pid */
	if (unlikely(CTX_VERSION(next->context) != CTX_VERSION(next_mmu_context)))
		next->context = get_new_context();

	/* Save the current pgd so the fast tlb handler can find it */
	pgd_current = next->pgd;

	/* Set the current context */
	set_context(next->context);

	local_irq_restore(flags);
}
/* Construct a port on `node`: record identity (index, symbol), polyphony,
 * buffering parameters and initial value, then publish rdf:type and
 * lv2:index properties and bind the port to the audio context.
 * buffer_size == 0 selects the factory default for `type`. */
PortImpl::PortImpl(BufferFactory& bufs, NodeImpl* const node, const Raul::Symbol& name, uint32_t index, uint32_t poly, PortType type, const Atom& value, size_t buffer_size)
    : GraphObjectImpl(bufs.uris(), node, name)
    , _bufs(bufs)
    , _index(index)
    , _poly(poly)
    , _buffer_size(buffer_size)
    , _buffer_type(type)
    , _value(value)
    , _broadcast(false)
    , _set_by_user(false)
    , _last_broadcasted_value(value)
    , _context(Context::AUDIO)
    , _buffers(new Array<BufferFactory::Ref>(static_cast<size_t>(poly)))
    , _prepared_buffers(NULL)
{
    _types.insert(type);
    assert(node != NULL);
    assert(_poly > 0);

    /* Fall back to the factory's default buffer size for this port type */
    if (_buffer_size == 0)
        _buffer_size = bufs.default_buffer_size(type);

    const Ingen::Shared::LV2URIMap& uris = bufs.uris();
    add_property(uris.rdf_type, type.uri());
    set_property(uris.lv2_index, Atom((int32_t)index));
    set_context(_context);

    if (type == PortType::EVENTS)
        _broadcast = true;  // send activity blips
}
/* Construct a renderer for an image node: not yet rendered, bound to the
 * given render context via set_context(). */
image_node_renderer_t::image_node_renderer_t( const context_t& new_context) : render_done_( false)
{
    set_context( new_context);
}
/* Interactive self-updater: for each configured update host, download the
 * version manifest, let the user pick upgrade vs. stay-on-branch, compute
 * the added/replaced/removed file deltas, download replacements (deferring
 * deletions to next start via DELETE_TXT), and finally re-exec this binary
 * if anything was updated. Cleanup is a fall-through goto ladder: each
 * err_* label releases everything acquired after the previous label. */
static void __check_for_updates(struct world *mzx_world, struct config_info *conf)
{
  int cur_host;
  char *update_host;
  bool try_next_host = true;
  bool ret = false;

  set_context(CTX_UPDATER);

  if(conf->update_host_count < 1)
  {
    error("No updater hosts defined! Aborting.", 1, 8, 0);
    goto err_out;
  }

  if(!swivel_current_dir(true))
    goto err_out;

  /* Try hosts in order until one yields a usable manifest */
  for(cur_host = 0; (cur_host < conf->update_host_count) && try_next_host; cur_host++)
  {
    char **list_entries, buffer[LINE_BUF_LEN], *url_base, *value;
    struct manifest_entry *removed, *replaced, *added, *e;
    int i = 0, entries = 0, buf_len, result;
    char update_branch[LINE_BUF_LEN];
    const char *version = VERSION;
    int list_entry_width = 0;
    enum host_status status;
    struct host *h = NULL;
    unsigned int retries;
    FILE *f;

    // Acid test: Can we write to this directory?
    f = fopen_unsafe(UPDATES_TXT, "w+b");
    if(!f)
    {
      error("Failed to create \"" UPDATES_TXT "\". Check permissions.", 1, 8, 0);
      goto err_chdir;
    }

    update_host = conf->update_hosts[cur_host];

    if(!reissue_connection(conf, &h, update_host))
      goto err_host_destroy;

    for(retries = 0; retries < MAX_RETRIES; retries++)
    {
      // Grab the file containing the names of the current Stable and Unstable
      status = host_recv_file(h, "/" UPDATES_TXT, f, "text/plain");
      rewind(f);
      if(status == HOST_SUCCESS)
        break;
      if(!reissue_connection(conf, &h, update_host))
        goto err_host_destroy;
    }

    if(retries == MAX_RETRIES)
    {
      snprintf(widget_buf, WIDGET_BUF_LEN, "Failed to download \"" UPDATES_TXT "\" (err=%d).\n", status);
      widget_buf[WIDGET_BUF_LEN - 1] = 0;
      error(widget_buf, 1, 8, 0);
      goto err_host_destroy;
    }

    /* Look for the "Current-<branch>: <version>" line for our pinned branch */
    snprintf(update_branch, LINE_BUF_LEN, "Current-%s", conf->update_branch_pin);

    // Walk this list (of two, hopefully)
    while(true)
    {
      char *m = buffer, *key;
      value = NULL;

      // Grab a single line from the manifest
      if(!fgets(buffer, LINE_BUF_LEN, f))
        break;

      key = strsep(&m, ":\n");
      if(!key)
        break;
      value = strsep(&m, ":\n");
      if(!value)
        break;

      if(strcmp(key, update_branch) == 0)
        break;
    }

    fclose(f);
    unlink(UPDATES_TXT);

    /* There was no "Current-XXX: Version" found; we cannot proceed with the
     * update because we cannot compute an update URL below. */
    if(!value)
    {
      error("Failed to identify applicable update version.", 1, 8, 0);
      goto err_host_destroy;
    }

    /* There's likely to be a space prepended to the version number.
     * Skip it here. */
    if(value[0] == ' ')
      value++;

    /* We found the latest update version, but we should check to see if that
     * matches the version we're already using. The user may choose to receive
     * "stability" updates for their current major version, or upgrade to the
     * newest one. */
    if(strcmp(value, version) != 0)
    {
      struct element *elements[6];
      struct dialog di;

      buf_len = snprintf(widget_buf, WIDGET_BUF_LEN, "A new major version is available (%s)", value);
      widget_buf[WIDGET_BUF_LEN - 1] = 0;

      elements[0] = construct_label((55 - buf_len) >> 1, 2, widget_buf);
      elements[1] = construct_label(2, 4, "You can continue to receive updates for the version\n" "installed (if available), or you can upgrade to the\n" "newest major version (recommended).");
      elements[2] = construct_label(2, 8, "If you do not upgrade, this question will be asked\n" "again the next time you run the updater.\n");
      elements[3] = construct_button(9, 11, "Upgrade", 0);
      elements[4] = construct_button(21, 11, "Update Old", 1);
      elements[5] = construct_button(36, 11, "Cancel", 2);

      construct_dialog(&di, "New Major Version", 11, 6, 55, 14, elements, 6, 3);
      result = run_dialog(mzx_world, &di);
      destruct_dialog(&di);

      // User pressed Escape, abort all updates
      if(result < 0 || result == 2)
      {
        try_next_host = false;
        goto err_host_destroy;
      }

      // User pressed Upgrade, use new major
      if(result == 0)
        version = value;
    }

    /* We can now compute a unique URL base for the updater. This will
     * be composed of a user-selected version and a static platform-archicture
     * name. */
    url_base = cmalloc(LINE_BUF_LEN);
    snprintf(url_base, LINE_BUF_LEN, "/%s/" PLATFORM, version);
    debug("Update base URL: %s\n", url_base);

    /* The call to manifest_get_updates() destroys any existing manifest
     * file in this directory. Since we still allow user to abort after
     * this call, and downloading the updates may fail, we copy the
     * old manifest to a backup location and optionally restore it later. */
    if(!backup_original_manifest())
    {
      error("Failed to back up manifest. Check permissions.", 1, 8, 0);
      try_next_host = false;
      goto err_free_url_base;
    }

    for(retries = 0; retries < MAX_RETRIES; retries++)
    {
      bool m_ret;

      /* Hide the mouse and show a progress box while computing deltas */
      m_hide();
      draw_window_box(3, 11, 76, 13, DI_MAIN, DI_DARK, DI_CORNER, 1, 1);
      write_string("Computing manifest deltas (added, replaced, deleted)..", 13, 12, DI_TEXT, 0);
      update_screen();

      m_ret = manifest_get_updates(h, url_base, &removed, &replaced, &added);

      clear_screen(32, 7);
      m_show();
      update_screen();

      if(m_ret)
        break;

      if(!reissue_connection(conf, &h, update_host))
        goto err_roll_back_manifest;
    }

    if(retries == MAX_RETRIES)
    {
      error("Failed to compute update manifests", 1, 8, 0);
      goto err_roll_back_manifest;
    }

    // At this point, we have a successful manifest, so we won't need another host
    try_next_host = false;

    if(!removed && !replaced && !added)
    {
      struct element *elements[3];
      struct dialog di;

      elements[0] = construct_label(2, 2, "This client is already current.");
      elements[1] = construct_button(7, 4, "OK", 0);
      elements[2] = construct_button(13, 4, "Try next host", 1);

      construct_dialog(&di, "No Updates", 22, 9, 35, 6, elements, 3, 1);
      result = run_dialog(mzx_world, &di);
      destruct_dialog(&di);

      if((result == 1) && (cur_host < conf->update_host_count))
        try_next_host = true;
      goto err_free_update_manifests;
    }

    /* Size the summary listbox to the longest file name (plus marker) */
    for(e = removed; e; e = e->next, entries++)
      list_entry_width = MAX(list_entry_width, 2 + (int)strlen(e->name)+1+1);
    for(e = replaced; e; e = e->next, entries++)
      list_entry_width = MAX(list_entry_width, 2 + (int)strlen(e->name)+1+1);
    for(e = added; e; e = e->next, entries++)
      list_entry_width = MAX(list_entry_width, 2 + (int)strlen(e->name)+1+1);

    // We don't want the listbox to be too wide
    list_entry_width = MIN(list_entry_width, 60);

    /* Build the "-", "*", "+" prefixed entry strings for the task summary */
    list_entries = cmalloc(entries * sizeof(char *));
    for(e = removed; e; e = e->next, i++)
    {
      list_entries[i] = cmalloc(list_entry_width);
      snprintf(list_entries[i], list_entry_width, "- %s", e->name);
      list_entries[i][list_entry_width - 1] = 0;
    }
    for(e = replaced; e; e = e->next, i++)
    {
      list_entries[i] = cmalloc(list_entry_width);
      snprintf(list_entries[i], list_entry_width, "* %s", e->name);
      list_entries[i][list_entry_width - 1] = 0;
    }
    for(e = added; e; e = e->next, i++)
    {
      list_entries[i] = cmalloc(list_entry_width);
      snprintf(list_entries[i], list_entry_width, "+ %s", e->name);
      list_entries[i][list_entry_width - 1] = 0;
    }

    draw_window_box(19, 1, 59, 4, DI_MAIN, DI_DARK, DI_CORNER, 1, 1);
    write_string(" Task Summary ", 33, 1, DI_TITLE, 0);
    write_string("ESC - Cancel [+] Add [-] Delete", 21, 2, DI_TEXT, 0);
    write_string("ENTER - Proceed [*] Replace ", 21, 3, DI_TEXT, 0);

    result = list_menu((const char **)list_entries, list_entry_width, NULL, 0, entries, ((80 - (list_entry_width + 9)) >> 1) + 1, 4);

    for(i = 0; i < entries; i++)
      free(list_entries[i]);
    free(list_entries);

    clear_screen(32, 7);
    update_screen();

    /* Negative result: user cancelled the task summary */
    if(result < 0)
      goto err_free_update_manifests;

    /* Defer deletions until we restart; any of these files may still be
     * in use by this (old) process. Reduce the number of entries by the
     * number of removed items for the progress meter below. */
    for(e = removed; e; e = e->next, entries--)
      delete_hook(e->name);

    /* Since the operations for adding and replacing a file are identical,
     * we modify the replaced list and tack on the added list to the end.
     *
     * Either list may be NULL; in the case that `replaced' is NULL, simply
     * re-assign the `added' pointer. `added' being NULL has no effect.
     *
     * Later, we need only free the replaced list (see below). */
    if(replaced)
    {
      for(e = replaced; e->next; e = e->next)
        ;
      e->next = added;
    }
    else
      replaced = added;

    cancel_update = false;
    host_set_callbacks(h, NULL, recv_cb, cancel_cb);

    /* Download every added/replaced file, with a per-file progress meter
     * and per-file retry/reconnect handling */
    i = 1;
    for(e = replaced; e; e = e->next, i++)
    {
      for(retries = 0; retries < MAX_RETRIES; retries++)
      {
        char name[72];
        bool m_ret;

        if(!check_create_basedir(e->name))
          goto err_free_delete_list;

        final_size = (long)e->size;

        m_hide();
        snprintf(name, 72, "%s (%ldb) [%u/%u]", e->name, final_size, i, entries);
        meter(name, 0, final_size);
        update_screen();

        m_ret = manifest_entry_download_replace(h, url_base, e, delete_hook);

        clear_screen(32, 7);
        m_show();
        update_screen();

        if(m_ret)
          break;

        if(cancel_update)
        {
          error("Download was cancelled; update aborted.", 1, 8, 0);
          goto err_free_delete_list;
        }

        if(!reissue_connection(conf, &h, update_host))
          goto err_free_delete_list;
        host_set_callbacks(h, NULL, recv_cb, cancel_cb);
      }

      if(retries == MAX_RETRIES)
      {
        snprintf(widget_buf, WIDGET_BUF_LEN, "Failed to download \"%s\" (after %d attempts).", e->name, retries);
        widget_buf[WIDGET_BUF_LEN - 1] = 0;
        error(widget_buf, 1, 8, 0);
        goto err_free_delete_list;
      }
    }

    /* Persist the deferred deletions (hash, size, name) for the next start */
    if(delete_list)
    {
      f = fopen_unsafe(DELETE_TXT, "wb");
      if(!f)
      {
        error("Failed to create \"" DELETE_TXT "\". Check permissions.", 1, 8, 0);
        goto err_free_delete_list;
      }

      for(e = delete_list; e; e = e->next)
      {
        fprintf(f, "%08x%08x%08x%08x%08x%08x%08x%08x %lu %s\n", e->sha256[0], e->sha256[1], e->sha256[2], e->sha256[3], e->sha256[4], e->sha256[5], e->sha256[6], e->sha256[7], e->size, e->name);
      }
      fclose(f);
    }

    try_next_host = false;
    ret = true;

    /* Fall-through cleanup ladder; each label undoes one acquisition */
err_free_delete_list:
    manifest_list_free(&delete_list);
    delete_list = delete_p = NULL;
err_free_update_manifests:
    manifest_list_free(&removed);
    manifest_list_free(&replaced);
err_roll_back_manifest:
    restore_original_manifest(ret);
err_free_url_base:
    free(url_base);
err_host_destroy:
    host_destroy(h);
    pop_context();
  } //end host for loop

err_chdir:
  swivel_current_dir_back(true);
err_out:
  /* At this point we found updates and we successfully updated
   * to them. Reload the program with the original argv. */
  if(ret)
  {
    const void *argv = process_argv;
    struct element *elements[2];
    struct dialog di;

    elements[0] = construct_label(2, 2, "This client will now attempt to restart itself.");
    elements[1] = construct_button(23, 4, "OK", 0);

    construct_dialog(&di, "Update Successful", 14, 9, 51, 6, elements, 2, 1);
    run_dialog(mzx_world, &di);
    destruct_dialog(&di);

    execv(process_argv[0], argv);
    perror("execv");

    error("Attempt to invoke self failed!", 1, 8, 0);
    return;
  }
}
/* Flush the wrapped software H.264 decoder, swapping its private state in
 * and out around the call. */
static void vdadec_flush(AVCodecContext *avctx)
{
    set_context(avctx);
    ff_h264_decoder.flush(avctx);
    restore_context(avctx);
}
/* Populate an iSCSI boot context from OpenFirmware boot parameters:
 * initiator identity, NIC (matched against the discovered nic list by device
 * path), target address/port/LUN/name, and CHAP credentials.
 * NOTE(review): set_context/set_int_context appear to be macros that copy the
 * named OBP parameter into the corresponding context field — confirm their
 * definitions. */
static void fill_context(struct boot_context *context, struct ofw_dev *ofwdev)
{
	int ndx;

	memset(context, 0, sizeof(*context));

	/* initiator identity */
	set_context(initiatorname, "NAME", OBP_PARAM_ITNAME);
	snprintf(context->mac, sizeof(context->mac), "%02x:%02x:%02x:%02x:%02x:%02x", ofwdev->mac[0], ofwdev->mac[1], ofwdev->mac[2], ofwdev->mac[3], ofwdev->mac[4], ofwdev->mac[5]);

	/*
	 * nic parameters
	 */
	/* map the OFW device path to its ethN interface index */
	for (ndx = 0; ndx < nic_count; ndx++) {
		if (!strcmp(niclist[ndx], ofwdev->dev_path)) {
			snprintf(context->iface, sizeof(context->iface), "eth%d", ndx);
			break;
		}
	}
	set_context(ipaddr, "IPADDR", OBP_PARAM_CIADDR);
	set_context(mask, "MASK", OBP_PARAM_SUBNET_MASK);

	/*
	 * target parameters
	 */
	set_context(target_ipaddr, "IPADDR", OBP_PARAM_SIADDR);
	set_int_context(target_port, "PORT", OBP_PARAM_IPORT);
	set_context(lun, "LUN", OBP_PARAM_ILUN);
	set_context(targetname, "NAME", OBP_PARAM_INAME);
	set_context(isid, "ISID", OBP_PARAM_ISID);

	/*
	 * chap stuff is always associated with the target
	 */
	set_context(chap_name, "CHAP_NAME", OBP_PARAM_ICHAPID);
	set_context(chap_password, "CHAP_PASSWORD", OBP_PARAM_ICHAPPW);
	set_context(chap_name_in, "CHAP_NAME_IN", OBP_PARAM_CHAPID);
	set_context(chap_password_in, "CHAP_PASSWORD_IN", OBP_PARAM_CHAPPW);
}
void octree::load_kernels() { if (!devContext_flag) set_context(); //If we arive here we have aquired a device, configure parts of the code //Get the number of multiprocessors and compute number of //blocks to be used during the tree-walk nMultiProcessors = devContext.multiProcessorCount; const int blocksPerSM = getTreeWalkBlocksPerSM( this->getDevContext()->getComputeCapabilityMajor(), this->getDevContext()->getComputeCapabilityMinor()); nBlocksForTreeWalk = nMultiProcessors*blocksPerSM; std::string pathName; //AMUSE specific if(this->src_directory != NULL) { pathName.assign(this->src_directory); } else { //Strip the executable name, to get the path name std::string temp(execPath); int idx = (int)temp.find_last_of("/\\"); pathName.assign(temp.substr(0, idx+1)); } // load scan & sort kernels compactCount.setContext(devContext); exScanBlock.setContext(devContext); compactMove.setContext(devContext); splitMove.setContext(devContext); sortCount.setContext(devContext); sortMove.setContext(devContext); extractInt.setContext(devContext); reOrderKeysValues.setContext(devContext); convertKey64to96.setContext(devContext); extractKeyAndPerm.setContext(devContext); dataReorderR4.setContext(devContext); dataReorderF2.setContext(devContext); dataReorderI1.setContext(devContext); dataReorderCombined.setContext(devContext); #ifdef USE_CUDA compactCount.load_source("./scanKernels.ptx", pathName.c_str()); compactCount.create("compact_count", (const void*)&compact_count); exScanBlock.load_source("./scanKernels.ptx", pathName.c_str()); exScanBlock.create("exclusive_scan_block", (const void*)&exclusive_scan_block); compactMove.load_source("./scanKernels.ptx", pathName.c_str()); compactMove.create("compact_move", (const void*)&compact_move); splitMove.load_source("./scanKernels.ptx", pathName.c_str()); splitMove.create("split_move", (const void*)split_move); sortCount.load_source("./sortKernels.ptx", pathName.c_str()); sortCount.create("sort_count", (const void*)sort_count); 
sortMove.load_source("./sortKernels.ptx", pathName.c_str()); sortMove.create("sort_move_stage_key_value", (const void*)sort_move_stage_key_value); extractInt.load_source("./sortKernels.ptx", pathName.c_str()); extractInt.create("extractInt", (const void*)extractInt_kernel); reOrderKeysValues.load_source("./sortKernels.ptx", pathName.c_str()); reOrderKeysValues.create("reOrderKeysValues", (const void*)&reOrderKeysValues_kernel); extractKeyAndPerm.load_source("./sortKernels.ptx", pathName.c_str()); extractKeyAndPerm.create("extractKeyAndPerm", (const void*)&gpu_extractKeyAndPerm); convertKey64to96.load_source("./sortKernels.ptx", pathName.c_str()); convertKey64to96.create("convertKey64to96", (const void*)&gpu_convertKey64to96); dataReorderR4.load_source("./sortKernels.ptx", pathName.c_str()); // dataReorderR4.create("dataReorderR4"); dataReorderR4.create("dataReorderCombined4", (const void*)&dataReorderCombined4); dataReorderF2.load_source("./sortKernels.ptx", pathName.c_str()); dataReorderF2.create("dataReorderF2", (const void*)&gpu_dataReorderF2); dataReorderI1.load_source("./sortKernels.ptx", pathName.c_str()); dataReorderI1.create("dataReorderI1", (const void*)&gpu_dataReorderI1); dataReorderCombined.load_source("./sortKernels.ptx", pathName.c_str()); dataReorderCombined.create("dataReorderCombined", (const void*)&gpu_dataReorderCombined); #else compactCount.load_source("scanKernels.cl", "OpenCLKernels"); compactCount.create("compact_count"); exScanBlock.load_source("scanKernels.cl", "OpenCLKernels"); exScanBlock.create("exclusive_scan_block"); compactMove.load_source("scanKernels.cl", "OpenCLKernels"); compactMove.create("compact_move"); splitMove.load_source("scanKernels.cl", "OpenCLKernels"); splitMove.create("split_move"); #endif // load tree-build kernels /* set context */ build_key_list.setContext(devContext); build_valid_list.setContext(devContext); build_nodes.setContext(devContext); link_tree.setContext(devContext); define_groups.setContext(devContext); 
build_level_list.setContext(devContext); boundaryReduction.setContext(devContext); boundaryReductionGroups.setContext(devContext); build_body2group_list.setContext(devContext); store_groups.setContext(devContext); segmentedCoarseGroupBoundary.setContext(devContext); /* load kernels tree properties */ #ifdef USE_CUDA build_key_list.load_source("./build_tree.ptx", pathName.c_str()); build_valid_list.load_source("./build_tree.ptx", pathName.c_str()); build_nodes.load_source("./build_tree.ptx", pathName.c_str()); link_tree.load_source("./build_tree.ptx", pathName.c_str()); define_groups.load_source("./build_tree.ptx", pathName.c_str()); build_level_list.load_source("./build_tree.ptx", pathName.c_str()); boundaryReduction.load_source("./build_tree.ptx", pathName.c_str()); boundaryReductionGroups.load_source("./build_tree.ptx", pathName.c_str()); build_body2group_list.load_source("./build_tree.ptx", pathName.c_str()); store_groups.load_source("./build_tree.ptx", pathName.c_str()); segmentedCoarseGroupBoundary.load_source("./build_tree.ptx", pathName.c_str()); /* create kernels */ build_key_list.create("cl_build_key_list", (const void*)&cl_build_key_list); build_valid_list.create("cl_build_valid_list", (const void*)&cl_build_valid_list); build_nodes.create("cl_build_nodes", (const void*)&cl_build_nodes); link_tree.create("cl_link_tree", (const void*)&cl_link_tree); define_groups.create("build_group_list2", (const void*)&build_group_list2); build_level_list.create("build_level_list", (const void*)&gpu_build_level_list); boundaryReduction.create("boundaryReduction", (const void*)&gpu_boundaryReduction); boundaryReductionGroups.create("boundaryReductionGroups", (const void*)&gpu_boundaryReductionGroups); // build_body2group_list.create("build_body2group_list", (const void*)&gpu_build_body2group_list); store_groups.create("store_group_list", (const void*)&store_group_list); segmentedCoarseGroupBoundary.create("segmentedCoarseGroupBoundary", (const 
void*)&gpu_segmentedCoarseGroupBoundary); #else build_key_list.load_source("build_tree.cl", ""); build_valid_list.load_source("build_tree.cl", ""); build_nodes.load_source("build_tree.cl", ""); link_tree.load_source("build_tree.cl", ""); /* create kernels */ build_key_list.create("cl_build_key_list"); build_valid_list.create("cl_build_valid_list"); build_nodes.create("cl_build_nodes"); link_tree.create("cl_link_tree"); #endif // load tree-props kernels propsNonLeafD.setContext(devContext); propsLeafD.setContext(devContext); propsScalingD.setContext(devContext); setPHGroupData.setContext(devContext); setPHGroupDataGetKey.setContext(devContext); setPHGroupDataGetKey2.setContext(devContext); /* load kernels */ #ifdef USE_CUDA propsNonLeafD.load_source("./compute_propertiesD.ptx", pathName.c_str(), "", -1); propsLeafD.load_source("./compute_propertiesD.ptx", pathName.c_str(), "", -1); propsScalingD.load_source("./compute_propertiesD.ptx", pathName.c_str(), "",-1); setPHGroupData.load_source("./compute_propertiesD.ptx", pathName.c_str()); setPHGroupDataGetKey.load_source("./compute_propertiesD.ptx", pathName.c_str()); setPHGroupDataGetKey2.load_source("./compute_propertiesD.ptx", pathName.c_str()); /* create kernels */ propsNonLeafD.create("compute_non_leaf", (const void*)&compute_non_leaf); propsLeafD.create("compute_leaf", (const void*)&compute_leaf); propsScalingD.create("compute_scaling", (const void*)&compute_scaling); setPHGroupData.create("setPHGroupData", (const void*)&gpu_setPHGroupData); setPHGroupDataGetKey.create("setPHGroupDataGetKey", (const void*)&gpu_setPHGroupDataGetKey); setPHGroupDataGetKey2.create("setPHGroupDataGetKey2", (const void*)&gpu_setPHGroupDataGetKey2); #else propsNonLeaf.load_source("compProps.cl", ""); propsLeaf.load_source("compProps.cl", ""); propsScaling.load_source("compProps.cl", ""); /* create kernels */ propsNonLeaf.create("compute_non_leaf"); propsLeaf.create("compute_leaf"); propsScaling.create("compute_scaling"); #endif /* Tree 
iteration */ getTNext.setContext(devContext); predictParticles.setContext(devContext); getNActive.setContext(devContext); approxGrav.setContext(devContext); directGrav.setContext(devContext); correctParticles.setContext(devContext); computeDt.setContext(devContext); computeEnergy.setContext(devContext); setActiveGrps.setContext(devContext); distanceCheck.setContext(devContext); approxGravLET.setContext(devContext); determineLET.setContext(devContext); #ifdef USE_CUDA getTNext.load_source("./timestep.ptx", pathName.c_str(), "", -1); predictParticles.load_source("./timestep.ptx", pathName.c_str(), "", -1); getNActive.load_source("./timestep.ptx", pathName.c_str(), "", -1); approxGrav.load_source("./dev_approximate_gravity.ptx", pathName.c_str(), "", 64); directGrav.load_source("./dev_direct_gravity.ptx", pathName.c_str(), "", 64); correctParticles.load_source("./timestep.ptx", pathName.c_str(), "", -1); computeDt.load_source("./timestep.ptx", pathName.c_str(), "", -1); computeEnergy.load_source("./timestep.ptx", pathName.c_str(), "", -1); setActiveGrps.load_source("./timestep.ptx", pathName.c_str(), "", -1); distanceCheck.load_source("./timestep.ptx", pathName.c_str(), "", -1); approxGravLET.load_source("./dev_approximate_gravity.ptx", pathName.c_str(), "", 64); determineLET.load_source("./dev_approximate_gravity.ptx", pathName.c_str(), "", 64); /* create kernels */ getTNext.create("get_Tnext", (const void*)&get_Tnext); predictParticles.create("predict_particles", (const void*)&predict_particles); getNActive.create("get_nactive", (const void*)&get_nactive); approxGrav.create("dev_approximate_gravity", (const void*)&dev_approximate_gravity); #ifdef KEPLER /* preferL1 equal egaburov */ cudaFuncSetCacheConfig((const void*)&dev_approximate_gravity, cudaFuncCachePreferL1); cudaFuncSetCacheConfig((const void*)&dev_approximate_gravity_let, cudaFuncCachePreferL1); #if 0 #if 1 cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte); #else 
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); #endif #endif #endif directGrav.create("dev_direct_gravity", (const void*)&dev_direct_gravity); correctParticles.create("correct_particles", (const void*)&correct_particles); computeDt.create("compute_dt", (const void*)&compute_dt); setActiveGrps.create("setActiveGroups", (const void*)&setActiveGroups); computeEnergy.create("compute_energy_double", (const void*)&compute_energy_double); distanceCheck.create("distanceCheck", (const void*)&distanceCheck); approxGravLET.create("dev_approximate_gravity_let", (const void*)&dev_approximate_gravity_let); #if 0 /* egaburov, doesn't compile with this */ determineLET.create("dev_determineLET", (const void*)&dev_determineLET); #endif #else getTNext.load_source("", ""); /* create kernels */ getTNext.create(""); #endif //Parallel kernels domainCheck.setContext(devContext); extractSampleParticles.setContext(devContext); extractOutOfDomainR4.setContext(devContext); extractOutOfDomainBody.setContext(devContext); insertNewParticles.setContext(devContext); internalMove.setContext(devContext); build_parallel_grps.setContext(devContext); segmentedSummaryBasic.setContext(devContext); domainCheckSFC.setContext(devContext); internalMoveSFC.setContext(devContext); internalMoveSFC2.setContext(devContext); extractOutOfDomainParticlesAdvancedSFC.setContext(devContext); extractOutOfDomainParticlesAdvancedSFC2.setContext(devContext); insertNewParticlesSFC.setContext(devContext); extractSampleParticlesSFC.setContext(devContext); domainCheckSFCAndAssign.setContext(devContext); #ifdef USE_CUDA domainCheck.load_source("./parallel.ptx", pathName.c_str()); extractSampleParticles.load_source("./parallel.ptx", pathName.c_str()); extractOutOfDomainR4.load_source("./parallel.ptx", pathName.c_str()); extractOutOfDomainBody.load_source("./parallel.ptx", pathName.c_str()); insertNewParticles.load_source("./parallel.ptx", pathName.c_str()); internalMove.load_source("./parallel.ptx", 
pathName.c_str()); build_parallel_grps.load_source("./build_tree.ptx", pathName.c_str()); segmentedSummaryBasic.load_source("./build_tree.ptx", pathName.c_str()); domainCheckSFC.load_source("./parallel.ptx", pathName.c_str()); internalMoveSFC.load_source("./parallel.ptx", pathName.c_str()); internalMoveSFC2.load_source("./parallel.ptx", pathName.c_str()); extractOutOfDomainParticlesAdvancedSFC.load_source("./parallel.ptx", pathName.c_str()); extractOutOfDomainParticlesAdvancedSFC2.load_source("./parallel.ptx", pathName.c_str()); insertNewParticlesSFC.load_source("./parallel.ptx", pathName.c_str()); extractSampleParticlesSFC.load_source("./parallel.ptx", pathName.c_str()); domainCheckSFCAndAssign.load_source("./parallel.ptx", pathName.c_str()); domainCheck.create("doDomainCheck", (const void*)&doDomainCheck); extractSampleParticles.create("extractSampleParticles", (const void*)&gpu_extractSampleParticles); extractOutOfDomainR4.create("extractOutOfDomainParticlesR4", (const void*)&extractOutOfDomainParticlesR4); extractOutOfDomainBody.create("extractOutOfDomainParticlesAdvanced", (const void*)&extractOutOfDomainParticlesAdvanced); insertNewParticles.create("insertNewParticles", (const void*)&gpu_insertNewParticles); internalMove.create("internalMove", (const void*)&gpu_internalMove); extractSampleParticlesSFC.create("build_parallel_grps", (const void*)&gpu_extractSampleParticlesSFC); build_parallel_grps.create("build_parallel_grps", (const void*)&gpu_build_parallel_grps); segmentedSummaryBasic.create("segmentedSummaryBasic", (const void*)&gpu_segmentedSummaryBasic); domainCheckSFC.create("domainCheckSFC", (const void*)&gpu_domainCheckSFC); internalMoveSFC.create("internalMoveSFC", (const void*)&gpu_internalMoveSFC); internalMoveSFC2.create("internalMoveSFC2", (const void*)&gpu_internalMoveSFC2); extractOutOfDomainParticlesAdvancedSFC.create("extractOutOfDomainParticlesAdvancedSFC", (const void*)&gpu_extractOutOfDomainParticlesAdvancedSFC); 
extractOutOfDomainParticlesAdvancedSFC2.create("extractOutOfDomainParticlesAdvancedSFC2", (const void*)&gpu_extractOutOfDomainParticlesAdvancedSFC2); insertNewParticlesSFC.create("insertNewParticlesSFC", (const void*)&gpu_insertNewParticlesSFC); domainCheckSFCAndAssign.create("domainCheckSFCAndAssign", (const void*)&gpu_domainCheckSFCAndAssign); #else #endif #ifdef USE_DUST define_dust_groups.setContext(devContext); define_dust_groups.load_source("./build_tree.ptx", pathName.c_str()); define_dust_groups.create("define_dust_groups"); store_dust_groups.setContext(devContext); store_dust_groups.load_source("./build_tree.ptx", pathName.c_str()); store_dust_groups.create("store_dust_groups"); predictDust.setContext(devContext); predictDust.load_source("./build_tree.ptx", pathName.c_str()); predictDust.create("predict_dust_particles"); correctDust.setContext(devContext); correctDust.load_source("./build_tree.ptx", pathName.c_str()); correctDust.create("correct_dust_particles"); #endif }
/// Exercise the cache's data-object API: store/fetch of frames and
/// serialized data objects, trigger recording, the no-triggers variants,
/// and timeout-based expiry.  Scopes are significant: each `{}` block
/// resets the cache connection and checks state left by the previous one.
void test_objects() {
  std::cout << "- Data Object" << std::endl;
  set_context(false);
  {
    std::cout << "-- With Triggers Full API" << std::endl;
    {
      // Store one frame and one data object, each with an extra trigger;
      // 4 triggers total are expected ("foo", "dat", "k1", "k2").
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d(1,2);
      std::set<std::string> t1,t2;
      t1.insert("k1");
      t2.insert("k2");
      cache().store_frame("foo","bar",t1);
      cache().store_data("dat",d,t2);
      TEST(tr.detach().size()==4);
    }
    {
      // Fetching replays all 4 triggers and yields the stored values.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d;
      std::string tmp;
      TEST(cache().fetch_frame("foo",tmp));
      TEST(cache().fetch_data("dat",d));
      TEST(d.x==1 && d.y==2);
      TEST(tmp=="bar");
      std::set<std::string> tg=tr.detach();
      TEST(tg.size()==4);
      TEST(tg.count("foo")==1);
      TEST(tg.count("dat")==1);
      TEST(tg.count("k1")==1);
      TEST(tg.count("k2")==1);
    }
    {
      // The trailing `true` argument suppresses trigger recording on fetch.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d;
      std::string tmp;
      TEST(cache().fetch_frame("foo",tmp,true));
      TEST(cache().fetch_data("dat",d,true));
      TEST(d.x==1 && d.y==2);
      TEST(tmp=="bar");
      TEST(tr.detach().size()==0);
    }
    {
      // Store overloads taking a timeout (-1) instead of a trigger set:
      // only the key triggers ("foo", "dat") are recorded.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d(4,5);
      cache().store_frame("foo","baz",-1);
      cache().store_data("dat",d,-1);
      TEST(tr.detach().size()==2);
    }
    {
      // Fetch the overwritten values back; 2 triggers expected.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d;
      std::string tmp;
      TEST(cache().fetch_frame("foo",tmp));
      TEST(cache().fetch_data("dat",d));
      TEST(d.x==4 && d.y==5);
      TEST(tmp=="baz");
      std::set<std::string> tg=tr.detach();
      TEST(tg.size()==2);
      TEST(tg.count("foo")==1);
      TEST(tg.count("dat")==1);
    }
  }
  cache().clear();
  {
    std::cout << "-- Without Triggers" << std::endl;
    {
      // Store with notriggers=true: nothing is recorded.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d(1,2);
      cache().store_frame("foo","bar",std::set<std::string>(),-1,true);
      cache().store_data("dat",d,std::set<std::string>(),-1,true);
      TEST(tr.detach().size()==0);
    }
    {
      // Fetch with notriggers=true: values come back, no triggers.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d;
      std::string tmp;
      TEST(cache().fetch_frame("foo",tmp,true));
      TEST(cache().fetch_data("dat",d,true));
      TEST(d.x==1 && d.y==2);
      TEST(tmp=="bar");
      TEST(tr.detach().size()==0);
    }
    {
      // Plain fetch still records the key triggers even though the
      // store suppressed them.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d;
      std::string tmp;
      TEST(cache().fetch_frame("foo",tmp));
      TEST(cache().fetch_data("dat",d));
      TEST(d.x==1 && d.y==2);
      TEST(tmp=="bar");
      TEST(tr.detach().size()==2);
    }
    {
      // Timeout-style store overloads with notriggers=true.
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d(4,5);
      cache().store_frame("foo","baz",-1,true);
      cache().store_data("dat",d,-1,true);
      TEST(tr.detach().size()==0);
    }
    {
      cache().reset();
      cppcms::triggers_recorder tr(cache());
      mydata d;
      std::string tmp;
      TEST(cache().fetch_frame("foo",tmp,true));
      TEST(cache().fetch_data("dat",d,true));
      TEST(d.x==4 && d.y==5);
      TEST(tmp=="baz");
      TEST(tr.detach().size()==0);
    }
  }
  cache().clear();
  {
    std::cout << "-- Timeouts" << std::endl;
    {
      // Store entries with a 2-second timeout via both overload shapes.
      cache().reset();
      mydata d1(1,2);
      mydata d2(4,5);
      cache().store_frame("foo1","baz1",std::set<std::string>(),2,true);
      cache().store_data("dat1",d1,std::set<std::string>(),2,true);
      cache().store_frame("foo2","baz2",2,true);
      cache().store_data("dat2",d2,2,true);
    }
    {
      // Before expiry: everything is still fetchable.
      cache().reset();
      mydata d1,d2;
      std::string t1,t2;
      TEST(cache().fetch_frame("foo1",t1,true));
      TEST(cache().fetch_frame("foo2",t2,true));
      TEST(cache().fetch_data("dat1",d1,true));
      TEST(cache().fetch_data("dat2",d2,true));
      TEST(d1.x==1 && d1.y==2);
      TEST(d2.x==4 && d2.y==5);
      TEST(t1=="baz1");
      TEST(t2=="baz2");
    }
    // Sleep past the 2-second timeout so all four entries expire.
    booster::ptime::millisleep(3000);
    {
      // After expiry: every fetch must fail.
      cache().reset();
      mydata d1,d2;
      std::string t1,t2;
      TEST(!cache().fetch_frame("foo1",t1,true));
      TEST(!cache().fetch_frame("foo2",t2,true));
      TEST(!cache().fetch_data("dat1",d1,true));
      TEST(!cache().fetch_data("dat2",d2,true));
    }
  }
}
/*
 * Switch the MMU hardware context when switching from mm @prev to @next.
 *
 * Under context_lock: bumps the active counts (SMP), reuses @next's
 * existing context id if it has one, otherwise allocates a free id from
 * context_map — stealing one (SMP or UP path) when none are free.  If the
 * chosen id was marked stale on this CPU the local TLB is flushed before
 * the id is programmed into the MMU via set_context().
 */
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	atomic_spin_lock(&context_lock);

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
		 cpu, next, next->context.active, next->context.id);
#endif

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
#ifndef DEBUG_STEAL_ONLY
		pr_devel(" old context %p active was: %d\n",
			 prev, prev->context.active);
#endif
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			/* steal_context_smp() may fail and ask us to retry
			 * the whole allocation from the top */
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
		 cpu, id, nr_free_contexts);
#endif

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
			 cpu, id, next);
		local_flush_tlb_mm(next);
		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flick the MMU and release lock */
	set_context(id, next->pgd);
	atomic_spin_unlock(&context_lock);
}
/// Evaluate the stored expression as a boolean for the given context.
/// Every registered functor is pointed at @p ctx first, so the
/// expression sees up-to-date context data when it is evaluated.
bool operator()(Context& ctx)
{
    for (auto& functor : functors_)
        functor.set_context(&ctx);

    return static_cast<bool>(expression_);
}
/**
 * Initialize the VDA-wrapped H.264 decoder.
 *
 * Selects the output pixel format via get_format(), creates the VDA
 * hardware decoder, initializes the wrapped software H.264 decoder with
 * the swapped-in context, and rejects streams whose SPS declares a bit
 * depth or chroma format VDA cannot handle.  On any failure the decoder
 * is torn down via vdadec_close() and -1 is returned.
 */
static av_cold int vdadec_init(AVCodecContext *avctx)
{
    VDADecoderContext *ctx = avctx->priv_data;
    struct vda_context *vda_ctx = &ctx->vda_ctx;
    OSStatus status;
    int ret, i;

    ctx->h264_initialized = 0;

    /* init pix_fmts of codec */
    if (!ff_h264_vda_decoder.pix_fmts) {
        if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7)
            ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7;
        else
            ff_h264_vda_decoder.pix_fmts = vda_pixfmts;
    }

    /* init vda */
    memset(vda_ctx, 0, sizeof(struct vda_context));
    vda_ctx->width             = avctx->width;
    vda_ctx->height            = avctx->height;
    vda_ctx->format            = 'avc1';
    vda_ctx->use_sync_decoding = 1;
    vda_ctx->use_ref_buffer    = 1;
    ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
    switch (ctx->pix_fmt) {
    case AV_PIX_FMT_UYVY422:
        vda_ctx->cv_pix_fmt_type = '2vuy';
        break;
    case AV_PIX_FMT_YUYV422:
        vda_ctx->cv_pix_fmt_type = 'yuvs';
        break;
    case AV_PIX_FMT_NV12:
        vda_ctx->cv_pix_fmt_type = '420v';
        break;
    case AV_PIX_FMT_YUV420P:
        vda_ctx->cv_pix_fmt_type = 'y420';
        break;
    default:
        /* BUGFIX: log the format that was actually rejected (ctx->pix_fmt,
         * the result of get_format()), not avctx->pix_fmt. */
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported pixel format: %d\n", ctx->pix_fmt);
        goto failed;
    }
    status = ff_vda_create_decoder(vda_ctx,
                                   avctx->extradata, avctx->extradata_size);
    if (status != kVDADecoderNoErr) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to init VDA decoder: %d.\n", status);
        goto failed;
    }

    /* init H.264 decoder */
    set_context(avctx);
    ret = ff_h264_decoder.init(avctx);
    restore_context(avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n");
        goto failed;
    }
    ctx->h264_initialized = 1;

    for (i = 0; i < MAX_SPS_COUNT; i++) {
        /* BUGFIX: check the SPS slot for NULL *before* dereferencing it;
         * the original read sps_list[i]->data first and only then tested
         * the resulting pointer, crashing on empty slots. */
        if (ctx->h264ctx.ps.sps_list[i]) {
            const SPS *sps = (const SPS *)ctx->h264ctx.ps.sps_list[i]->data;
            if (sps->bit_depth_luma != 8 ||
                sps->chroma_format_idc == 2 ||
                sps->chroma_format_idc == 3) {
                av_log(avctx, AV_LOG_ERROR, "Format is not supported.\n");
                goto failed;
            }
        }
    }

    return 0;

failed:
    vdadec_close(avctx);
    return -1;
}
/*
 * Switch the MMU hardware context when switching from mm @prev to @next
 * (newer variant taking @tsk; tsk is not referenced in this body).
 *
 * Under context_lock: bumps the active counts (SMP), reuses @next's
 * existing id when valid, otherwise allocates one from context_map —
 * stealing (SMP, all-contexts, or UP path) when none are free.  A
 * context marked stale on any sibling thread of this CPU gets a local
 * TLB flush and its stale bits cleared before set_context() programs it.
 */
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			/* steal_context_smp() may fail and ask us to retry
			 * the whole allocation from the top */
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (no_selective_tlbil)
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
/// Evaluate the stored expression for the given context and render the
/// result as a string.  Every registered functor is pointed at @p ctx
/// first so the expression sees up-to-date context data.
std::string operator()(Context& ctx)
{
    for (auto& functor : functors_)
        functor.set_context(&ctx);

    const expression::result_type res = expression_.value();
    return boost::lexical_cast<std::string>(res);
}