void enable_module( unsigned int mod, bool onoff ) { if (mod < g_modules.size( )) { MODULE* module = g_modules[ mod ]; if (onoff) { if (!(get_module_state( mod ) & MOD_ENABLED)) { module->state = MOD_ENABLED; module->adapter->on_module( mod, MOD_EVENT_ENABLE, 0 ); core::event_modstate( mod, module->state ); } } else { if (get_module_state( mod ) & MOD_ENABLED) { module->state = MOD_DISABLED; module->adapter->on_module( mod, MOD_EVENT_DISABLE, 0 ); core::event_modstate( mod, module->state ); } } } }
/* Request an LVM state refresh, coalescing bursts of requests.
 *
 * If a delayed update is already pending this is a no-op. The very first
 * request after module activation (coldplug not yet done) triggers an
 * immediate update; subsequent requests are deferred via a timeout so
 * that many rapid triggers result in a single refresh.
 */
static void trigger_delayed_lvm_update (UDisksDaemon *daemon)
{
  UDisksLVM2State *state = get_module_state (daemon);

  /* A delayed update is already scheduled -- nothing more to do. */
  if (udisks_lvm2_state_get_lvm_delayed_update_id (state) > 0)
    return;

  if (udisks_lvm2_state_get_coldplug_done (state))
    {
      /* Coalesce: schedule a single refresh shortly and remember the
       * source id so further triggers become no-ops until it fires. */
      udisks_lvm2_state_set_lvm_delayed_update_id (state,
                                                   g_timeout_add (100,
                                                                  delayed_lvm_update,
                                                                  daemon));
    }
  else
    {
      /* Update immediately when doing coldplug, i.e. when lvm2 module has just
       * been activated. This is not 100% effective as this affects only the
       * first request but from the plugin nature we don't know whether
       * coldplugging has been finished or not. Might be subject to change in
       * the future. */
      udisks_lvm2_state_set_coldplug_done (state, TRUE);
      lvm_update (daemon);
    }
}
/* GSourceFunc callback for the coalesced refresh scheduled by
 * trigger_delayed_lvm_update(): runs the update, then clears the pending
 * source id so the next trigger can schedule a new one.
 * Returns FALSE so GLib removes the one-shot timeout source. */
static gboolean delayed_lvm_update (gpointer user_data)
{
  UDisksDaemon *daemon = UDISKS_DAEMON (user_data);
  UDisksLVM2State *state = get_module_state (daemon);

  lvm_update (daemon);
  udisks_lvm2_state_set_lvm_delayed_update_id (state, 0);

  return FALSE;
}
bool module_init(const module_t *module) { assert(metadata != NULL); assert(module != NULL); assert(get_module_state(module) == MODULE_STATE_NONE); if (!call_lifecycle_function(module->init)) { LOG_ERROR(LOG_TAG, "%s failed to initialize \"%s\"", __func__, module->name); return false; } set_module_state(module, MODULE_STATE_INITIALIZED); return true; }
void module_clean_up(const module_t *module) { assert(metadata != NULL); assert(module != NULL); module_state_t state = get_module_state(module); assert(state <= MODULE_STATE_INITIALIZED); // Only something to do if the module was actually initialized if (state < MODULE_STATE_INITIALIZED) return; if (!call_lifecycle_function(module->clean_up)) LOG_ERROR(LOG_TAG, "%s found \"%s\" reported failure during cleanup. Continuing anyway.", __func__, module->name); set_module_state(module, MODULE_STATE_NONE); }
bool module_start_up(const module_t *module) { assert(metadata != NULL); assert(module != NULL); // TODO(zachoverflow): remove module->init check once automagic order/call is in place. // This hack is here so modules which don't require init don't have to have useless calls // as we're converting the startup sequence. assert(get_module_state(module) == MODULE_STATE_INITIALIZED || module->init == NULL); if (!call_lifecycle_function(module->start_up)) { LOG_ERROR(LOG_TAG, "%s failed to start up \"%s\"", __func__, module->name); return false; } set_module_state(module, MODULE_STATE_STARTED); return true; }
// Visit a For loop. If the loop variable is a GPU block/thread variable,
// the loop nest is compiled into a device kernel and replaced on the
// host side with code that marshals the kernel arguments and calls
// halide_dev_run. Otherwise codegen falls through to the CPU path.
void CodeGen_GPU_Host<CodeGen_CPU>::visit(const For *loop) {
    if (CodeGen_GPU_Dev::is_gpu_var(loop->name)) {
        // We're in the loop over innermost thread dimension
        debug(2) << "Kernel launch: " << loop->name << "\n";

        // Walk the loop nest to extract the per-dimension block/thread
        // extents (and shared memory size, used in the launch below).
        ExtractBounds bounds;
        loop->accept(&bounds);

        debug(2) << "Kernel bounds: ("
                 << bounds.num_threads[0] << ", "
                 << bounds.num_threads[1] << ", "
                 << bounds.num_threads[2] << ", "
                 << bounds.num_threads[3] << ") threads, ("
                 << bounds.num_blocks[0] << ", "
                 << bounds.num_blocks[1] << ", "
                 << bounds.num_blocks[2] << ", "
                 << bounds.num_blocks[3] << ") blocks\n";

        // compute a closure over the state passed into the kernel
        GPU_Host_Closure c(loop, loop->name);

        // compile the kernel; mangle the unique name into a valid
        // identifier first (non-alphanumerics become '_')
        string kernel_name = unique_name("kernel_" + loop->name, false);
        for (size_t i = 0; i < kernel_name.size(); i++) {
            if (!isalnum(kernel_name[i])) {
                kernel_name[i] = '_';
            }
        }

        // For buffer arguments whose allocation has a known constant
        // size, record that size on the closure argument.
        vector<GPU_Argument> closure_args = c.arguments();
        for (size_t i = 0; i < closure_args.size(); i++) {
            if (closure_args[i].is_buffer && allocations.contains(closure_args[i].name)) {
                closure_args[i].size = allocations.get(closure_args[i].name).constant_bytes;
            }
        }

        cgdev->add_kernel(loop, kernel_name, closure_args);

        // get the actual name of the generated kernel for this loop
        kernel_name = cgdev->get_current_kernel_name();
        debug(2) << "Compiled launch to kernel \"" << kernel_name << "\"\n";
        Value *entry_name_str = builder->CreateGlobalStringPtr(kernel_name, "entry_name");

        // Integer type used for argument sizes: i32 on 32-bit targets,
        // i64 otherwise.
        llvm::Type *target_size_t_type = (target.bits == 32) ? i32 : i64;

        // build the kernel arguments array
        llvm::PointerType *arg_t = i8->getPointerTo(); // void*
        int num_args = (int)closure_args.size();

        // NULL-terminated list
        Value *gpu_args_arr = create_alloca_at_entry(ArrayType::get(arg_t, num_args+1),
                                                     num_args+1,
                                                     kernel_name + "_args");

        // NULL-terminated list of size_t's
        Value *gpu_arg_sizes_arr = create_alloca_at_entry(ArrayType::get(target_size_t_type, num_args+1),
                                                          num_args+1,
                                                          kernel_name + "_arg_sizes");

        for (int i = 0; i < num_args; i++) {
            // get the closure argument
            string name = closure_args[i].name;
            Value *val;
            if (closure_args[i].is_buffer) {
                // If it's a buffer, dereference the dev handle
                val = buffer_dev(sym_get(name + ".buffer"));
            } else {
                // Otherwise just look up the symbol
                val = sym_get(name);
            }

            // allocate stack space to mirror the closure element. It
            // might be in a register and we need a pointer to it for
            // the gpu args array.
            Value *ptr = builder->CreateAlloca(val->getType(), NULL, name+".stack");

            // store the closure value into the stack space
            builder->CreateStore(val, ptr);

            // store a void* pointer to the argument into the gpu_args_arr
            Value *bits = builder->CreateBitCast(ptr, arg_t);
            builder->CreateStore(bits, builder->CreateConstGEP2_32(gpu_args_arr, 0, i));

            // store the size of the argument in bytes; buffer arguments
            // were dereferenced to dev handles above, so they take the
            // target's pointer width.
            int size_bits = (closure_args[i].is_buffer) ? target.bits : closure_args[i].type.bits;
            builder->CreateStore(ConstantInt::get(target_size_t_type, size_bits/8),
                                 builder->CreateConstGEP2_32(gpu_arg_sizes_arr, 0, i));
        }

        // NULL-terminate the lists
        builder->CreateStore(ConstantPointerNull::get(arg_t),
                             builder->CreateConstGEP2_32(gpu_args_arr, 0, num_args));
        builder->CreateStore(ConstantInt::get(target_size_t_type, 0),
                             builder->CreateConstGEP2_32(gpu_arg_sizes_arr, 0, num_args));

        // TODO: only three dimensions can be passed to
        // cuLaunchKernel. How should we handle blkid[3]?
        internal_assert(is_one(bounds.num_threads[3]) && is_one(bounds.num_blocks[3]));

        // Arguments to halide_dev_run, in its expected order.
        Value *launch_args[] = {
            get_user_context(),
            builder->CreateLoad(get_module_state()),
            entry_name_str,
            codegen(bounds.num_blocks[0]),
            codegen(bounds.num_blocks[1]),
            codegen(bounds.num_blocks[2]),
            codegen(bounds.num_threads[0]),
            codegen(bounds.num_threads[1]),
            codegen(bounds.num_threads[2]),
            codegen(bounds.shared_mem_size),
            builder->CreateConstGEP2_32(gpu_arg_sizes_arr, 0, 0, "gpu_arg_sizes_ar_ref"),
            builder->CreateConstGEP2_32(gpu_args_arr, 0, 0, "gpu_args_arr_ref")
        };
        llvm::Function *dev_run_fn = module->getFunction("halide_dev_run");
        internal_assert(dev_run_fn) << "Could not find halide_dev_run in module\n";

        // halide_dev_run returns zero on success; emit a guarded
        // assertion around the launch.
        Value *result = builder->CreateCall(dev_run_fn, launch_args);
        Value *did_succeed = builder->CreateICmpEQ(result, ConstantInt::get(i32, 0));
        CodeGen_CPU::create_assertion(did_succeed, "Failure inside halide_dev_run");
    } else {
        CodeGen_CPU::visit(loop);
    }
}
// Compile a statement into a host module that embeds the compiled GPU
// kernel source and registers it (via halide_init_kernels) in a new
// entry block that runs before the function body.
void CodeGen_GPU_Host<CodeGen_CPU>::compile(Stmt stmt, string name,
                                            const vector<Argument> &args,
                                            const vector<Buffer> &images_to_embed) {
    init_module();

    // also set up the child codegenerator - this is set up once per
    // PTX_Host::compile, and reused across multiple PTX_Dev::compile
    // invocations for different kernels.
    cgdev->init_module();

    module = get_initial_module_for_target(target, context); // grab runtime helper functions

    // Fix the target triple
    debug(1) << "Target triple of initial module: " << module->getTargetTriple() << "\n";
    llvm::Triple triple = CodeGen_CPU::get_target_triple();
    module->setTargetTriple(triple.str());
    debug(1) << "Target triple of initial module: " << module->getTargetTriple() << "\n";

    // Pass to the generic codegen
    CodeGen::compile(stmt, name, args, images_to_embed);

    // Unset constant flag for embedded image global variables
    for (size_t i = 0; i < images_to_embed.size(); i++) {
        string name = images_to_embed[i].name();
        GlobalVariable *global = module->getNamedGlobal(name + ".buffer");
        global->setConstant(false);
    }

    // Compile the device kernels and embed their source in the host
    // module as a binary blob.
    std::vector<char> kernel_src = cgdev->compile_to_src();
    Value *kernel_src_ptr = CodeGen_CPU::create_constant_binary_blob(kernel_src, "halide_kernel_src");

    // Remember the entry block so we can branch to it upon init success.
    BasicBlock *entry = &function->getEntryBlock();

    // Insert a new block to run initialization at the beginning of the function.
    BasicBlock *init_kernels_bb = BasicBlock::Create(*context, "init_kernels", function, entry);
    builder->SetInsertPoint(init_kernels_bb);
    Value *user_context = get_user_context();
    Value *kernel_size = ConstantInt::get(i32, kernel_src.size());
    Value *init = module->getFunction("halide_init_kernels");
    internal_assert(init) << "Could not find function halide_init_kernels in initial module\n";
    Value *result = builder->CreateCall4(init, user_context,
                                         get_module_state(),
                                         kernel_src_ptr, kernel_size);
    // halide_init_kernels returns zero on success.
    Value *did_succeed = builder->CreateICmpEQ(result, ConstantInt::get(i32, 0));
    CodeGen_CPU::create_assertion(did_succeed, "Failure inside halide_init_kernels");

    // Upon success, jump to the original entry.
    builder->CreateBr(entry);

    // Optimize the module
    CodeGen::optimize_module();
}
/* GAsyncReadyCallback for the asynchronous VG/PV scan: reconciles the
 * daemon's exported volume group objects with the freshly scanned LVM
 * state. Obsolete groups are destroyed and unexported, new ones are
 * created, and every surviving group is updated with its current VG data
 * and the list of PVs that belong to it.
 *
 * Ownership notes: this callback takes over the VGsPVsData container and
 * the 'vgs'/'pvs' arrays from the task result; see the comments at the
 * end for what is freed here versus passed on. */
static void lvm_update_vgs (GObject *source_obj, GAsyncResult *result, gpointer user_data)
{
  UDisksLVM2State *state;
  UDisksDaemon *daemon = UDISKS_DAEMON (source_obj);
  GDBusObjectManagerServer *manager;
  GTask *task = G_TASK (result);
  GError *error = NULL;
  VGsPVsData *data = g_task_propagate_pointer (task, &error);
  BDLVMVGdata **vgs = NULL;
  BDLVMPVdata **pvs = NULL;
  GHashTableIter vg_name_iter;
  gpointer key, value;
  const gchar *vg_name;

  /* The scan failed (or produced no data) -- log and keep the current
   * set of volume group objects untouched. */
  if (!data)
    {
      if (error)
        udisks_warning ("LVM2 plugin: %s", error->message);
      else
        /* this should never happen */
        udisks_warning ("LVM2 plugin: failure but no error when getting VGs!");
      return;
    }

  vgs = data->vgs;
  pvs = data->pvs;

  /* free the data container (but not 'vgs' and 'pvs') */
  g_free (data);

  manager = udisks_daemon_get_object_manager (daemon);
  state = get_module_state (daemon);

  /* Remove obsolete groups: any tracked VG name that no longer appears
   * in the scan result is destroyed, unexported from D-Bus and dropped
   * from the name->object table. */
  g_hash_table_iter_init (&vg_name_iter, udisks_lvm2_state_get_name_to_volume_group (state));
  while (g_hash_table_iter_next (&vg_name_iter, &key, &value))
    {
      UDisksLinuxVolumeGroupObject *group;
      gboolean found = FALSE;

      vg_name = key;
      group = value;

      for (BDLVMVGdata **vgs_p = vgs; !found && (*vgs_p); vgs_p++)
        found = g_strcmp0 ((*vgs_p)->name, vg_name) == 0;

      if (!found)
        {
          udisks_linux_volume_group_object_destroy (group);
          g_dbus_object_manager_server_unexport (manager,
                                                 g_dbus_object_get_object_path (G_DBUS_OBJECT (group)));
          /* g_hash_table_iter_remove() is the only safe way to remove
           * while iterating. */
          g_hash_table_iter_remove (&vg_name_iter);
        }
    }

  /* Add new groups and update existing groups */
  for (BDLVMVGdata **vgs_p = vgs; *vgs_p; vgs_p++)
    {
      UDisksLinuxVolumeGroupObject *group;
      GSList *vg_pvs = NULL;

      vg_name = (*vgs_p)->name;
      group = g_hash_table_lookup (udisks_lvm2_state_get_name_to_volume_group (state), vg_name);
      if (group == NULL)
        {
          group = udisks_linux_volume_group_object_new (daemon, vg_name);
          g_hash_table_insert (udisks_lvm2_state_get_name_to_volume_group (state),
                               g_strdup (vg_name), group);
        }

      /* Collect the PVs belonging to this VG; the list (and the PV data
       * in it) is handed over to the update call below. */
      for (BDLVMPVdata **pvs_p = pvs; *pvs_p; pvs_p++)
        if (g_strcmp0 ((*pvs_p)->vg_name, vg_name) == 0)
          vg_pvs = g_slist_prepend (vg_pvs, *pvs_p);

      udisks_linux_volume_group_object_update (group, *vgs_p, vg_pvs);
    }

  /* this is safe to do -- all BDLVMPVdata objects are still existing
   * because the function that frees them is scheduled in main loop by
   * the udisks_linux_volume_group_object_update() call above */
  for (BDLVMPVdata **pvs_p = pvs; *pvs_p; pvs_p++)
    if ((*pvs_p)->vg_name == NULL)
      bd_lvm_pvdata_free (*pvs_p);

  /* only free the containers, the contents were passed further */
  g_free (vgs);
  g_free (pvs);
}