// Dispatches an engine event to a Lua callback.
// The event data is translated into a Lua table and handed to `func`.
// Returns the callback's integer result, or -1 if the invocation threw.
// When `ignore_error` is set, failures are discarded silently instead of logged.
static int LuaOnSignal(lua_State *pState, CYDWEEventData &eventData, bool ignore_error, luabind::object const& func)
{
    lua_stack_guard tmp_guard(pState);
    TranslateEventDataToLuaTable(pState, eventData);
    luabind::object event_data(luabind::from_stack(pState, -1));
    try
    {
        return luabind::call_function<int>(func, event_data);
    }
    catch (luabind::error const& e)
    {
        if (!ignore_error)
        {
            // luabind::error carries the lua_State; the message sits on its stack top.
            LOGGING_ERROR(lg) << "exception: \"" << e.what() << "\" " << lua_tostring(e.state(), -1);
        }
    }
    catch (std::exception const& e)
    {
        if (!ignore_error)
        {
            LOGGING_ERROR(lg) << "exception: \"" << e.what() << "\"";
        }
    }
    catch (...)
    {
        if (!ignore_error)
        {
            LOGGING_ERROR(lg) << "unknown exception";
        }
    }
    return -1;
}
static void InitIATHook() { LOGGING_DEBUG(lg) << "IAT hook initialization started."; if (pgWeIatHooker.open_module(::GetModuleHandleW(NULL))) { if (pgWeIatHooker.open_dll("shell32.dll")) { INSTALL_WE_IAT_HOOK(SHBrowseForFolderA); } else { LOGGING_ERROR(lg) << "Cannot find shell32.dll in WE."; } if (pgWeIatHooker.open_dll("comdlg32.dll")) { INSTALL_WE_IAT_HOOK(GetSaveFileNameA); INSTALL_WE_IAT_HOOK(GetOpenFileNameA); } else { LOGGING_ERROR(lg) << "Cannot find comdlg32.dll in WE."; } } else { LOGGING_ERROR(lg) << "WE initialize IAT hook failed."; } LOGGING_DEBUG(lg) << "IAT hook initialization completed."; }
// Handles a signal from the jury module: re-syncs the maneuver list to the
// announced maneuver id (when valid) and feeds the matching event into the
// mission state machine.
void MissionControl::eventJurySignalReceived (juryActions ja, int maneuverEntryID )
{
    LOGGING_INFO(mcLogger, "eventJurySignalReceived: " << endl << "\tmaneuverID:" << maneuverEntryID << " action: " << ja << endl );
    // Reject negative ids before any comparison against the unsigned current id.
    if (maneuverEntryID < 0)
    {
        LOGGING_ERROR(mcLogger, "received a negative maneuver id! Cant handle this!!!!!" <<endl);
        return;
    }
    // Jump the maneuver list if the jury refers to a different maneuver.
    if (static_cast<unsigned int>(maneuverEntryID) != m_maneuverList->getCurrentAbsManeuverID())
    {
        m_maneuverList->setManeuverId(maneuverEntryID);
    }
    switch (ja)
    {
    case action_GETREADY:
        mStateMachine.process_event( EvGetReady() );
        break;
    case action_START:
        // TODO: re-enable entry-id validation (setManeuverId check was disabled
        // here and an EvError was raised on mismatch).
        mStateMachine.process_event( EvJuryGo() );
        break;
    case action_STOP:
        // TODO: send stop to driver module, best in the SM itself
        mStateMachine.process_event( EvJuryStop() );
        break;
    default:
        // Other jury actions are intentionally ignored.
        break;
    }
}
// Lua callback bridge for virtual-MPQ file loads.
// Calls func(filename); when the callback returns a string, copies it into a
// storm-allocated buffer with `reserve_size` zeroed trailing bytes and
// publishes it via *pbuf (and *plen when given).
// Returns false when the callback errors, returns a non-string, or the
// allocation fails; the stack guard restores the Lua stack on every path.
static bool VirtualMpqWatchCB(const base::lua::object& func, const std::string& filename, const void** pbuf, uint32_t* plen, uint32_t reserve_size)
{
    lua_State* L = func.l();
    base::lua::guard guard(L);
    try
    {
        func.push();
        lua_pushlstring(L, filename.data(), filename.size());
        if (LUA_OK != lua_pcall(L, 1, 1, 0))
        {
            // FIX: previously rethrown via the non-standard MSVC-only
            // std::exception(const char*) constructor, which is not portable
            // and would receive NULL if the error value is not a string.
            const char* msg = lua_tostring(L, -1);
            LOGGING_ERROR(logging::get_logger("lua")) << (msg ? msg : "unknown lua error");
            return false;
        }
    }
    catch (const std::exception& e)
    {
        LOGGING_ERROR(logging::get_logger("lua")) << e.what();
        return false;
    }
    if (LUA_TSTRING != lua_type(L, -1))
    {
        return false;
    }
    size_t len = 0;
    const char* buf = lua_tolstring(L, -1, &len);
    void* tmpbuf = base::warcraft3::virtual_mpq::storm_alloc(len + reserve_size);
    if (!tmpbuf)
    {
        return false;
    }
    memcpy(tmpbuf, buf, len);
    if (reserve_size)
    {
        // Zero the reserved tail so consumers see deterministic padding.
        memset(static_cast<unsigned char*>(tmpbuf) + len, 0, reserve_size);
    }
    *pbuf = tmpbuf;
    if (plen)
    {
        *plen = static_cast<uint32_t>(len);
    }
    lua_pop(L, 1);
    return true;
}
// Checks the OpenGL error state and terminates the process with a readable
// message when an error is pending.
// error_message: context prefix to log before the GLU-decoded error string.
// FIX: take the string by const reference (was by value — needless copy per call);
// callers are unaffected.
void ExitOnGLError(const std::string& error_message)
{
    const GLenum ErrorValue = glGetError();
    if (ErrorValue != GL_NO_ERROR)
    {
        // gluErrorString returns const GLubyte*; cast so the stream prints text,
        // using a named cast instead of the old C-style (char*).
        LOGGING_ERROR(Visualization, error_message << ": " << reinterpret_cast<const char*>(gluErrorString(ErrorValue)) << endl);
        exit(EXIT_FAILURE);
    }
}
void computeLinearLoad(const uint32_t nr_of_items, uint32_t* blocks, uint32_t* threads_per_block) { // if (nr_of_items <= cMAX_NR_OF_BLOCKS) // { // *blocks = nr_of_items; // *threads_per_block = 1; // } // else // { if(nr_of_items == 0) { LOGGING_WARNING( Gpu_voxels_helpers, "Number of Items is 0. Blocks and Threads per Block is set to 1. Size 0 would lead to a Cuda ERROR" << endl); *blocks = 1; *threads_per_block = 1; return; } if (nr_of_items <= cMAX_NR_OF_BLOCKS * cMAX_THREADS_PER_BLOCK) { *blocks = (nr_of_items + cMAX_THREADS_PER_BLOCK - 1) / cMAX_THREADS_PER_BLOCK; // calculation replaces a ceil() function *threads_per_block = cMAX_THREADS_PER_BLOCK; } else { /* In this case the kernel must perform multiple runs because * nr_of_items is larger than the gpu can handle at once. * To overcome this limit, use standard parallelism offsets * as when programming host code (increment by the number of all threads * running). Use something like * * uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; * * while (i < nr_of_items) * { * // perform some kernel operations here * * // increment by number of all threads that are running * i += blockDim.x * gridDim.x; * } * * CAUTION: currently cMAX_NR_OF_BLOCKS is 64K, although * GPUs with SM >= 3.0 support up to 2^31 -1 blocks in a grid! */ LOGGING_ERROR( Gpu_voxels_helpers, "computeLinearLoad: Number of Items " << nr_of_items << " exceeds the limit cMAX_NR_OF_BLOCKS * cMAX_THREADS_PER_BLOCK = " << (cMAX_NR_OF_BLOCKS*cMAX_THREADS_PER_BLOCK) << "! This number of items cannot be processed in a single invocation." << endl); *blocks = cMAX_NR_OF_BLOCKS; *threads_per_block = cMAX_THREADS_PER_BLOCK; } }
void TrafficSignDetAruco::initialize() { // get dictionary if(dictionary.fromFile(configurationFolder + "/roadsign.yml") == false) { LOGGING_ERROR(TrafficSignLogger, "[TrafficSignDetAruco] Dictionary file of road signs not found" << endl); } if(aruco::HighlyReliableMarkers::loadDictionary(dictionary) == false) { LOGGING_ERROR(TrafficSignLogger, "[TrafficSignDetAruco] loadDictionary function for markers failed (dictionary file not provided?)" << endl); } else { // dictionary is loaded, therefore marker detection is possible dictionaryLoaded = true; } // detector settings markerDetector.setMakerDetectorFunction(aruco::HighlyReliableMarkers::detect); markerDetector.setCornerRefinementMethod(aruco::MarkerDetector::LINES); markerDetector.setThresholdParams(THRESHOLD_PARAMETER_1, THRESHOLD_PARAMETER_2); markerDetector.setMinMaxSize(MIN_SIZE, MAX_SIZE); markerDetector.setWarpSize(WARP_SIZE); }
// Applies a byte patch at `address` and verifies it took effect.
// name: label used only for logging. Returns the patch/verify result.
static bool InstallPatch(const char* name, uintptr_t address, uint8_t *patch, uint32_t patchLength)
{
    const bool ok = MemoryPatchAndVerify((void*)address, patch, patchLength);
    if (!ok)
    {
        LOGGING_ERROR(lg) << base::format("Patch %s in 0x%08X failed.", name, address);
        return false;
    }
    LOGGING_TRACE(lg) << base::format("Patch %s in 0x%08X success.", name, address);
    return true;
}
void computeLinearLoad(const uint32_t nr_of_items, uint32_t* blocks, uint32_t* threads_per_block) { // if (nr_of_items <= cMAX_NR_OF_BLOCKS) // { // *blocks = nr_of_items; // *threads_per_block = 1; // } // else // { if(nr_of_items == 0) { LOGGING_ERROR( Gpu_voxels_helpers, "Number of Items is 0. Blocks and Threads per Block is set to 0. This will lead to a Cuda ERROR" << endl); *blocks = 0; *threads_per_block = 0; return; } if (nr_of_items <= cMAX_NR_OF_BLOCKS * cMAX_THREADS_PER_BLOCK) { *blocks = (nr_of_items + cMAX_THREADS_PER_BLOCK - 1) / cMAX_THREADS_PER_BLOCK; // calculation replaces a ceil() function *threads_per_block = cMAX_THREADS_PER_BLOCK; } else { /* In this case the kernel must perform multiple runs because * nr_of_items is larger than the gpu can handle at once. * To overcome this limits use standard parallelism offsets * as when programming host code (increment by the number of all threads * running). Use something like * * uint32_t i = blockIdx.x * blockDim.x + threadIdx.x; * * while (i < nr_of_items) * { * // perform some kernel operations here * * // increment by number of all threads that are running * i += blockDim.x * gridDim.x; * } */ *blocks = cMAX_NR_OF_BLOCKS; *threads_per_block = cMAX_THREADS_PER_BLOCK; } }
// Forwards a CAN message to the opened device.
// Drops the message (with a log entry) when the device is unavailable or when
// the message is null / an all-zero frame (id, dlc and rtr all zero).
void HardwareCanSinkPeak::set(const CanMessageStamped::Ptr& msg)
{
  if (!m_can_device)
  {
    LOGGING_ERROR(CAN, "CAN device is not available, ignoring received message." << endl);
    return;
  }
  // An all-zero frame is treated as "no message"; msg is checked first so the
  // dereference below is safe.
  const bool zero_frame = msg && (*msg)->id == 0 && (*msg)->dlc == 0 && (*msg)->rtr == 0;
  if (!msg || zero_frame)
  {
    LOGGING_WARNING(CAN, "No regular message received." << endl);
    return;
  }
  m_can_device->Send(**msg);
}
// Lua callback bridge for virtual-MPQ events.
// Calls func(name, data) and logs any error; the event has no return value,
// so failures are reported but not propagated.
static void VirtualMpqEventCB(const base::lua::object& func, const std::string& name, const std::string& data)
{
    lua_State* L = func.l();
    base::lua::guard guard(L);  // restores the Lua stack on every exit path
    try
    {
        func.push();
        lua_pushlstring(L, name.data(), name.size());
        lua_pushlstring(L, data.data(), data.size());
        if (LUA_OK != lua_pcall(L, 2, 0, 0))
        {
            // FIX: previously rethrown via the non-standard MSVC-only
            // std::exception(const char*) constructor; log directly instead,
            // guarding against a non-string error value (NULL message).
            const char* msg = lua_tostring(L, -1);
            LOGGING_ERROR(logging::get_logger("lua")) << (msg ? msg : "unknown lua error");
        }
    }
    catch (const std::exception& e)
    {
        LOGGING_ERROR(logging::get_logger("lua")) << e.what();
    }
}
// Lua callback bridge asking whether the virtual MPQ map contains `filename`.
// Calls func(filename) and returns its boolean result; returns false when the
// callback errors or returns a non-boolean.
static bool VirtualMpqMapHasCB(const base::lua::object& func, const std::string& filename)
{
    lua_State* L = func.l();
    base::lua::guard guard(L);  // restores the Lua stack on every exit path
    try
    {
        func.push();
        lua_pushlstring(L, filename.data(), filename.size());
        if (LUA_OK != lua_pcall(L, 1, 1, 0))
        {
            // FIX: previously rethrown via the non-standard MSVC-only
            // std::exception(const char*) constructor; log directly instead,
            // guarding against a non-string error value (NULL message).
            const char* msg = lua_tostring(L, -1);
            LOGGING_ERROR(logging::get_logger("lua")) << (msg ? msg : "unknown lua error");
            return false;
        }
    }
    catch (const std::exception& e)
    {
        LOGGING_ERROR(logging::get_logger("lua")) << e.what();
        return false;
    }
    if (LUA_TBOOLEAN != lua_type(L, -1))
    {
        return false;
    }
    bool res = !!lua_toboolean(L, -1);
    lua_pop(L, 1);
    return res;
}
// Opens the Peak CAN device named in `uri`.
// uri may carry a "baudrate" query (kbps, default 500).
// On any initialization failure m_can_device is reset to empty, which the
// sink's set() checks before sending.
HardwareCanSinkPeak::HardwareCanSinkPeak(const std::string& uri, const std::string& name)
  : HardwareCanSink(uri, name),
    m_can_device()
{
  icl_sourcesink::SimpleURI parsed_uri(uri);
  uint32_t can_baudrate = 500;  // default baudrate in kbps
  boost::optional<uint32_t> uri_baudrate = parsed_uri.getQuery<uint32_t>("baudrate");
  if (uri_baudrate)
  {
    can_baudrate = *uri_baudrate;
  }
  LOGGING_DEBUG(CAN, "Device: " << parsed_uri.path() << endl);
  LOGGING_DEBUG(CAN, "Baudrate: " << can_baudrate << " kbps" << endl);
  LOGGING_DEBUG(CAN, "Opening CAN-Device... " << endl);
  tCanDevice::CheckLXRTInterface();
  m_can_device.reset(tCanDevice::Create(parsed_uri.path().c_str(),
                                        O_RDWR | O_NONBLOCK,
                                        0xff, 0xff,
                                        can_baudrate,
                                        300, 8000));
  // FIX: guard against Create() returning NULL before calling IsInitialized()
  // (previously an unconditional dereference).
  if (m_can_device && m_can_device->IsInitialized())
  {
    LOGGING_DEBUG(CAN, "CAN device successfully initialized." << endl);
  }
  else
  {
    m_can_device.reset();
    LOGGING_ERROR(CAN, "Error initializing CAN device." << endl);
    return;
  }
}
int main(int argc, char* argv1[]) { TSK_VS_INFO* lVsInfo = NULL; TSK_OFF_T lCnt = 0; char lBuf[32768] = { 0 }; unsigned lCntRead = 0; TSK_IMG_INFO* lImgInfo = OS_FH_INVALID; OS_FH_TYPE lOut = OS_FH_INVALID; const TSK_TCHAR *const *argv; #ifdef TSK_WIN32 argv = CommandLineToArgvW(GetCommandLineW(), &argc); #else argv = (const TSK_TCHAR *const *) argv1; #endif lOut = OS_FOPEN_WRITE(argv[2]); if (lOut == OS_FH_INVALID) { LOGGING_ERROR("Could not open export image in write mode. \n") exit(1); } lImgInfo = tsk_img_open( 1, /* number of images */ (argv + 1), /* path to images */ TSK_IMG_TYPE_DETECT, /* disk image type */ 0); /* size of device sector in bytes */ if (lImgInfo != NULL) { TSK_OFF_T lSizeSectors = lImgInfo->size / lImgInfo->sector_size + \ (lImgInfo->size % lImgInfo->sector_size ? 1 : 0); LOGGING_INFO("Image size (Bytes): %lu, Image size (sectors): %lu\n", lImgInfo->size, lSizeSectors); lVsInfo = tsk_vs_open(lImgInfo, 0, TSK_VS_TYPE_DETECT); if (lVsInfo != NULL) { if (tsk_vs_part_walk(lVsInfo, 0, /* start */ lVsInfo->part_count - 1, /* end */ TSK_VS_PART_FLAG_ALL, /* all partitions */ part_act, /* callback */ (void*) lOut /* data passed to the callback */ ) != 0) { fprintf(stderr, "Problem when walking partitions. \n"); } } else { LOGGING_DEBUG("Volume system cannot be opened.\n"); for (lCnt = 0; lCnt < lSizeSectors; lCnt++) { lCntRead = lCnt == lSizeSectors - 1 ? lImgInfo->size % lImgInfo->sector_size : lImgInfo->sector_size; LOGGING_DEBUG("Reading %u bytes\n", lCntRead); tsk_img_read( lImgInfo, /* handler */ lCnt * lImgInfo->sector_size, /* start address */ lBuf, /* buffer to store data in */ lCntRead /* amount of data to read */ ); data_act(lBuf, lCntRead, lCnt * lImgInfo->sector_size, lOut); } } } else { LOGGING_ERROR("Problem opening the image. \n"); tsk_error_print(stderr); exit(1); } if (lOut != OS_FH_INVALID) { OS_FCLOSE(lOut); } return EXIT_SUCCESS; }
// Factory: builds the CAN sink matching `specifier` (a URI-like scheme string).
// An empty specifier falls back to the configuration file.
// Supported schemes:
//   file  -> tHardwareCanSinkCanfile (optional "baud" query)
//   console:// -> tHardwareCanSinkInterpretMessage when interpretation is
//                 configured with a CAN mask, else tHardwareCanSinkConsole
//   can://     -> tHardwareCanSinkPeak (POSIX only; optional "baud" query)
// Returns a heap-allocated sink owned by the caller, or NULL on failure.
// NOTE(review): `lspecifier` is computed but apparently never used afterwards;
// the original (case-preserving) specifier is what gets opened — confirm intent.
tHardwareCanSink * tHardwareCanSinkFactory::Construct(std::string specifier)
{
  icl_core::SchemeParser parser;
  if (specifier == "")
  {
    // No explicit specifier: take the one configured in the config file.
    LOGGING_DEBUG(icl_hardware::can::CAN, "No specifier was set, read settings from configfile" << "\n");
    specifier = interpretConfigFile();
  }
  if (parser.parseScheme(specifier))
  {
    // convert to lowercase
    std::string lspecifier(parser.getSchemeResult().specifier);
    for (unsigned int i = 0; i < lspecifier.size(); ++i)
    {
      lspecifier[i] = tolower(lspecifier[i]);
    }
    LOGGING_DEBUG(icl_hardware::can::CAN, "tHardwareCanSinkFactory parser result: \n");
    LOGGING_DEBUG(icl_hardware::can::CAN, " scheme_name: "<< parser.getSchemeResult().scheme_name << "\n");
    LOGGING_DEBUG(icl_hardware::can::CAN, " specifier: "<< parser.getSchemeResult().specifier << "\n");
    LOGGING_DEBUG(icl_hardware::can::CAN, " anchor: "<< parser.getSchemeResult().anchor << "\n");
    LOGGING_DEBUG(icl_hardware::can::CAN, " #queries: "<< parser.getSchemeResult().queries.size() << icl_core::logging::endl);
    if (parser.getSchemeResult().scheme_type == icl_core::FileScheme)
    {
      // File scheme: record messages to a CAN file sink.
      tHardwareCanSinkCanfile * hardware_data_sink = new tHardwareCanSinkCanfile();
      // Scan the URI queries for an optional "baud" rate.
      icl_core::QueryList query_list = parser.getSchemeResult().queries;
      unsigned int baud_rate = 0;
      for (unsigned int i=0; i<query_list.size(); ++i)
      {
        if (query_list[i].name == "baud")
        {
          baud_rate = atoi(query_list[i].value.c_str());
        }
      }
      if (baud_rate != 0)
      {
        hardware_data_sink->Open(parser.getSchemeResult().specifier, baud_rate);
      }
      else
      {
        hardware_data_sink->Open(parser.getSchemeResult().specifier);
      }
      return hardware_data_sink;
    }
    else if (parser.getSchemeResult().scheme_type == icl_core::OtherScheme)
    {
      if (parser.getSchemeResult().scheme_name == "console://")
      {
        // Console sink; optionally wrapped with message interpretation
        // when configured (requires a CAN mask).
        if (icl_core::config::getDefault<bool>("/icl_hardware_can/use_interpret", false))
        {
          icl_core::String can_matrix = icl_core::config::getDefault<icl_core::String>("/icl_hardware_can/can_mask", "");
          if (can_matrix != "")
          {
            tHardwareCanSinkInterpretMessage * hardware_data_sink = new tHardwareCanSinkInterpretMessage();
            hardware_data_sink->Open(parser.getSchemeResult().specifier, can_matrix);
            return hardware_data_sink;
          }
          else
          {
            // Falls through to the "could not distinguish" error below.
            LOGGING_ERROR(icl_hardware::can::CAN, "Interpretation sink must be used together with CAN mask!" << icl_core::logging::endl);
          }
        }
        else
        {
          tHardwareCanSinkConsole * hardware_data_sink = new tHardwareCanSinkConsole();
          hardware_data_sink->Open(parser.getSchemeResult().specifier);
          return hardware_data_sink;
        }
      }
      if (parser.getSchemeResult().scheme_name == "can://")
      {
#ifdef _SYSTEM_POSIX_
        // Real CAN hardware (Peak); optional "baud" query as above.
        tHardwareCanSinkPeak * hardware_data_sink = new tHardwareCanSinkPeak();
        icl_core::QueryList query_list = parser.getSchemeResult().queries;
        unsigned int baud_rate = 0;
        for (unsigned int i=0; i<query_list.size(); ++i)
        {
          if (query_list[i].name == "baud")
          {
            baud_rate = atoi(query_list[i].value.c_str());
          }
        }
        if (baud_rate != 0)
        {
          hardware_data_sink->Open(parser.getSchemeResult().specifier, baud_rate);
        }
        else
        {
          hardware_data_sink->Open(parser.getSchemeResult().specifier);
        }
        return hardware_data_sink;
#else
        LOGGING_WARNING(icl_hardware::can::CAN, "can:// devices only available on posix platforms.");
        return NULL;
#endif
      }
    }
    // Parsed, but no branch above produced a sink.
    LOGGING_ERROR(icl_hardware::can::CAN, "Could not distinguish HardwareSink from scheme \"" << specifier << "\"!" << icl_core::logging::endl);
  }
  else
  {
    LOGGING_ERROR(icl_hardware::can::CAN, "Failed to parse scheme \"" << specifier << "\"!" << icl_core::logging::endl);
  }
  return NULL;
}