// Decode a raw buffer into a wide string.
// If the buffer starts with a byte-order mark it is treated as UCS-2 text
// (the BOM itself is skipped); otherwise the bytes are handed to MBSTOWCS
// as a narrow multibyte string.
// NOTE(review): the BOM probe reads the buffer as unsigned short in native
// byte order — presumably the buffer is host-endian; confirm for BE targets.
wcs getTextUCS2(const void* buffer, size_t size)
{
    if (size >= 2 && *(unsigned short*)buffer == UNICODE_BYTE_ORDER_MARK)
    {
        const wchar_t* text = ((const wchar_t*)buffer) + 1;      // skip BOM
        size_t charCount = (size_t)((size - 2) >> 1);            // bytes -> wchar count
        return wcs(text, charCount);
    }
    return MBSTOWCS(mbs((char*)buffer, (size_t)size));
}
// Copy a V8 value, converted to an ASCII narrow string, into 'buf'
// (including the terminating NUL).
// Throws a V8 exception and returns false when the buffer cannot be grown.
bool GetMBString(Handle<Value>& val, cs::Memory<char>& buf)
{
    String::AsciiValue asciiVal(val);
    const int byteCount = strlen(*asciiVal) + 1; // include the trailing NUL
    if (!buf.SetLength(byteCount))
    {
        // Message text is user-facing ("out of memory"); kept verbatim.
        ThrowException(String::New("可用内存耗尽"));
        return false;
    }
    buf.CopyFrom(*asciiVal, byteCount);
    return true;
}
void CPartialResultAggregator::sendResult(const void *row) { CMessageBuffer mb; if (row) { CMemoryRowSerializer mbs(mb); activity.queryRowSerializer()->serialize(mbs,(const byte *)row); } if (!activity.queryContainer().queryJob().queryJobComm().send(mb, 0, activity.queryMpTag(), 5000)) throw MakeThorException(0, "Failed to give partial result to master"); }
void sendResult(const void *row, IOutputRowSerializer *serializer, rank_t dst) { CMessageBuffer mb; DelayedSizeMarker sizeMark(mb); if (row&&hadElement) { CMemoryRowSerializer mbs(mb); serializer->serialize(mbs,(const byte *)row); sizeMark.write(); } container.queryJob().queryJobComm().send(mb, dst, mpTag); }
virtual void process() { IRecordSize *recordSize = helper->queryOutputMeta(); Owned<IThorRowInterfaces> rowIf = createThorRowInterfaces(queryRowManager(), helper->queryOutputMeta(), queryId(), queryCodeContext()); OwnedConstThorRow result = getAggregate(*this, container.queryJob().querySlaves(), *rowIf, *helper, mpTag); if (!result) return; CMessageBuffer msg; CMemoryRowSerializer mbs(msg); rowIf->queryRowSerializer()->serialize(mbs, (const byte *)result.get()); if (!queryJobChannel().queryJobComm().send(msg, 1, mpTag, 5000)) throw MakeThorException(0, "Failed to give result to slave"); }
void sendResult(const void *row, IOutputRowSerializer *serializer) { CMessageBuffer mb; size32_t start = mb.length(); size32_t sz = 0; mb.append(sz); if (row&&hadElement) { CMemoryRowSerializer mbs(mb); serializer->serialize(mbs,(const byte *)row); sz = mb.length()-start-sizeof(size32_t); mb.writeDirect(start,sizeof(size32_t),&sz); } container.queryJob().queryJobComm().send(mb, 0, masterMpTag); }
// Formats a wide string by bouncing through the narrow vsnprintf():
// convert the format wide->narrow, format, convert the output narrow->wide.
// Returns the number of wide characters written (excluding the NUL), or -1
// on conversion failure or truncation, matching swprintf() semantics.
int vswprintf(wchar_t* wcs, size_t maxlen, const wchar_t* fmt, va_list ap) {
  mbstate_t mbstate;
  memset(&mbstate, 0, sizeof(mbstate));
  // At most, each wide character (UTF-32) can be expanded to four narrow
  // characters (UTF-8).
  const size_t max_mb_len = maxlen * 4;
  const size_t mb_fmt_len = wcslen(fmt) * 4 + 1;
  // Narrow copy of the format string for vsnprintf() to consume.
  UniquePtr<char[]> mbfmt(new char[mb_fmt_len]);
  if (wcsrtombs(mbfmt.get(), &fmt, mb_fmt_len, &mbstate) == MBS_FAILURE) {
    return -1;
  }
  // Format into an oversized narrow scratch buffer.
  UniquePtr<char[]> mbs(new char[max_mb_len]);
  int nprinted = vsnprintf(mbs.get(), max_mb_len, mbfmt.get(), ap);
  if (nprinted == -1) {
    return -1;
  }
  // mbstate is back in the initial shift state after the full wcsrtombs()
  // conversion above, so it can be reused for the reverse direction.
  const char* mbsp = mbs.get();
  if (mbsrtowcs(wcs, &mbsp, maxlen, &mbstate) == MBS_FAILURE) {
    return -1;
  }
  // Can't use return value from vsnprintf because that number is in narrow
  // characters, not wide characters.
  int result = wcslen(wcs);
  // swprintf differs from snprintf in that it returns -1 if the output was
  // truncated.
  //
  // Truncation can occur in two places:
  // 1) vsnprintf truncated, in which case the return value is greater than the
  // length we passed.
  // 2) Since the char buffer we pass to vsnprintf might be oversized, that
  // might not truncate while mbsrtowcs will. In this case, mbsp will point
  // to the next unconverted character instead of nullptr.
  if (nprinted >= max_mb_len || mbsp != nullptr) {
    return -1;
  }
  return result;
}
static int main_(int /* argc */, char** /*argv*/) { std::string mbs("multibyte string"); std::wstring ws1(L"wide string #1"); stlsoft::simple_wstring ws2(L"wide string #2"); pan::log_NOTICE("mbs=", mbs, ", ws1=", pan::w2m(ws1), ", ws2=", pan::w2m(ws2)); #ifdef PANTHEIOS_SAFE_ALLOW_SHIM_INTERMEDIATES VARIANT var; var.vt = VT_I4; var.lVal = -10; pan::log_DEBUG("var=", pan::w2m(var)); #endif /* PANTHEIOS_SAFE_ALLOW_SHIM_INTERMEDIATES */ return EXIT_SUCCESS; }
// INDEXWRITE slave activity: builds index part(s) from the input stream.
// With 'refactor' enabled, active nodes merge streams pulled from serving
// nodes; with 'singlePartKey', node 1 serially pulls all rows from nodes
// 2..N and writes the single part itself.
// NOTE(review): this excerpt is truncated — the non-singlePartKey path and
// the function close lie beyond the visible text.
virtual void process() override
{
    ActPrintLog("INDEXWRITE: Start");
    init();
    IRowStream *stream = inputStream;
    ThorDataLinkMetaInfo info;
    input->getMetaInfo(info);
    outRowAllocator.setown(getRowAllocator(helper->queryDiskRecordSize()));
    start();
    if (refactor)
    {
        assertex(isLocal);
        if (active)
        {
            // Active node: merge my own stream with streams served by the
            // nodes that feed this part.
            unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0);
            assertex(0 == container.queryJob().querySlaves() % targetWidth);
            unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth;
            unsigned myPart = queryJobChannel().queryMyRank();
            IArrayOf<IRowStream> streams;
            streams.append(*LINK(stream));
            --partsPerNode;
            // Should this be merging 1,11,21,31 etc.
            unsigned p=0;
            unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1));
            for (; p<partsPerNode; p++)
            {
                streams.append(*createRowStreamFromNode(*this, fromPart++, queryJobChannel().queryJobComm(), mpTag, abortSoon));
            }
            ICompare *icompare = helper->queryCompare();
            assertex(icompare);
            Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter;
            myInputStream.setown(createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter));
            stream = myInputStream;
        }
        else // serve nodes, creating merged parts
            rowServer.setown(createRowServer(this, stream, queryJobChannel().queryJobComm(), mpTag));
    }
    processed = THORDATALINK_STARTED;

    // single part key support
    // has to serially pull all data from nodes 2-N
    // nodes 2-N, could/should start pushing some data (as it's supposed to be small) to cut down on serial nature.
    unsigned node = queryJobChannel().queryMyRank();
    if (singlePartKey)
    {
        if (1 == node)
        {
            try
            {
                open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize());
                // First drain my own local input.
                loop
                {
                    OwnedConstThorRow row = inputStream->ungroupedNextRow();
                    if (!row)
                        break;
                    if (abortSoon)
                        return;
                    processRow(row);
                }
                // Then pull batches of serialized rows from each other node
                // in turn. NB: this 'node' deliberately shadows the outer one.
                unsigned node = 2;
                while (node <= container.queryJob().querySlaves())
                {
                    Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input);
                    CMessageBuffer mb;
                    Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb);
                    CThorStreamDeserializerSource rowSource;
                    rowSource.setStream(stream);
                    bool successSR;
                    loop
                    {
                        {
                            // Request the next batch from 'node'; flag that we
                            // are blocked receiving on mpTag2 for the duration.
                            BooleanOnOff tf(receivingTag2);
                            successSR = queryJobChannel().queryJobComm().sendRecv(mb, node, mpTag2);
                        }
                        if (successSR)
                        {
                            if (rowSource.eos())
                                break; // empty reply == that node is done
                            Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input);
                            do
                            {
                                RtlDynamicRowBuilder rowBuilder(allocator);
                                size32_t sz = deserializer->deserialize(rowBuilder, rowSource);
                                OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz);
                                processRow(fRow);
                            }
                            while (!rowSource.eos());
                        }
                        // NOTE(review): when sendRecv fails this loop retries
                        // without backoff — presumably abort is signalled via
                        // the comms layer; confirm.
                    }
                    node++;
                }
            }
            catch (CATCHALL)
            {
                close(*partDesc, partCrc, true);
                throw;
            }
            close(*partDesc, partCrc, true);
            doStopInput();
        }
        else
        {
            // Nodes 2..N: answer node 1's batch requests with serialized rows
            // until the input is exhausted (an empty reply signals done).
            CMessageBuffer mb;
            CMemoryRowSerializer mbs(mb);
            Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input);
            loop
            {
                BooleanOnOff tf(receivingTag2);
                if (queryJobChannel().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more..
                {
                    if (abortSoon)
                        break;
                    mb.clear();
                    do
                    {
                        OwnedConstThorRow row = inputStream->ungroupedNextRow();
                        if (!row)
                            break;
                        serializer->serialize(mbs, (const byte *)row.get());
                    }
                    while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row
                    if (!queryJobChannel().queryJobComm().reply(mb))
                        throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node);
                    if (0 == mb.length())
                        break;
                }
            }
        }
    }
bool PSNaviController::open( const DeviceEnumerator *enumerator) { const ControllerDeviceEnumerator *pEnum = static_cast<const ControllerDeviceEnumerator *>(enumerator); const char *cur_dev_path= pEnum->get_path(); bool success= false; if (getIsOpen()) { SERVER_LOG_WARNING("PSNaviController::open") << "PSNavoController(" << cur_dev_path << ") already open. Ignoring request."; success= true; } else { char cur_dev_serial_number[256]; SERVER_LOG_INFO("PSNaviController::open") << "Opening PSNaviController(" << cur_dev_path << ")"; if (pEnum->get_serial_number(cur_dev_serial_number, sizeof(cur_dev_serial_number))) { SERVER_LOG_INFO("PSNaviController::open") << " with serial_number: " << cur_dev_serial_number; } else { cur_dev_serial_number[0]= '\0'; SERVER_LOG_INFO("PSNaviController::open") << " with EMPTY serial_number"; } HIDDetails.Device_path = cur_dev_path; #ifdef _WIN32 HIDDetails.Device_path_addr = HIDDetails.Device_path; HIDDetails.Device_path_addr.replace(HIDDetails.Device_path_addr.find("&col01#"), 7, "&col02#"); HIDDetails.Device_path_addr.replace(HIDDetails.Device_path_addr.find("&0000#"), 6, "&0001#"); HIDDetails.Handle_addr = hid_open_path(HIDDetails.Device_path_addr.c_str()); hid_set_nonblocking(HIDDetails.Handle_addr, 1); #endif HIDDetails.Handle = hid_open_path(HIDDetails.Device_path.c_str()); hid_set_nonblocking(HIDDetails.Handle, 1); IsBluetooth = (strlen(cur_dev_serial_number) > 0); if (getIsOpen()) // Controller was opened and has an index { // Get the bluetooth address #ifndef _WIN32 // On my Mac, getting the bt feature report when connected via // bt crashes the controller. So we simply copy the serial number. // It gets modified in getBTAddress. // TODO: Copy this over anyway even in Windows. Check getBTAddress // comments for handling windows serial_number. // Once done, we can remove the ifndef above. 
std::string mbs(cur_dev_serial_number); HIDDetails.Bt_addr = mbs; #endif if (getBTAddress(HIDDetails.Host_bt_addr, HIDDetails.Bt_addr)) { // Load the config file std::string btaddr = HIDDetails.Bt_addr; std::replace(btaddr.begin(), btaddr.end(), ':', '_'); cfg = PSNaviControllerConfig(btaddr); cfg.load(); // TODO: Other startup. success= true; } else { // If serial is still bad, maybe we have a disconnected // controller still showing up in hidapi SERVER_LOG_ERROR("PSNaviController::open") << "Failed to get bluetooth address of PSNaviController(" << cur_dev_path << ")"; success= false; } // Reset the polling sequence counter NextPollSequenceNumber= 0; } else { SERVER_LOG_ERROR("PSNaviController::open") << "Failed to open PSNaviController(" << cur_dev_path << ")"; success= false; } } return success; }
void test_a_doublebyte_conv(const std::string& encname) { wxString wxencname(encname.c_str(), wxConvUTF8); wxm::WXMEncoding* enc = wxm::WXMEncodingManager::Instance().GetWxmEncoding(wxencname); MB2UDataMap::const_iterator mb2uend = mb2u[encname].end(); for (size_t i=0; i<256; ++i) { wxByte wxb[2] = {wxByte(i), 0}; if (enc->IsLeadByte(wxb[0])) { for(size_t j=0; j<256; ++j) { char mbs_arr[3] = {i, j, 0}; MB2UDataMap::const_iterator it = mb2u[encname].find(mbs_arr); wxb[1] = wxByte(j); ucs4_t u = enc->MultiBytetoUCS4(wxb); if (u != 0) { BOOST_CHECK(it != mb2uend); if (it != mb2uend) { ucs4_t t = it->second; BOOST_CHECK(u == t); } } else { BOOST_CHECK(it == mb2uend); } } } else { char mbs_arr[2] = {i, 0}; MB2UDataMap::const_iterator it = mb2u[encname].find(mbs_arr); ucs4_t u = enc->MultiBytetoUCS4(wxb); if (u != 0) { BOOST_CHECK(it != mb2uend); if (it != mb2uend) { ucs4_t t = it->second; BOOST_CHECK(u == t); } } else { BOOST_CHECK(it == mb2uend); } } } U2MBDataMap::const_iterator u2mbend = u2mb[encname].end(); for (ucs4_t i=0; i<=0x10FFFF; ++i) { wxByte buf[4]; size_t n = enc->UCS4toMultiByte(i, buf); BOOST_CHECK(n <= 2); U2MBDataMap::const_iterator it = u2mb[encname].find(i); if (n == 1 || n == 2) { BOOST_CHECK(it != u2mbend); if (it != u2mbend) { buf[n] = 0; std::string mbs((const char*)buf); BOOST_CHECK(it->second == mbs); } } else if(n == 0) { BOOST_CHECK(it == u2mbend); } } mb2u[encname].clear(); u2mb[encname].clear(); }
// Round-trip conversion test for a double-byte encoding (xm:: variant):
// every byte pair (and single non-lead byte) must agree with the mb2u
// reference map, and every Unicode code point with the u2mb reference map.
void test_a_doublebyte_conv(const std::string& encname)
{
    std::wstring wencname(encname.begin(), encname.end());
    xm::Encoding* enc = xm::EncodingManager::Instance().GetEncoding(wencname);

    MB2UDataMap::const_iterator mb2uend = mb2u[encname].end();
    for (size_t i=0; i<256; ++i)
    {
        ubyte bs[2] = { ubyte(i), 0 };
        if (enc->IsLeadByte(bs[0]))
        {
            // Lead byte: try every trail byte with it.
            for(size_t j=0; j<256; ++j)
            {
                char mbs_arr[3] = { char(i), char(j), 0 };
                MB2UDataMap::const_iterator it = mb2u[encname].find(mbs_arr);
                bs[1] = ubyte(j);
                ucs4_t u = enc->MultiBytetoUCS4(bs);
                if (u != 0)
                {
                    // Converted pair must be present in the reference map
                    // and map to the same code point.
                    BOOST_CHECK(it != mb2uend);
                    if (it != mb2uend)
                    {
                        ucs4_t t = it->second;
                        BOOST_CHECK(u == t);
                    }
                }
                else
                {
                    BOOST_CHECK(it == mb2uend);
                }
            }
        }
        else
        {
            // Single-byte sequence.
            char mbs_arr[2] = { char(i), 0 };
            MB2UDataMap::const_iterator it = mb2u[encname].find(mbs_arr);
            ucs4_t u = enc->MultiBytetoUCS4(bs);
            if (u != 0)
            {
                BOOST_CHECK(it != mb2uend);
                if (it != mb2uend)
                {
                    ucs4_t t = it->second;
                    BOOST_CHECK(u == t);
                }
            }
            else
            {
                BOOST_CHECK(it == mb2uend);
            }
        }
    }

    // Reverse direction: UCS-4 -> multibyte over the whole Unicode range.
    U2MBDataMap::const_iterator u2mbend = u2mb[encname].end();
    for (ucs4_t i=0; i<=0x10FFFF; ++i)
    {
        ubyte buf[4];
        size_t n = enc->UCS4toMultiByte(i, buf);
        BOOST_CHECK(n <= 2); // double-byte encoding: at most 2 bytes
        U2MBDataMap::const_iterator it = u2mb[encname].find(i);
        if (n == 1 || n == 2)
        {
            BOOST_CHECK(it != u2mbend);
            if (it != u2mbend)
            {
                buf[n] = 0;
                std::string mbs((const char*)buf);
                BOOST_CHECK(it->second == mbs);
            }
        }
        else if(n == 0)
        {
            BOOST_CHECK(it == u2mbend);
        }
    }

    // Release the reference data for this encoding.
    mb2u[encname].clear();
    u2mb[encname].clear();
}
virtual void process() override { ActPrintLog("INDEXWRITE: Start"); init(); IRowStream *stream = inputStream; ThorDataLinkMetaInfo info; input->getMetaInfo(info); outRowAllocator.setown(getRowAllocator(helper->queryDiskRecordSize())); start(); if (refactor) { assertex(isLocal); if (active) { unsigned targetWidth = partDesc->queryOwner().numParts()-(buildTlk?1:0); assertex(0 == container.queryJob().querySlaves() % targetWidth); unsigned partsPerNode = container.queryJob().querySlaves() / targetWidth; unsigned myPart = queryJobChannel().queryMyRank(); IArrayOf<IRowStream> streams; streams.append(*LINK(stream)); --partsPerNode; // Should this be merging 1,11,21,31 etc. unsigned p=0; unsigned fromPart = targetWidth+1 + (partsPerNode * (myPart-1)); for (; p<partsPerNode; p++) { streams.append(*createRowStreamFromNode(*this, fromPart++, queryJobChannel().queryJobComm(), mpTag, abortSoon)); } ICompare *icompare = helper->queryCompare(); assertex(icompare); Owned<IRowLinkCounter> linkCounter = new CThorRowLinkCounter; myInputStream.setown(createRowStreamMerger(streams.ordinality(), streams.getArray(), icompare, false, linkCounter)); stream = myInputStream; } else // serve nodes, creating merged parts rowServer.setown(createRowServer(this, stream, queryJobChannel().queryJobComm(), mpTag)); } processed = THORDATALINK_STARTED; // single part key support // has to serially pull all data fron nodes 2-N // nodes 2-N, could/should start pushing some data (as it's supposed to be small) to cut down on serial nature. 
unsigned node = queryJobChannel().queryMyRank(); if (singlePartKey) { if (1 == node) { try { open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize()); for (;;) { OwnedConstThorRow row = inputStream->ungroupedNextRow(); if (!row) break; if (abortSoon) return; processRow(row); } unsigned node = 2; while (node <= container.queryJob().querySlaves()) { Linked<IOutputRowDeserializer> deserializer = ::queryRowDeserializer(input); CMessageBuffer mb; Owned<ISerialStream> stream = createMemoryBufferSerialStream(mb); CThorStreamDeserializerSource rowSource; rowSource.setStream(stream); bool successSR; for (;;) { { BooleanOnOff tf(receivingTag2); successSR = queryJobChannel().queryJobComm().sendRecv(mb, node, mpTag2); } if (successSR) { if (rowSource.eos()) break; Linked<IEngineRowAllocator> allocator = ::queryRowAllocator(input); do { RtlDynamicRowBuilder rowBuilder(allocator); size32_t sz = deserializer->deserialize(rowBuilder, rowSource); OwnedConstThorRow fRow = rowBuilder.finalizeRowClear(sz); processRow(fRow); } while (!rowSource.eos()); } } node++; } } catch (CATCHALL) { close(*partDesc, partCrc, true); throw; } close(*partDesc, partCrc, true); stop(); } else { CMessageBuffer mb; CMemoryRowSerializer mbs(mb); Linked<IOutputRowSerializer> serializer = ::queryRowSerializer(input); for (;;) { BooleanOnOff tf(receivingTag2); if (queryJobChannel().queryJobComm().recv(mb, 1, mpTag2)) // node 1 asking for more.. 
{ if (abortSoon) break; mb.clear(); do { OwnedConstThorRow row = inputStream->ungroupedNextRow(); if (!row) break; serializer->serialize(mbs, (const byte *)row.get()); } while (mb.length() < SINGLEPART_KEY_TRANSFER_SIZE); // NB: at least one row if (!queryJobChannel().queryJobComm().reply(mb)) throw MakeThorException(0, "Failed to send index data to node 1, from node %d", node); if (0 == mb.length()) break; } } } } else { if (!refactor || active) { try { StringBuffer partFname; getPartFilename(*partDesc, 0, partFname); ActPrintLog("INDEXWRITE: process: handling fname : %s", partFname.str()); open(*partDesc, false, helper->queryDiskRecordSize()->isVariableSize()); ActPrintLog("INDEXWRITE: write"); BooleanOnOff tf(receiving); if (!refactor || !active) receiving = false; do { OwnedConstThorRow row = inputStream->ungroupedNextRow(); if (!row) break; processRow(row); } while (!abortSoon); ActPrintLog("INDEXWRITE: write level 0 complete"); } catch (CATCHALL) { close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node); throw; } close(*partDesc, partCrc, isLocal && !buildTlk && 1 == node); stop(); ActPrintLog("INDEXWRITE: Wrote %" RCPF "d records", processed & THORDATALINK_COUNT_MASK); if (buildTlk) { ActPrintLog("INDEXWRITE: sending rows"); NodeInfoArray tlkRows; CMessageBuffer msg; if (firstNode()) { if (processed & THORDATALINK_COUNT_MASK) { if (enableTlkPart0) tlkRows.append(* new CNodeInfo(0, firstRow.get(), firstRowSize, totalCount)); tlkRows.append(* new CNodeInfo(1, lastRow.get(), lastRowSize, totalCount)); } } else { if (processed & THORDATALINK_COUNT_MASK) { CNodeInfo row(queryJobChannel().queryMyRank(), lastRow.get(), lastRowSize, totalCount); row.serialize(msg); } queryJobChannel().queryJobComm().send(msg, 1, mpTag); } if (firstNode()) { ActPrintLog("INDEXWRITE: Waiting on tlk to complete"); // JCSMORE if refactor==true, is rowsToReceive here right?? unsigned rowsToReceive = (refactor ? 
(tlkDesc->queryOwner().numParts()-1) : container.queryJob().querySlaves()) -1; // -1 'cos got my own in array already ActPrintLog("INDEXWRITE: will wait for info from %d slaves before writing TLK", rowsToReceive); while (rowsToReceive--) { msg.clear(); receiveMsg(msg, RANK_ALL, mpTag); // NH->JCS RANK_ALL_OTHER not supported for recv if (abortSoon) return; if (msg.length()) { CNodeInfo *ni = new CNodeInfo(); ni->deserialize(msg); tlkRows.append(*ni); } } tlkRows.sort(CNodeInfo::compare); StringBuffer path; getPartFilename(*tlkDesc, 0, path); ActPrintLog("INDEXWRITE: creating toplevel key file : %s", path.str()); try { open(*tlkDesc, true, helper->queryDiskRecordSize()->isVariableSize()); if (tlkRows.length()) { CNodeInfo &lastNode = tlkRows.item(tlkRows.length()-1); memset(lastNode.value, 0xff, lastNode.size); } ForEachItemIn(idx, tlkRows) { CNodeInfo &info = tlkRows.item(idx); builder->processKeyData((char *)info.value, info.pos, info.size); } close(*tlkDesc, tlkCrc, true); } catch (CATCHALL) { abortSoon = true; close(*tlkDesc, tlkCrc, true); removeFiles(*partDesc); throw; } } } else if (!isLocal && firstNode())