// ROOT macro: overlays the observed B2G-16-006 cross-section limit for a
// singlet vector-like T quark (T -> Wb) with the NLO theory prediction and
// writes the comparison plot to PDF and to a .C macro.
int main() {
    // Data points for limits
    //vector<double> const x{0.5, 1., 1.5, 2., 2.4, 2.7, 3.};
    vector<double> const x{700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500};  // T mass points (GeV)
    // limits in pb
    vector<double> const TbotWbot_Tb{7.0, 1.00, 0.70, 0.55, 0.40, 0.30, 0.28, 0.28, 0.28};
    //vector<double> const theory_Tb{5.820, 3.860, 2.720, 1.950, 1.350, 0.982, 0.716, 0.540, 0.408}; //1400.0100 interp
    vector<double> const theory_Tb{2.910, 1.930, 1.360, 0.975, 0.675, 0.491, 0.358, 0.270, 0.204}; //1400.0100 inter
    //vector<double> const theory_Tb{7.29, 3.69, 2.05, 1.19, 0.682, 0.416, 0.259, 0.168, 0.111}; Andrea and Xanda
    //vector<double> const theory_Tt{7.07, 3.64, 2.01, 1.16, 0.693, 0.431, 0.275, 0.180, 0.119};
    // NOTE(review): scale_Tb is declared but never used below.
    vector<double> const scale_Tb{0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01};
    //theory_Tb=scale_Tb*theory_Tt;

    // Canvas
    TCanvas canvas("canvas", "", 1000, 1000);
    canvas.SetTicks();
    canvas.SetGrid(kFALSE);
    canvas.SetGridy(kFALSE);

    // Style
    gStyle->SetStripDecimals(kFALSE);

    // Graphs with observed and mean expected limits
    TGraph graph_Nikoz_Tb(x.size(), x.data(), TbotWbot_Tb.data());
    graph_Nikoz_Tb.SetLineColor(kCyan-2);
    graph_Nikoz_Tb.SetLineStyle(1);
    graph_Nikoz_Tb.SetLineWidth(2);

    TGraph graph_theory_Tb(x.size(), x.data(), theory_Tb.data());
    graph_theory_Tb.SetLineColor(kBlack);
    graph_theory_Tb.SetLineStyle(2);
    graph_theory_Tb.SetLineWidth(3);
    /* TGraph graph_theory_Tt(x.size(), x.data(), theory_Tt.data()); graph_theory_Tt.SetLineColor(kBlack); graph_theory_Tt.SetLineStyle(2); graph_theory_Tt.SetLineWidth(3); */

    // add reference lines...
    TLine mum1(0.25, 231, 3.25, 231);
    TLine mup1(0.25, 18, 3.25, 18);
    mum1.SetLineWidth(2);
    mum1.SetLineColor(2);
    mup1.SetLineWidth(2);
    // ..and corresponding text on top of them
    TLatex latexm1;
    TLatex latexp1;
    latexm1.SetTextAlign(11);
    latexp1.SetTextAlign(10);
    latexm1.SetTextSize(0.030);
    latexp1.SetTextSize(0.030);

    gPad->SetLogy(1);  // limits span two decades; log y-axis
    graph_Nikoz_Tb.Draw("a3");
    graph_theory_Tb.Draw("l");
    //graph_theory_Tt.Draw("l");
    mum1.Draw("same");
    //mup1.Draw("same");
    latexm1.DrawLatex(.5,270, "#sigma_{VLQ}");
    //latexp1.DrawLatex(.5,30, "#sigma_{tHq}(C_{t} = 1)");

    // Decoration for axes
    graph_Nikoz_Tb.SetTitle(";m_{T}(GeV);" "95% CL limit on #sigma x BR (pb)");
    graph_Nikoz_Tb.GetYaxis()->SetRangeUser(0.1, 10.0);
    graph_Nikoz_Tb.GetXaxis()->SetTitleOffset(1.3);
    graph_Nikoz_Tb.GetYaxis()->SetTitleOffset(1.3);
    graph_Nikoz_Tb.GetYaxis()->SetLabelSize(0.03);
    graph_Nikoz_Tb.GetXaxis()->SetLabelSize(0.03);

    // Add a legend
    TLegend legend(0.35, 0.65, 0.85, 0.85);
    legend.SetLineColor(0);
    legend.SetHeader("Tb, T #rightarrow Wb, singlet ");
    legend.AddEntry((TObject*)0, " ", " ");
    legend.SetTextSize(0.030);
    //legend.SetMargin(0.2);
    //legend.SetEntrySeparation(5);
    legend.AddEntry(&graph_Nikoz_Tb, "B2G-16-006 Tb #rightarrow Wbb #rightarrow l#nubb", "l");
    legend.AddEntry((TObject*)0, "", "");
    legend.AddEntry(&graph_theory_Tb, "#sigma_{NLO}, c_{Wb}=1, BR(Wb)=0.5", "l");
    legend.AddEntry((TObject*)0, "", "");
    //legend.AddEntry(&graph_theory_Tt, "Theory NLO (c_{Wt}=0.3)");
    legend.Draw();

    // CMS labels
    //TLatex cmsLabel(0.12, 0.91, "#scale[1.2]{#font[62]{CMS}} #font[52]{Preliminary}");
    TLatex cmsLabel(0.12, 0.91, "#scale[1.2]{#font[62]{CMS}}");
    cmsLabel.SetNDC();
    cmsLabel.SetTextSize(0.04);
    cmsLabel.SetTextAlign(11);
    TLatex energyLabel(0.9, 0.91, "#font[42]{2.3 fb^{-1} (13 TeV)}");
    energyLabel.SetNDC();
    energyLabel.SetTextSize(0.04);
    energyLabel.SetTextAlign(31);
    cmsLabel.Draw();
    energyLabel.Draw();

    // Redraw the observed limit on top of the labels.
    graph_Nikoz_Tb.Draw("same");
    //graph_Heiner_Tb.Draw("same");
    //gPad->SetLogy(1);

    // Save the figure
    canvas.Print("plotTSingComparison_Wb.pdf");
    canvas.Print("plotTSingComparison_Wb.C");
    cout<<"printed"<<endl;
}
// Force-aligns the given word-id sequence against the raw audio buffer using
// PocketSphinx's state-alignment search and returns a timeline of phones with
// centisecond timestamps. Returns boost::none if the search cannot be
// finished cleanly; throws runtime_error on setup failures.
optional<Timeline<Phone>> getPhoneAlignment(
    const vector<s3wid_t>& wordIds,
    const vector<int16_t>& audioBuffer,
    ps_decoder_t& decoder)
{
    // Create alignment list
    lambda_unique_ptr<ps_alignment_t> alignment(
        ps_alignment_init(decoder.d2p),
        [](ps_alignment_t* alignment) { ps_alignment_free(alignment); });
    if (!alignment) throw runtime_error("Error creating alignment.");
    for (s3wid_t wordId : wordIds) {
        // Add word. Initial value for duration is ignored.
        ps_alignment_add_word(alignment.get(), wordId, 0);
    }
    // Expand the word list into phone/state entries.
    int error = ps_alignment_populate(alignment.get());
    if (error) throw runtime_error("Error populating alignment struct.");

    // Create search structure
    acmod_t* acousticModel = decoder.acmod;
    lambda_unique_ptr<ps_search_t> search(
        state_align_search_init("state_align", decoder.config, acousticModel, alignment.get()),
        [](ps_search_t* search) { ps_search_free(search); });
    if (!search) throw runtime_error("Error creating search.");

    // Start recognition
    error = acmod_start_utt(acousticModel);
    if (error) throw runtime_error("Error starting utterance processing for alignment.");

    {
        // Eventually end recognition (runs even if we return early below)
        auto endRecognition = gsl::finally([&]() { acmod_end_utt(acousticModel); });

        // Start search
        ps_search_start(search.get());

        // Process entire audio clip
        const int16* nextSample = audioBuffer.data();
        size_t remainingSamples = audioBuffer.size();
        bool fullUtterance = true;
        while (acmod_process_raw(acousticModel, &nextSample, &remainingSamples, fullUtterance) > 0) {
            // Drain every feature frame produced by the raw-audio step.
            while (acousticModel->n_feat_frame > 0) {
                ps_search_step(search.get(), acousticModel->output_frame);
                acmod_advance(acousticModel);
            }
        }

        // End search
        error = ps_search_finish(search.get());
        if (error) return boost::none;
    }

    // Extract phones with timestamps
    char** phoneNames = decoder.dict->mdef->ciname;
    Timeline<Phone> result;
    for (ps_alignment_iter_t* it = ps_alignment_phones(alignment.get()); it; it = ps_alignment_iter_next(it)) {
        // Get phone
        ps_alignment_entry_t* phoneEntry = ps_alignment_iter_get(it);
        s3cipid_t phoneId = phoneEntry->id.pid.cipid;
        string phoneName = phoneNames[phoneId];

        // Skip silence entries.
        if (phoneName == "SIL") continue;

        // Add entry
        centiseconds start(phoneEntry->start);
        centiseconds duration(phoneEntry->duration);
        Phone phone = PhoneConverter::get().parse(phoneName);
        if (phone == Phone::AH && duration < 6_cs) {
            // Heuristic: < 6_cs is schwa. Pocketsphinx doesn't differentiate.
            phone = Phone::Schwa;
        }
        Timed<Phone> timedPhone(start, start + duration, phone);
        result.set(timedPhone);
    }
    return result;
}
// Views the std::vector's contiguous storage as a 1 x N Eigen row vector and
// returns it by value (the Map is copied into the returned Matrix).
inline Matrix<T, 1, Dynamic> to_row_vector(const vector<T> & vec) {
    const auto length = vec.size();
    return Matrix<T, 1, Dynamic>::Map(vec.data(), length);
}
void init(void) { // Create a FBO for normal scene rendering. glGenFramebuffersEXT(1, &renderFBO); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, renderFBO); GLint samples; glGetIntegerv(GL_MAX_SAMPLES, &samples); // Render buffer for color. glGenRenderbuffers(1, &renderColor); glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, renderColor); glRenderbufferStorageMultisampleEXT(GL_RENDERBUFFER_EXT, samples, GL_RGBA8, w, h); glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_RENDERBUFFER_EXT, renderColor); // Render buffer for depth. glGenRenderbuffersEXT(1, &renderDepth); glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, renderDepth); glRenderbufferStorageMultisampleEXT(GL_RENDERBUFFER_EXT, samples, GL_DEPTH_COMPONENT24, w, h); glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, renderDepth); // Check status for scene buffer. GLenum FBOstatus = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT); if (FBOstatus != GL_FRAMEBUFFER_COMPLETE) { cerr << "Status of render FBO is incomplete." << endl; exit(1); } glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); // FBO for shadow calculation. glGenFramebuffersEXT(1, &depthFBO_light1); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, depthFBO_light1); // The depth buffer texture glGenTextures(1, &depthTexture_light1); glBindTexture(GL_TEXTURE_2D, depthTexture_light1); // Texture parameters. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, 2 * w, 2 * h, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0); // Bind texture to shadow mapping FBO. glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, depthTexture_light1, 0); glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE); glClearDepth(1.0); // Check status. 
FBOstatus = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT); if (FBOstatus != GL_FRAMEBUFFER_COMPLETE) { cerr << "Status of depth texture is incomplete." << endl; exit(1); } glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); // FBO for shadow calculation. glGenFramebuffersEXT(1, &depthFBO_light2); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, depthFBO_light2); // The depth buffer texture glGenTextures(1, &depthTexture_light2); glBindTexture(GL_TEXTURE_2D, depthTexture_light2); // Texture parameters. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, 2 * w, 2 * h, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0); // Bind texture to shadow mapping FBO. glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, depthTexture_light2, 0); glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE); glClearDepth(1.0); // Check status. FBOstatus = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT); if (FBOstatus != GL_FRAMEBUFFER_COMPLETE) { cerr << "Status of depth texture is incomplete." << endl; exit(1); } glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); // FBO for shadow calculation. glGenFramebuffersEXT(1, &depthFBO_light3); glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, depthFBO_light3); // The depth buffer texture glGenTextures(1, &depthTexture_light3); glBindTexture(GL_TEXTURE_2D, depthTexture_light3); // Texture parameters. 
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, 2 * w, 2 * h, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0); // Bind texture to shadow mapping FBO. glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_TEXTURE_2D, depthTexture_light3, 0); glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE); glClearDepth(1.0); // Check status. FBOstatus = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT); if (FBOstatus != GL_FRAMEBUFFER_COMPLETE) { cerr << "Status of depth texture is incomplete." << endl; exit(1); } glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0); // Generate 2D texture for the mat. glGenTextures(1, &checkerBoard); glBindTexture(GL_TEXTURE_2D, checkerBoard); GLubyte* checkerData = new GLubyte[4 * h * w]; for (int i = 0; i < h; i++) for (int j = 0; j < w; j++) { GLubyte c = (((i & 0x16) == 0) ^ ((j & 0x16) == 0)) * 255; checkerData[4 * i * w + 4 * j] = c; checkerData[4 * i * w + 4 * j + 1] = c; checkerData[4 * i * w + 4 * j + 2] = c; checkerData[4 * i * w + 4 * j + 3] = 255; } glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, checkerData); glGenerateMipmap(GL_TEXTURE_2D); delete checkerData; checkerData = NULL; // Create a vertex array object. 
GLuint vao[1]; glGenVertexArrays(1, vao); glBindVertexArray(vao[0]); vec4 checkerBoardCoord[6] = { vec4(1, 0, 0, 1), vec4(1, 1, 0, 1), vec4(0, 0, 0, 1), vec4(0, 0, 0, 1), vec4(1, 1, 0, 1), vec4(0, 1, 0, 1) }; // Create and initialize a buffer object. GLuint buffer; glGenBuffers(1, &buffer); glBindBuffer(GL_ARRAY_BUFFER, buffer); glBufferData(GL_ARRAY_BUFFER, vertices.size()*sizeof(vec4) + normals.size()*sizeof(vec4) + sizeof(checkerBoardCoord), NULL, GL_STATIC_DRAW); // Pass vertices & normals data to opengl buffer object. glBufferSubData(GL_ARRAY_BUFFER, 0, vertices.size()*sizeof(vec4), vertices.data()); glBufferSubData(GL_ARRAY_BUFFER, vertices.size()*sizeof(vec4), normals.size()*sizeof(vec4), normals.data()); glBufferSubData(GL_ARRAY_BUFFER, vertices.size()*sizeof(vec4) + normals.size()*sizeof(vec4), sizeof(checkerBoardCoord), checkerBoardCoord); shadowShader = InitShader("vshadershadow.glsl", "fshadershadow.glsl"); glBindAttribLocation(shadowShader, 0, "vVertex"); glBindAttribLocation(shadowShader, 0, "fragmentdepth"); LinkShader(shadowShader); GLuint loc_ver_shadow = glGetAttribLocation(shadowShader, "vVertex"); glEnableVertexAttribArray(loc_ver_shadow); glVertexAttribPointer(loc_ver_shadow, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0)); // Load shaders and use the resulting shader program. program = InitShader("vshader21.glsl", "fshader21.glsl"); LinkShader(program); // Initialize the vertex position attribute from the vertex shader. 
GLuint loc_ver = glGetAttribLocation(program, "vPosition"); glEnableVertexAttribArray(loc_ver); glVertexAttribPointer(loc_ver, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0)); // Pass normal vectors of each triangle to vertex shader GLuint loc_col = glGetAttribLocation(program, "vNormal"); glEnableVertexAttribArray(loc_col); glVertexAttribPointer(loc_col, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(vertices.size()*sizeof(vec4))); // Pass normal vectors of each triangle to vertex shader GLuint loc_che = glGetAttribLocation(program, "checkerBoardCoord"); glEnableVertexAttribArray(loc_che); glVertexAttribPointer(loc_che, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(vertices.size()*sizeof(vec4) + normals.size()*sizeof(vec4))); }
// Copies the raw bytes into a freshly allocated, NUL-terminated char buffer.
// The value-initialization ("()") zeroes the array, so the trailing byte is
// already '\0' and an empty input yields an empty C string.
stringify(const vector<uint8_t>& value) {
    text = new char[value.size() + 1]();
    // Guard the copy: for an empty vector, data() may be nullptr, and
    // memcpy with a null pointer is UB even when the length is zero.
    if (!value.empty())
        memcpy(text, value.data(), value.size());
}
void Campaign::sort_data() { /// sort price_list and cp_sorted qsort(cp_sorted.data(), D, sizeof(int), compare); qsort(price_list.data(), num_unique, sizeof(int), compare); }
/**
 * Signs a precomputed digest with the certificate's private key.
 *
 * `method` is the signature-method URI (RSA or ECDSA with SHA-1/224/256/384/512);
 * `digest` is the raw hash value. Depending on how the key was acquired
 * (d->spec) the signature is produced either through CNG (NCryptSignHash)
 * or legacy CryptoAPI (CryptSignHashW). Throws on unsupported methods and
 * on PIN/login failures; returns the raw signature bytes on success.
 */
vector<unsigned char> WinSigner::sign(const string &method, const vector<unsigned char> &digest) const
{
    DEBUG("sign(method = %s, digest = length=%d)", method.c_str(), digest.size());
    BCRYPT_PKCS1_PADDING_INFO padInfo = { nullptr };
    ALG_ID alg = 0;
    // Map the URI onto the CNG hash-algorithm id (padInfo) and the CryptoAPI
    // hash id (alg). The ECDSA branches intentionally set neither.
    if(method == URI_RSA_SHA1) { padInfo.pszAlgId = NCRYPT_SHA1_ALGORITHM; alg = CALG_SHA1; }
    else if(method == URI_RSA_SHA224) { padInfo.pszAlgId = L"SHA224"; }  // no CALG id; CryptoAPI path rejects SHA224 below
    else if(method == URI_RSA_SHA256) { padInfo.pszAlgId = NCRYPT_SHA256_ALGORITHM; alg = CALG_SHA_256; }
    else if(method == URI_RSA_SHA384) { padInfo.pszAlgId = NCRYPT_SHA384_ALGORITHM; alg = CALG_SHA_384; }
    else if(method == URI_RSA_SHA512) { padInfo.pszAlgId = NCRYPT_SHA512_ALGORITHM; alg = CALG_SHA_512; }
    else if(method == URI_ECDSA_SHA224) {}
    else if(method == URI_ECDSA_SHA256) {}
    else if(method == URI_ECDSA_SHA384) {}
    else if(method == URI_ECDSA_SHA512) {}
    else THROW("Unsupported signature method");
    SECURITY_STATUS err = 0;
    vector<unsigned char> signature;
    switch(d->spec)
    {
    case CERT_NCRYPT_KEY_SPEC:
    {
        // CNG key: query the algorithm group to distinguish RSA (needs
        // PKCS#1 padding info) from EC keys (no padding info).
        DWORD size = 0;
        wstring algo(5, 0);
        err = NCryptGetProperty(d->key, NCRYPT_ALGORITHM_GROUP_PROPERTY, PBYTE(algo.data()), DWORD((algo.size() + 1) * 2), &size, 0);
        algo.resize(size/2 - 1);
        bool isRSA = algo == L"RSA";
        if(!d->pin.empty())
        {
            // Cache the PIN on the key handle so NCryptSignHash won't prompt.
            wstring pin = util::File::encodeName(d->pin);
            err = NCryptSetProperty(d->key, NCRYPT_PIN_PROPERTY, PBYTE(pin.c_str()), DWORD(pin.size()), 0);
            if(err != ERROR_SUCCESS)
                break;
        }
        // First call sizes the output buffer, second call signs into it.
        err = NCryptSignHash(d->key, isRSA ? &padInfo : nullptr, PBYTE(digest.data()), DWORD(digest.size()),
            nullptr, 0, &size, isRSA ? BCRYPT_PAD_PKCS1 : 0);
        if(FAILED(err))
            break;
        signature.resize(size);
        err = NCryptSignHash(d->key, isRSA ? &padInfo : nullptr, PBYTE(digest.data()), DWORD(digest.size()),
            signature.data(), DWORD(signature.size()), &size, isRSA ? BCRYPT_PAD_PKCS1 : 0);
        break;
    }
    case AT_SIGNATURE:
    case AT_KEYEXCHANGE:
    {
        // Legacy CryptoAPI key: inject the digest into a hash object
        // (HP_HASHVAL) and sign that.
        if(method == URI_RSA_SHA224)
            THROW("Unsupported digest");
        if(!d->pin.empty() && !CryptSetProvParam(d->key, d->spec == AT_SIGNATURE ? PP_SIGNATURE_PIN : PP_KEYEXCHANGE_PIN, LPBYTE(d->pin.c_str()), 0))
        {
            err = LONG(GetLastError());
            break;
        }
        HCRYPTHASH hash = 0;
        if(!CryptCreateHash(d->key, alg, 0, 0, &hash))
            THROW("Failed to sign");
        if(!CryptSetHashParam(hash, HP_HASHVAL, LPBYTE(digest.data()), 0))
        {
            CryptDestroyHash(hash);
            THROW("Failed to sign");
        }
        // Two-call pattern: first obtain the required size, then sign.
        DWORD size = 0;
        if(!CryptSignHashW(hash, d->spec, nullptr, 0, nullptr, &size))
        {
            err = LONG(GetLastError());
            CryptDestroyHash(hash);
            break;
        }
        signature.resize(size);
        if(!CryptSignHashW(hash, d->spec, nullptr, 0, signature.data(), &size))
            err = LONG(GetLastError());
        // Reverse the byte order — CryptoAPI emits the signature in the
        // opposite endianness from what callers expect.
        std::reverse(signature.begin(), signature.end());
        CryptDestroyHash(hash);
        break;
    }
    default:
        THROW("Failed to sign");
    }
    // Translate the accumulated status code into the library's exceptions.
    switch(err)
    {
    case ERROR_SUCCESS: break;
    case ERROR_CANCELLED:
    case SCARD_W_CANCELLED_BY_USER:
    {
        Exception e(__FILE__, __LINE__, "PIN acquisition canceled.");
        e.setCode(Exception::PINCanceled);
        throw e;
    }
    case SCARD_W_WRONG_CHV:
    default:
        ostringstream s;
        s << "Failed to login to token: " << err;
        Exception e(__FILE__, __LINE__, s.str());
        e.setCode(Exception::PINFailed);
        throw e;
    }
    return signature;
}
/**
 * Asynchronously copies the first `size` elements from a device vector to a
 * page-locked host vector on the given stream. Both containers must have
 * capacity for at least `size` elements.
 */
void copy(vector<T> const& src, host::vector<T>& dst, size_t size, stream& stream)
{
    assert(size <= dst.capacity());
    assert(size <= src.capacity());
    size_t const nbytes = size * sizeof(T);
    CUDA_CALL(cudaMemcpyAsync(dst.data(), src.data(), nbytes, cudaMemcpyDeviceToHost, stream.data()));
}
/**
 * Asynchronously copies a device vector into a device symbol array of equal
 * size on the given stream.
 */
void copy(vector<T> const& src, symbol<T[]>& dst, stream& stream)
{
    assert(src.size() == dst.size());
    size_t const nbytes = src.size() * sizeof(T);
    CUDA_CALL(cudaMemcpyToSymbolAsync(reinterpret_cast<char const*>(dst.data()), src.data(), nbytes, 0, cudaMemcpyDeviceToDevice, stream.data()));
}
/**
 * Synchronously copies a device symbol array into a device vector of equal
 * size.
 */
void copy(symbol<T[]> const& src, vector<T>& dst)
{
    assert(src.size() == dst.size());
    size_t const nbytes = src.size() * sizeof(T);
    CUDA_CALL(cudaMemcpyFromSymbol(dst.data(), reinterpret_cast<char const*>(src.data()), nbytes, 0, cudaMemcpyDeviceToDevice));
}
/**
 * Asynchronously copies an entire device vector to a page-locked host vector
 * of equal size on the given stream.
 */
void copy(vector<T> const& src, host::vector<T>& dst, stream& stream)
{
    assert(src.size() == dst.size());
    size_t const nbytes = src.size() * sizeof(T);
    CUDA_CALL(cudaMemcpyAsync(dst.data(), src.data(), nbytes, cudaMemcpyDeviceToHost, stream.data()));
}
// Convenience wrapper: runs the raw-array quicksort over the whole vector.
int quickSort(vector<int> &v) {
    int* const elems = v.data();
    return _quickSort(elems, 0, v.size());
}
// Convenience overload: forwards the point vector to the pointer/length
// variant of buildModel.
void buildModel (vector<ModelLine> &dataset, vector<Point3> &pointList) {
    buildModel(dataset, pointList.data(), pointList.size());
}
/**
 * Finds the group whose centroid is closest to the query sequence `qdata`
 * and prints its k most similar member intervals.
 *
 * strict == 0: search every interval [i, j] of the sequence;
 * strict == 1: only intervals with the same length as `interval`;
 * otherwise : only the exact interval given.
 * Results go to stdout; failures are reported on stderr.
 */
void OnlineSession::kSimilar(int dbindex, vector<double> qdata, TimeInterval interval, int k, int strict)
{
    // Lazily build the grouping structure for this dataset.
    if (groupings[dbindex] == NULL)
        genGrouping(dbindex, defaultST);

    TimeSeriesGrouping *t = groupings[dbindex];
    int slen = dataSets[dbindex]->seqLength;
    int ilen = interval.length();

    // Envelope over the whole query, used by getBestGroup for pruning.
    TimeSeriesIntervalEnvelope eqdata(TimeSeriesInterval(qdata.data(), TimeInterval(0, qdata.size() - 1)));

    double gb;
    int bsfStart = 0, bsfEnd = 0, bsfIndex = -1;
    double bsf = INF;  // best-so-far distance

    if (strict == 0) {
        // Scan every interval [i, j].
        for (int i = 0; i < slen; i++) {
            if (debug) {
                cout << "Searching groups from start=" << i << ", bsf=" << bsf << endl;
            }
            for (int j = i; j < slen; j++) {
                // NOTE: this local `k` (best group index) shadows the
                // function parameter `k` (requested result count).
                int k = t->groups[i * slen + j].getBestGroup(eqdata, &gb, bsf);
                if (k < 0)
                    continue;
                if (gb < bsf) {
                    bsf = gb;
                    bsfStart = i;
                    bsfEnd = j;
                    bsfIndex = k;
                }
            }
        }
    } else if (strict == 1) {
        // Only intervals of the same length as the query interval.
        for (int i = 0; i + ilen - 1 < slen; i++) {
            if (debug)
                cout << "Searching groups from start=" << i << endl;
            int j = i + (ilen - 1);
            // Shadows the parameter `k`, as above.
            int k = t->groups[i * slen + j].getBestGroup(eqdata, &gb, bsf);
            if (k < 0)
                continue;
            if (gb < bsf) {
                bsf = gb;
                bsfStart = i;
                bsfEnd = j;
                bsfIndex = k;
            }
        }
    } else {
        // Exact interval only.
        bsfStart = interval.start;
        bsfEnd = interval.end;
        bsfIndex = t->groups[bsfStart * slen + bsfEnd].getBestGroup(eqdata, &gb, bsf);
        if (bsfIndex >= 0)
            bsf = gb;
    }

    if (bsf == INF || bsfIndex < 0) {
        cerr << "kSimilar: Failed to find similar objects. No suitable candidate group centroids." << endl;
        return;
    }

    cout << "Found most similar interval and group: " << bsfIndex << "@" << "[" << bsfStart << "," << bsfEnd << "]" << endl;

    // Pull the k closest members of the winning group, then print each
    // member's index, distance and values.
    vector<kSim> sim = t->groups[bsfStart * slen + bsfEnd].groups[bsfIndex]->getSortedSimilar(eqdata, k);

    cout << "Discovered k similar points:" << endl;
    for (unsigned int i = 0; i < sim.size(); i++) {
        cout << "Series " << sim[i].index << ", interval [" << bsfStart << "," << bsfEnd << "] is at distance " << sim[i].distance << "." << endl;
        TimeSeriesInterval interval = (*t->groups[bsfStart * slen + bsfEnd].groups[bsfIndex]->slice)[sim[i].index];
        for (int j = 0; j < interval.length(); j++) {
            cout << interval[j] << " ";
        }
        cout << endl;
    }
}
//--------------------------------------------------------------------------------------
// Frame rendering
//--------------------------------------------------------------------------------------
void Render()
{
    // Simply clear the back buffer
    g_pImmediateContext->ClearRenderTargetView(g_pRenderTargetView, DirectX::Colors::White);
    g_MainShader->Draw();

    // Upload the right-handed orthographic projection used by the shapes pass.
    Const cb;
    ZeroMemory(&cb, sizeof(Const));
    cb.WVP = XMMatrixTranspose(XMMatrixOrthographicOffCenterRH(.0f, g_width, g_height, .0f, .0f, 1.0f));
    g_pImmediateContext->UpdateSubresource(g_WVP, 0, NULL, &cb, 0, 0);
    g_pImmediateContext->VSSetConstantBuffers(0, 1, &g_WVP);

    // ToDo
    for (Shape *s : shapes)
    {
        // Each shape's Render() appends its segments into the shared
        // `lines` buffer, which is then drawn as a line list.
        lines.clear();
        s->Render();
        ID3D11Buffer *g_vbLine = Buffer::CreateVertexBuffer(sizeof(Vertex) * lines.size(), false, lines.data());
        unsigned int stride = sizeof(Vertex);
        unsigned int offset = 0;
        g_pImmediateContext->IASetVertexBuffers(0, 1, &g_vbLine, &stride, &offset);
        g_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_LINELIST);
        g_pImmediateContext->Draw(lines.size(), 0);
        _RELEASE(g_vbLine);  // transient per-frame vertex buffer
    }

    // Switch to a left-handed projection for the GUI pass.
    ZeroMemory(&cb, sizeof(Const));
    cb.WVP = XMMatrixTranspose(XMMatrixOrthographicOffCenterLH(0.0f, g_width, g_height, .0f, .0f, 1.0f));
    g_pImmediateContext->UpdateSubresource(g_WVP, 0, NULL, &cb, 0, 0);
    unsigned int stride = sizeof(Vertex);
    unsigned int offset = 0;
    g_pImmediateContext->IASetVertexBuffers(0, 1, &g_GUIvb, &stride, &offset);
    g_pImmediateContext->IASetIndexBuffer(g_GUIib, DXGI_FORMAT_R32_UINT, 0);
    g_pImmediateContext->VSSetConstantBuffers(0, 1, &g_WVP);
    g_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
    g_pImmediateContext->DrawIndexed(cindices, 0, 0);

    // Toolbar shapes are drawn the same way as the free-form shapes above.
    if (GUIshow)
        for (Shape *s : toolbar)
        {
            lines.clear();
            s->Render();
            ID3D11Buffer *g_vbLine = Buffer::CreateVertexBuffer(sizeof(Vertex) * lines.size(), false, lines.data());
            unsigned int stride = sizeof(Vertex);
            unsigned int offset = 0;
            g_pImmediateContext->IASetVertexBuffers(0, 1, &g_vbLine, &stride, &offset);
            g_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_LINELIST);
            g_pImmediateContext->Draw(lines.size(), 0);
            _RELEASE(g_vbLine);
        }

    // Present the back buffer to the screen
    g_pSwapChain->Present(0, 0);
}
/**
 * Fills the entire device array with the given byte value.
 */
void memset(vector<T>& array, int const& value)
{
    size_t const nbytes = array.size() * sizeof(T);
    CUDA_CALL(cudaMemset(array.data(), value, nbytes));
}
// Convenience overload: forwards the byte vector to the raw-buffer variant.
Connect::Result Connect::exec(initializer_list<pair<string,string>> headers, const vector<unsigned char> &data)
{
    const unsigned char *bytes = data.data();
    return exec(headers, bytes, data.size());
}
/**
 * Fills the first `size` elements of the device array with the given byte
 * value. The array must have capacity for at least `size` elements.
 */
void memset(vector<T>& array, int const& value, size_t size)
{
    assert(size <= array.capacity());
    size_t const nbytes = size * sizeof(T);
    CUDA_CALL(cudaMemset(array.data(), value, nbytes));
}
// Streaming-mode flood test: for every byte value c, builds a set of "cccc"
// literals with 1/2/4/8-byte masks (caseless and case-sensitive variants),
// floods a 1 KB buffer with c, scans it in stream chunks of 1..16 bytes and
// verifies the per-literal match counts.
TEST_P(FDRFloodp, StreamingMask) {
    const u32 hint = GetParam();
    SCOPED_TRACE(hint);
    const size_t fake_history_size = 16;
    const vector<u8> fake_history(fake_history_size, 0);
    const size_t dataSize = 1024;
    vector<u8> data(dataSize);
    vector<u8> tempdata(dataSize + fake_history_size); // headroom
    u8 c = '\0';
    while (1) {
        u8 bit = 1 << (c & 0x7);   // flip one bit of c to build the "alternate" byte
        u8 cAlt = c ^ bit;
        SCOPED_TRACE((unsigned int)c);
        memset(&data[0], c, dataSize);
        vector<hwlmLiteral> lits;
        // literals of type "aaaa" of length 4, 12, and mask of length
        // 1, 2, 4, 8, 0f type "b.......", "b......b" both case-less and case-sensitive
        string s4(4, c);
        string s4Alt(4, cAlt);
        for (int i = 0; i < 4 ; i++) {
            u32 mskLen = 1 << i;
            vector<u8> msk(mskLen, '\0');
            vector<u8> cmp(mskLen, '\0');
            cmp[0] = cAlt;
            msk[0] = '\xff';
            // msk[f0000000] cmp[c0000000] lit[aaaa]
            if (mskLen > s4.length()) {
                lits.push_back(hwlmLiteral(s4, false, false, i * 12 + 0, HWLM_ALL_GROUPS, msk, cmp));
                lits.push_back(hwlmLiteral(s4, true, false, i * 12 + 1, HWLM_ALL_GROUPS, msk, cmp));
            }
            // msk[f0000000] cmp[e0000000] lit[EEEE]
            if (bit == CASE_BIT && isalpha(c)) {
                lits.push_back(hwlmLiteral(s4, true, false, i * 12 + 2, HWLM_ALL_GROUPS, msk, cmp));
            }
            // msk[E0000000] cmp[E0000000] lit[eeee]
            if ((cAlt & bit) == 0) {
                msk[0] = ~bit;
                lits.push_back(hwlmLiteral(s4, false, false, i * 12 + 3, HWLM_ALL_GROUPS, msk, cmp));
                lits.push_back(hwlmLiteral(s4, true, false, i * 12 + 4, HWLM_ALL_GROUPS, msk, cmp));
            }
            // msk[f0000000] cmp[a0000000] lit[aaaa]
            cmp[0] = c;
            msk[0] = '\xff';
            lits.push_back(hwlmLiteral(s4, false, false, i * 12 + 5, HWLM_ALL_GROUPS, msk, cmp));
            lits.push_back(hwlmLiteral(s4, true, false, i * 12 + 6, HWLM_ALL_GROUPS, msk, cmp));
            // msk[f0000000] cmp[a0000000] lit[cccc]
            if (mskLen > s4Alt.length()) {
                lits.push_back(hwlmLiteral(s4Alt, false, false, i * 12 + 7, HWLM_ALL_GROUPS, msk, cmp));
                lits.push_back(hwlmLiteral(s4Alt, true, false, i * 12 + 8, HWLM_ALL_GROUPS, msk, cmp));
            }
            if (bit == CASE_BIT && isalpha(c)) {
                // msk[f0000000] cmp[e0000000] lit[EEEE]
                lits.push_back(hwlmLiteral(s4Alt, true, false, i * 12 + 9, HWLM_ALL_GROUPS, msk, cmp));
                // msk[f0000000] cmp[e000000E] lit[eeee]
                cmp[mskLen - 1] = cAlt;
                msk[mskLen - 1] = '\xff';
                lits.push_back(hwlmLiteral(s4, true, false, i * 12 + 10, HWLM_ALL_GROUPS, msk, cmp));
                // msk[f0000000] cmp[E000000E] lit[eeee]
                cmp[0] = cAlt;
                lits.push_back(hwlmLiteral(s4, true, false, i * 12 + 11, HWLM_ALL_GROUPS, msk, cmp));
            }
        }
        auto fdr = fdrBuildTableHinted(lits, false, hint, get_current_target(), Grey());
        CHECK_WITH_TEDDY_OK_TO_FAIL(fdr, hint);
        map <u32, int> matchesCounts;
        hwlm_error_t fdrStatus;
        const u32 cnt4 = dataSize - 4 + 1;  // count of length-4 match positions
        for (u32 streamChunk = 1; streamChunk <= 16; streamChunk *= 2) {
            matchesCounts.clear();
            const u8 *d = data.data();
            // reference past the end of fake history to allow headroom
            const u8 *fhist = fake_history.data() + fake_history_size;
            // First chunk has no history; subsequent calls replay history.
            fdrStatus = fdrExecStreaming(fdr.get(), fhist, 0, d, streamChunk, 0, countCallback, &matchesCounts, HWLM_ALL_GROUPS, nullptr);
            ASSERT_EQ(0, fdrStatus);
            for (u32 j = streamChunk; j < dataSize; j += streamChunk) {
                if (j < 16) {
                    /* allow 16 bytes headroom on read to avoid invalid
                     * memory read during the FDR zone creation.*/
                    memset(tempdata.data(), c, dataSize + fake_history_size);
                    const u8 *tmp_d = tempdata.data() + fake_history_size;
                    fdrStatus = fdrExecStreaming(fdr.get(), tmp_d, j, tmp_d + j, streamChunk, 0, countCallback, &matchesCounts, HWLM_ALL_GROUPS, nullptr);
                } else {
                    fdrStatus = fdrExecStreaming(fdr.get(), d + j - 8, 8, d + j, streamChunk, 0, countCallback, &matchesCounts, HWLM_ALL_GROUPS, nullptr);
                }
                ASSERT_EQ(0, fdrStatus);
            }
            // Validate per-literal match counts for each mask length.
            for (u8 i = 0; i < 4; i++) {
                u32 mskLen = 1 << i;
                u32 cntMask = MIN(cnt4, dataSize - mskLen + 1);
                ASSERT_EQ(0, matchesCounts[i * 12 + 0]);
                ASSERT_EQ(0, matchesCounts[i * 12 + 1]);
                ASSERT_EQ(0, matchesCounts[i * 12 + 2]);
                if ((cAlt & bit) == 0) {
                    ASSERT_EQ(cntMask, matchesCounts[i * 12 + 3]);
                    ASSERT_EQ(cntMask, matchesCounts[i * 12 + 4]);
                }
                if (mskLen > 4) {
                    ASSERT_EQ(cntMask, matchesCounts[i * 12 + 5]);
                    ASSERT_EQ(cntMask, matchesCounts[i * 12 + 6]);
                    ASSERT_EQ(0, matchesCounts[i * 12 + 7]);
                    if (bit == CASE_BIT && isalpha(c)) {
                        ASSERT_EQ(cntMask, matchesCounts[i * 12 + 8]);
                    } else {
                        ASSERT_EQ(0, matchesCounts[i * 12 + 8]);
                    }
                } else {
                    ASSERT_EQ(cnt4, matchesCounts[i * 12 + 5]);
                    ASSERT_EQ(cnt4, matchesCounts[i * 12 + 6]);
                }
                if (bit == CASE_BIT && isalpha(c)) {
                    ASSERT_EQ(cntMask, matchesCounts[i * 12 + 9]);
                    ASSERT_EQ(0, matchesCounts[i * 12 + 10]);
                    ASSERT_EQ(0, matchesCounts[i * 12 + 11]);
                }
            }
        }
        // Iterate over all 256 byte values; u8 wraps back to 0 when done.
        if (++c == '\0') {
            break;
        }
    }
}
/**
 * Synchronously copies one device vector into another of equal size.
 */
void copy(vector<T> const& src, vector<T>& dst)
{
    assert(src.size() == dst.size());
    size_t const nbytes = src.size() * sizeof(T);
    CUDA_CALL(cudaMemcpy(dst.data(), src.data(), nbytes, cudaMemcpyDeviceToDevice));
}
/**
 * Keeps only the character contours that lie between the top and bottom edges
 * of `outerPolygon` (indices 0/1 form the top edge, 3/2 the bottom edge).
 *
 * A contour passes when (a) at least MIN_AREA_PERCENT_WITHIN_LINES of its
 * area falls inside the polygon and (b) its highest and lowest points are
 * within MAX_DISTANCE_PERCENT_FROM_LINES * lineHeight of the top/bottom
 * lines. Returns one include/exclude flag per input contour; only contours
 * already flagged in `goodIndices` are considered.
 */
vector<bool> CharacterAnalysis::filterBetweenLines(Mat img, vector<vector<Point> > contours, vector<Vec4i> hierarchy, vector<Point> outerPolygon, vector<bool> goodIndices)
{
    static float MIN_AREA_PERCENT_WITHIN_LINES = 0.88;
    static float MAX_DISTANCE_PERCENT_FROM_LINES = 0.15;

    vector<bool> includedIndices(contours.size());
    for (int j = 0; j < contours.size(); j++)
        includedIndices[j] = false;

    if (outerPolygon.size() == 0)
        return includedIndices;

    vector<Point> validPoints;  // NOTE(review): declared but never used

    // Figure out the line height
    LineSegment topLine(outerPolygon[0].x, outerPolygon[0].y, outerPolygon[1].x, outerPolygon[1].y);
    LineSegment bottomLine(outerPolygon[3].x, outerPolygon[3].y, outerPolygon[2].x, outerPolygon[2].y);

    // Measure the top-to-bottom distance at the horizontal middle of the image.
    float x = ((float) img.cols) / 2;
    Point midpoint = Point(x, bottomLine.getPointAt(x));
    Point acrossFromMidpoint = topLine.closestPointOnSegmentTo(midpoint);
    float lineHeight = distanceBetweenPoints(midpoint, acrossFromMidpoint);

    // Create a white mask for the area inside the polygon
    Mat outerMask = Mat::zeros(img.size(), CV_8U);
    Mat innerArea(img.size(), CV_8U);
    fillConvexPoly(outerMask, outerPolygon.data(), outerPolygon.size(), Scalar(255,255,255));

    // For each contour, determine if enough of it is between the lines to qualify
    for (int i = 0; i < contours.size(); i++)
    {
        if (goodIndices[i] == false)
            continue;

        // Rasterize this contour, clip it against the polygon mask,
        // then measure how much of its area survives.
        innerArea.setTo(Scalar(0,0,0));

        drawContours(innerArea, contours,
                     i, // draw this contour
                     cv::Scalar(255,255,255), // in
                     CV_FILLED,
                     8,
                     hierarchy,
                     0
                    );

        bitwise_and(innerArea, outerMask, innerArea);

        vector<vector<Point> > tempContours;
        findContours(innerArea, tempContours,
                     CV_RETR_EXTERNAL, // retrieve the external contours
                     CV_CHAIN_APPROX_SIMPLE ); // all pixels of each contours );

        double totalArea = contourArea(contours[i]);
        double areaBetweenLines = 0;

        for (int tempContourIdx = 0; tempContourIdx < tempContours.size(); tempContourIdx++)
        {
            areaBetweenLines += contourArea(tempContours[tempContourIdx]);
        }

        if (areaBetweenLines / totalArea < MIN_AREA_PERCENT_WITHIN_LINES)
        {
            // Not enough area is inside the lines.
            continue;
        }

        // now check to make sure that the top and bottom of the contour are near enough to the lines
        // First get the high and low point for the contour
        // Remember that origin is top-left, so the top Y values are actually closer to 0.
        int highPointIndex = 0;
        int highPointValue = 999999999;
        int lowPointIndex = 0;
        int lowPointValue = 0;
        for (int cidx = 0; cidx < contours[i].size(); cidx++)
        {
            if (contours[i][cidx].y < highPointValue)
            {
                highPointIndex = cidx;
                highPointValue = contours[i][cidx].y;
            }
            if (contours[i][cidx].y > lowPointValue)
            {
                lowPointIndex = cidx;
                lowPointValue = contours[i][cidx].y;
            }
        }

        // Get the absolute distance from the top and bottom lines
        Point closestTopPoint = topLine.closestPointOnSegmentTo(contours[i][highPointIndex]);
        Point closestBottomPoint = bottomLine.closestPointOnSegmentTo(contours[i][lowPointIndex]);

        float absTopDistance = distanceBetweenPoints(closestTopPoint, contours[i][highPointIndex]);
        float absBottomDistance = distanceBetweenPoints(closestBottomPoint, contours[i][lowPointIndex]);

        float maxDistance = lineHeight * MAX_DISTANCE_PERCENT_FROM_LINES;

        if (absTopDistance < maxDistance && absBottomDistance < maxDistance)
        {
            includedIndices[i] = true;
        }
    }

    return includedIndices;
}
/**
 * Synchronously copies the first `size` elements between two device vectors.
 * Both containers must have capacity for at least `size` elements.
 */
void copy(vector<T> const& src, vector<T>& dst, size_t size)
{
    assert(size <= dst.capacity());
    assert(size <= src.capacity());
    size_t const nbytes = size * sizeof(T);
    CUDA_CALL(cudaMemcpy(dst.data(), src.data(), nbytes, cudaMemcpyDeviceToDevice));
}
/**
 * Writes the texture pixel data as an 8-bit PNG to `output`.
 *
 * The PNG color type and channel count are derived from externalFormat()
 * (GL_RED/GL_RG/GL_RGB/GL_RGBA -> gray / gray+alpha / RGB / RGBA).
 * `filename` is only used in error messages; all bytes are emitted through
 * the writeToStream/flushStream callbacks bound to `output`. Throws via
 * log(...) on any libpng failure.
 */
void Texture<GLubyte>::savePNG(string const &filename, ostream &output, vector<GLubyte> const &data) const
{
    png_structp pngPtr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
    if(!pngPtr)
        throw log(__FILE__, __LINE__, LogType::error, "Failed to save " + filename);

    png_infop infoPtr = png_create_info_struct(pngPtr);
    if(!infoPtr)
        throw log(__FILE__, __LINE__, LogType::error, "Failed to save " + filename);

    // libpng reports errors via longjmp; each setjmp re-arms the error
    // handler before the next group of png_* calls.
    if(setjmp(png_jmpbuf(pngPtr)))
        throw log(__FILE__, __LINE__, LogType::error, "Failed to save " + filename);

    png_set_write_fn(pngPtr, reinterpret_cast<png_voidp>(&output), writeToStream, flushStream);

    if(setjmp(png_jmpbuf(pngPtr)))
        throw log(__FILE__, __LINE__, LogType::error, "Failed to save " + filename);

    // Map the GL external format onto a PNG color type and channel count.
    png_uint_32 color = PNG_COLOR_TYPE_GRAY;
    uint channels = 1;
    switch(externalFormat())
    {
        case GL_RED:
            color = PNG_COLOR_TYPE_GRAY;
            channels = 1;
            break;
        case GL_RG:
            color = PNG_COLOR_TYPE_GRAY_ALPHA;
            channels = 2;
            break;
        case GL_RGB:
            color = PNG_COLOR_TYPE_RGB;
            channels = 3;
            break;
        case GL_RGBA:
            color = PNG_COLOR_TYPE_RGB_ALPHA;
            channels = 4;
            break;
        default:
            throw log(__FILE__, __LINE__, LogType::error, "Failed to save " + filename);
    }

    png_set_IHDR(pngPtr, infoPtr, width(), height(), 8, color, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
    png_write_info(pngPtr, infoPtr);

    // Build per-row pointers into the row-major pixel buffer.
    vector<png_bytep> rowPtrs(height());
    for(uint y = 0; y != height(); ++y)
    {
        // A const_cast is used because libpng won't take anything else
        png_uint_32 offset = y * (width() * channels);
        rowPtrs.at(y) = static_cast<png_bytep>(const_cast<GLubyte*>(data.data())) + offset;
    }

    if(setjmp(png_jmpbuf(pngPtr)))
        throw log(__FILE__, __LINE__, LogType::error, "Failed to save " + filename);

    png_write_image(pngPtr, rowPtrs.data());

    if(setjmp(png_jmpbuf(pngPtr)))
        throw log(__FILE__, __LINE__, LogType::error, "Failed to save " + filename);

    png_write_end(pngPtr, 0);
    png_destroy_write_struct(&pngPtr, &infoPtr);
}
// Vision processing thread (pthread entry point; argument unused).
// Loop: decode the most recent captured frame, detect LEDs, estimate the
// UAV pose via PnP, optionally Kalman-filter it, then publish the state to
// the autopilot / log file / console depending on build flags.
// Operates entirely on file-scope globals (frame, PnP, KF, Serial, ...).
void *processing(void*)
{
    sleep(3); // Ensure that the capture thread has time to initialize and fill buffer
    PnP.is_current = false;
    while(1) {
        /// TODO: Include some kind of protection to make sure we don't repeatedly process the same frame
        /// (if processing somehow goes faster than capturing -- not a problem on BBB right now)
        pthread_mutex_lock(&framelock_mutex);
        // Decode JPEG image stored in the most recently dequeued buffer
        v4l2_process_image(frame, user_buffer.ptr[user_buffer.buf_last]);
        pthread_mutex_unlock(&framelock_mutex);
#if (!ARM)
        // DEBUG: Wait until the desired amount of time has passed
        // (busy-wait throttle that simulates the camera frame rate on
        // non-embedded builds).
        do {
            clock_gettime(CLOCK_MONOTONIC, &simulated_fps_toc);
            simulated_fps_elapsed = (simulated_fps_toc.tv_sec - simulated_fps_tic.tv_sec);
            simulated_fps_elapsed+= (simulated_fps_toc.tv_nsec - simulated_fps_tic.tv_nsec) / 1000000000.0;
        } while (simulated_fps_elapsed < 1.0/(simulated_fps));
        clock_gettime(CLOCK_MONOTONIC, &simulated_fps_tic);
#endif //ARM
        // Detect LED features in the frame; preCorrelated indicates whether
        // the detector could reuse the previous pose's projected points.
        vector<Point2f> imagePoints;
        bool preCorrelated = Detector.findLEDs(frame,gray,binary,imagePoints,DetectorParams,PnP.is_current,PnP.projImagePoints);
        // Compute pose estimate
        int poseIters = PnP.localizeUAV(imagePoints, poseState, poseErr, 9, POSE_ERR_TOL, SECONDARY_POSE_ERR_TOL, preCorrelated);
        // Accept the pose only if the solver converged and it passes the
        // sanity check; otherwise mark the PnP state as stale.
        if ( poseIters > 0 && checkSanity(poseState) > 0 ) {
            reportState = poseState;
            PnP.is_current = true;
        } else {
            PnP.is_current = false;
        }
#ifdef SAVE_KF_DATA
        // DEBUG: write Kalman filter inputs to file (including time)
        // compute time
        clock_gettime(CLOCK_MONOTONIC, &DEBUG_toc);
        DEBUG_elapsed = DEBUG_toc.tv_sec - DEBUG_tic.tv_sec;
        DEBUG_elapsed+= (DEBUG_toc.tv_nsec - DEBUG_tic.tv_nsec) / 1000000000.0;
        DEBUGFILE << DEBUG_elapsed << ",";
        // save input state
        for (int i=0; i<reportState.size(); i++)
            DEBUGFILE << reportState[i] << ",";
#endif //SAVE_KF_DATA
#ifdef EMPLOY_KF
        // Employ Kalman filter
        KF.predict(reportState.data());
        KF.correct(); // KF.correct() returns TRUE if not an outlier, FALSE if an outlier
        KF.get_state(reportState.data()); // Return the ESTIMATED state
#endif //EMPLOY_KF
#ifdef LOG_VISION_DATA
        recordLogData(logfd,reportState,poseState,poseErr,PnP.is_current);
#endif //LOG_VISION_DATA
#ifdef SAVE_KF_DATA
        // DEBUG: save output from Kalman filter
        for (int i=0; i<reportState.size()-1; i++)
            DEBUGFILE << reportState[i] << ",";
        DEBUGFILE << reportState.back() << "\n"; // don't write comma, proceed to newline
#endif //SAVE_KF_DATA
        // send pose estimate to autopilot
#if ARM
        Serial.writeData(reportState);
#endif
        /// ////////// DEBUGGING SPECIFIC OPTIONS ////////// ///
        /// Print fps and pose estimate to console in real-time
#ifdef POSE_TO_CONSOLE
        double fps_cnt=fps.fps();
        printf("\e[J FPS: %6.2f # of detected features: %4d\n",fps_cnt,imagePoints.size());
        printf(" Pose Estimate:%7s %7s %7s %7s %7s %7s [in/deg]\n","x","y","z","roll","pitch","yaw");
        printf(" %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f", reportState[0], reportState[1], reportState[2], reportState[3], reportState[4], reportState[5]);
        if (!PnP.is_current)
            printf(" ZOH"); // zero-order hold: displayed pose is the stale one
        printf("\r\e[2A"); // move cursor
        fflush(stdout);
#endif /* POSE_TO_CONSOLE */
        /// Temporarily moved here to save plain frames (without debug info)
        //#ifdef SAVEOFF_FRAMES
        //    saveDebugFrame(frame, imageSavepath);
        //#endif /* SAVEOFF_FRAMES */
#ifdef DEBUG_VIDEO
        PnP.drawOverFrame(frame, reportState);
        imshow("DEBUG_VIDEO",frame);
        waitKey(1);
#endif /* DEBUG_VIDEO */
#if defined(SAVEOFF_FRAMES) && !defined(DEBUG_VIDEO)
        // don't repeat this step
        PnP.drawOverFrame(frame);
#endif
#ifdef SAVEOFF_FRAMES
        saveDebugFrame(frame, imageSavepath);
#endif /* SAVEOFF_FRAMES */
        /// ////////// DEBUGGING SPECIFIC OPTIONS ////////// ///
    }
    pthread_exit(NULL);
}
// URL-encode the contents of a byte vector by forwarding to the
// (pointer, length) overload of urlEncode.
string Request::urlEncode(const vector<char>& data)
{
    const char* bytes = data.data();
    const size_t count = data.size();
    return this->urlEncode(bytes, count);
}
// Upload the supplied line vertices into this object's debug-mesh VBO and
// record how many bytes were written.
// NOTE(review): the parameter is taken by value, copying all vertex data on
// every call; const& would avoid that but requires a matching change to the
// declaration elsewhere.
void debug_points::make_points(vector<vec3> lines)
{
    const size_t byteCount = lines.size() * sizeof(vec3);
    glBindBuffer(GL_ARRAY_BUFFER, debug_mesh.VBO);
    debug_mesh.vertices = byteCount;
    glBufferSubData(GL_ARRAY_BUFFER, 0, byteCount, lines.data());
}
// Convert a raw byte buffer into a std::string containing the same bytes.
// `bits` is the expected payload width in bits and must equal 8*data.size()
// (enforced by the assert).
// Fix: the original used append(const char*), which treats the buffer as a
// NUL-terminated C string — it truncated at the first zero byte and read out
// of bounds when the buffer contained none. Use the (pointer, count)
// overload so all bytes, including embedded NULs, are copied exactly.
static string b2a_str(const vector<uint8_t>& data, int bits)
{
    assert((int) data.size() == bits / 8);
    string result;
    result.append(reinterpret_cast<const char*>(data.data()), data.size());
    return result;
}
int main(int argc, char *argv[]) { SDL_Init(SDL_INIT_VIDEO); displayWindow = SDL_CreateWindow("Platformer", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 640, 360, SDL_WINDOW_OPENGL); SDL_GLContext context = SDL_GL_CreateContext(displayWindow); SDL_GL_MakeCurrent(displayWindow, context); #ifdef _WINDOWS glewInit(); #endif SDL_Event event; glViewport(0, 0, 640, 360); ShaderProgram program(RESOURCE_FOLDER"vertex_textured.glsl", RESOURCE_FOLDER"fragment_textured.glsl"); GLuint playerTexture = LoadTexture("player.png"); GLuint spriteSheetTexture = LoadTexture("sheet.png"); Matrix projectionMatrix; Matrix modelMatrix; Matrix viewMatrix; SheetSprite playerSprite(&program, playerTexture, 0, 0, 1, 1, 0.3); Entity player(playerSprite, 1.0, -11.5, .5, .4, 0, 0, 0, 0); projectionMatrix.setOrthoProjection(-2.55f, 2.55f, -1.5f, 1.5f, -1.0f, 1.0f); program.setProjectionMatrix(projectionMatrix); readFile(); glUseProgram(program.programID); glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); bool done = false; while (!done) { while (SDL_PollEvent(&event)) { if (event.type == SDL_QUIT || event.type == SDL_WINDOWEVENT_CLOSE) { done = true; } } glClear(GL_COLOR_BUFFER_BIT); //draw level modelMatrix.identity(); program.setModelMatrix(modelMatrix); glUseProgram(program.programID); glVertexAttribPointer(program.positionAttribute, 2, GL_FLOAT, false, 0, vertexData.data()); glEnableVertexAttribArray(program.positionAttribute); glVertexAttribPointer(program.texCoordAttribute, 2, GL_FLOAT, false, 0, texCoordData.data()); glEnableVertexAttribArray(program.texCoordAttribute); glBindTexture(GL_TEXTURE_2D, spriteSheetTexture); glDrawArrays(GL_TRIANGLES, 0, tileCount * 6); glDisableVertexAttribArray(program.positionAttribute); glDisableVertexAttribArray(program.texCoordAttribute); //update and draw player updatePlayer(&player); checkPlayer(&player); player.Render(&program); //camera following player viewMatrix.identity(); viewMatrix.Translate(-player.x, -player.y, 0); 
program.setViewMatrix(viewMatrix); SDL_GL_SwapWindow(displayWindow); } SDL_Quit(); return 0; }
// Expose the underlying contiguous value storage as an opaque read-only
// buffer pointer.
const void* GetDataBuffer() override
{
    return static_cast<const void*>(m_values.data());
}
double expoTreeSIR(int parVecLen, double* K, double* beta, double* mu, double* psi, double rho, vector<double>& times, vector<int>& ttypes, int extant, int est_norm, int vflag, int rescale, int nroot) { double fx = -INFINITY; int info = 1; if (! checkParams(parVecLen,K,beta,mu,psi,rho,nroot)) { if (vflag > 0) { fprintf(stderr,"Ilegal parameters. Returning inf.\n"); fprintf(stderr,"N = %g, beta = %g, mu = %g, psi = %g, rho = %g\n", K[0],beta[0],mu[0],psi[0],rho); } return -INFINITY; } /* count number of sampled lineages */ int num_sampled = 0; for (size_t i(0); i < times.size(); ++i) { switch (ttypes[i]) { case 0: ++num_sampled; break; default: break; } } int ki = 0; int nt = times.size(); double* ptimes = times.data(); int* pttypes = ttypes.data(); double* maxK = max_element(K,K+parVecLen); int maxN = ceil(*maxK); int dim = sir_index_N(0,maxN,maxN)+1; vector<double> p0(dim,0.0); double t0 = 0.0; double scale = 0.0; double sigma = psi[0]/(mu[0]+psi[0]); /* sampling probability */ double pR; /* set initial value of p */ if (extant == 0) { // p0[0] = 0.0; scale = extant*log(sigma); int m = 0; for (int R = 0; R <= maxN; ++R) { pR = (R >= num_sampled) ? pow(1.0-sigma,R-num_sampled) : 0.0; p0[m++] = pR; for (int I = 1; I <= maxN-R; ++I) { p0[m++] = 0.0; } } ki = 0; t0 = 0.0; ptimes = times.data(); pttypes = ttypes.data(); nt = times.size(); } else { cerr << "Not yet implemented for extant lineages !!" << endl; return -INFINITY; } #ifdef OPENCL info = 2; clrExpoTree(K,&ki,beta,mu,psi,&nt,&parVecLen, ptimes,pttypes,p0.data(), &t0,&info,&est_norm,&vflag,&rescale); #else sir_expotree(K,&ki,beta,mu,psi,&nt,&parVecLen, ptimes,pttypes,p0.data(), &t0,&info,&est_norm,&vflag,&rescale); #endif if (info > 0) { int m = sir_index_N(1,0,maxN); fx = p0[m] + scale; if (vflag > 0) fprintf(stderr,"ln(p(1,t)) = %20.12e\n",fx); } else { if (vflag > 0) fprintf(stderr,"rExpoTree returned %d!\n",info); return -INFINITY; } return fx; }