void uavg()
{
    lg("User avg centering\n");
    ZERO(useravg);
    int u;
    for (u = 0; u < NUSERS; u++) {
        int base = useridx[u][0];
        int d0 = UNTRAIN(u);
        int i;
        for (i = 0; i < d0; i++)
            useravg[u] += err[base + i];
        useravg[u] /= d0 + USERAVG_ALPHA;
    }
    for (u = 0; u < NUSERS; u++) {
        int base = useridx[u][0];
        int d012 = UNALL(u);
        int i;
        for (i = 0; i < d012; i++) {
            err[base++] -= useravg[u];
        }
    }
}
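Written out, the centering value computed in the first loop is a damped mean of the residuals rather than a plain average (here n_u is the user's training count d0, e_{u,i} the entries of err over that user's training ratings, and α = USERAVG_ALPHA), so users with few ratings are shrunk toward zero:

\[
\bar e_u \;=\; \frac{\sum_{i=1}^{n_u} e_{u,i}}{\,n_u + \alpha\,}
\]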
void GlobalLock::drop() {
  assert(locked_ == 1);

  {
    thread::Mutex::LockGuard lg(mutex_);
    locked_ = 0;
    condition_.signal();
  }

  // Handshake with the yieldee, so that we don't starve the system.
  thread::Mutex::LockGuard hlg(handshake_mutex_);

  // request_drop_ is reset to 0 both here and inside the handshake of take().
  // This is expected, because the point of the handshake is to force the taker
  // to run. So if the above .signal() causes control to return to a thread
  // in take() before we get to this point, then it's fine for it to say
  // "hey, don't worry about the handshake protocol, I'm back here already" by
  // setting request_drop_ to 0.
  //
  // It should be noted that this is the minority case, though. The majority case
  // is what the handshake protocol is designed to fix: when a thread decides to
  // yield the GIL (yielding meaning it's CPU bound, but voluntarily allowing
  // another thread to run), then without the handshake, unlocking and relocking
  // right away can starve the other threads in the system. This starves the
  // system because, from the OS's perspective, it's totally fine not to bother
  // scheduling another thread right away when a mutex is unlocked. But it means
  // that when the same thread that unlocked goes to lock it again, no other
  // thread was able to jump in, grab the lock, and do some work.
  //
  // So the handshake forces the OS to actually yield control to another thread by
  // having the thread that is dropping the GIL wait for a thread that is taking
  // the GIL to acknowledge that it has taken it.
  if(request_drop_ == 1) {
    request_drop_ = 0;
    handshake_condition_.wait(handshake_mutex_);
  }
}
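A minimal sketch of how drop() pairs with the take() defined further below; the worker function and do_some_work() here are hypothetical and only illustrate the yield pattern the comment above describes, they are not part of the original code:

/* Hypothetical illustration only: a CPU-bound thread that holds the global lock
 * periodically drops and immediately re-takes it. Because drop() waits (when
 * request_drop_ is set) for a taker to acknowledge inside take(), another
 * thread is guaranteed a chance to run in between. */
void cpu_bound_worker(GlobalLock& gil) {
  gil.take();
  for(int i = 0; i < 1000000; ++i) {
    do_some_work(i);        // assumed CPU-bound work function
    if(i % 10000 == 0) {
      gil.drop();           // let a waiting thread take the lock...
      gil.take();           // ...then re-acquire it and continue
    }
  }
  gil.drop();
}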
bool cps_api_db_get_node_group(const std::string &group, std::vector<std::string> &lst) {
    if (group.find(':') != std::string::npos) {
        // case where a node is given as a group (i.e. a single addr:port node acting as a group)
        lst.push_back(group);
        return true;
    }

    std::lock_guard<std::recursive_mutex> lg(_mutex);
    (void)load_groups();

    cps_api_node_data_type_t type;
    if (!_nodes->get_group_type(group, type)) {
        const char * _alias = _nodes->addr(group);
        if (_alias != nullptr) {
            lst.push_back(_alias);
            return true;
        }
        EV_LOGGING(DSAPI, ERR, "GET-NODE-GROUP", "Failed to get group type for %s", group.c_str());
        return false;
    }

    if ((type == cps_api_node_data_1_PLUS_1_REDUNDENCY) && (_nodes->is_master_set(group))) {
        auto it = _nodes->_master.find(group);
        if (it == _nodes->_master.end()) {
            EV_LOGGING(DSAPI, ERR, "GET-NODE-GROUP", "Master not set for %s", group.c_str());
            return false;
        }
        lst.push_back(it->second);
        return true;
    }

    if (!_nodes->group_addresses(group, lst)) {
        const char * __addr = _nodes->addr(group.c_str());
        if (__addr == nullptr) return false;
        lst.push_back(__addr);
    }
    return true;
}
void movieavguser()
{
    lg("Movie Avg User\n");
    int u;

    // Remove average but only use training data
    double avg[NMOVIES];
    int moviecount[NMOVIES];
    ZERO(avg);
    ZERO(moviecount);
    for (u = 0; u < NUSERS; u++) {
        int base = useridx[u][0];
        int d0 = UNTRAIN(u);
        int j;
        for (j = 0; j < d0; j++) {
            int m = userent[base + j] & USER_MOVIEMASK;
            int r = (userent[base + j] >> USER_LMOVIEMASK) & 7;
            avg[m] += r;
            moviecount[m]++;
        }
    }
    int m;
    for (m = 0; m < NMOVIES; m++) avg[m] /= moviecount[m];

    for (u = 0; u < NUSERS; u++) {
        int base = useridx[u][0];
        int d0 = UNTRAIN(u);
        int j;
        double sum = 0.;
        for (j = 0; j < d0; j++) {
            int m = userent[base + j] & USER_MOVIEMASK;
            int r = (userent[base + j] >> USER_LMOVIEMASK) & 7;
            sum += r - avg[m];
        }
        useravg[u] = sum / d0;
    }
    movieXuser(useravg, MOVIEAVGUSER_ALPHA);
}
static cps_api_return_code_t _cps_api_event_service_publish_msg(cps_api_event_service_handle_t handle,
        cps_api_object_t msg) {

    STD_ASSERT(msg != NULL);
    STD_ASSERT(handle != NULL);

    cps_api_key_t *_okey = cps_api_object_key(msg);

    if (cps_api_key_get_len(_okey) < CPS_OBJ_KEY_SUBCAT_POS) {
        // likely invalid message
        return cps_api_ret_code_ERR;
    }

    std_event_key_t key;
    cps_api_to_std_key(&key, _okey);

    cps_api_to_std_event_map_t *p = handle_to_data(handle);

    int retry = (int)p->retry;
    for (; retry > 0; --retry) {
        std_mutex_simple_lock_guard lg(&p->lock);
        if (!__connect_to_service(handle)) {
            std_usleep(MILLI_TO_MICRO(1));
            continue;
        }
        t_std_error rc = std_client_publish_msg_data(handle_to_std_handle(handle), &key,
                cps_api_object_array(msg), cps_api_object_to_array_len(msg));
        if (rc != STD_ERR_OK) {
            __close_channel(handle);
        } else {
            return cps_api_ret_code_OK;
        }
    }
    return cps_api_ret_code_ERR;
}
void GlobalLock::take() {
  thread::Mutex::LockGuard lg(mutex_);

  while(locked_) {
    uint32_t saved_serial = serial_;

    struct timespec ts;
    struct timeval tv;
    gettimeofday(&tv, NULL);
    timeradd(&tv, &timeout_, &tv);
    TIMEVAL_TO_TIMESPEC(&tv, &ts);

    // Wait for the requested amount of time...
    if(condition_.wait_until(mutex_, &ts) == thread::cTimedOut) {
      // If we don't have control, ask for it.
      //
      // We compare saved_serial to serial_ to see if anyone else has woken
      // up for a requested timeslice. If they have, we don't want to interrupt
      // them; they might have started their timeslice 2 microseconds ago. So
      // instead we loop back and wait for the full timeout again before checking
      // (and hopefully asking) for the GIL.
      if(locked_ && saved_serial == serial_) {
        request_drop_ = 1;
      }
    }
  }

  // Ok, locked_ isn't set, we can grab it now.
  // Handshake control back to the yielder.
  {
    thread::Mutex::LockGuard hlg(handshake_mutex_);
    locked_ = 1;
    serial_++;
    request_drop_ = 0;
    handshake_condition_.signal();
  }
}
/* see catmany() */
static GEN
catmanyMAT(GEN y1, GEN y2)
{
  long i, h = 0, L = 1;
  GEN z, y;
  for (y = y2; y >= y1; y--)
  {
    GEN c = gel(y,0);
    long nc = lg(c)-1;
    if (nc == 0) continue;
    if (h != lgcols(c))
    {
      if (h) err_cat(gel(y2,0), c);
      h = lgcols(c);
    }
    L += nc;
    z = new_chunk(nc) - 1;
    for (i=1; i<=nc; i++) gel(z,i) = gel(c,i);
  }
  z = new_chunk(1);
  *z = evaltyp(t_MAT) | evallg(L);
  return z;
}
bool Park::park_timed(STATE, CallFrame* call_frame, struct timespec* ts) {
  thread::Mutex::LockGuard lg(mutex_);

  wake_ = false;
  sleeping_ = true;
  state->vm()->thread->sleep(state, cTrue);

  bool timeout = false;

  while(!wake_) {
    GCIndependent gc_guard(state, call_frame);

    if(cond_.wait_until(mutex_, ts) == thread::cTimedOut) {
      timeout = true;
      break;
    }
  }

  sleeping_ = false;
  state->vm()->thread->sleep(state, cFalse);

  return timeout;
}
void LOpenGLComponent::renderOpenGL() {
    if(hasCallback("renderOpenGL")) {
        const float desktopScale = (float) openGLContext.getRenderingScale();
        ScopedPointer<LowLevelGraphicsContext> glRenderer(
            createOpenGLGraphicsContext(openGLContext,
                                        roundToInt(desktopScale * Component::getWidth()),
                                        roundToInt(desktopScale * Component::getHeight())));

        if(glRenderer != nullptr) {
            Graphics g(*glRenderer);
            g.addTransform(AffineTransform::scale(desktopScale));

            LGraphics lg(LUA::Get(), g);
            callback("renderOpenGL", 1, { new LRefBase("Graphics", &lg) });

            if(! LUA::isEmpty())
                renderGLSL(g);
            else
                lua_pop(LUA::Get(), 1);
        } else {
            callback("renderOpenGL");
        }
    }
}
GEN
vecsum(GEN v)
{
  pari_sp av = avma;
  long i, l;
  GEN p;
  if (!is_vec_t(typ(v))) pari_err_TYPE("vecsum", v);
  l = lg(v);
  if (l == 1) return gen_0;
  p = gel(v,1);
  if (l == 2) return gcopy(p);
  for (i=2; i<l; i++)
  {
    p = gadd(p, gel(v,i));
    if (gc_needed(av, 2))
    {
      if (DEBUGMEM>1) pari_warn(warnmem,"sum");
      p = gerepileupto(av, p);
    }
  }
  return gerepileupto(av, p);
}
void alarm_center_video_client::heart_beet_worker() {
    AUTO_LOG_FUNCTION;
    auto last_time_get_is_show_video_user_mgr_dlg = std::chrono::steady_clock::now();
    while (running_) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1000));
        if (!running_) break;

        // poll get_is_show_video_user_mgr_dlg every few seconds
        // (fires once at least 3 s have passed since the last call)
        {
            auto now = std::chrono::steady_clock::now();
            auto diff = now - last_time_get_is_show_video_user_mgr_dlg;
            if (std::chrono::duration_cast<std::chrono::seconds>(diff).count() >= 3) {
                {
                    std::lock_guard<std::mutex> lg(mutex_for_heart_beet_);
                    client_->get_is_show_video_user_mgr_dlg();
                }
                last_time_get_is_show_video_user_mgr_dlg = std::chrono::steady_clock::now();
            }
        }
    }
}
void Redox::freeQueuedCommands(struct ev_loop *loop, ev_async *async, int revents) {

  Redox *rdx = (Redox *)ev_userdata(loop);

  lock_guard<mutex> lg(rdx->free_queue_guard_);

  while (!rdx->commands_to_free_.empty()) {
    long id = rdx->commands_to_free_.front();
    rdx->commands_to_free_.pop();

    if (rdx->freeQueuedCommand<redisReply *>(id)) {
    } else if (rdx->freeQueuedCommand<string>(id)) {
    } else if (rdx->freeQueuedCommand<char *>(id)) {
    } else if (rdx->freeQueuedCommand<int>(id)) {
    } else if (rdx->freeQueuedCommand<long long int>(id)) {
    } else if (rdx->freeQueuedCommand<nullptr_t>(id)) {
    } else if (rdx->freeQueuedCommand<vector<string>>(id)) {
    } else if (rdx->freeQueuedCommand<std::set<string>>(id)) {
    } else if (rdx->freeQueuedCommand<unordered_set<string>>(id)) {
    } else {
    }
  }
}
Object* CompiledCode::default_executor(STATE, CallFrame* call_frame,
                                       Executable* exec, Module* mod,
                                       Arguments& args)
{
  LockableScopedLock lg(state, &state->shared(), __FILE__, __LINE__);

  CompiledCode* code = as<CompiledCode>(exec);
  if(code->execute == default_executor) {
    const char* reason = 0;
    int ip = -1;

    OnStack<4> os(state, code, exec, mod, args.argument_container_location());
    GCTokenImpl gct;

    if(!code->internalize(state, gct, &reason, &ip)) {
      Exception::bytecode_error(state, call_frame, code, ip, reason);
      return 0;
    }
  }

  lg.unlock();

  return code->execute(state, call_frame, exec, mod, args);
}
boost::optional<ScopedCollectionMetadata> MetadataManager::getActiveMetadata(
    std::shared_ptr<MetadataManager> self, const boost::optional<LogicalTime>& atClusterTime) {
    stdx::lock_guard<stdx::mutex> lg(_managerLock);
    if (_metadata.empty()) {
        return boost::none;
    }

    auto activeMetadataTracker = _metadata.back();
    const auto& activeMetadata = activeMetadataTracker->metadata;

    // We don't keep routing history for unsharded collections, so if the collection is unsharded
    // just return the active metadata
    if (!atClusterTime || !activeMetadata.isSharded()) {
        return ScopedCollectionMetadata(std::make_shared<RangePreserver>(
            lg, std::move(self), std::move(activeMetadataTracker)));
    }

    auto chunkManager = activeMetadata.getChunkManager();
    auto chunkManagerAtClusterTime = std::make_shared<ChunkManager>(
        chunkManager->getRoutingHistory(), atClusterTime->asTimestamp());

    class MetadataAtTimestamp : public ScopedCollectionMetadata::Impl {
    public:
        MetadataAtTimestamp(CollectionMetadata metadata) : _metadata(std::move(metadata)) {}

        const CollectionMetadata& get() override {
            return _metadata;
        }

    private:
        CollectionMetadata _metadata;
    };

    return ScopedCollectionMetadata(std::make_shared<MetadataAtTimestamp>(
        CollectionMetadata(chunkManagerAtClusterTime, activeMetadata.shardId())));
}
int main(int argc, char *argv[]) {
    std::array<pthread_t, NUM_THREADS> threads;
    std::array<ThreadData, NUM_THREADS> threadData;

    const int P = lg(VEC_LENGTH);

    std::array<std::vector<std::complex<double> >, NUM_THREADS> primals;
    std::array<std::vector<std::complex<double> >, NUM_THREADS> duals;
    std::array<std::vector<std::complex<double> >, NUM_THREADS> dualPrimes;

    for (int i = 0; i < NUM_THREADS; i++) {
        primals[i].resize(VEC_LENGTH);
        dualPrimes[i].resize(VEC_LENGTH);
        duals[i].resize(VEC_LENGTH);
        for (int j = 0; j < VEC_LENGTH; j++) {
            primals[i][j] = j;
        }
    }

    for (int i = 0; i < NUM_THREADS; i++) {
        threadData[i].pPrimal = &primals[i];
        threadData[i].pDual = &duals[i];
        threadData[i].P = P;
        int rc = pthread_create(&threads[i], nullptr, fftHelper,
                                static_cast<void *>(&threadData[i]));
        if (rc)
            std::cerr << "pthread_create failed with error code: " << rc << std::endl;
    }

    for (auto it = threads.begin(); it != threads.end(); ++it) {
        int rc = pthread_join(*it, nullptr);
        if (rc)
            std::cerr << "pthread_join failed with error code: " << rc << std::endl;
    }

    std::cout << duals[0][VEC_LENGTH - 1] << " " << duals[0][VEC_LENGTH - 1] << std::endl;

    return EXIT_SUCCESS;
}
PacketStreamReader::FrameInfo PacketStreamReader::Seek(PacketStreamSourceId src, size_t framenum, SyncTime *sync)
{
    lock_guard<decltype(_mutex)> lg(_mutex);

    if (!_stream.seekable())
        throw std::runtime_error("Stream is not seekable (probably a pipe).");

    if (src > _sources.size())
        throw std::runtime_error("Invalid Frame Source ID.");

    if (_stream.data_len())
        // we were in the middle of reading data, and are holding an extra lock.
        // We need to release it, while still holding the scoped lock.
        Skip(_stream.data_len());

    while (!_index.has(src, framenum))
    {
        pango_print_warn("seek index miss... reading ahead.\n");
        if (_stream.data_len())
            _stream.skip(_stream.data_len());

        auto fi = NextFrame(src, nullptr);
        if (!fi) // if we hit the end, throw
            throw std::out_of_range("frame number not in sequence");
    }

    auto target_header_start = _index.position(src, framenum);
    _stream.seekg(target_header_start);

    _next_packet_framenum[src] = framenum;
    // this increments when we parse the header on the next line;
    // THIS WILL BREAK _next_packet_framenum FOR ALL OTHER SOURCES. Todo: more refactoring to fix.
    auto r = _stream.peekFrameHeader(*this);
    // we need to do this now, because we need r.time in order to sync up our playback.

    // if we have a sync timer, we need to reset it so we play synchronized frames
    // from the point we just sought to.
    if (nullptr != sync && _starttime)
        sync->ResyncToOffset(r.time - _starttime);

    return r;
}
/* return a bound for T_2(P), P | polbase
 * max |b_i|^2 <= 3^{3/2 + d} / (4 \pi d) [P]_2,
 * where [P]_2 is Bombieri's 2-norm
 * Sum over conjugates */
static GEN
nf_Beauzamy_bound(GEN nf, GEN polbase)
{
  GEN lt, C, run, s, G = gmael(nf,5,2), POL, bin;
  long i, prec, precnf, d = degpol(polbase), n = degpol(nf[1]);

  precnf = gprecision(G);
  prec = MEDDEFAULTPREC;
  bin = vecbinome(d);
  POL = polbase + 2;
  /* compute [POL]_2 */
  for (;;)
  {
    run = real_1(prec);
    s = real_0(prec);
    for (i=0; i<=d; i++)
    {
      GEN p1 = gnorml2(arch_for_T2(G, gmul(run, gel(POL,i)))); /* T2(POL[i]) */
      if (!signe(p1)) continue;
      if (lg(p1) == 3) break;
      /* s += T2(POL[i]) / binomial(d,i) */
      s = addrr(s, gdiv(p1, gel(bin,i+1)));
    }
    if (i > d) break;

    prec = (prec<<1)-2;
    if (prec > precnf)
    {
      nffp_t F; remake_GM(nf, &F, prec); G = F.G;
      if (DEBUGLEVEL>1) pari_warn(warnprec, "nf_factor_bound", prec);
    }
  }
  lt = leading_term(polbase);
  s = gmul(s, mulis(sqri(lt), n));
  C = powrshalf(stor(3,DEFAULTPREC), 3 + 2*d); /* 3^{3/2 + d} */
  return gdiv(gmul(C, s), gmulsg(d, mppi(DEFAULTPREC)));
}
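The bound stated in the header comment of nf_Beauzamy_bound, transcribed into display math for readability (d = deg P, b_i the coefficients of P, [P]_2 Bombieri's 2-norm summed over the conjugates); this is a reading of the comment, not an independent derivation:

\[
\max_i |b_i|^2 \;\le\; \frac{3^{3/2 + d}}{4\pi d}\,[P]_2
\]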
static cps_api_return_code_t _cps_api_wait_for_event(
        cps_api_event_service_handle_t handle,
        cps_api_object_t msg) {

    std_event_msg_t m;

    while (true) {
        std_event_client_handle h;
        cps_api_to_std_event_map_t *p = handle_to_data(handle);
        {
            std_mutex_simple_lock_guard lg(&p->lock);
            if (!__connect_to_service(handle)) {
                // retry every 50ms
                std_usleep(MILLI_TO_MICRO(50));
                continue;
            }
            h = handle_to_std_handle(handle);
        }

        if (std_client_wait_for_event_data(h, &m, cps_api_object_array(msg),
                cps_api_object_get_reserve_len(msg)) != STD_ERR_OK) {
            __close_channel(handle);
            continue;
        }

        if (!cps_api_object_received(msg, m.data_len)) {
            EV_LOG(ERR, DSAPI, 0, "CPS-EV-RX", "Invalid message received... returning to client");
            __close_channel(handle);
            continue;
        }
        break;
    }
    return cps_api_ret_code_OK;
}
Executable* MethodTable::remove(STATE, Symbol* name) {
  check_frozen(state);

  utilities::thread::SpinLock::LockGuard lg(lock_);

  native_int num_entries = entries_->to_native();
  native_int num_bins = bins_->to_native();

  if(min_density_p(num_entries, num_bins) &&
     (num_bins >> 1) >= METHODTABLE_MIN_SIZE) {
    redistribute(state, num_bins >>= 1);
  }

  native_int bin = find_bin(key_hash(name), num_bins);
  MethodTableBucket* entry = try_as<MethodTableBucket>(values_->at(state, bin));
  MethodTableBucket* last = NULL;

  while(entry) {
    if(entry->name() == name) {
      Executable* val = entry->method();
      if(last) {
        last->next(state, entry->next());
      } else {
        values_->put(state, bin, entry->next());
      }

      entries(state, Fixnum::from(entries_->to_native() - 1));
      return val;
    }

    last = entry;
    entry = try_as<MethodTableBucket>(entry->next());
  }

  return nil<Executable>();
}
GEN
F2xq_ellgens(GEN a2, GEN a6, GEN ch, GEN D, GEN m, GEN T)
{
  GEN P;
  pari_sp av = avma;
  struct _F2xqE e;
  e.a2=a2; e.a6=a6; e.T=T;
  switch(lg(D)-1)
  {
  case 0:
    return cgetg(1,t_VEC);
  case 1:
    P = gen_gener(gel(D,1), (void*)&e, &F2xqE_group);
    P = mkvec(F2xqE_changepoint(P, ch, T));
    break;
  default:
    P = gen_ellgens(gel(D,1), gel(D,2), m, (void*)&e, &F2xqE_group, _F2xqE_pairorder);
    gel(P,1) = F2xqE_changepoint(gel(P,1), ch, T);
    gel(P,2) = F2xqE_changepoint(gel(P,2), ch, T);
    break;
  }
  return gerepilecopy(av, P);
}
void SignalHandler::add_signal(STATE, int sig, HandlerType type) {
  SYNC(state);

  utilities::thread::Mutex::LockGuard lg(worker_lock_);

#ifndef RBX_WINDOWS
  struct sigaction action;

  if(type == eDefault) {
    action.sa_handler = SIG_DFL;
    watched_signals_.remove(sig);
  } else if(type == eIgnore) {
    action.sa_handler = SIG_IGN;
    watched_signals_.push_back(sig);
  } else {
    action.sa_handler = signal_tramp;
    watched_signals_.push_back(sig);
  }

  action.sa_flags = 0;
  sigfillset(&action.sa_mask);
  sigaction(sig, &action, NULL);
#endif
}
void Redox::processQueuedCommands(struct ev_loop *loop, ev_async *async, int revents) {

  Redox *rdx = (Redox *)ev_userdata(loop);

  lock_guard<mutex> lg(rdx->queue_guard_);

  while (!rdx->command_queue_.empty()) {
    long id = rdx->command_queue_.front();
    rdx->command_queue_.pop();

    if (rdx->processQueuedCommand<redisReply *>(id)) {
    } else if (rdx->processQueuedCommand<string>(id)) {
    } else if (rdx->processQueuedCommand<char *>(id)) {
    } else if (rdx->processQueuedCommand<int>(id)) {
    } else if (rdx->processQueuedCommand<long long int>(id)) {
    } else if (rdx->processQueuedCommand<nullptr_t>(id)) {
    } else if (rdx->processQueuedCommand<vector<string>>(id)) {
    } else if (rdx->processQueuedCommand<std::set<string>>(id)) {
    } else if (rdx->processQueuedCommand<unordered_set<string>>(id)) {
    } else
      throw runtime_error("Command pointer not found in any queue!");
  }
}
void useravgmovie()
{
    lg("User AvgMovie\n");
    double avgmovie[NMOVIES];
    int moviecount[NMOVIES];
    ZERO(avgmovie);
    ZERO(moviecount);
    int u;
    for (u = 0; u < NUSERS; u++) {
        int base = useridx[u][0];
        int d0 = UNTRAIN(u);
        int j;
        for (j = 0; j < d0; j++) {
            int m = userent[base + j] & USER_MOVIEMASK;
            int r = (userent[base + j] >> USER_LMOVIEMASK) & 7;
            avgmovie[m] += r;
            moviecount[m]++;
        }
    }
    int m;
    for (m = 0; m < NMOVIES; m++) avgmovie[m] /= moviecount[m];
    userXmovie(avgmovie, USERAVGMOVIE_ALPHA);
}
size_t oc_ostream_log_write(oc_log_ctx_t *ctx, const int level, const char *msg)
try
{
    oc_ostream_logger_ctx *lctx = static_cast<oc_ostream_logger_ctx *>(ctx->ctx);

    std::lock_guard<std::mutex> lg(lctx->mutex);

    std::ostringstream os;

    os << level << ": ";

    if(nullptr != ctx->module_name)
        os << '[' << ctx->module_name << "] ";

    os << msg << '\n';

    lctx->os << os.str().c_str();

    return 1 + os.str().length();
}
catch(...)
{
    return 0;
}
void c_rpc_server::c_session::execute_rpc_command(const std::string &input_message) {
    try {
        nlohmann::json j = nlohmann::json::parse(input_message);
        const std::string cmd_name = j["cmd"]; //.begin().value();
        dbg("cmd name " << cmd_name);
        // calling rpc function
        nlohmann::json json_response;
        {
            LockGuard<Mutex> lg(m_rpc_server_ptr->m_rpc_functions_map_mutex);
            json_response = m_rpc_server_ptr->m_rpc_functions_map.at(cmd_name)(input_message);
        }
        json_response["id"] = get_command_id();
        if (j.find("id") != j.end()) {
            json_response["re"] = j["id"];
        }
        send_response(json_response);
    } catch (const std::exception &e) {
        _erro("exception in execute_rpc_command " << e.what());
        _erro("close connection\n");
        delete_me();
        return;
    }
}
float Render_Widget::get_depth_at(const math::vec2u32& coords)
{
    if (!m_flags.test(Flag::DEPTH_ACCESS))
    {
        QASSERT(0);
        return 0;
    }
    if (m_buffers_full)
    {
        auto& buffer = m_buffers[m_read_buffer_idx];
        std::lock_guard<std::mutex> lg(buffer.mutex);
        resolve_depth_buffer(buffer);

        auto c = math::min(coords, buffer.size - math::vec2u32(1, 1));
        // convert from top-left origin to openGL bottom-left origin
        c.y = buffer.size.y - c.y - 1;
        return buffer.depth_data[c.y * buffer.size.x + c.x];
    }
    return 0;
}
void OCPiano::DrawBlackKey(const int Pos, const bool Down) {
    if (!Down) {
        SetPenBrush("#888", "#666");
        //Scene.addRect(Pos+2,1,BlackKeyWidth,BlackKeyHeight,QPen("#888"),QBrush("#666"));
        Rectangle(Pos+2, 1, BlackKeyWidth, BlackKeyHeight);
    }
    SetPenBrush(Qt::black);
    //Scene.addRect(Pos,0,BlackKeyWidth,BlackKeyHeight,QPen(Qt::black),QBrush(Qt::black));
    Rectangle(Pos, 0, BlackKeyWidth, BlackKeyHeight);
    QColor col("#666");
    if (Down) col = QColor("#444");
    SetPen(col);
    //Scene.addLine(Pos+1,1,Pos+1,BlackKeyHeight-3,QPen(col));
    Line(Pos+1, 1, Pos+1, BlackKeyHeight-3);
    QLinearGradient lg(0, 0, 0, BlackKeyHeight-7);
    if (Down) {
        lg.setColorAt(0, "#000");
        lg.setColorAt(1, "#444");
    } else {
        lg.setColorAt(0, "#000");
        lg.setColorAt(1, "#666");
    }
    QPainterPath p(QPoint(Pos+2, 1));
    p.lineTo(Pos+2, BlackKeyHeight-5);
    p.cubicTo(Pos+2, BlackKeyHeight-3, Pos+BlackKeyWidth-1, BlackKeyHeight-3, Pos+BlackKeyWidth-1, BlackKeyHeight-5);
    p.lineTo(Pos+BlackKeyWidth-1, 1);
    SetPen(Qt::black);
    SetBrush(lg);
    Path(p);
    //Scene.addPath(p,QPen(Qt::black),QBrush(lg));
}
void PanelButton::paintEvent(QPaintEvent * e) {
    QPainter p(this);
    p.setRenderHint(p.Antialiasing);
    QColor lB(223, 228, 235);
    QColor lB2(203, 208, 215);
    QLinearGradient lg(0, 0, 0, height());
    lg.setColorAt(0, lB);
    lg.setColorAt(1, lB2);
    p.setBrush(lg);
    p.setPen(Qt::transparent);
    p.drawRect(0, 0, width()-5, height());
    QColor lB3(183, 188, 195);
    p.setPen(lB3);
    p.drawLine(0, this->height(), this->width()-5, this->height());
    if (count() > 0) {
        QColor fc(60, 60, 60);
        p.setPen(fc);
        QFont font;
        //font.setBold(true);
        font.setFamily("URW Gothic L");
        p.setFont(font);
        QRect trect(0, 0, width()-5, height());
        p.drawText(trect, Qt::AlignCenter, this->itemText(currentIndex()));
    }
    e->accept();
}
static int
indexgroupcentre(GEN G, GEN Z, const long *good, const long *bad)
{
  long i;
  for (i=1; i<lg(Z); i++)
  {
    GEN z = gel(Z,i);
    if (perm_order(z) == 2)
    {
      pari_sp btop = avma;
      GEN H = cyclicgroup(z, 2);
      GEN C = group_quotient(G, H);
      GEN Q = quotient_group(C, G);
      const long *p;
      long idx = group_ident(Q, NULL);
      avma = btop;
      for (p=good; *p; p++)
        if (*p == idx) return 1;
      for (p=bad; *p; p++)
        if (*p == idx) return 0;
    }
  }
  return 0;
}
void logMessage( ELog aLog, const std::string& aMessage )
{
    std::lock_guard<std::mutex> lg( mLogMutex );
    mLogs.emplace_back( aLog, aMessage );
}