// Flushes buffered data to the underlying file.
// Throws logic_error if the file was not opened writable.
void File::flush()
{
    if (writable_)
    {
        do_flush();
        return;
    }
    throw logic_error("File not writable");
}
//read lock
/* Runs a battery of structural self-checks over the VBBM (version buffer
 * block map) shared-memory structures.  Throws logic_error on the first
 * inconsistency found; returns 0 when all checks pass.
 */
int VBBM::checkConsistency() const
{
    /* Struct integrity tests
       1: Verify that the recorded size matches the actual size
       2: Verify there are no empty entries reachable from the hash table
       3: Verify there are no empty entries below the LWM
       4a: Make sure every VBBM entry points to a unique position in the VB
       4b: Make sure every VBBM entry has unique LBID & VERID.
    */

    int i, j, k;

    /* Test 1 is already implemented */
    // size() performs the recorded-vs-actual size verification internally.
    size();

    /* Test 2 - no empty elements reachable from the hash table */
    int nextElement;

    for (i = 0; i < vbbm->numHashBuckets; i++)
    {
        // Walk each bucket's chain; an lbid of -1 marks an empty/free entry,
        // which must never be linked into a hash chain.
        if (hashBuckets[i] != -1)
            for (nextElement = hashBuckets[i]; nextElement != -1;
                    nextElement = storage[nextElement].next)
                if (storage[nextElement].lbid == -1)
                    throw logic_error("VBBM::checkConsistency(): an empty storage entry is reachable from the hash table");
    }

    /* Test 3 - verify that there are no empty entries below the LWM */
    // The low-water mark (vbLWM) is the index below which every storage slot
    // must be occupied; an empty slot below it means the LWM is stale.
    for (i = 0; i < vbbm->vbLWM; i++)
    {
        if (storage[i].lbid == -1)
        {
            cerr << "VBBM: LWM=" << vbbm->vbLWM << " first empty entry=" << i << endl;
            throw logic_error("VBBM::checkConsistency(): LWM accounting error");
        }
    }

    /* Test 4b - verify the uniqueness of the entries */
    // Duplicates can only collide within the same hash chain, so it suffices
    // to compare each chain element against the elements after it.
    for (i = 0; i < vbbm->numHashBuckets; i++)
        if (hashBuckets[i] != -1)
            for (j = hashBuckets[i]; j != -1; j = storage[j].next)
                for (k = storage[j].next; k != -1; k = storage[k].next)
                    if (storage[j].lbid == storage[k].lbid &&
                            storage[j].verID == storage[k].verID)
                    {
                        cerr << "VBBM: lbid=" << storage[j].lbid << " verID=" <<
                                storage[j].verID << endl;
                        throw logic_error("VBBM::checkConsistency(): Duplicate entry found");
                    }

    /* Test 4a - verify the uniqueness of vbOID, vbFBO fields */
    // Two occupied entries must never map to the same (vbOID, vbFBO)
    // position in the version buffer.  O(n^2) pairwise scan over capacity.
    for (i = 0; i < vbbm->vbCapacity; i++)
        if (storage[i].lbid != -1)
            for (j = i+1; j < vbbm->vbCapacity; j++)
                if (storage[j].lbid != -1)
                    if (storage[j].vbOID == storage[i].vbOID &&
                            storage[j].vbFBO == storage[i].vbFBO)
                    {
                        cerr << "VBBM: lbid1=" << storage[i].lbid << " lbid2=" <<
                                storage[j].lbid << " verID1=" << storage[i].verID <<
                                " verID2=" << storage[j].verID <<
                                " share vbOID=" << storage[j].vbOID <<
                                " vbFBO=" << storage[j].vbFBO << endl;
                        throw logic_error("VBBM::checkConsistency(): 2 VBBM entries share space in the VB");
                    }

    return 0;
}
/* Removes ("cuts") one ear from the polygon, appending the resulting
 * triangle to the output iterator, and returns a copy of the polygon with
 * the ear's apex vertex erased.
 *
 * Three passes of decreasing strictness are attempted:
 *   1) prefer cuts that eliminate vertices flagged illegal by is_legal()
 *      (e.g. collinear points), 2) any cut accepted by can_cut(),
 *   3) any cut whose triangle is merely is_legal().
 * Throws logic_error if no cut can be found at all.
 *
 * point2edges maps each vertex to the set of constrained edges incident to
 * it; it is passed through to is_legal()/can_cut().
 */
Polygon_2 cut_ear(const Polygon_2& polygon, Triangle_iter triangles, const boost::unordered_map<Point_3, boost::unordered_set<Segment_3_undirected> >& point2edges)
{
  static log4cplus::Logger logger = log4cplus::Logger::getInstance("polygon_utils");
  LOG4CPLUS_TRACE(logger, "Cutting ear of " << pp(polygon));;

  // Work on a copy; the original polygon is kept for can_cut() queries.
  Polygon_2 ret(polygon);
  Polygon_2::Vertex_circulator start = ret.vertices_circulator();
  Polygon_2::Vertex_circulator c = start;   // candidate apex
  Polygon_2::Vertex_circulator p = c - 1;   // predecessor
  Polygon_2::Vertex_circulator n = c + 1;   // successor

  // Do a run preferring to get rid of collinear points
  do {
    // pp precedes p; an illegal (pp, p, c) turn marks a vertex we would
    // like to remove first.
    Polygon_2::Vertex_circulator pp = p - 1;
    if (!is_legal(*pp, *p, *c, point2edges) &&
        can_cut(polygon, p, c, n, point2edges))
//     if (!is_legal(*pp, *p, *c, point2edges) &&
//         is_legal(*p, *c, *n, point2edges))
    {
      *triangles++ = Triangle(*p, *c, *n);
      ret.erase(c);
      return ret;
    }
    // Also try cutting one vertex further back around the same illegal turn.
    Polygon_2::Vertex_circulator ppp = pp - 1;
    if (!is_legal(*pp, *p, *c, point2edges) &&
        can_cut(polygon, ppp, pp, p, point2edges))
//     if (!is_legal(*pp, *p, *c, point2edges) &&
//         is_legal(*p, *c, *n, point2edges))
    {
      *triangles++ = Triangle(*ppp, *pp, *p);
      ret.erase(pp);
      return ret;
    }
    ++p; ++c; ++n;
  } while (c != start);

  // Okay, just take any cut
  do {
    if (can_cut(polygon, p, c, n, point2edges))
    {
      *triangles++ = Triangle(*p, *c, *n);
      ret.erase(c);
      return ret;
    }
    ++p; ++c; ++n;
  } while (c != start);

  // Okay, really just take any cut
  do {
//     if (can_cut(polygon, p, c, n, point2edges))
    if (is_legal(*p, *c, *n, point2edges))
    {
      *triangles++ = Triangle(*p, *c, *n);
      ret.erase(c);
      return ret;
    }
    ++p; ++c; ++n;
  } while (c != start);

  // No legal ear anywhere: the polygon cannot be triangulated this way.
  LOG4CPLUS_DEBUG(logger, "Polygon is not strictly convex");
  LOG4CPLUS_DEBUG(logger, "  Original: " << pp(polygon));
  LOG4CPLUS_DEBUG(logger, "  Current: " << pp(ret));
  throw logic_error("Polygon is not strictly convex");
}
/*
 * @job_xx_ben_file, like job.4.ben, it contains job.4.1.out job.4.2.out job.4.3.out job.4.4.out
 *
 * Reads every job.xx.out file listed in job_xx_ben_file, collects the
 * start/end timestamps they contain, and returns the overall time span
 * (latest end minus earliest start).
 *
 * NOTE(review): job_scale and loose are currently unused; time_start is
 * assigned but never read — presumably leftovers from an earlier revision.
 * */
double data_proc::process_internal5(const string& job_xx_ben_file, int job_scale, const bool &loose)
{
	string job_xx_out_file;
	double time_start = 0;
	ifstream job_xx_ben_ifs(job_xx_ben_file);
	/*lines of time_start/time_end print-out*/
	int num_lines = 2;
	vector<double> vec_start;  // all start timestamps (odd lines)
	vector<double> vec_end;    // all end timestamps (even lines)
	// Each line of the .ben file names one job.xx.out file to process.
	while (getline(job_xx_ben_ifs, job_xx_out_file)) {
		fprintf(stdout, "[%s][%s]: processing...\n", job_xx_ben_file.c_str(), job_xx_out_file.c_str());
		ifstream job_xx_out_ifs(job_xx_out_file);
		/* the format, [jobid]: [timestamp]
		 78: 1366949424667582.000000
		 78: 1366949424667658.000000
		 * */
		string timestamp_line;
		int j = 0; //counter for lines of timestamp in job.xx.out file
		// NOTE(review): first_jobid/second_jobid are read by the sprintf
		// below even when no line was parsed — uninitialized if the .out
		// file is empty; confirm inputs always have num_lines lines.
		int first_jobid; //the first jobid
		int second_jobid; //the second jobid
		while (getline(job_xx_out_ifs, timestamp_line)) {
			j++;
			if (timestamp_line.empty())
				continue;
			// Split "jobid: timestamp" on the colon.
			String_Tokenizer tokens(timestamp_line, ":"); //
			string jobid = trim(tokens.next_token()); //78, the jobid
			string timestamp = trim(tokens.next_token()); //1366949424667582.000000, the timestamp
			if (j % 2 != 0)
				first_jobid = atoi(jobid.c_str());
			else
				second_jobid = atoi(jobid.c_str());
			// A start/end pair must carry the same job id.
			if (j % 2 == 0) {
				if (first_jobid != second_jobid) {
					char buf[200];
					sprintf(buf, "[%s][%d][%d]: the timestamp of start and end is not contiguous.", job_xx_out_file.c_str(), first_jobid, second_jobid);
					throw logic_error(buf);
				}
			}
			// Odd lines are starts, even lines are ends.
			if (j % 2 != 0)
				vec_start.push_back(atof(timestamp.c_str()));
			else
				vec_end.push_back(atof(timestamp.c_str()));
		}
		// Every .out file must contain exactly num_lines timestamp lines.
		if (j != num_lines) {
			char buf[200];
			sprintf(buf, "[%s][%d][%d]: less or more than %d timestamp lines in job.xx.out file.", job_xx_out_file.c_str(), first_jobid, second_jobid, num_lines);
			throw logic_error(buf);
		}
	}
	// Span = latest end - earliest start across all jobs.
	// NOTE(review): indexes into empty vectors if the .ben file listed no
	// files — confirm callers guarantee a non-empty list.
	sort(vec_start.begin(), vec_start.end());
	sort(vec_end.begin(), vec_end.end());
	double timespan_sum = vec_end[vec_end.size() - 1] - vec_start[0];
	return timespan_sum;
}
// Registers a player with this world.
// Throws logic_error if the same player pointer is already registered.
void World::insertPlayer(Player* player)
{
    const bool alreadyPresent =
        find(playerList.cbegin(), playerList.cend(), player) != playerList.cend();

    if (alreadyPresent)
        throw logic_error("Player already inserted into world.");

    playerList.push_back(player);
}
/**
 * Returns the statistic corresponding to this effect as part of
 * the endowment function.
 */
double CovariateIndirectTiesEffect::endowmentStatistic(
	Network * pLostTieNetwork)
{
	// This effect defines no endowment statistic; reaching here is an error.
	const char* const message =
		"CovariateIndirectTiesEffect: Endowment effect not supported";
	throw logic_error(message);
}
// Emitting a double terminal definition is not implemented; fail fast so
// the caller's bug is caught immediately rather than producing bad output.
void GDoubleTerminalDef::emit( EScriptProgram& prog, const GTerminal* term ) const
{
	const char* const message = "oops";
	throw logic_error(message);
}
// Copy-assignment is intentionally forbidden for resource graphs; any
// attempt to use it is a programming error.
LBIDResourceGraph & LBIDResourceGraph::operator=(const LBIDResourceGraph &r)
{
	const char* const message = "Don't do that";
	throw logic_error(message);
}
/* Searches coeff_modulus_options (keyed by polynomial dimension) for the
 * smallest encryption parameter set under which every simulated operand
 * still decrypts with the requested noise budget gap.  On success fills in
 * destination and returns true; on failure resets destination to defaults
 * and returns false.
 *
 * Throws invalid_argument for malformed inputs and logic_error when an
 * operand carries no operation history to simulate.
 */
bool ChooserEvaluator::select_parameters(
    const std::vector<ChooserPoly> &operands,
    int budget_gap,
    double noise_standard_deviation,
    const map<int, vector<SmallModulus> > &coeff_modulus_options,
    EncryptionParameters &destination)
{
    // Input validation.
    if (budget_gap < 0)
    {
        throw std::invalid_argument("budget_gap cannot be negative");
    }
    if (noise_standard_deviation < 0)
    {
        throw invalid_argument("noise_standard_deviation can not be negative");
    }
    if (coeff_modulus_options.size() == 0)
    {
        throw invalid_argument("parameter_options must contain at least one entry");
    }
    if (operands.empty())
    {
        throw invalid_argument("operands cannot be empty");
    }

    // Scan the operands for the largest plaintext coefficient bit count and
    // the largest coefficient count; both bound the parameters we can pick.
    int largest_bit_count = 0;
    int largest_coeff_count = 0;
    for (size_t i = 0; i < operands.size(); i++)
    {
        if (operands[i].comp_ == nullptr)
        {
            throw logic_error("no operation history to simulate");
        }
        int current_bit_count = get_significant_bit_count(operands[i].max_abs_value_);
        largest_bit_count = (current_bit_count > largest_bit_count) ? current_bit_count : largest_bit_count;

        int current_coeff_count = operands[i].max_coeff_count_;
        largest_coeff_count = (current_coeff_count > largest_coeff_count) ? current_coeff_count : largest_coeff_count;
    }

    // We restrict to plain moduli that are powers of two. Here largest_bit_count
    // is the largest positive coefficient that we can expect to appear. Thus, we
    // need one more bit.
    uint64_t new_plain_modulus;
    if (largest_bit_count >= SEAL_USER_MODULO_BIT_BOUND)
    {
        // The plain_modulus needed is too big
        return false;
    }
    new_plain_modulus = 1ULL << largest_bit_count;
    destination.set_plain_modulus(new_plain_modulus);

    // Try each (dimension -> coeff modulus chain) option, smallest first,
    // until one passes simulation for every operand.
    bool found_good_parms = false;
    map<int, vector<SmallModulus> >::const_iterator iter = coeff_modulus_options.begin();
    while (iter != coeff_modulus_options.end() && !found_good_parms)
    {
        int dimension = iter->first;
        // Dimensions must be powers of two, at least 512.
        if (dimension < 512 || (dimension & (dimension - 1)) != 0)
        {
            throw invalid_argument("coeff_modulus_options keys invalid");
        }
        int coeff_bit_count = 0;
        for(auto mod : iter->second)
        {
            coeff_bit_count += mod.bit_count();
        }
        if (dimension > largest_coeff_count && coeff_bit_count > destination.plain_modulus().bit_count())
        {
            // Set the polynomial
            destination.set_coeff_modulus(iter->second);
            // Poly modulus is x^dimension + 1.
            BigPoly new_poly_modulus(dimension + 1, 1);
            new_poly_modulus.set_zero();
            new_poly_modulus[0] = 1;
            new_poly_modulus[dimension] = 1;
            destination.set_poly_modulus(new_poly_modulus);

            // The bound needed for GapSVP->search-LWE reduction
            //parms.noise_standard_deviation() = round(sqrt(dimension / (2 * 3.1415)) + 0.5);

            // Use constant (small) standard deviation.
            destination.set_noise_standard_deviation(noise_standard_deviation);

            found_good_parms = true;
            for (size_t i = 0; i < operands.size(); i++)
            {
                // If one of the operands does not decrypt, set found_good_parms to false.
                found_good_parms = operands[i].simulate(destination).decrypts(budget_gap) ? found_good_parms : false;
            }
        }

        // This dimension/coeff_modulus are too small. Move on to the next pair.
        iter++;
    }

    // Leave destination in a well-defined default state on failure.
    if (!found_good_parms)
    {
        destination = EncryptionParameters();
    }

    return found_good_parms;
}
/* Performs the Euler-C splitting step at vertex s: repeatedly pairs the two
 * largest classes of X against the smallest (by cG, the capacity toward s),
 * recording the splits in B and the history h, until at most two classes
 * remain, then pairs those off.  Returns the resulting graph.
 *
 * Preconditions checked at runtime: cG(s) must be even; the class list must
 * never shrink to exactly one element inside the loop.
 */
vector<SplitEdge> eulerCSplit(const vector<SplitEdge> &g, vector<SplitEdge> &B, int s, unsigned int k, vector<vector<int>> X, historyIndex &h)
{
	vector<SplitEdge> GHat = g;
	GHat = fixEquation4(GHat, s);
	// Splitting requires even total capacity at s.
	if (cG(s, g) % 2 != 0)
	{
		cout << "ERROR: odd CG(s)" << endl;
		throw logic_error("");
	}
	vector<vector<int>> X1 = X;
	// Reduce the class list until at most two classes remain.
	while (X1.size() >= 3)
	{
		// Sort classes by their capacity toward s (descending).
		X1 = sortByCG(GHat, s, X1);
		vector<int> cgsX1;
		for (unsigned i = 0; i < X1.size(); i++)
		{
			cgsX1.push_back(cG(s, X1[i], GHat));
		}
		// del1P/del2P: how much of the smallest class to pair with the
		// largest / second-largest class respectively.
		int del1P = 0;
		int del2P = 0;
		if ((cG(s, X1[0], GHat) - cG(s, X1[1], GHat)) >= cG(s, X1[X1.size() - 1], GHat))
			del1P = cG(s, X1[X1.size() - 1], GHat);
		else
		{
			del2P = (int)ceil(double(cG(s, X1[X1.size() - 1], GHat) - cG(s, X1[0], GHat) + cG(s, X1[1], GHat)) * .5);
			del1P = cG(s, X1[X1.size() - 1], GHat) - del2P;
		}
		GHat = removeZeroWeighted(GHat);
		GHat = pairing(X1[0], X1[X1.size() - 1], del1P, B, GHat, s, h);
		GHat = removeZeroWeighted(GHat);
		GHat = pairing(X1[1], X1[X1.size() - 1], del2P, B, GHat, s, h);
		// Drop classes whose capacity toward s has been exhausted.
		bool erase1 = cG(s, X1[0], GHat) == 0;
		bool erase2 = cG(s, X1[1], GHat) == 0;
		bool eraseLast = cG(s, X1.back(), GHat) == 0;
		vector<vector<int>> X11;
		if (!erase1)
			X11.push_back(X1[0]);
		if (!erase2)
			X11.push_back(X1[1]);
		for (unsigned i = 2; i < X1.size() - 1; i++)
			X11.push_back(X1[i]);
		if (!eraseLast)
			X11.push_back(X1.back());
		X1 = X11;
		if (X1.size() == 1)
		{
			cout << "ERROR: X1.size() == 1" << endl;
			throw logic_error("");
		}
	}
	// Pair off the final two classes (if any remain).
	// NOTE(review): this branch reads X1[1] whenever X1 is non-empty; if X1
	// could ever hold exactly one class here (e.g. X.size() == 1 on entry,
	// which skips the loop), X1[1] is out of bounds — confirm callers always
	// pass at least two classes.
	int del12 = 0;
	if (X1.size() != 0)
	{
		if (cG(s, X1[0], GHat) != cG(s, X1[1], GHat))
		{
			cout << "eulerCSplit sanity check failure. Make sure cG(X1) != cG(X2)" << endl;
			throw logic_error("");
		}
		del12 = cG(s, X1[0], GHat);
		GHat = removeZeroWeighted(GHat);
		GHat = pairing(X1[0], X1[1], del12, B, GHat, s, h);
	}
	return GHat;
}
// Copy-construction is intentionally forbidden for resource graphs; any
// attempt to use it is a programming error.
LBIDResourceGraph::LBIDResourceGraph(const LBIDResourceGraph &r)
{
	const char* const message = "Don't do that";
	throw logic_error(message);
}
/* Transfers delij units of split weight between class Xi and class Xj at
 * vertex s.  Each iteration picks one neighbor of s from each class, splits
 * off as much weight as possible (bounded by both edges and the remaining
 * delij), records the split in B (merging with an existing B edge when one
 * connects u and v) and in the history h.  Returns the updated graph.
 *
 * Throws logic_error if s has no remaining neighbor in one of the classes,
 * or if the chosen endpoints coincide.
 */
vector<SplitEdge> pairing(const vector<int> &Xi, const vector<int> &Xj, int delij, vector<SplitEdge> &B, const vector<SplitEdge> &g, int s, historyIndex &h)
{
	vector<SplitEdge> GHat = g;
	while (delij > 0)
	{
		int u = 0;
		int v = 0;
		// Neighbors of s that lie inside each class.
		vector<int> gamXi = setIntersection(Xi, neighbors(GHat, s));
		vector<int> gamXj = setIntersection(Xj, neighbors(GHat, s));
		if (gamXi.size() == 0 || gamXj.size() == 0)
		{
			cout << "Detected 0, dumping status" << endl;
			cout << "GHat:" << endl;
			output(GHat);
			cout << "B:" << endl;
			output(B);
			cout << "S: " << s << endl;
			cout << "delij: " << delij << endl;
			cout << "Xi:" << endl;
			output(Xi);
			cout << "neighbors of s in GHat:" << endl;
			output(neighbors(GHat, s));
			cout << "Xj:" << endl;
			output(Xj);
			cout << "gamXi:" << endl;
			output(gamXi);
			cout << "gamXj:" << endl;
			output(gamXj);
			// BUG FIX: the original code fell through after dumping and
			// read gamXi[0]/gamXj[0] from an empty vector (undefined
			// behavior).  Fail loudly instead.
			throw logic_error("pairing(): s has no remaining neighbor in Xi or Xj");
		}
		u = gamXi[0];
		v = gamXj[0];
		if (u == v)
		{
			cout << "U == V. Quitting" << endl;
			throw logic_error("");
		}
		// Split off as much as both edges (and the remaining delij) allow.
		int delta = min(min(cG(s, u, GHat), cG(s, v, GHat)), delij);
		GHat = split(GHat, u, v, s, delta, h);
		//Add weight delta to (u,v) in B
		bool found = false;
		for (unsigned i = 0; i < B.size(); i++)
		{
			if (connects(B[i], u, v))
			{
				B[i].weight += delta;
				found = true;
				break;
			}
		}
		// No existing B edge: copy endpoints/origins from GHat if it has
		// a (u,v) edge, otherwise create a fresh edge.
		if (!found)
		{
			for (unsigned i = 0; i < GHat.size(); i++)
			{
				if (connects(GHat[i], u, v))
				{
					SplitEdge e(GHat[i].end0, GHat[i].end1, delta, GHat[i].orig0, GHat[i].orig1);
					B.push_back(e);
					found = true;
					break;
				}
			}
		}
		if (!found)
		{
			SplitEdge e(u, v, delta, u, v);
			B.push_back(e);
		}
		delij -= delta;
	}
	return GHat;
}
/* Removes (or reduces the multiplicity of) a recorded split (s, u, v) with
 * the given weight from the doubly-linked split history h.
 *
 * Fast path: if the most recent entry matches, it is decremented or
 * unlinked directly.  Otherwise the list is walked backwards from the tail
 * until a matching entry is found.
 *
 * NOTE(review): startSize/endSize are computed only for the (commented-out)
 * debug output; the "isFirst" branch throws instead of unlinking the head —
 * presumably the head is a sentinel that must never match; confirm.
 */
void eraseSplit(historyIndex &h, int s, int u, int v, int weight)
{
	//cout << "Called eraseSplit on s=" << s << ", u=" << u << ", v=" << v << ", weight=" << weight << endl;
	int startSize = historySize(h);
	//cout << "Current history size is " << startSize << endl;
	h.loc = h.last;
	history toErase(s, u, v, weight);
	// Fast path: the entry to erase is the most recent one.
	if (*h.last == toErase)
	{
		//cout << "Went down last erase path." << endl;
		if (h.last->multiplicity == weight)
		{
			// Unlink the tail node entirely.
			h.last->prev->next = nullptr;
			h.last = h.last->prev;
		}
		else
			h.last->multiplicity -= weight;
		return;
	}
	//cout << "Went down full path." << endl;
	// Slow path: walk backwards from the tail looking for a match.
	h.loc = h.last;
	//cout << "This is h" << endl;
	//cout << "last:" << endl;
	//output(*h.last);
	//cout << "first: " << endl;
	//output(*h.first);
	//cout << "current:" << endl;
	//output(*h.loc);
	while (h.loc->prev != nullptr)
	{
		h.loc = h.loc->prev;
		//cout << "This is h" << endl;
		//cout << "last:" << endl;
		//output(*h.last);
		//cout << "first: " << endl;
		//output(*h.first);
		//cout << "current:" << endl;
		//output(*h.loc);
		if (toErase == *h.loc)
		{
			if (h.loc->multiplicity == weight)
			{
				if (!(h.loc->isFirst()))
				{
					// Unlink this interior node.
					h.loc->prev->next = h.loc->next;
					h.loc->next->prev = h.loc->prev;
				}
				else
				{
					cout << "Error: Could not find split in history. Quitting." << endl;
					throw logic_error("");
				}
			}
			else
				h.loc->multiplicity -= weight;
			break;
		}
	}
	int endSize = historySize(h);
	//cout << "Ending history size is: " << endSize << endl;
}
/* "Hook-up" phase of the edge-splitting algorithm: while the working graph H
 * still has at least four used nodes, repeatedly merges the last two
 * vertices of a max-adjacency ordering, and whenever the merged super-vertex
 * falls below connectivity k, moves enough previously-split edges (from B)
 * back toward s to restore it, tracking the moved edges in B1 and the merged
 * vertex sets in XS / Y.
 *
 * Inputs: g — current graph; s — the split vertex; bIn — current split-edge
 * set; k — required connectivity; Y — family of vertex sets, updated in
 * place; h — split history.  Returns the moved edges (BP), the updated graph
 * (G1) and the updated family (Y).  Precondition: cG(s, g) == 0.
 */
huReturn hookUp(const vector<SplitEdge> &g, int s, vector<SplitEdge> &bIn, int k, vector<vector<int>> &Y, historyIndex &h)
{
	// s must have no remaining capacity when hook-up starts.
	if (cG(s, g) != 0)
	{
		cout << "cG(s) problem in hookup" << endl;
		throw logic_error("");
	}
	vector<SplitEdge> H = g;    // working graph, progressively contracted
	vector<SplitEdge> G1 = g;   // real graph, updated via hookUpHelper
	vector<SplitEdge> B = bIn;  // remaining split edges
	vector<SplitEdge> B1;       // split edges consumed by hook-up (returned)
	vector<vector<int>> XS;     // XS[i]: original vertices merged into i
	int maxNodeInd = getMaxNodeInd(G1);
	for (int i = 0; i < maxNodeInd; i++)
		XS.push_back(vector<int>());
	for (int i = 0; i < maxNodeInd; i++)
		XS[i].push_back(i);
	//cout << "About to enter while loop" << endl;
	while (getNumUsedNodes(H) >= 4)
	{
		// Merge the last two vertices of a max-adjacency ordering.
		vector<int> ma = maOrderingHeap(H, s);
		int v = ma[ma.size() - 2];
		int w = ma[ma.size() - 1];
		if (v == s || w == s)
			throw logic_error("SET WAS V - S, S FOUND");
		vector<int> X1;
		H = combineVertices(H, v, w);
		H = compress(H);
		XS[v] = setUnion(XS[v], XS[w]);
		if (XS[w].size() == 0)
		{
			cout << "Error: W, " << w << " was merged twice. Quitting" << endl;
			throw logic_error("");
		}
		XS[w] = vector<int>();
		// If the merged vertex dropped below connectivity k, pull split
		// edges back from B to compensate.
		if (cG(v, H) < k)
		{
			// Number of B edges (by weight) needed to restore k.
			int numToGet = (int)ceil(.5*(double(k) - double(cG(G1, XS[v]))));
			vector<SplitEdge> GX = inducedSubgraph(G1, XS[v]);
			vector<SplitEdge> delB;
			int added = 0;
			// Collect up to numToGet units of weight from GX edges that
			// are also in B (capped by each edge's weight in B).
			for (unsigned i = 0; i < GX.size(); i++)
			{
				SplitEdge e = SplitEdge(GX[i].end0, GX[i].end1, GX[i].weight, GX[i].orig0, GX[i].orig1);
				if (isMem(e, B))
				{
					int bW = B[indexOfEdge(B, e.end0, e.end1)].weight;
					if (bW < e.weight)
						e.weight = bW;
					if (e.weight > (numToGet - added))
					{
						e.weight = numToGet - added;
					}
					added += e.weight;
					delB.push_back(e);
				}
				if (added == numToGet)
					break;
			}
			if (added != numToGet)
			{
				cout << "Error: GX did not contain " << numToGet << " entries in B. Quitting." << endl;
				throw logic_error("");
			}
			if (!isSubset(delB, B))
			{
				cout << "ERROR: delB is not a subset of B." << endl;
				cout << "B:" << endl;
				output(B);
				cout << "delB:" << endl;
				output(delB);
				cout << "This was the GX to choose from:" << endl;
				output(GX);
				cout << "V: " << v << endl;
				cout << "W: " << w << endl;
				cout << "S: " << s << endl;
				throw logic_error("");
			}
			// Move delB out of B into B1, and undo those splits in G1.
			B = setRemove(delB, B);
			B = removeZeroWeighted(B);
			B1 = setUnion(delB, B1);
			H = removeZeroWeighted(H);
			G1 = hookUpHelper(s, G1, delB, h);
			G1 = removeZeroWeighted(G1);
			H = removeZeroWeighted(H);
			// Mirror the restored capacity in the contracted graph H:
			// each undone split re-adds two units at s per unit of weight.
			bool addedFromXSinH = false;
			numToGet *= 2;
			for (unsigned i = 0; i < H.size(); i++)
			{
				SplitEdge tester = SplitEdge(s, v, 0, 0, 0);
				if (equals(tester, H[i]))
				{
					//cout << "Increasing weight in hookUp in H between " << H[i].end0 << " and " << H[i].end1 << "from " << H[i].weight << " to " << H[i].weight + numToGet << endl;
					H[i].weight += numToGet;
					addedFromXSinH = true;
					break;
				}
			}
			if (!addedFromXSinH && numToGet != 0)
			{
				//cout << "Creating edge in hookUp in H between " << s << " and " << v << " with weight " << numToGet << endl;
				SplitEdge e(s, v, numToGet, s, v);
				H.push_back(e);
			}
			// Replace any subset of XS[v] in Y by XS[v] itself.
			vector<vector<int>> newY;
			for (unsigned i = 0; i < Y.size(); i++)
			{
				if (!isProperSubset(Y[i], XS[v]))
					newY.push_back(Y[i]);
			}
			bool foundX1inY = false;
			for (unsigned i = 0; i < newY.size(); i++)
			{
				if (setsEqual(newY[i], XS[v]))
					foundX1inY = true;
			}
			if (!foundX1inY)
				newY.push_back(XS[v]);
			Y = newY;
		}
	}
	huReturn ret;
	ret.BP = B1;
	ret.G1 = G1;
	ret.Y = Y;
	return ret;
}
// Constructs a parabola from its three polynomial coefficients.
// Throws logic_error unless exactly three coefficients are supplied.
PWIZ_API_DECL Parabola::Parabola(vector<double> a)
:   a_(a)
{
    const bool hasThreeCoefficients = (a_.size() == 3);
    if (!hasThreeCoefficients)
        throw logic_error("[Parabola::Parabola()] 3 coefficients required.");
}
/* SAX end-of-element callback: validates that the closing tag matches the
 * current parser state, harvests the accumulated character data
 * (charBuffer) for <type> and <time>, and emits a ContactEvent into the
 * event queue when a <from> element closes.  charBuffer is released on
 * every exit path.  Throws logic_error on any state mismatch.
 */
void EventParser::endElement(const XMLCh* const name)
{
	try{
		switch(enumState(name))
		{
		case TRACEDCONTACTS:
			// Document root closes: parsing is finished.
			if(state == TRACEDCONTACTS) state = FINISH;
			else {
				XMLString::release(&charBuffer);
				throw logic_error("XML Parser error: garbled XML input at </tracedcontacts>");
			}
			//cout << "Leaving TRACEDCONTACTS" << endl;
			break;
		case CONTACT:
			//cout << "Leaving CONTACT" << endl;
			if(state == CONTACT) {
				state = TRACEDCONTACTS;
			}
			else {
				XMLString::release(&charBuffer);
				throw logic_error("XML Parser error: garbled XML input at </contact>");
			}
			break;
		case FROM:
			// A complete contact record: build the event and queue it.
			if(state == FROM) {
				currEvent = new ContactEvent(contactee,eventTime,contactor,eventType);
				pair<EventQueue::iterator, bool> rv = eventQueue.insert(currEvent);
				if(rv.second == false) {
					cerr << "Duplicate time: " << eventTime << endl;
				}
				state = CONTACT;
				inFrom = false;
			}
			else {
				cerr << "State: " << state << endl;
				XMLString::release(&charBuffer);
				throw logic_error("XML Parser error: garbled XML input at </from>");
			}
			break;
		case TYPE:
			// Map the element text to a contact-type enumerator.
			if(state == TYPE) {
				if(XMLString::stringLen(charBuffer) == 0) {
					cerr << "Warning: Type empty!" << endl;
				}
				XMLString::transcode(charBuffer,chBuff,99);
				if (XMLString::equals(chBuff,"background")) eventType = BACKGROUND;
				else if(XMLString::equals(chBuff,"feedmill")) eventType = FEEDMILL;
				else if(XMLString::equals(chBuff,"shouse")) eventType = SHOUSE;
				else if(XMLString::equals(chBuff,"company")) eventType = COMPANY;
				else if(XMLString::equals(chBuff,"ispatial")) eventType = ISPATIAL;
				else if(XMLString::equals(chBuff,"nspatial")) eventType = NSPATIAL;
				else cerr << "Unrecognised contact type '" << chBuff << "'" << endl;
				state = FROM;
			}
			else {
				XMLString::release(&charBuffer);
				throw logic_error("XML Parser error: garbled XML input at </type>");
			}
			break;
		case TIME:
			// Parse the element text as the event timestamp.
			if(state == TIME) {
				XMLString::transcode(charBuffer,chBuff,99);
				eventTime = atof(chBuff);
				state = FROM;
			}
			else {
				XMLString::release(&charBuffer);
				throw logic_error("XML Parser error: garbled XML input at </time>");
			}
			break;
		default:
			XMLString::release(&charBuffer);
			throw logic_error("XML Parser error: Unrecognised tag");
		}
		XMLString::release(&charBuffer);
	}
	catch(exception& e) {
		XMLString::release(&charBuffer);
		throw logic_error(e.what());
	}
	//cout << "Leaving end element" << endl;
}
void DNServer::mainLoop() { #ifdef __linux typedef int SOCKET; typedef sockaddr SOCKADDR; typedef sockaddr_in SOCKADDR_IN; const int INVALID_SOCKET = -1; #define closesocket(X) close(X) #endif SOCKET servSock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); if (servSock == INVALID_SOCKET) throw logic_error("DNServer: socket error"); SOCKET cliSock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); if (cliSock == INVALID_SOCKET) { closesocket(servSock); throw logic_error("DNServer: socket error"); } try { SOCKADDR_IN ssin = { 0 }; ssin.sin_family = AF_INET; ssin.sin_addr.s_addr = inet_addr(cfpars["main"]["bindaddr"].c_str()); ssin.sin_port = htons(stoi(cfpars["main"]["bindport"])); SOCKADDR_IN assin = { 0 }; assin.sin_family = AF_INET; assin.sin_addr.s_addr = inet_addr(cfpars["main"]["rootdns"].c_str()); assin.sin_port = htons(stoi(cfpars["main"]["rootdnsport"])); if (::bind(servSock, reinterpret_cast<SOCKADDR*>(&ssin), sizeof(ssin))) throw logic_error("DNServer: bind error"); unsigned char packet[BUFSIZE]; while (1) { memset(packet, 0, sizeof(packet)); size_t recSize; SOCKADDR_IN ssinf = { 0 }; #ifdef _WIN32 int ssinsz = sizeof(ssinf); #elif __linux socklen_t ssinsz = sizeof(ssinf); #endif if ((recSize = recvfrom(servSock, reinterpret_cast<char*>(packet), sizeof(packet), 0, reinterpret_cast<SOCKADDR*>(&ssinf), &ssinsz)) > 0) { DnsResponse response(packet); if (filter(inet_ntoa(ssinf.sin_addr)) && processClient(response)) { // обработать запрос и отдать ответ size_t respsize = response.size(); response.setTruncated(respsize >= 512); if (respsize > sizeof(packet)) { unique_ptr<unsigned char[]> uptr(new unsigned char[respsize]); response.dump(uptr.get()); sendto(servSock, reinterpret_cast<char*>(uptr.get()), respsize, 0, reinterpret_cast<SOCKADDR*>(&ssinf), sizeof(ssinf)); } else { memset(packet, 0, sizeof(packet)); response.dump(packet); sendto(servSock, reinterpret_cast<char*>(packet), respsize, 0, reinterpret_cast<SOCKADDR*>(&ssinf), sizeof(ssinf)); } } else { // данный тип 
запроса не поддерживаетс¤ // отправить запрос основному серверу sendto(cliSock, reinterpret_cast<char*>(packet), recSize, 0, reinterpret_cast<SOCKADDR*>(&assin), sizeof(assin)); memset(packet, 0, sizeof(packet)); int rfms = recvfrom(cliSock, reinterpret_cast<char*>(packet), sizeof(packet), 0, NULL, NULL); sendto(servSock, reinterpret_cast<char*>(packet), rfms, 0, reinterpret_cast<SOCKADDR*>(&ssinf), sizeof(ssinf)); } } } } catch (...) { closesocket(servSock); closesocket(cliSock); throw; } closesocket(servSock); closesocket(cliSock); }
/* SAX start-of-element callback: validates the tag against the current
 * parser state, advances the state machine, and captures the "id"
 * attribute of <contact> (contactee) and <from> (contactor).  Any stale
 * character buffer from the previous element is released first.  Throws
 * logic_error on state mismatches or unknown tags.
 */
void EventParser::startElement(const XMLCh* const name,
		AttributeList& attributes)
{
	//char* ascName = XMLString::transcode(name);
	//cout << "State (ASC): " << ascName << endl;
	//XMLString::release(&ascName);

	// Drop any leftover character data from the previous element.
	if (charBuffer != NULL) XMLString::release(&charBuffer);

	switch(enumState(name))
	{
	case TRACEDCONTACTS:
		//cout << "Entered TRACEDCONTACTS" << endl;
		if(state == START) state = TRACEDCONTACTS;
		else throw logic_error("XML Parser error: garbled XML input at <tracedcontacts>");
		break;
	case CONTACT:
		//cout << "Entered CONTACT" << endl;
		// <contact id="..."> carries the contactee's id.
		if(state == TRACEDCONTACTS) {
			state = CONTACT;
			const XMLCh* id = attributes.getValue("id");
			XMLString::transcode(id,chBuff,99);
			contactee = atoi(chBuff);
		}
		else throw logic_error("XML Parser error: garbled XML input at <contact>");
		break;
	case FROM:
		// <from id="..."> carries the contactor's id.
		if(state == CONTACT) {
			state = FROM;
			inFrom = true;
			const XMLCh* id = attributes.getValue("id");
			XMLString::transcode(id,chBuff,99);
			contactor = atoi(chBuff);
		}
		else throw logic_error("XML Parser error: garbled XML input at <from>");
		break;
	case TYPE:
		if(state == FROM) state = TYPE;
		else {
			cerr << "State at <type>:" << state << endl;
			throw logic_error("XML Parser error: garbled XML input at <type>");
		}
		break;
	case TIME:
		// NOTE(review): the assert duplicates the if-check below; in a
		// release build (NDEBUG) only the exception path remains.
		assert(state == FROM);
		if(state == FROM) state = TIME;
		else throw logic_error("XML Parser error: garbled XML input at <time>");
		break;
	default:
		throw logic_error("Unknown state in parse");
	}
}
bool ChooserEvaluator::select_parameters(const std::vector<ChooserPoly> &operands, double noise_standard_deviation, double noise_max_deviation, const std::map<int, BigUInt> ¶meter_options, EncryptionParameters &destination) { if (noise_standard_deviation < 0) { throw invalid_argument("noise_standard_deviation can not be negative"); } if (noise_max_deviation < 0) { throw invalid_argument("noise_max_deviation can not be negative"); } if (parameter_options.size() == 0) { throw invalid_argument("parameter_options must contain at least one entry"); } if (operands.empty()) { throw invalid_argument("operands cannot be empty"); } int largest_bit_count = 0; int largest_coeff_count = 0; for (vector<ChooserPoly>::size_type i = 0; i < operands.size(); ++i) { if (operands[i].comp_ == nullptr) { throw logic_error("no operation history to simulate"); } int current_bit_count = operands[i].max_abs_value_.significant_bit_count(); largest_bit_count = (current_bit_count > largest_bit_count) ? current_bit_count : largest_bit_count; int current_coeff_count = operands[i].max_coeff_count_; largest_coeff_count = (current_coeff_count > largest_coeff_count) ? current_coeff_count : largest_coeff_count; } // We restrict to plain moduli that are powers of two. Here largest_bit_count is the largest positive // coefficient that we can expect to appear. Thus, we need one more bit. 
destination.plain_modulus() = 1; destination.plain_modulus() <<= largest_bit_count; bool found_good_parms = false; map<int, BigUInt>::const_iterator iter = parameter_options.begin(); while (iter != parameter_options.end() && !found_good_parms) { int dimension = iter->first; if (dimension < 512 || (dimension & (dimension - 1)) != 0) { throw invalid_argument("parameter_options keys invalid"); } if (dimension > largest_coeff_count && destination.plain_modulus() < iter->second) { // Set the polynomial destination.coeff_modulus() = iter->second; destination.poly_modulus().resize(dimension + 1, 1); destination.poly_modulus().set_zero(); destination.poly_modulus()[0] = 1; destination.poly_modulus()[dimension] = 1; // The bound needed for GapSVP->search-LWE reduction //parms.noise_standard_deviation() = round(sqrt(dimension / (2 * 3.1415)) + 0.5); // Use constant (small) standard deviation. destination.noise_standard_deviation() = noise_standard_deviation; // We truncate the gaussian at noise_max_deviation. destination.noise_max_deviation() = noise_max_deviation; // Start initially with the maximum decomposition_bit_count, then decrement until decrypts(). destination.decomposition_bit_count() = destination.coeff_modulus().significant_bit_count(); // We bound the decomposition bit count value by 1/8 of the maximum. A too small // decomposition bit count slows down multiplication significantly. This is not an // issue when the user wants to use multiply_norelin() instead of multiply(), as it // only affects the relinearization step. The fraction 1/8 is not an optimal choice // in any sense, but was rather arbitrarily chosen. An expert user might want to tweak this // value to be smaller or larger depending on their use case. // To do: Figure out a somewhat optimal bound. 
int min_decomposition_bit_count = destination.coeff_modulus().significant_bit_count() / 8; while (!found_good_parms && destination.decomposition_bit_count() > min_decomposition_bit_count) { found_good_parms = true; for (vector<ChooserPoly>::size_type i = 0; i < operands.size(); ++i) { // If one of the operands does not decrypt, set found_good_parms to false. found_good_parms = operands[i].simulate(destination).decrypts() ? found_good_parms : false; } if (!found_good_parms) { --destination.decomposition_bit_count(); } else { // We found some good parameters. But in fact we can still decrease the decomposition count // a little bit without hurting performance at all. int old_dbc = destination.decomposition_bit_count(); int num_parts = destination.coeff_modulus().significant_bit_count() / old_dbc + (destination.coeff_modulus().significant_bit_count() % old_dbc != 0); destination.decomposition_bit_count() = destination.coeff_modulus().significant_bit_count() / num_parts + (destination.coeff_modulus().significant_bit_count() % num_parts != 0); } } } // This dimension/coeff_modulus are to small. Move on to the next pair. ++iter; } if (!found_good_parms) { destination = EncryptionParameters(); } return found_good_parms; }
/* Renders the parsed math expression as a complete, self-contained LaTeX
 * document ("purified TeX"): a preamble including only the packages the
 * expression actually needs (as reported by GetPurifiedTex via
 * LatexFeatures), followed by the math wrapped in \[...\] or $...$.
 *
 * Throws logic_error if the parse tree has not been built yet, and
 * Exception when a required package/font is disallowed by options.
 */
wstring Manager::GeneratePurifiedTex( const PurifiedTexOptions& options ) const
{
    if (!mParseTree.get())
        throw logic_error( "Parse tree not yet built in Manager::GeneratePurifiedTex" );

    // Serialize the tree; 'features' records which packages are required.
    wostringstream os;
    LatexFeatures features;
    mParseTree->GetPurifiedTex(os, features, cFontEncodingDefault);
    wstring latex = os.str();

    // Cyrillic (X2) and CJK both require the ucs package; CJK text is
    // wrapped in \text, which needs amsmath.
    if (features.mNeedsX2 || features.mNeedsCJK)
    {
        features.mNeedsUcs = true;
        features.mNeedsAmsmath = true;      // for the "\text" command
    }

    // Generate purified tex output
    wostringstream output;
    output << L"\\nonstopmode\n" L"\\documentclass[12pt]{article}\n";
    if (features.mNeedsAmsmath)
        output << L"\\usepackage{amsmath}\n";
    if (features.mNeedsAmsfonts)
        output << L"\\usepackage{amsfonts}\n";
    if (features.mNeedsAmssymb)
        output << L"\\usepackage{amssymb}\n";
    if (features.mNeedsColor)
        output << L"\\usepackage[dvips,usenames]{color}\n";

    if (features.mNeedsUcs)
    {
        // Unicode input support must be explicitly allowed by the caller.
        if (!options.mAllowUcs)
            throw Exception(L"LatexPackageUnavailable", L"ucs");
        output << L"\\usepackage[utf8x]{inputenc}\n";
    }

    if (features.mNeedsX2)
        // Define \cyr to switch to the X2 (Cyrillic) font encoding inline.
        output << L"\\usepackage[X2,T1]{fontenc}\n"
            L"\\newcommand{\\cyr}[1]{\\text{"
            L"\\bgroup\\fontencoding{X2}\\selectfont #1\\egroup}}\n";

    if (features.mNeedsCJK)
    {
        if (!options.mAllowCJK)
            throw Exception(L"LatexPackageUnavailable", L"CJK");
        output << L"\\usepackage{CJK}\n";
        if (features.mNeedsJapaneseFont)
        {
            // Japanese text needs a concrete CJK font configured.
            if (options.mJapaneseFont.empty())
                throw Exception(L"LatexFontNotSpecified", L"japanese");
            output << L"\\newcommand{\\jap}[1]{\\text{\\begin{CJK}{UTF8}{"
                << options.mJapaneseFont << L"}#1\\end{CJK}}}\n";
        }
    }

    // preview.sty tightly crops the output; otherwise suppress page numbers.
    if (options.mAllowPreview)
        output << L"\\usepackage[active]{preview}\n";
    else
        output << L"\\pagestyle{empty}\n";

    // Caller-supplied preamble/pre-math hooks.
    if (options.mLaTeXPreamble.length() > 0)
        output << options.mLaTeXPreamble << "\n";

    output << L"\\begin{document}\n";

    if (options.mLaTeXBeforeMath.length() > 0)
        output << options.mLaTeXBeforeMath << "\n";

    if (options.mAllowPreview)
        output << L"\\begin{preview}\n";

    // Display vs inline math delimiters.
    if (options.mDisplayMath)
        output << L"\\[\n" << latex << L"\n\\]\n";
    else
        output << L"$\n" << latex << L"\n$\n";

    if (options.mAllowPreview)
        output << L"\\end{preview}\n";

    output << L"\\end{document}\n";

    return output.str();
}
void Attributes::SetDoubleAttrVisitor:: visit(const Attributes& a) { const char* emsg = "Cannot set attribute of a const instance."; throw logic_error(emsg); }
// Partitions the volumic elements of the mesh into nDivisions groups of
// element ids using METIS' dual-graph k-way partitioner.
//  - idWgt: optional per-element (id, weight) pairs; empty means unweighted.
//  - taskPower: optional array of nDivisions relative task weights
//    (NULL means uniform).
// Returns one vector of element ids per partition.
// Throws Error if METIS fails, logic_error if partitioning is disabled.
vector<vector<Geometry::ElemId>> Volume::getPartitionsIds(
        const size_t nDivisions,
        const vector<pair<Geometry::ElemId,int>> idWgt,
        const Math::Real* taskPower) const {
    // Metis v5 manual:
    // [...] take as input the element-node array of the mesh and
    // compute a k-way partitioning for both its elements and its nodes
    // idWgt contains id and weight pairs.
    vector<vector<Geometry::ElemId>> res;
    res.resize(nDivisions, vector<Geometry::ElemId>());
    // Accounts for the one partition case.
    if (nDivisions == 1) {
        Geometry::ConstElemRGroup physVol = elems();
        physVol.removeMatId(MatId(0));
        const size_t nK = physVol.sizeOf<Geometry::VolR>();
        res[0].resize(nK, Geometry::ElemId(0));
        for (size_t i = 0; i < nK; i++) {
            res[0][i] = (elems())(i)->getId();
        }
        return res;
    }
#ifdef MESH_ALLOW_PARTITIONING
    // Prepares mesh info.
    // NOTE: all METIS buffers are std::vectors so nothing leaks on the
    // exception paths (the previous version mismatched new[]/delete and
    // never freed tpwgts).
    cout << " - Preparing mesh info... " << flush;
    idx_t ne = elems().sizeOf<Geometry::VolR>();
    vector<idx_t> eptr(ne + 1);          // element -> first vertex index
    vector<idx_t> eind(ne * 4);          // flattened element-node array
    size_t counter = 0;
    eptr[0] = counter;
    for (idx_t i = 0; i < ne; i++) {
        const Geometry::VolR* vol = elem_.tet[i];
        for (size_t j = 0; j < vol->numberOfVertices(); j++) {
            eind[counter++] = vol->getVertex(j)->id - 1;
        }
        eptr[i+1] = counter;
    }
    cout << "OK" << endl;
    // Relabels ids, needed by quadratic or linearized meshes.
    cout << " - Relabeling... " << flush;
    DynMatrix<Math::Int> id(ne*4, 3);
    for (Math::Int i = 0; i < ne*4; i++) {
        id(i,0) = i;
        id(i,1) = eind[i];
        id(i,2) = 0;
    }
    id.sortRows_omp(1,1);
    Math::Int label = 0;
    for (Math::Int i = 1; i < ne*4; i++) {
        if (id(i,1) == id(i-1,1)) {
            id(i,2) = label;
        } else {
            id(i,2) = ++label;
        }
    }
    id.sortRows_omp(0,0);
    for (Math::Int i = 0; i < ne*4; i++) {
        eind[i] = id(i,2);
    }
    idx_t nn = label + 1; // Number of vertices.
    cout << "OK" << endl;
    // Copies weights.
    cout << " - Copying weights... " << flush;
    vector<idx_t> vwgt;                  // empty -> NULL (unweighted)
    if (idWgt.size() != 0) {
        vwgt.resize(ne);
        for (Math::Int e = 0; e < ne; e++) {
            vwgt[e] = idWgt[e].second;
        }
    }
    idx_t* vsize = NULL;
    idx_t nparts = nDivisions;
    idx_t objval;
    vector<idx_t> epart(ne);             // out: partition of each element
    vector<idx_t> npart(nn);             // out: partition of each node
    cout << "OK" << endl;
    // Computes task computational powers.
    vector<real_t> tpwgts;               // empty -> NULL (uniform)
    if (taskPower != NULL) {
        tpwgts.resize(nDivisions);
        real_t sum = 0.0;
        for (size_t i = 0; i < nDivisions; i++) {
            tpwgts[i] = taskPower[i];
            sum += tpwgts[i];
        }
        // NOTE(review): METIS expects the tpwgts entries to sum to 1.0;
        // this assert only checks |sum| < ~1 -- confirm intent.
        assert(std::abs(sum) - 1.0e-16 < 1.0);
    }
    // METIS options.
    cout << " - Setting Options... " << flush;
    idx_t options[METIS_NOPTIONS];
    Math::Int status;
    status = METIS_SetDefaultOptions(options);
    options[METIS_OPTION_PTYPE] = METIS_PTYPE_KWAY;
    options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT;
    options[METIS_OPTION_SEED] = (idx_t) 0;
    // options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_VOL;
    // c numbering. Starts from 0.
    options[METIS_OPTION_NUMBERING] = 0;
    cout << "OK" << endl;
    // Calls METIS partition function for meshes.
    idx_t ncommon = 3; // Number of common vertices per element.
    cout << " - Calling Part Mesh Dual... " << flush;
    status = METIS_PartMeshDual(
            &ne, &nn, eptr.data(), eind.data(),
            vwgt.empty() ? NULL : vwgt.data(), vsize, &ncommon, &nparts,
            tpwgts.empty() ? NULL : tpwgts.data(), options, &objval,
            epart.data(), npart.data());
    if (status != METIS_OK) {
        // BUGFIX: the old code did `"..." + status`, which is pointer
        // arithmetic on a string literal, not concatenation.
        throw Error("METIS_PartMeshDual fn failed with error: "
                + std::to_string(status));
    }
    cout << "OK" << endl;
    // Converts result.
    for (size_t i = 0; i < nDivisions; i++) {
        res[i].reserve(ne);
    }
    for (Math::Int i = 0; i < ne; i++) {
        size_t elemId = elem_.tet[i]->getId();
        res[epart[i]].push_back(elemId);
    }
    // Returns result.  (Buffers free themselves via RAII.)
    return res;
#else
    throw logic_error("Mesh partitioning is not allowed.");
#endif
}
/* * @job_xx_ben_file, like job.4.ben, it contains job.4.0.out job.4.1.out job.4.2.out job.4.3.out job.4.4.out * * */ double data_proc::process_internal(const string& job_xx_ben_file, int job_scale, const bool &loose) { string job_xx_out_file; double time_start = 0; ifstream job_xx_ben_ifs(job_xx_ben_file); /*lines of time_start/time_end print-out, for 1024 parallel jobs on 1024 cores of 512 nodes*/ int num_lines = 2048; vector<double> vec_start; vector<double> vec_end; int i = 0; while (getline(job_xx_ben_ifs, job_xx_out_file)) { fprintf(stdout, "[%s][%s]: processing...\n", job_xx_ben_file.c_str(), job_xx_out_file.c_str()); ifstream job_xx_out_ifs(job_xx_out_file); /*the first line is job.x.0.out, count the timestamp in it as time start*/ if (loose && i == 0) { string timestamp_line; while (getline(job_xx_out_ifs, timestamp_line)) { time_start = atof(timestamp_line.c_str()); break; /*since there's only one line in job.x.0.out*/ } } if (i > 0) { /*count the earliest timestamp in all job.x.x.out(except job.x.0.out) as time start*/ /* the format, [MPI process number]: [timestamp] 0163: 1366949424667582.000000 0163: 1366949424667658.000000 * */ string timestamp_line; int j = 0; //counter for lines of timestamp in job.xx.out file int first_proc_num; //the first MPI processor number int second_proc_num; //the second MPI processor number while (getline(job_xx_out_ifs, timestamp_line)) { j++; if (timestamp_line.empty()) continue; String_Tokenizer tokens(timestamp_line, ":"); // string proc_num = trim(tokens.next_token()); //0163, the MPI process number string timestamp = trim(tokens.next_token()); //1366949424667582.000000, the timestamp if (j % 2 != 0) first_proc_num = atoi(proc_num.c_str()); else second_proc_num = atoi(proc_num.c_str()); if (j % 2 == 0) { if (first_proc_num != second_proc_num) { char buf[200]; sprintf(buf, "[%s][%d][%d]: the timestamp of start and end is not contiguous.", job_xx_out_file.c_str(), first_proc_num, second_proc_num); throw logic_error(buf); 
} } if (j % 2 != 0) vec_start.push_back(atof(timestamp.c_str())); else vec_end.push_back(atof(timestamp.c_str())); } if (j != num_lines) { throw logic_error( "less or more than 2048 timestamp lines in job.xx.out file."); } } i++; } sort(vec_start.begin(), vec_start.end()); sort(vec_end.begin(), vec_end.end()); if (!loose) time_start = vec_start[0]; double timespan_sum = vec_end[vec_end.size() - 1] - time_start; return timespan_sum; }
void Solver::make_problem_pb() { if (problem_discrete == 0) throw logic_error("Discrete problem doesn't exist."); if (problem_pb != 0) delete problem_pb; problem_pb = new Problem(); // clear variables mapping vars_mapping.clear(); // add variables for (size_t i = 0, index = 0; i < Problem::VARS_GROUPS_TOTAL; ++i) { Problem::Vars_group group = Problem::Vars_group(i); for (size_t j = 0; j < problem_discrete->get_variables_num(group); ++j) { Variable& v_d = problem_discrete->get_variable(group, j); real_t lower_bound = v_d.get_lower_bound(); real_t upper_bound = v_d.get_upper_bound(); size_t num = size_t(floor(log(upper_bound.get_d() - lower_bound.get_d()) / log(2)) + 1); for (size_t k = 0; k < num; ++k, ++index) { ostringstream stream; stream << k; string name; if (group == Problem::VARS_NEXT_STATE) { name = v_d.get_name(); name.erase(name.end() - 1); name += "_" + stream.str() + "'"; } else name = v_d.get_name() + "_" + stream.str(); Variable v_b(name, Variable::INTEGER, 0, 1); problem_pb->add_variable(group, v_b); vars_mapping[v_d.get_name()].push_back(name); } } } // add constraints for (size_t i = 0; i < Problem::CONSTRS_GROUPS_TOTAL; ++i) { Problem::Constrs_group group = Problem::Constrs_group(i); for (size_t j = 0; j < problem_discrete->get_constraints_num(group); ++j) { Constraint& c_d = problem_discrete->get_constraint(group, j); Constraint c_b(c_d.get_type()); real_t free_member = 0; for (size_t k = 0; k < problem_discrete->get_constraints_vars_num(group); ++k) { Variable& v_d = problem_discrete->get_constraints_var(group, k); real_t coeff = c_d.get_coeff(v_d); if (coeff != 0) { for (size_t w = 0; w < vars_mapping[v_d.get_name()].size(); ++w) { string& name = vars_mapping[v_d.get_name()][w]; c_b.set_coeff(name, coeff * pow(2.0f, w)); } free_member += coeff * v_d.get_lower_bound(); } } free_member += c_d.get_free_member(); c_b.set_free_member(free_member); problem_pb->add_constraint(group, c_b); } } }
void World::removePlayer(Player* player) { vector< Player* >::iterator it = find(playerList.begin(), playerList.end(), player); if (it == playerList.end()) throw logic_error("No player to remove from world."); else VectorUtil::replaceErase(playerList, it); }
/**
 * Calculates the statistic corresponding to the given ego. The parameter
 * pNetwork is always the current network as there are no endowment effects
 * of this kind.
 */
double DistanceTwoEffect::egoStatistic(int ego, const Network * pNetwork)
{
    double statistic = 0;
    int n = pNetwork->n();
    const Network * pStartMissingNetwork =
        this->pData()->pMissingTieNetwork(this->period());
    const Network * pEndMissingNetwork =
        this->pData()->pMissingTieNetwork(this->period() + 1);
    int i = ego;

    // Invariant: mark[h] <= baseMark for all h.
    // NOTE(review): baseMark = n * ego appears chosen so lmark can be
    // reused across egos without clearing; lmark's initialization is
    // outside this view -- confirm.
    int baseMark = n * i;

    // Count the number of two-paths from i to each h by setting
    // mark[h] = baseMark + <number of two-paths from i to h>.
    // If there are no two-paths from i to h, we leave mark[h] <= baseMark.
    for (IncidentTieIterator iterI = pNetwork->outTies(i);
        iterI.valid();
        iterI.next())
    {
        int j = iterI.actor();

        for (IncidentTieIterator iterJ = pNetwork->outTies(j);
            iterJ.valid();
            iterJ.next())
        {
            int h = iterJ.actor();

            if (this->lmark[h] <= baseMark)
            {
                // We have encountered this actor for the first time.
                this->lmark[h] = baseMark + 1;
            }
            else
            {
                // We've found yet another two-path from i to h.
                this->lmark[h]++;
            }

            // Count h exactly once: only when the count crosses the
            // threshold for the first time.
            if (this->lmark[h] == baseMark + this->lrequiredTwoPathCount)
            {
                // We've reached the necessary minimum of two-paths, hence
                // a new candidate for a distance-two pair is found.
                statistic++;
            }
        }
    }

    // Okay, if there's a tie (i,h) then <i,h> cannot possibly be a
    // distance two pair. Hence we iterate over outgoing ties (i,h) of i,
    // and if the actor h has enough two-paths, we unmark it and decrement
    // the statistic.
    for (IncidentTieIterator iter = pNetwork->outTies(i);
        iter.valid();
        iter.next())
    {
        int h = iter.actor();

        if (this->lmark[h] >= baseMark + this->lrequiredTwoPathCount)
        {
            // Unmarking (set to 0) also prevents double-decrementing in
            // the missing-tie loops below.
            this->lmark[h] = 0;
            statistic--;
        }
    }

    // We do a similar fix for missing ties (i,h) at either end of
    // the period.
    for (IncidentTieIterator iter = pStartMissingNetwork->outTies(i);
        iter.valid();
        iter.next())
    {
        int h = iter.actor();
        if (this->lmark[h] >= baseMark + this->lrequiredTwoPathCount)
        {
            this->lmark[h] = 0;
            statistic--;
        }
    }

    for (IncidentTieIterator iter = pEndMissingNetwork->outTies(i);
        iter.valid();
        iter.next())
    {
        int h = iter.actor();
        if (this->lmark[h] >= baseMark + this->lrequiredTwoPathCount)
        {
            this->lmark[h] = 0;
            statistic--;
        }
    }

    // Ignore the trivial pair <i,i>.
    if (this->lmark[i] >= baseMark + this->lrequiredTwoPathCount)
    {
        statistic--;
    }

    // For symmetric networks, we don't want to count each distance 2
    // pair twice.
    const OneModeNetworkLongitudinalData * pData =
        dynamic_cast<const OneModeNetworkLongitudinalData *>(this->pData());
    if (!pData)
    {
        throw logic_error(
            "One-mode network data expected in distance 2 effect.");
    }
    if (pData->symmetric())
    {
        statistic /= 2;
    }

    return statistic;
}
MemoryRangeWorker::MemoryRangeWorker(void* start_addr, void* end_addr) throw(logic_error): start_addr_((addr_type)start_addr), end_addr_((addr_type)end_addr) { if(end_addr_ < start_addr_) throw logic_error(__FUNCTION__": Manual memory range has negative size!"); }
/** * Returns the statistic corresponding to this effect as part of * the endowment function. */ double DistanceTwoEffect::endowmentStatistic(Network * pLostTieNetwork) { throw logic_error( "Endowment effect not supported for distance 2 effects."); }
/**
 * \details Generate a set of tasks whose total utilization approaches
 *          utPerc (a percentage) within the given precision.
 * \param utPerc    target total utilization, in percent
 * \param numT      number of tasks to generate
 * \param precision acceptable distance from the target utilization
 * \return A vector containing those tasks, or an empty vector if the
 *         random adjustment gets stuck before reaching the target.
 */
vector<Task> taskGenerator::generateTasks(int utPerc, int numT, int precision)
{
    // Each task can use at most 100%, so numT tasks can never exceed
    // numT * 100 in total.
    if (utPerc > numT * 100)
        throw logic_error("generateTasks : Impossible to do a feasible system with the given parameters");

    // Generate numT tasks randomly
    vector<Task> tasks = vector<Task>(numT);
    for (int i = 0; i < numT; ++i)
    {
        int offset = (int) (rand() % taskGenerator::MAX_OFFSET);
        int period_pos = (int) (rand() % taskGenerator::ACCEPTED_PERIODS_size);
        int period = taskGenerator::ACCEPTED_PERIODS[period_pos];
        // Implicit deadline: start with deadline == period.
        int deadline = period;
        // WCET drawn in [1, deadline-1]; a degenerate deadline of 1 forces
        // wcet = 1 (the modulus would otherwise be zero).
        int wcet = (deadline == 1) ? 1 : (int) ((rand() % (deadline - 1)) + 1);
        tasks[i] = Task(offset, period, deadline, wcet);
    }

    // modify all tasks to get closer to the utilization parameter
    // This will not work exactly because of discrete time (and because
    // each task must respect Ui < 100%)
    float current_utiliz = systemUtilization(tasks);
    float utilizFactor = utPerc / current_utiliz;
    for (unsigned int i = 0; i < tasks.size(); ++i)
    {
        int newWcet = (int)(tasks[i].getWcet() * utilizFactor);
        tasks[i].setWcet(max(newWcet, 1));
        // Keep the task feasible: stretch the period if WCET overshot it.
        if (tasks[i].getWcet() > tasks[i].getPeriod())
            tasks[i].setPeriod(tasks[i].getWcet());
    }

    // Try to get closer and closer to the desired utilization by small
    // modifications
    current_utiliz = systemUtilization(tasks);
    // NOTE(review): loop_counter is incremented but never read; it looks
    // like an abandoned iteration cap -- confirm whether the loop should
    // be bounded by it.
    unsigned int loop_counter = 0;
    // NOTE(review): this relies on a floating-point abs overload being in
    // scope; the C abs(int) would truncate the difference -- confirm that
    // std::abs(float) is what gets called here.
    while(abs(current_utiliz - utPerc) > precision)
    {
        bool mustDecreaseUtil = (current_utiliz - utPerc > 0);

        // check that we are not stuck
        // (stuck = every task is already at the limit in the direction we
        // need to move: wcet == 1 when decreasing, wcet == period when
        // increasing)
        bool stuck = true;
        for (unsigned int i = 0; i < tasks.size(); ++i)
        {
            if (mustDecreaseUtil)
                stuck = (stuck and tasks.at(i).getWcet() == 1);
            else
                stuck = (stuck and tasks.at(i).getWcet() == tasks.at(i).getPeriod());
        }
        if (stuck)
        {
            // Give up: signal failure with an empty task set.
            return (vector<Task>());
        }

        // Nudge one randomly chosen task's WCET by one unit in the needed
        // direction; tasks already at a bound are skipped and retried.
        int rndTaskP = rand() % tasks.size();
        Task* rndTask = &tasks[rndTaskP];
        if (rndTask->getWcet() == 1 or rndTask->getWcet() == rndTask->getPeriod())
            continue;
        int oldWcet = rndTask->getWcet();
        int newWcet = mustDecreaseUtil ? --oldWcet : ++oldWcet;
        rndTask->setWcet(newWcet);
        current_utiliz = systemUtilization(tasks);
        ++loop_counter;
    }

    // explicit deadline
    for (unsigned int i =0 ; i < tasks.size(); ++i)
    {
        tasks[i].setDeadline(tasks[i].getPeriod());
        tasks[i].reComputeUtilisation();
    }
    return tasks;
}
int File::write(const vector<unsigned char>& buf) { if (!writable_) { throw logic_error("File not writable"); } return do_write(buf); }