// Build a full 2-3-4 tree of height th (levels nh = th..0), breadth-first.
// Each node is created with 3 keys; attach_node() links it under its parent
// tracked via the queue nq. The first node created becomes the root.
//
// Fix: the original computed dgr^x with (int)pow(dgr, x). std::pow is a
// floating-point routine; on some libms pow(4, 2) can yield 15.999...,
// which truncates to 15 on the (int) cast. Exact integer powers are used
// instead (also cheaper — no transcendental call per iteration).
void init_234_tree (b_tree* t){
    std::queue<bt_node*> nq;
    const int dgr = 4;          // branching degree of the 2-3-4 tree
    const int th  = 2;          // height: levels nh = th (root) .. 0 (leaves)
    for (int nh = th; nh >= 0; nh--){
        // level_nodes = dgr^(th-nh): node count at this level (exact integer power)
        int level_nodes = 1;
        for (int x = 0; x < th - nh; x++) level_nodes *= dgr;
        // stride = dgr^nh: key spacing contributed by this level (exact integer power)
        int stride = 1;
        for (int x = 0; x < nh; x++) stride *= dgr;
        for (int ni = 0; ni < level_nodes; ni++){
            bt_node* temp = new bt_node (2);
            if (t->root == NULL) { t->root = temp; }   // first node created is the root
            temp->size = temp->capacity;               // mark node as full
            for (int ki = 0; ki < 3; ki++){
                temp->keys[ki] = (ki + ni*dgr + 1) * stride - 1;
            }
            attach_node (temp, nq);                    // link under current parent
            nq.push(temp);                             // enqueue as a future parent
        }
    }
}
// Build a full B-tree of height th (levels nh = th..0), breadth-first,
// with dgr keys per node; every key is doubled (<< 1) after generation.
// attach_node() links each new node under its parent tracked via nq.
//
// Fix: replaced (int)pow(dgr, nh) with an exact integer power — std::pow
// is floating-point and may round just below the true value, truncating
// to the wrong integer on the cast. The two-step key assignment
// (assign, then shift in place) is fused into one expression; the stored
// values are identical.
void init_tree (b_tree *r){
    std::queue<bt_node*> nq;
    const int th = 2, dgr = 4;  // height and branching degree
    for (int nh = th; nh >= 0; nh--){
        // level_nodes = dgr^(th-nh): node count at this level (exact integer power)
        int level_nodes = 1;
        for (int x = 0; x < th - nh; x++) level_nodes *= dgr;
        // stride = dgr^nh: key spacing at this level (exact integer power)
        int stride = 1;
        for (int x = 0; x < nh; x++) stride *= dgr;
        for (int ni = 0; ni < level_nodes; ni++){
            bt_node* new_n = new bt_node(2);
            if (r->root == NULL) { r->root = new_n; }  // first node created is the root
            for (int ki = 0; ki < dgr; ki++){
                // original: keys[ki] = stride*(ni*dgr+ki+1) - 1; then keys[ki] <<= 1
                new_n->keys[ki] = (stride * (ni*dgr + ki + 1) - 1) << 1;
            }
            new_n->size = new_n->capacity;             // mark node as full
            attach_node(new_n, nq);                    // link under current parent
            nq.push(new_n);                            // enqueue as a future parent
        }
    }
}
inline void push_back(size_t size, void* node) //attach_node wrapper : much list, wow { #ifdef __DEBUG__ printf("Push back / size : %u, node : %u\n",size, node); #endif if(size<=8) attach_node(&seg_8, &tail_8, node); //call with block size else if(size<=16) attach_node(&seg_16, &tail_16, node); else if(size<=24) attach_node(&seg_24, &tail_24, node); else if(size<=32) attach_node(&seg_32, &tail_32, node); else if(size<=40) attach_node(&seg_40, &tail_40, node); else if(size<=48) attach_node(&seg_48, &tail_48, node); else if(size<=56) attach_node(&seg_56, &tail_56, node); else if(size<=60) attach_node(&seg_60, &tail_60, node); else if(size<=64) attach_node(&seg_64, &tail_64, node); else if(size<=96) attach_node(&seg_96, &tail_96, node); else if(size<=128) attach_node(&seg_128, &tail_128, node); else attach_node(&seg_inf, &tail_inf, node); }
/*
 * Add prefix px/plen with accepted-length interval [l, h] to trie t.
 * Walks down the trie comparing the new prefix against each node's address
 * under the common mask; inserts a branching node, an intermediate node, or
 * a tail node as needed, and merges accept masks along the path.
 * NOTE(review): semantics of 'accept' masks and ipa_* helpers are defined
 * elsewhere in the project — descriptions here reflect only this code.
 */
void trie_add_prefix(struct f_trie *t, ip_addr px, int plen, int l, int h)
{
  /* l == 0 means zero-length prefixes are accepted; recorded in t->zero.
     Otherwise shift l down by one for the mask computation below. */
  if (l == 0)
    t->zero = 1;
  else
    l--;

  /* Clamp the accepted range's upper bound to the prefix length */
  if (h < plen)
    plen = h;

  /* amask: accept-mask bits contributed by this prefix (mkmask(l) XOR mkmask(h)) */
  ip_addr amask = ipa_xor(ipa_mkmask(l), ipa_mkmask(h));
  ip_addr pmask = ipa_mkmask(plen);     /* mask of the inserted prefix */
  ip_addr paddr = ipa_and(px, pmask);   /* prefix address, masked */
  struct f_trie_node *o = NULL;         /* parent of n during descent */
  struct f_trie_node *n = &t->root;

  while(n)
    {
      /* Compare the new prefix with this node's address under the shorter
         of the two masks */
      ip_addr cmask = ipa_and(n->mask, pmask);
      if (ipa_compare(ipa_and(paddr, cmask), ipa_and(n->addr, cmask)))
        {
          /* We are out of path - we have to add branching node 'b'
             between node 'o' and node 'n', and attach new node 'a'
             as the other child of 'b'. */
          int blen = ipa_pxlen(paddr, n->addr);   /* common prefix length */
          ip_addr bmask = ipa_mkmask(blen);
          ip_addr baddr = ipa_and(px, bmask);

          /* Merge accept masks from children to get accept mask for node 'b' */
          ip_addr baccm = ipa_and(ipa_or(amask, n->accept), bmask);

          struct f_trie_node *a = new_node(t, plen, paddr, pmask, amask);
          struct f_trie_node *b = new_node(t, blen, baddr, bmask, baccm);
          attach_node(o, b);
          attach_node(b, n);
          attach_node(b, a);
          return;
        }

      if (plen < n->plen)
        {
          /* We add new node 'a' between node 'o' and node 'n' */
          amask = ipa_or(amask, ipa_and(n->accept, pmask));
          struct f_trie_node *a = new_node(t, plen, paddr, pmask, amask);
          attach_node(o, a);
          attach_node(a, n);
          return;
        }

      if (plen == n->plen)
        {
          /* We already found added node in trie. Just update accept mask */
          n->accept = ipa_or(n->accept, amask);
          return;
        }

      /* Update accept mask part M2 and go deeper */
      n->accept = ipa_or(n->accept, ipa_and(amask, n->mask));

      /* n->plen < plen and plen <= 32 (128) */
      o = n;
      n = n->c[ipa_getbit(paddr, n->plen) ? 1 : 0];
    }

  /* We add new tail node 'a' after node 'o' */
  struct f_trie_node *a = new_node(t, plen, paddr, pmask, amask);
  attach_node(o, a);
}
// Exact-test scheduling of task k: simulates every job of task k over one of
// its hyperperiods, placing each job's execution into the free gaps of the
// global busy-interval list `list_head` (nodes created via attach_node).
// Tracks the worst-case response time in `wcrt` and the index of the job
// that produced it in ts->wcrt_job[k].
// Returns TS_STATE_SCHEDED on success, TS_STATE_UNSCHED if jobs remain
// unscheduled (deadline exceeded), TS_STATE_INSUFMEM when the node pool is
// exhausted, TS_STATE_ERROR when the linked list fails its sanity check.
int optPFRPAsync::task_sched(int k)
{
    task *t = ts->tq[k];
    int tc, td, tp, toffset;
    // WCET, deadline, period, release offset of task k
    tc=t->c, td=t->d, tp=t->p, toffset=t->offset;
    INT64 start_t = ts->minOffsets[k];   // toffset; // ???? then t might have not all jobs scheduled
    // schedule endpoint for the new task
    INT64 biggestpoint =start_t + ts->LCMs[k];

    // step 1, propagate the used nodes (replicate the existing schedule out
    // to biggestpoint before simulating task k)
    if(propagate_nodes(k, biggestpoint) == TS_STATE_INSUFMEM)
        return TS_STATE_INSUFMEM; // insufficient mem

    // check linked list integrity before simulating
    if(check_linked_list(list_head) < 0)
    {
        g_stat->write_critical_info(list_head->info);
        return TS_STATE_ERROR;
    }

    bool merge = pconfig->merge;             // merge adjacent nodes on attach?
    bool occupyshort = pconfig->occupyshort; // may a job occupy a too-short gap?
    int ret = TS_STATE_SCHEDED;
    INT64 wcrt = tc;                         // response time is at least one WCET
    ts->wcrt_job[k] = 0;

    // step 2, simulate tk in [toffset, toffset+lcm[k]]
    INT64 count = ts->LCMs[k]/tp;  // number of jobs of task k in the window
    INT64 num_scheded = 0;
    INT64 js; // absolute release time of each job
    js = toffset; // then +tp per scheduled job
    INT64 s, e; // candidate interval [s, e)
    char action;
    int retval;
    node *p, *pprev;   // cursor into the busy list and the node before it
    pprev = NULL;
    p = list_head->first;
    s = js;
    // Special case: try to place the first job before the current list head.
    if(p)
    {
        e = min(s+tc, p->start_of_interval);
        if(s< e) // 1st job of t is scheduled at the head of the list
        {
            if(!occupyshort && e-s < tc) // ignore short intervals
                goto _jump_one;
            // gap shorter than WCET => job is aborted, else it completes
            if(e-s < tc)
                action = TS_ACTION_ABORT ;
            else
                action = TS_ACTION_DONE;
            if(attach_node(list_head, NULL, s, e, merge, action, k) < 0) // add to head
            {
                cout << "ts too many nodes." << list_head->number_of_nodes << ":" << g_mymem->num_remains << endl;
                return TS_STATE_INSUFMEM;
            }
            p = list_head->first;
            wcrt = max(wcrt, e-js); // first one, must be <=td
            if(action == TS_ACTION_DONE)
            {
                ts->wcrt_job[k] = num_scheded;
                num_scheded++;
                js += tp;    // next job's release
            }
        }
        s = max(js, p->end_of_interval);
    }
_jump_one:
    // Main simulation: one iteration of the outer loop per scheduled job.
    while(num_scheded < count)
    {
        while(true) // roll forward to js
        {
            if(!p) // p is past the last used_node, simply sched the rest jobs
            {
                while(num_scheded < count)
                {
                    s = (pprev==NULL)?js:max(pprev->end_of_interval, js);
                    e = s+tc;
                    retval = attach_node(list_head, list_head->last, s, e, merge, TS_ACTION_DONE, k);
                    if(retval < 0)
                    {
                        cout << "ts too many nodes." << list_head->number_of_nodes << ":" << g_mymem->num_remains << endl;
                        return TS_STATE_INSUFMEM;
                    }
                    if(pprev)
                    { // otherwise, response time is always tc
                        if(e-js > wcrt)
                        {
                            wcrt = e-js;
                            ts->wcrt_job[k] = num_scheded;
                        }
                        if(wcrt > td)    // deadline miss => stop
                            goto _end;
                        pprev = NULL;
                    }
                    num_scheded++;
                    js += tp;
                    if(js >= biggestpoint)
                        break;
                }
                goto _end;
            }
            // update wcrt using the gap ending at p
            e = min(s+tc, p->start_of_interval);
            if(e<=s || e<=js)  // empty gap, or gap entirely before the release
            {
                pprev = p;
                s = max(pprev->end_of_interval, js);
                p = p->next;
                continue;
            }
            if(e-js > wcrt)
            {
                wcrt = e-js;
                ts->wcrt_job[k] = num_scheded;
            }
            if(wcrt > td)    // deadline miss => stop
                goto _end;
            if(p->start_of_interval <= js) // keep rolling
            {
                pprev = p;
                s = max(pprev->end_of_interval, js);
                p = p->next;
                continue;
            }
            // found an interval, however it might not be long enough
            if(!occupyshort && e-s < tc) // ignore short intervals, keep looking
            {
                pprev = p;
                s = max(pprev->end_of_interval, js);
                p = p->next;
                continue;
            }
            if(e-s < tc)
                action = TS_ACTION_ABORT;
            else
                action = TS_ACTION_DONE;
            retval = attach_node(list_head, pprev, s, e, merge, action, k);
            if(retval < 0)
            {
                cout << "ts 1 too many nodes." << list_head->number_of_nodes << ":" << g_mymem->num_remains << endl;
                return TS_STATE_INSUFMEM;
            }
            // else if(retval == 2) // p is merged, don't change pprev
            // else if(retval == 4) // new node is attached to pprev, don't change pprev
            else if(retval == 3) // new node is inserted to p
                pprev = p;
            else if(retval == 1) // no merge
                // NOTE(review): dereferences pprev — assumes attach_node never
                // returns 1 for a head insertion (pprev == NULL); confirm.
                pprev = pprev->next;
            s = max(pprev->end_of_interval, js);
            p = pprev->next;
            if(action == TS_ACTION_DONE)
            {
                num_scheded++;
                js += tp;
                s = max(pprev->end_of_interval, js);
                if(js >= biggestpoint)
                    goto _end;
                break; // done a job, try next one
            }
        } // do a job
    } // all jobs
_end:
    ts->realWcrt[k] = wcrt;
    if(num_scheded < count)
    {
        cout << count-num_scheded << " job(s) left when scheduling exits!!!!!!!!!!!!!!!!!!!!!!!\n";
        ts->wcrt_failed_job = num_scheded;
        ret = TS_STATE_UNSCHED;
    }
    // shrink the last one if it goes out of biggestpoint
    p = list_head->last;
    if(p->end_of_interval>biggestpoint)
    {
        if(p->start_of_interval<biggestpoint)
            p->end_of_interval = biggestpoint;
        else
        {
            // last node lies entirely past the window — log and report
            string str = t->name+": last node is abnormal after sched, [start_of_interval:end_of_interval]= "+to_string(p->start_of_interval)+" : "+to_string(p->end_of_interval);
            cout << str << endl;
            g_stat->write_critical_info(str);
        }
    }
    // check linked list integrity after simulating
    if(check_linked_list(list_head) < 0)
    {
        g_stat->write_critical_info(list_head->info);
        return TS_STATE_ERROR;
    }
    return ret;
}
// Top-level scheduling driver: runs a quick infeasibility pre-check and a
// node-pool estimate, schedules task 0 directly, then tasks 1..n-2 via
// task_sched()/process_permissibility_intervals() and the last task via
// task_simulate(). Records the final state (and failing task, if any) in
// ts and returns the end state.
int optPFRPAsync::sched(taskset *_ts)
{
    ts = _ts;
    int num_tasks = ts->num_task;
    int i;
    int endstate;
    g_stat->start_stat(); // start clocking

    // a necessary condition check: no execution time can be greater than any
    // deadline (only in sync release?)
    if(pconfig->useoffset == true)
    {
        bool found = false;
        task *t1, *t2, *t=NULL;
        int j, r=0;
        // pairwise check: any task whose WCET >= another task's deadline
        // makes the set trivially unschedulable
        for(i=ts->num_task-1; i>0; i--)
        {
            t1 = ts->tq[i];
            for(j=0; j<i; j++)
            {
                t2 = ts->tq[j];
                if(t1->c >= t2->d)
                {
                    t = t1;
                    r = i;
                }
                else if(t2->c >= t1->d)
                {
                    t = t2;
                    r = j;
                }
                else
                    continue;
                found = true;
                goto _pre_check_done;   // first offending pair is enough
            }
        }
_pre_check_done:
        if(found)
        {
            ts->state = TS_STATE_UNSCHED;
            ts->failedTask = t->name;
            ts->failed_task = r;
            g_stat->end_stat(ts); // stop clocking
            return ts->state;
        }
    }

    // a simple memory check: without merging, estimate the node count needed
    if(pconfig->merge == false)
    {
        INT64 cc = 0;
        for(i=0; i<num_tasks; i++)
            // NOTE(review): constant index LCMs[num_tasks-2] inside the loop
            // looks suspicious — confirm it is not meant to be LCMs[i].
            cc += ts->LCMs[num_tasks-2]/ts->tq[i]->p;
        if(g_mymem->num_remains < cc)
        {
            cout << "Too many nodes <needed:remains>: <" << cc << ":" << g_mymem->num_remains << ">" << endl;
            ts->state = TS_STATE_INSUFMEM;
            g_stat->end_stat(ts); // stop clocking
            return ts->state;
        }
    }

    // sched task 0: highest priority, always runs at its release
    task *t ;
    t = ts->tq[0];
    int tc, toffset;
    tc=t->c, toffset=t->offset;
    attach_node(list_head, NULL, toffset, toffset+tc, pconfig->merge, TS_ACTION_DONE, 0);// no way to fail...
    ts->num_nodes_used[0]++;
    ts->estimateWcrt[0] = tc;
    ts->realWcrt[0] = tc;
    ts->wcrt_job[0] = 0;

    int k;
    // task k
    endstate = TS_STATE_UNSCHED;
    for(k=1; k<num_tasks-1; k++) // task 0 is scheduled already
    {
        // no sufficient test
        if(mode == 1)
        {
            endstate = task_sched(k); // exact test
            ts->num_nodes_used[k] = list_head->number_of_nodes;
        }
        else
        {
            // sufficient test first; fall back to the exact test if unknown
            endstate = process_permissibility_intervals(k);
            if(endstate == TS_STATE_UNKNOWN)
            {
                endstate = task_sched(k); // exact test
                ts->num_nodes_used[k] = list_head->number_of_nodes;
            }
        }
        if(endstate != TS_STATE_SCHEDED)// cease scheduling
            break;
    }

    // last task: simulated rather than attached (no later task needs its nodes)
    if(k == num_tasks-1)
    {
        // no sufficient test
        if(mode == 1)
        {
            //endstate = task_sched(k); // exact test
            endstate = task_simulate(k); // exact test
            ts->num_nodes_used[k] = list_head->number_of_nodes;
        }
        else
        {
            endstate = process_permissibility_intervals(k);
            if(endstate == TS_STATE_UNKNOWN)
            {
                //endstate = task_sched(k); // exact test
                endstate = task_simulate(k); // exact test
                ts->num_nodes_used[k] = list_head->number_of_nodes;
            }
        }
    }
    if(endstate != TS_STATE_SCHEDED)
    {
        // record which task failed for reporting
        ts->failed_task = k;
        ts->failedTask = ts->tq[k]->name;
    }
    ts->state = endstate;
    g_stat->end_stat(ts); // stop clocking
    return endstate;
}
// propagate nodes from start (minoffset[k-1] or maxoffset[k-1]) to start+lcm[k-1]
// Extends the existing busy-interval schedule periodically: the window
// [start_t, end_t) of already-placed nodes is copied forward, shifted by
// multiples of LCMs[k-1] (the cursor p wraps from `last` back to `first`,
// increasing `gap` each cycle), until the list reaches `biggestpoint`.
// Returns TS_STATE_SCHEDED on success, TS_STATE_INSUFMEM if the node pool
// runs out during copying.
int optPFRPAsync::propagate_nodes(int k, INT64 biggestpoint)
{
    // existing schedule window for the previously scheduled task set
    INT64 start_t = ts->minOffsets[k-1];
    INT64 end_t = start_t + ts->LCMs[k-1];
    INT64 gapO = ts->LCMs[k-1];   // period of replication
    INT64 gap = gapO;             // current shift; grows by gapO per full cycle
    int state;
    // find the first and last nodes to be copied, they might be partial,
    // thus have a start offset and end offset.
    node *first, *last, *p, *pnext;
    INT64 soff;   // (possibly clipped) start of the first copied node
    // until long enough
    INT64 s, e;
    bool bmerge = pconfig->merge;
    // schedule endpoint for the new task
    // INT64 biggestpoint = ts->minOffsets[k] + ts->LCMs[k];
    if(end_t >= biggestpoint) // no copy needed
    {
        state = TS_STATE_SCHEDED;
        goto _exit;
    }
    // find the first and last nodes to be copied, they might be an partial, thus have a start offset and end offset.
    // node *first, *last, *p, *pnext;
    // INT64 soff;
    first = NULL;
    last = list_head->last;
    p = list_head->first; // p != null guaranteed
    // Locate the first node overlapping [start_t, ...): clip its start to
    // start_t if it begins earlier.
    while(p)
    {
        soff = p->start_of_interval;
        if(soff >= start_t)
            break;
        // else if(p->start_of_interval < start_t)
        if(p->end_of_interval >= start_t )
        {
            soff = start_t;   // partial first node — clip its start
            break;
        }
        p = p->next;
    }
    first = p;
    if(!first)   // nothing inside the window — nothing to replicate
    {
        state = TS_STATE_SCHEDED;
        goto _exit;
    }
    // until long enough
    // INT64 s, e;
    // bool bmerge = pconfig->merge;
    while (list_head->last->end_of_interval < biggestpoint)
    { // p is the cursor that runs between *first and *last
        if(p == first) // first one, might be partial
            s = soff + gap;
        else
            s = p->start_of_interval + gap;
        if(s >= biggestpoint)
            break;
        // NOTE the last node could be merged with the next first node
        if(p == last)
        {
            e = min(p->end_of_interval, end_t) + gap; // last one might be partial
            pnext = first;   // wrap the cursor for the next replica
            gap += gapO;     // next replica shifts one more period
        }
        else
        {
            e = p->end_of_interval + gap;
            pnext = p->next;
        }
        if(attach_node(list_head, list_head->last, s, e, bmerge, p->action, p->nTask) < 0) // add to tail
        {
            cout << "ts 1 too many nodes." << list_head->number_of_nodes << ":" << g_mymem->num_remains << endl;
            return TS_STATE_INSUFMEM;
        }
        p = pnext;
    }
    // shrink the last one if it goes out of new_end_t
    if (list_head->last->end_of_interval > biggestpoint)
        list_head->last->end_of_interval = biggestpoint;
    state = TS_STATE_SCHEDED;
_exit:
    ts->num_nodes_used_after_propagation[k-1] = list_head->number_of_nodes;
    return state;
}