/* Unit tests for map_keys(): for each input string, checks the modifier
 * byte (report[0]) and the first HID usage code (report[2]) that map_keys
 * writes into the report buffer. */
void test_map_keys() {
    /* Uppercase letter: left-shift modifier plus usage 0x04 ('a'). */
    clear(report);
    assert_equals(map_keys(report, "Aasdf", 1), 1);
    assert_equals(report[0], KEYBOARD_MODIFIER_LEFTSHIFT);
    assert_equals(report[2], 0x04);

    /* Lowercase letter: no modifier, usage 0x1D ('z'). */
    clear(report);
    assert_equals(map_keys(report, "zasdf", 1), 1);
    assert_equals(report[0], 0);
    assert_equals(report[2], 0x1D);

    /* Shifted symbol '^': left-shift plus usage 0x23 ('6'). */
    clear(report);
    assert_equals(map_keys(report, "^asdf", 1), 1);
    assert_equals(report[0], KEYBOARD_MODIFIER_LEFTSHIFT);
    assert_equals(report[2], 0x23);

    /* Brace group with an unknown token: falls through to '%' -> shift + 0x22. */
    clear(report);
    assert_equals(map_keys(report, "%{bad+B+2}", 1), 1);
    assert_equals(report[0], KEYBOARD_MODIFIER_LEFTSHIFT);
    assert_equals(report[2], 0x22);

    /* Named-modifier brace group followed by text: GUI modifier + space (0x2C),
     * consuming 12 characters of input. */
    clear(report);
    assert_equals(map_keys(report, "%{LeftGUI+ }rocketnumbernine.com", 1), 12);
    assert_equals(report[0], KEYBOARD_MODIFIER_LEFTGUI);
    assert_equals(report[2], 0x2C);
}
/*
 * VM opcode handler: execute one step of a FOR-IN loop.
 *
 * Data-stack contract on entry: the loop index is on top, the iterated
 * value beneath it. MAP and STR values are first coerced to LIST so a
 * single iteration path serves all three types. If the value is not
 * iterable, it is freed and E_FOR is raised. When the index has reached
 * the end of the list, the list is freed and pc jumps to the loop-end
 * address stored at code[pc+1]. Otherwise the current element (duplicated)
 * is bound to the loop variable, the list and the incremented index are
 * pushed back for the next iteration, and pc advances into the loop body.
 */
void op_for(void)
{
    Var idx, list;
    idx = pop();
    list = pop();
    /* Coerce a map into the list of its keys. */
    if( list.type == MAP) { list.v.list = map_keys( list.v.map ); list.type = LIST; }
    /* Coerce a string into a list (presumably per-character/token —
       confirm against string_list()). */
    if( list.type == STR) { list.v.list = string_list( list.v.str ); list.type = LIST; }
    if (list.type != LIST) {
        /* Not iterable: release the value and raise a runtime FOR error.
           NOTE(review): idx is not freed here — presumably a plain number
           with no heap storage; confirm. */
        var_free(list);
        raise(E_FOR);
    } else if (idx.v.num >= list.v.list->len) { /* loop is complete */
        var_free(list);
        frame.pc = frame.m->code[frame.pc + 1]; /* skip to end */
    } else {
        /* Bind a duplicate of the current element to the local-variable
           slot named by the operand at code[pc]. */
        var_assign_local(frame.stack, frame.m->code[frame.pc], var_dup(list.v.list->el[idx.v.num]));
        idx.v.num++;
        push(list); /* push list */
        push(idx); /* push new index */
        pushpc(frame.pc - 1); /* push address of FOR statement */
        frame.pc += 2; /* go to first instruction in loop */
    }
}
/* Frees every texture value stored in the map, then releases the key array
 * obtained from map_keys().
 * NOTE(review): the map itself and its key strings are not freed here —
 * presumably owned by the caller / the map; confirm. */
void textures_release(void* textures) {
    char** keys = map_keys(textures);
    int count = array_count(keys);
    for (int i = 0; i < count; i++) {
        free(map_get(textures, keys[i]));
    }
    array_release(keys);
}
// a - b struct map *map_minus(struct map *a, const struct map *b) { if ((a == NULL) || (b == NULL)) return a; struct array *keys = map_keys(b); for (int i=0; i<keys->length; i++) { const void *key = array_get(keys, i); if (map_has(a, key)) { map_remove(a, key); } } array_del(keys); return a; }
void *roundRobinScheduler(void *pc) { pktcore_t *pcore = (pktcore_t *)pc; List *keylst; int nextqid, qcount, rstatus, pktsize; char *nextqkey; gpacket_t *in_pkt; simplequeue_t *nextq; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); while (1) { verbose(2, "[roundRobinScheduler]:: Round robin scheduler processing... "); keylst = map_keys(pcore->queues); nextqid = pcore->lastqid; qcount = list_length(keylst); pthread_mutex_lock(&(pcore->qlock)); if (pcore->packetcnt == 0) pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock)); pthread_mutex_unlock(&(pcore->qlock)); pthread_testcancel(); do { nextqid = (1 + nextqid) % qcount; nextqkey = list_item(keylst, nextqid); // get the queue.. nextq = map_get(pcore->queues, nextqkey); // read the queue.. rstatus = readQueue(nextq, (void **)&in_pkt, &pktsize); if (rstatus == EXIT_SUCCESS) { pcore->lastqid = nextqid; writeQueue(pcore->workQ, in_pkt, pktsize); } } while (nextqid != pcore->lastqid && rstatus == EXIT_FAILURE); list_release(keylst); pthread_mutex_lock(&(pcore->qlock)); if (rstatus == EXIT_SUCCESS) pcore->packetcnt--; pthread_mutex_unlock(&(pcore->qlock)); usleep(rconfig.schedcycle); } }
// a + b; in case of intersection, a wins struct map *map_union(struct map *a, const struct map *b) { if (b == NULL) return a; if (a == NULL) return map_copy(b->context, b); struct array *keys = map_keys(b); for (int i=0; i<keys->length; i++) { const void *key = array_get(keys, i); if (!map_has(a, key)) { void *key2 = b->copyor(key, a->context); void *value = map_get(b, key); void *value2 = b->copyor(value, a->context); map_insert(a, key2, value2); } } array_del(keys); return a; }
/*
 * Deep-compares two maps of variables. NULL maps are treated as empty:
 * two NULLs are equal, and a NULL equals a non-NULL only when the latter
 * has no keys. Otherwise every key of umap must map to variables that
 * compare equal (variable_compare) in both maps.
 * NOTE(review): keys present only in vmap are not checked — asymmetric
 * unless callers guarantee equal key sets; confirm intent.
 */
static bool variable_compare_maps(struct context *context, const struct map *umap, const struct map *vmap)
{
    if (!umap && !vmap)
        return true;
    if (!umap) /* normalize so umap is always non-NULL below */
        return variable_compare_maps(context, vmap, umap);

    struct array *keys = map_keys(umap);
    bool equal;
    if (!vmap) {
        equal = !keys->length;
    } else {
        equal = true;
        for (int i = 0; i < keys->length && equal; i++) {
            struct byte_array *key = (struct byte_array*)array_get(keys, i);
            struct variable *uvalue = (struct variable*)map_get(umap, key);
            struct variable *vvalue = (struct variable*)map_get(vmap, key);
            if (!variable_compare(context, uvalue, vvalue))
                equal = false;
        }
    }
    /* BUG FIX: the keys array was leaked on every return path; sibling
     * map functions (map_minus, map_union) release it with array_del. */
    array_del(keys);
    return equal;
}
// WCWeightFairQueuer: function called by the classifier to enqueue // the packets.. // TODO: Debug this function... void *weightedFairScheduler(void *pc) { pktcore_t *pcore = (pktcore_t *)pc; List *keylst; int nextqid, qcount, rstatus, pktsize; char *nextqkey; gpacket_t *in_pkt; simplequeue_t *nextq; long queueSizes[100]; //Array to store bytes sent for each queue id int PacketsSent[10]; int i, j;//To iterate through the queue length array long crtStarvedQid; double crtStarvedQidWeight; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); while (1) { verbose(1, "[weightedFairScheduler]:: Weighted Fair Queue schedule processing... "); keylst = map_keys(pcore->queues); qcount = list_length(keylst); pthread_mutex_lock(&(pcore->qlock)); if (pcore->packetcnt == 0) pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock)); pthread_mutex_unlock(&(pcore->qlock)); pthread_testcancel(); for(i = 0; i <qcount; i++) { nextqkey = list_item(keylst, i); // get the queue.. nextq = map_get(pcore->queues, nextqkey); if(GetQueueSize(nextq) > 0) { crtStarvedQidWeight = (queueSizes[i] / (nextq->weight) ); //TODO crtStarvedQid = i; break; } } if(i == qcount) { list_release(keylst); usleep(rconfig.schedcycle); continue; } for(j = i; j < qcount; j++) { nextqkey = list_item(keylst, j); // get the queue.. nextq = map_get(pcore->queues, nextqkey); if(( (queueSizes[j] / (nextq->weight)) < crtStarvedQidWeight) && (GetQueueSize(nextq) > 0)) //TODO { crtStarvedQid = j; crtStarvedQidWeight = queueSizes[j] / (nextq->weight) ; //TODO } } nextqid = crtStarvedQid; nextqkey = list_item(keylst, nextqid); // get the queue.. nextq = map_get(pcore->queues, nextqkey); // read the queue.. rstatus = readQueue(nextq, (void **)&in_pkt, &pktsize);//Here we get the packet size. 
if (rstatus == EXIT_SUCCESS) { writeQueue(pcore->workQ, in_pkt, pktsize); verbose(1, "[weightedFairScheduler---Just sent]:: Queue[%d] has now sent %lu bytes", nextqid, queueSizes[nextqid]); queueSizes[nextqid] = queueSizes[nextqid] + findPacketSize(&(in_pkt->data));//Storing updated data sent in array PacketsSent[nextqid]++; } for(i = 0; i <qcount; i++) { nextqkey = list_item(keylst, i); // get the queue.. nextq = map_get(pcore->queues, nextqkey); verbose(1, "Packets Queued[%d] = %d, Bytes sent = %d, Packets Sent = %d", i, GetQueueSize(nextq), queueSizes[i], PacketsSent[i]); } list_release(keylst); pthread_mutex_lock(&(pcore->qlock)); if (rstatus == EXIT_SUCCESS) { (pcore->packetcnt)--; } pthread_mutex_unlock(&(pcore->qlock)); usleep(rconfig.schedcycle); } }
void *weightedFairScheduler(void *pc) { pktcore_t *pcore = (pktcore_t *)pc; List *keylst; simplequeue_t *nxtq, *thisq; char *nxtkey, *savekey; double minftime, minstime, tweight; int pktsize, npktsize; gpacket_t *in_pkt, *nxt_pkt; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); // die as soon as cancelled while (1) { verbose(2, "[weightedFairScheduler]:: Worst-case weighted fair queuing scheduler processing.."); pthread_mutex_lock(&(pcore->qlock)); if (pcore->packetcnt == 0) pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock)); pthread_mutex_unlock(&(pcore->qlock)); pthread_testcancel(); keylst = map_keys(pcore->queues); while (list_has_next(keylst) == 1) { nxtkey = list_next(keylst); nxtq = map_get(pcore->queues, nxtkey); if (nxtq->cursize == 0) continue; if ((nxtq->stime <= pcore->vclock) && (nxtq->ftime < minftime)) { savekey = nxtkey; minftime = nxtq->ftime; } } list_release(keylst); // if savekey is NULL then release the lock.. if (savekey == NULL) continue; else { thisq = map_get(pcore->queues, savekey); readQueue(thisq, (void **)&in_pkt, &pktsize); writeQueue(pcore->workQ, in_pkt, pktsize); pthread_mutex_lock(&(pcore->qlock)); pcore->packetcnt--; pthread_mutex_unlock(&(pcore->qlock)); peekQueue(thisq, (void **)&nxt_pkt, &npktsize); if (npktsize) { thisq->stime = thisq->ftime; thisq->ftime = thisq->stime + npktsize/thisq->weight; } minstime = thisq->stime; tweight = 0.0; keylst = map_keys(pcore->queues); while (list_has_next(keylst) == 1) { nxtkey = list_next(keylst); nxtq = map_get(pcore->queues, nxtkey); tweight += nxtq->weight; if ((nxtq->cursize > 0) && (nxtq->stime < minstime)) minstime = nxtq->stime; } list_release(keylst); pcore->vclock = max(minstime, (pcore->vclock + ((double)pktsize)/tweight)); } } }
// WCWeightFairQueuer: function called by the classifier to enqueue // the packets.. // TODO: Debug this function... int weightedFairQueuer(pktcore_t *pcore, gpacket_t *in_pkt, int pktsize, char *qkey) { simplequeue_t *thisq, *nxtq; double minftime, minstime, tweight; List *keylst; char *nxtkey, *savekey; verbose(2, "[weightedFairQueuer]:: Worst-case weighted fair queuing scheduler processing.."); pthread_mutex_lock(&(pcore->qlock)); thisq = map_get(pcore->queues, qkey); if (thisq == NULL) { fatal("[weightedFairQueuer]:: Invalid %s key presented for queue addition", qkey); pthread_mutex_unlock(&(pcore->qlock)); return EXIT_FAILURE; // packet dropped.. } printf("Checking the queue size \n"); if (thisq->cursize == 0) { verbose(2, "[weightedFairQueuer]:: inserting the first element.. "); thisq->stime = max(pcore->vclock, thisq->ftime); thisq->ftime = thisq->stime + pktsize/thisq->weight; minstime = thisq->stime; keylst = map_keys(pcore->queues); while (list_has_next(keylst) == 1) { nxtkey = list_next(keylst); nxtq = map_get(pcore->queues, nxtkey); if ((nxtq->cursize > 0) && (nxtq->stime < minstime)) minstime = nxtq->stime; } list_release(keylst); pcore->vclock = max(minstime, pcore->vclock); // insert the packet... and increment variables.. writeQueue(thisq, in_pkt, pktsize); pcore->packetcnt++; // wake up scheduler if it was waiting.. if (pcore->packetcnt == 1) pthread_cond_signal(&(pcore->schwaiting)); pthread_mutex_unlock(&(pcore->qlock)); return EXIT_SUCCESS; } else if (thisq->cursize < thisq->maxsize) { // insert packet and setup variables.. writeQueue(thisq, in_pkt, pktsize); pcore->packetcnt++; pthread_mutex_unlock(&(pcore->qlock)); return EXIT_SUCCESS; } else { verbose(2, "[weightedFairQueuer]:: Packet dropped.. Queue for %s is full ", qkey); pthread_mutex_unlock(&(pcore->qlock)); return EXIT_SUCCESS; } }
/*
 * Scheduler thread body: worst-case weighted fair queuing (WF2Q-style).
 * Each cycle selects, among backlogged queues whose virtual start time has
 * been reached (stime <= vclock), the one with the smallest virtual finish
 * time, forwards one packet from it to workQ, rolls that queue's virtual
 * times forward, and advances the core's virtual clock. Never returns.
 * This is the corrected ("MOD") version: vclock is reset once at startup,
 * and minftime/savekey are re-initialized every cycle.
 */
void *weightedFairScheduler(void *pc)
{
    pktcore_t *pcore = (pktcore_t *)pc;
    List *keylst;
    simplequeue_t *nxtq, *thisq;
    char *nxtkey, *savekey;
    double minftime, minstime, tweight;
    int pktsize, npktsize;
    gpacket_t *in_pkt, *nxt_pkt;

    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); // die as soon as cancelled
    //MOD
    pcore->vclock = 0.0;
    //pktsize - used to get the packet size of packet being enqueued now
    //npktsize - used to determine if there are more packets in current queue
    //
    while (1)
    {
        verbose(2, "[weightedFairScheduler]:: Worst-case weighted fair queuing scheduler processing..");

        /* Sleep until at least one packet is queued. */
        pthread_mutex_lock(&(pcore->qlock));
        if (pcore->packetcnt == 0)
            pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock));
        pthread_mutex_unlock(&(pcore->qlock));
        pthread_testcancel();

        keylst = map_keys(pcore->queues);
        minftime = MAX_DOUBLE;//MOD
        savekey = NULL;//MOD
        while (list_has_next(keylst) == 1)
        {
            nxtkey = list_next(keylst);
            nxtq = map_get(pcore->queues, nxtkey);
            if (nxtq->cursize == 0)
                continue;
            //determines the minftime
            /* Eligible (started) queue with the smallest finish time wins. */
            if ((nxtq->stime <= pcore->vclock) && (nxtq->ftime < minftime))
            {
                savekey = nxtkey;
                minftime = nxtq->ftime;
            }
        }
        list_release(keylst);

        // if savekey is NULL then release the lock..
        if (savekey == NULL)
            continue;
        else
        {
            /* Forward one packet from the selected queue to the work queue. */
            thisq = map_get(pcore->queues, savekey);
            readQueue(thisq, (void **)&in_pkt, &pktsize);
            writeQueue(pcore->workQ, in_pkt, pktsize);

            pthread_mutex_lock(&(pcore->qlock));
            pcore->packetcnt--;
            pthread_mutex_unlock(&(pcore->qlock));

            peekQueue(thisq, (void **)&nxt_pkt, &npktsize);
            //Doing this because we don't change the stime and
            //ftime unless a new packet comes into an empty queue
            if (npktsize)
            {
                thisq->stime = thisq->ftime;
                thisq->ftime = thisq->stime + npktsize/thisq->weight;
            }

            /* Advance the virtual clock: at least the smallest start time of
               any backlogged queue, at least old vclock + pktsize/total-weight. */
            minstime = thisq->stime;
            tweight = 0.0;
            keylst = map_keys(pcore->queues);
            //determine minstime
            while (list_has_next(keylst) == 1)
            {
                nxtkey = list_next(keylst);
                nxtq = map_get(pcore->queues, nxtkey);
                tweight += nxtq->weight;
                if ((nxtq->cursize > 0) && (nxtq->stime < minstime))
                    minstime = nxtq->stime;
            }
            list_release(keylst);
            pcore->vclock = max(minstime, (pcore->vclock + ((double)pktsize)/tweight));
        }
    }
}