/*
 * list_dup()
 * Deep-copies `orig` into a freshly created list and returns it.
 * Values are duplicated with orig->type.dup when set; otherwise the
 * value pointers are shared between both lists.
 * Returns NULL on any failure (iterator, allocation, dup, or node add),
 * in which case the partially built copy is released.
 *
 * FIX: the original leaked the duplicated value when type.dup succeeded
 * but list_add_node_tail failed — list_release() only frees values that
 * made it into a node. We now free that orphan via type.free.
 */
list_t *list_dup(list_t *orig) {
    list_iter *iter = list_get_iterator(orig, LIST_START_HEAD);
    if (iter == NULL) {
        return NULL;
    }
    list_t *copy = list_create(&orig->type);
    if (copy == NULL) {
        list_release_iterator(iter);
        return NULL;
    }
    list_node *node;
    while ((node = list_next(iter)) != NULL) {
        void *value = node->value;
        int duplicated = 0;
        if (copy->type.dup) {
            value = copy->type.dup(value);
            if (value == NULL) {
                goto fail;
            }
            duplicated = 1;
        }
        if (list_add_node_tail(copy, value) == NULL) {
            /* value never entered the copy; release our private duplicate */
            if (duplicated && copy->type.free) {
                copy->type.free(value);
            }
            goto fail;
        }
    }
    list_release_iterator(iter);
    return copy;

fail:
    list_release_iterator(iter);
    list_release(copy);
    return NULL;
}
int main(int argc, char *argv[]) { list_type type; type.dup = node_dup; type.free = node_free; type.compare = node_compare; list_t *list = list_create(&type); for (int i = 0; i < 10; ++i) { sds value = sdsempty(); value = sdscatprintf(value, "%d", i); list_add_node_tail(list, value); printf("add %s\n", value); sdsfree(value); } for (int i = 0; i < 10; ++i) { sds value = sdsempty(); value = sdscatprintf(value, "%d", 10 + i); list_add_node_head(list, value); printf("add %s\n", value); sdsfree(value); } list_node *node = list_index(list, 10); sds value = sdsempty(); value = sdscatprintf(value, "%d", 100); list_insert_node(list, node, value, 1); printf("insert %s\n", value); node = list_find(list, value); printf("search: %s\n", (char *)node->value); sdsfree(value); for (int i = -10; i < 10; ++i) { node = list_index(list, i); if (node) { printf("%d: %s\n", i, (char *)node->value); } } list_t *copy = list_dup(list); list_release(list); printf("len: %ld\n", list_len(copy)); list_rotate(copy); list_rotate(copy); list_rotate(copy); list_iter *iter = list_get_iterator(copy, LIST_START_HEAD); while ((node = list_next(iter)) != NULL) { printf("%s\n", (char *)node->value); list_del(copy, node); } list_release_iterator(iter); list_release(copy); printf("len: %ld\n", list_len(copy)); printf("head: %p, tail: %p\n", copy->head, copy->tail); return 0; }
/*
 * Collects, for every file in `files`, its cached histogram from
 * `map_histo`, then runs a pairwise similarity search: each histogram is
 * compared against all the histograms that follow it in the list.
 * Returns a list of the similarity-match lists; the temporary histogram
 * list is released before returning.
 */
list_t *process_job_results(Eina_Hash *map_histo, list_t *files, float threshold) {
    list_t *similar_files = NULL;
    list_t *list_histo = NULL;

    /* Resolve each file name to its cached histogram. */
    for (list_t *f = files; f != NULL; f = f->next) {
        histogram_t *cached_elem = eina_hash_find(map_histo, f->value);
        list_histo = list_append(list_histo, cached_elem);
    }

    printf("Looking for similarities in %i elements with threshold %.2f\n",
           eina_hash_population(map_histo), threshold);

    /* Compare every histogram against the tail of the list after it. */
    for (list_t *cur = list_histo; cur != NULL; cur = cur->next) {
        list_t *matches = search_similar(cur->value, cur->next, threshold);
        similar_files = list_append(similar_files, matches);
    }

    list_release(list_histo);
    return similar_files;
}
/*
 * list_release()
 * Frees an entire linked list.
 *
 * FIX: the original freed the list recursively — one stack frame per
 * node — which overflows the stack on long lists. This iterative
 * version frees the same nodes in the same order using O(1) stack.
 */
void list_release(link list) {
    while (list) {
        link next = list->next; /* save before freeing the node */
        free(list);
        list = next;
    }
}
/*
 * graph_release()
 * Frees the given graph: every per-vertex adjacency list, then the
 * adjacency array itself, then the graph structure.
 */
void graph_release(Graph g) {
    for (vertex v = 0; v < g->v; v++) {
        list_release(g->adj[v]);
    }
    free(g->adj);
    free(g);
}
// destroys the data structures of the message and releases the memory held // by them.. we could still have a memory leak!! int destroySimpleQueue(simplequeue_t *msgqueue) { verbose(2,"[destroySimpleQueue]:: Deleting the queue %s .. ", msgqueue->name); if (msgqueue != NULL) { if (msgqueue->queue != NULL) list_release(msgqueue->queue); free(msgqueue); } verbose(4, "[destroySimpleQueue]:: released all the simple queue data structures.. "); // TODO: check the errno to see whether any problems.. return EXIT_SUCCESS; }
/*
 * Allocation-balance test: append NB_ELEMENTS items to a fresh list,
 * release it, and (under __VERIFY) assert that the global allocation
 * counter returned to zero.
 */
int main() {
    struct list *my_list = list_new();
    for (int i = 0; i < NB_ELEMENTS; ++i) {
        list_append(my_list, i);
    }
    list_release(my_list);
#ifdef __VERIFY
    assert(counter == 0);
#endif
    return 0;
}
void *roundRobinScheduler(void *pc) { pktcore_t *pcore = (pktcore_t *)pc; List *keylst; int nextqid, qcount, rstatus, pktsize; char *nextqkey; gpacket_t *in_pkt; simplequeue_t *nextq; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); while (1) { verbose(2, "[roundRobinScheduler]:: Round robin scheduler processing... "); keylst = map_keys(pcore->queues); nextqid = pcore->lastqid; qcount = list_length(keylst); pthread_mutex_lock(&(pcore->qlock)); if (pcore->packetcnt == 0) pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock)); pthread_mutex_unlock(&(pcore->qlock)); pthread_testcancel(); do { nextqid = (1 + nextqid) % qcount; nextqkey = list_item(keylst, nextqid); // get the queue.. nextq = map_get(pcore->queues, nextqkey); // read the queue.. rstatus = readQueue(nextq, (void **)&in_pkt, &pktsize); if (rstatus == EXIT_SUCCESS) { pcore->lastqid = nextqid; writeQueue(pcore->workQ, in_pkt, pktsize); } } while (nextqid != pcore->lastqid && rstatus == EXIT_FAILURE); list_release(keylst); pthread_mutex_lock(&(pcore->qlock)); if (rstatus == EXIT_SUCCESS) pcore->packetcnt--; pthread_mutex_unlock(&(pcore->qlock)); usleep(rconfig.schedcycle); } }
/*
 * End-to-end pipeline: load the histogram cache, refresh it by pushing
 * jobs for the given files, persist the updated cache, then run the
 * similarity search over the results. Returns the list of similar
 * files; owns and tears down the cache, client info, and descriptor.
 */
list_t *process_files(clinfo_t *clinfo, list_t *files, float threshold) {
    histogram_cache_descriptor_init();

    /* Load cached histograms and drop entries for vanished files. */
    Eina_Hash *map_histo = read_histogram_file(CACHE_FILE);
    clean_inexistant_files(map_histo);

    /* Compute missing histograms asynchronously, then wait. */
    list_t *job_waits = push_jobs(files, clinfo, map_histo);
    wait_for_jobs(job_waits, map_histo);
    list_release(job_waits);

    write_histogram_to_file(CACHE_FILE, map_histo);

    list_t *similar_files = process_job_results(map_histo, files, threshold);

    eina_hash_free(map_histo);
    clinfo_free(clinfo);
    histogram_cache_descriptor_shutdown();
    return similar_files;
}
// WCWeightFairQueuer: function called by the classifier to enqueue // the packets.. // TODO: Debug this function... void *weightedFairScheduler(void *pc) { pktcore_t *pcore = (pktcore_t *)pc; List *keylst; int nextqid, qcount, rstatus, pktsize; char *nextqkey; gpacket_t *in_pkt; simplequeue_t *nextq; long queueSizes[100]; //Array to store bytes sent for each queue id int PacketsSent[10]; int i, j;//To iterate through the queue length array long crtStarvedQid; double crtStarvedQidWeight; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); while (1) { verbose(1, "[weightedFairScheduler]:: Weighted Fair Queue schedule processing... "); keylst = map_keys(pcore->queues); qcount = list_length(keylst); pthread_mutex_lock(&(pcore->qlock)); if (pcore->packetcnt == 0) pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock)); pthread_mutex_unlock(&(pcore->qlock)); pthread_testcancel(); for(i = 0; i <qcount; i++) { nextqkey = list_item(keylst, i); // get the queue.. nextq = map_get(pcore->queues, nextqkey); if(GetQueueSize(nextq) > 0) { crtStarvedQidWeight = (queueSizes[i] / (nextq->weight) ); //TODO crtStarvedQid = i; break; } } if(i == qcount) { list_release(keylst); usleep(rconfig.schedcycle); continue; } for(j = i; j < qcount; j++) { nextqkey = list_item(keylst, j); // get the queue.. nextq = map_get(pcore->queues, nextqkey); if(( (queueSizes[j] / (nextq->weight)) < crtStarvedQidWeight) && (GetQueueSize(nextq) > 0)) //TODO { crtStarvedQid = j; crtStarvedQidWeight = queueSizes[j] / (nextq->weight) ; //TODO } } nextqid = crtStarvedQid; nextqkey = list_item(keylst, nextqid); // get the queue.. nextq = map_get(pcore->queues, nextqkey); // read the queue.. rstatus = readQueue(nextq, (void **)&in_pkt, &pktsize);//Here we get the packet size. 
if (rstatus == EXIT_SUCCESS) { writeQueue(pcore->workQ, in_pkt, pktsize); verbose(1, "[weightedFairScheduler---Just sent]:: Queue[%d] has now sent %lu bytes", nextqid, queueSizes[nextqid]); queueSizes[nextqid] = queueSizes[nextqid] + findPacketSize(&(in_pkt->data));//Storing updated data sent in array PacketsSent[nextqid]++; } for(i = 0; i <qcount; i++) { nextqkey = list_item(keylst, i); // get the queue.. nextq = map_get(pcore->queues, nextqkey); verbose(1, "Packets Queued[%d] = %d, Bytes sent = %d, Packets Sent = %d", i, GetQueueSize(nextq), queueSizes[i], PacketsSent[i]); } list_release(keylst); pthread_mutex_lock(&(pcore->qlock)); if (rstatus == EXIT_SUCCESS) { (pcore->packetcnt)--; } pthread_mutex_unlock(&(pcore->qlock)); usleep(rconfig.schedcycle); } }
void *weightedFairScheduler(void *pc) { pktcore_t *pcore = (pktcore_t *)pc; List *keylst; simplequeue_t *nxtq, *thisq; char *nxtkey, *savekey; double minftime, minstime, tweight; int pktsize, npktsize; gpacket_t *in_pkt, *nxt_pkt; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); // die as soon as cancelled while (1) { verbose(2, "[weightedFairScheduler]:: Worst-case weighted fair queuing scheduler processing.."); pthread_mutex_lock(&(pcore->qlock)); if (pcore->packetcnt == 0) pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock)); pthread_mutex_unlock(&(pcore->qlock)); pthread_testcancel(); keylst = map_keys(pcore->queues); while (list_has_next(keylst) == 1) { nxtkey = list_next(keylst); nxtq = map_get(pcore->queues, nxtkey); if (nxtq->cursize == 0) continue; if ((nxtq->stime <= pcore->vclock) && (nxtq->ftime < minftime)) { savekey = nxtkey; minftime = nxtq->ftime; } } list_release(keylst); // if savekey is NULL then release the lock.. if (savekey == NULL) continue; else { thisq = map_get(pcore->queues, savekey); readQueue(thisq, (void **)&in_pkt, &pktsize); writeQueue(pcore->workQ, in_pkt, pktsize); pthread_mutex_lock(&(pcore->qlock)); pcore->packetcnt--; pthread_mutex_unlock(&(pcore->qlock)); peekQueue(thisq, (void **)&nxt_pkt, &npktsize); if (npktsize) { thisq->stime = thisq->ftime; thisq->ftime = thisq->stime + npktsize/thisq->weight; } minstime = thisq->stime; tweight = 0.0; keylst = map_keys(pcore->queues); while (list_has_next(keylst) == 1) { nxtkey = list_next(keylst); nxtq = map_get(pcore->queues, nxtkey); tweight += nxtq->weight; if ((nxtq->cursize > 0) && (nxtq->stime < minstime)) minstime = nxtq->stime; } list_release(keylst); pcore->vclock = max(minstime, (pcore->vclock + ((double)pktsize)/tweight)); } } }
// WCWeightFairQueuer: function called by the classifier to enqueue // the packets.. // TODO: Debug this function... int weightedFairQueuer(pktcore_t *pcore, gpacket_t *in_pkt, int pktsize, char *qkey) { simplequeue_t *thisq, *nxtq; double minftime, minstime, tweight; List *keylst; char *nxtkey, *savekey; verbose(2, "[weightedFairQueuer]:: Worst-case weighted fair queuing scheduler processing.."); pthread_mutex_lock(&(pcore->qlock)); thisq = map_get(pcore->queues, qkey); if (thisq == NULL) { fatal("[weightedFairQueuer]:: Invalid %s key presented for queue addition", qkey); pthread_mutex_unlock(&(pcore->qlock)); return EXIT_FAILURE; // packet dropped.. } printf("Checking the queue size \n"); if (thisq->cursize == 0) { verbose(2, "[weightedFairQueuer]:: inserting the first element.. "); thisq->stime = max(pcore->vclock, thisq->ftime); thisq->ftime = thisq->stime + pktsize/thisq->weight; minstime = thisq->stime; keylst = map_keys(pcore->queues); while (list_has_next(keylst) == 1) { nxtkey = list_next(keylst); nxtq = map_get(pcore->queues, nxtkey); if ((nxtq->cursize > 0) && (nxtq->stime < minstime)) minstime = nxtq->stime; } list_release(keylst); pcore->vclock = max(minstime, pcore->vclock); // insert the packet... and increment variables.. writeQueue(thisq, in_pkt, pktsize); pcore->packetcnt++; // wake up scheduler if it was waiting.. if (pcore->packetcnt == 1) pthread_cond_signal(&(pcore->schwaiting)); pthread_mutex_unlock(&(pcore->qlock)); return EXIT_SUCCESS; } else if (thisq->cursize < thisq->maxsize) { // insert packet and setup variables.. writeQueue(thisq, in_pkt, pktsize); pcore->packetcnt++; pthread_mutex_unlock(&(pcore->qlock)); return EXIT_SUCCESS; } else { verbose(2, "[weightedFairQueuer]:: Packet dropped.. Queue for %s is full ", qkey); pthread_mutex_unlock(&(pcore->qlock)); return EXIT_SUCCESS; } }
/*
 * Worst-case weighted fair queuing (WF2Q) scheduler thread, corrected
 * ("MOD") variant: each round it selects, among queues whose virtual
 * start time is not ahead of the virtual clock, the one with the
 * smallest virtual finish time; forwards one packet from it to workQ;
 * then advances that queue's timestamps and the global virtual clock.
 * Runs forever; cancellation points are pthread_cond_wait and
 * pthread_testcancel.
 */
void *weightedFairScheduler(void *pc) {
    pktcore_t *pcore = (pktcore_t *)pc;
    List *keylst;
    simplequeue_t *nxtq, *thisq;
    char *nxtkey, *savekey;
    double minftime, minstime, tweight;
    int pktsize, npktsize;
    gpacket_t *in_pkt, *nxt_pkt;

    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); // die as soon as cancelled
    //MOD
    pcore->vclock = 0.0; // virtual clock starts at zero
    //pktsize - used to get the packet size of packet being enqueued now
    //npktsize - used to determine if there are more packets in current queue
    //
    while (1) {
        verbose(2, "[weightedFairScheduler]:: Worst-case weighted fair queuing scheduler processing..");
        // Block until the classifier signals that packets exist.
        pthread_mutex_lock(&(pcore->qlock));
        if (pcore->packetcnt == 0)
            pthread_cond_wait(&(pcore->schwaiting), &(pcore->qlock));
        pthread_mutex_unlock(&(pcore->qlock));
        pthread_testcancel();

        keylst = map_keys(pcore->queues);
        minftime = MAX_DOUBLE; //MOD — sentinel so any real finish time is smaller
        savekey = NULL; //MOD — stays NULL when no queue is eligible
        while (list_has_next(keylst) == 1) {
            nxtkey = list_next(keylst);
            nxtq = map_get(pcore->queues, nxtkey);
            if (nxtq->cursize == 0) continue; // skip empty queues
            //determines the minftime: eligible (stime <= vclock) queue
            //with the smallest virtual finish time wins
            if ((nxtq->stime <= pcore->vclock) && (nxtq->ftime < minftime)) {
                savekey = nxtkey;
                minftime = nxtq->ftime;
            }
        }
        list_release(keylst);
        // if savekey is NULL then release the lock..
        if (savekey == NULL)
            continue; // nothing eligible this round
        else {
            // Serve one packet from the selected queue.
            thisq = map_get(pcore->queues, savekey);
            readQueue(thisq, (void **)&in_pkt, &pktsize);
            writeQueue(pcore->workQ, in_pkt, pktsize);
            pthread_mutex_lock(&(pcore->qlock));
            pcore->packetcnt--;
            pthread_mutex_unlock(&(pcore->qlock));

            peekQueue(thisq, (void **)&nxt_pkt, &npktsize);
            //Doing this because we don't change the stime and
            //ftime unless a new packet comes into an empty queue
            if (npktsize) {
                thisq->stime = thisq->ftime;
                thisq->ftime = thisq->stime + npktsize/thisq->weight;
            }

            minstime = thisq->stime;
            tweight = 0.0;
            keylst = map_keys(pcore->queues);
            //determine minstime (smallest start time among backlogged
            //queues) and the total weight of all queues
            while (list_has_next(keylst) == 1) {
                nxtkey = list_next(keylst);
                nxtq = map_get(pcore->queues, nxtkey);
                tweight += nxtq->weight;
                if ((nxtq->cursize > 0) && (nxtq->stime < minstime))
                    minstime = nxtq->stime;
            }
            list_release(keylst);
            // Advance virtual clock: bounded below by minstime, grown by
            // the served packet's size normalized by total weight.
            pcore->vclock = max(minstime, (pcore->vclock + ((double)pktsize)/tweight));
        }
    }
}