void worker_loop(int id) { dmq_worker_t* worker = &workers[id]; dmq_job_t* current_job; peer_reponse_t peer_response; int ret_value; for(;;) { LM_DBG("dmq_worker [%d %d] getting lock\n", id, my_pid()); lock_get(&worker->lock); LM_DBG("dmq_worker [%d %d] lock acquired\n", id, my_pid()); /* multiple lock_release calls might be performed, so remove from queue until empty */ do { /* fill the response with 0's */ memset(&peer_response, 0, sizeof(peer_response)); current_job = job_queue_pop(worker->queue); /* job_queue_pop might return NULL if queue is empty */ if(current_job) { ret_value = current_job->f(current_job->msg, &peer_response); if(ret_value < 0) { LM_ERR("running job failed\n"); continue; } /* add the body to the reply */ if(peer_response.body.s) { if(set_reply_body(current_job->msg, &peer_response.body, &peer_response.content_type) < 0) { LM_ERR("error adding lumps\n"); continue; } } /* send the reply */ if(slb.freply(current_job->msg, peer_response.resp_code, &peer_response.reason) < 0) { LM_ERR("error sending reply\n"); } /* if body given, free the lumps and free the body */ if(peer_response.body.s) { del_nonshm_lump_rpl(¤t_job->msg->reply_lump); pkg_free(peer_response.body.s); } LM_DBG("sent reply\n"); shm_free(current_job->msg); shm_free(current_job); worker->jobs_processed++; } } while(job_queue_size(worker->queue) > 0); } }
/**
 * Thread entry point wrapping job_queue_pop() so it can be started via
 * pthread_create().
 *
 * @param argv  opaque thread argument, forwarded to job_queue_pop()
 * @return      always NULL (the popped job, if any, is discarded here)
 */
void *job_queue_pop_thread(void *argv)
{
	/* NOTE(review): worker_loop() calls job_queue_pop() with a queue
	 * argument, so the original zero-argument call here cannot match that
	 * prototype; forward the thread argument as the queue — confirm that
	 * callers of pthread_create pass the intended job_queue pointer. */
	job_queue_pop(argv);
	return NULL;
}