/*!
 \brief This function is a thread that sits and listens on the packet_queue
 for incoming data that has come from handle_data() after passing basic
 validation. This thread will call the dispatcher with the packet, which will
 fire off all appropriate subscribers to this packet.
 \param data is unused
 \returns NULL (never actually returns; exits via g_thread_exit())
 */
void *packet_handler(gpointer data)
{
	GTimeVal tval;
	FreeEMS_Packet *packet = NULL;
	/* Queue is looked up once; producers push via handle_data() elsewhere. */
	GAsyncQueue *queue = (GAsyncQueue *)DATA_GET(global_data,"packet_queue");
	GCond *cond = NULL;

	while(TRUE)
	{
		/* Exit request: either the whole app is leaving or this thread
		 * specifically was asked to stop. Signal the waiter (if any)
		 * BEFORE exiting so the joining side is not left blocked. */
		if ((DATA_GET(global_data,"leaving") || (DATA_GET(global_data,"packet_handler_thread_exit"))))
		{
			cond = (GCond *)DATA_GET(global_data,"packet_handler_cond");
			if (cond)
				g_cond_signal(cond);
			g_thread_exit(0);
		}
		/* Wait at most 250 ms per pop so the exit flags above are
		 * re-checked at least four times a second. */
		g_get_current_time(&tval);
		g_time_val_add(&tval,250000);
		packet = (FreeEMS_Packet *)g_async_queue_timed_pop(queue,&tval);
		if (packet)
			dispatch_packet_queues(packet);
	}
	/* Unreachable: the loop above only leaves via g_thread_exit(). */
	g_thread_exit(0);
	return NULL;
}
/* Worker loop for an IrisThread: pops IrisMessages off the thread's queue
 * and dispatches them. Exclusive threads block forever on their queue;
 * shared threads wait at most 5 s and then retire themselves from the
 * scheduler manager. Returns NULL when the thread shuts down. */
static gpointer
iris_thread_worker (IrisThread *thread)
{
	IrisMessage *message;
	GTimeVal     timeout = {0,0};

	g_return_val_if_fail (thread != NULL, NULL);
	g_return_val_if_fail (thread->queue != NULL, NULL);

	/* Record "current thread" in per-thread storage; on LINUX a plain
	 * (presumably __thread) variable is used, otherwise pthread TSD. */
#if LINUX
	my_thread = thread;
#else
	pthread_setspecific (my_thread, thread);
#endif

	iris_debug_init_thread ();
	iris_debug (IRIS_DEBUG_THREAD);

next_message:
	if (thread->exclusive) {
		/* Exclusive threads never time out; they live until told to
		 * shut down via a message. */
		message = g_async_queue_pop (thread->queue);
	}
	else {
		/* If we do not get any schedulers to work for within our
		 * timeout period, we can safely shutdown. */
		g_get_current_time (&timeout);
		g_time_val_add (&timeout, G_USEC_PER_SEC * 5);
		message = g_async_queue_timed_pop (thread->queue, &timeout);

		if (!message) {
			/* Make sure that the manager removes us from the
			 * free thread list. */
			iris_scheduler_manager_destroy (thread);

			/* make sure nothing was added while we
			 * removed ourselves (closes the race between the
			 * timeout and deregistration) */
			message = g_async_queue_try_pop (thread->queue);
		}
	}

	if (!message)
		return NULL;

	switch (message->what) {
	case MSG_MANAGE:
		iris_thread_handle_manage (thread,
		                           iris_message_get_pointer (message, "queue"),
		                           iris_message_get_boolean (message, "exclusive"),
		                           iris_message_get_boolean (message, "leader"));
		break;
	case MSG_SHUTDOWN:
		iris_thread_handle_shutdown (thread);
		break;
	default:
		g_warn_if_reached ();
		break;
	}

	/* Loop back for the next message rather than recursing. */
	goto next_message;
}
/*!
 \brief Requests the interface version from the ECU by sending a
 REQUEST_INTERFACE_VERSION packet and waiting up to 500 ms for the matching
 RESPONSE_INTERFACE_VERSION packet on a private queue.
 \param len if non-NULL, receives the length of the returned version payload
 \returns a newly allocated version string (caller frees), the literal
 "Offline" copy when in offline mode, or NULL on write failure/timeout
 */
G_MODULE_EXPORT gchar * request_interface_version(gint *len)
{
	GAsyncQueue *queue = NULL;
	FreeEMS_Packet *packet = NULL;
	gchar *version = NULL;
	GTimeVal tval;
	Serial_Params *serial_params = NULL;
	guint8 *buf = NULL;
	/* Raw packet */
	guint8 pkt[INTVER_REQ_PKT_LEN];
	gint i = 0;
	guint8 sum = 0;
	gint tmit_len = 0;

	serial_params = DATA_GET(global_data,"serial_params");
	g_return_val_if_fail(serial_params,NULL);

	if (DATA_GET(global_data,"offline"))
		return g_strdup("Offline");

	/* Build the request: header, 16-bit payload ID, trailing checksum
	 * byte computed over everything before it. */
	pkt[HEADER_IDX] = 0;
	pkt[H_PAYLOAD_IDX] = (REQUEST_INTERFACE_VERSION & 0xff00 ) >> 8;
	pkt[L_PAYLOAD_IDX] = (REQUEST_INTERFACE_VERSION & 0x00ff );
	for (i=0;i<INTVER_REQ_PKT_LEN-1;i++)
		sum += pkt[i];
	pkt[INTVER_REQ_PKT_LEN-1] = sum;
	buf = finalize_packet((guint8 *)&pkt,INTVER_REQ_PKT_LEN,&tmit_len);

	/* Register a private queue for the response BEFORE writing, so the
	 * reply cannot slip past between write and registration. */
	queue = g_async_queue_new();
	register_packet_queue(PAYLOAD_ID,queue,RESPONSE_INTERFACE_VERSION);
	if (!write_wrapper_f(serial_params->fd,buf, tmit_len, NULL))
	{
		deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_INTERFACE_VERSION);
		g_free(buf);
		g_async_queue_unref(queue);
		return NULL;
	}
	g_free(buf);

	/* Wait up to 500 ms for the response packet. */
	g_get_current_time(&tval);
	g_time_val_add(&tval,500000);
	packet = g_async_queue_timed_pop(queue,&tval);
	deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_INTERFACE_VERSION);
	g_async_queue_unref(queue);

	if (packet)
	{
		/* g_strndup NUL-terminates, so the raw payload need not. */
		version = g_strndup((const gchar *)(packet->data+packet->payload_base_offset),packet->payload_length);
		if (len)
			*len = packet->payload_length;
		freeems_packet_cleanup(packet);
	}
	return version;
}
/* Pop from an async queue with a relative timeout given in microseconds.
 * Thin compatibility shim over g_async_queue_timed_pop(), which wants an
 * absolute GTimeVal deadline. Returns the popped item or NULL on timeout. */
gpointer g_async_queue_timeout_pop(GAsyncQueue *queue,guint64 timeout)
{
	GTimeVal deadline;

	/* Convert the relative microsecond timeout into an absolute time. */
	g_get_current_time(&deadline);
	g_time_val_add(&deadline,(glong)timeout);

	return g_async_queue_timed_pop(queue,&deadline);
}
gpointer Util::g_async_queue_timeout_pop( GAsyncQueue * queue , guint64 timeout ) { #if GLIB_CHECK_VERSION(2,32,0) return ::g_async_queue_timeout_pop( queue , timeout ); #else GTimeVal tv; g_get_current_time( & tv ); g_time_val_add( & tv , timeout ); return g_async_queue_timed_pop( queue , & tv ); #endif }
ArvBuffer * arv_stream_timeout_pop_buffer (ArvStream *stream, guint64 timeout) { #if GLIB_CHECK_VERSION(2,32,0) g_return_val_if_fail (ARV_IS_STREAM (stream), NULL); return g_async_queue_timeout_pop (stream->priv->output_queue, timeout); #else GTimeVal end_time; g_return_val_if_fail (ARV_IS_STREAM (stream), NULL); g_get_current_time (&end_time); g_time_val_add (&end_time, timeout); return g_async_queue_timed_pop (stream->priv->output_queue, &end_time); #endif }
/**
 * oh_dequeue_session_event
 * @sid: session id to dequeue an event from (must be >= 1)
 * @timeout: SAHPI_TIMEOUT_IMMEDIATE (try-pop), SAHPI_TIMEOUT_BLOCK
 *           (block forever), or a relative timeout in nanoseconds
 * @event: out parameter; filled with the dequeued event on success,
 *         zeroed on timeout
 *
 * Pops one event off the session's event queue, honoring the HPI
 * timeout semantics.
 *
 * Returns: SA_OK on success; SA_ERR_HPI_INVALID_PARAMS on bad args;
 * SA_ERR_HPI_INVALID_SESSION if the session is unknown;
 * SA_ERR_HPI_TIMEOUT if no event arrived in time.
 **/
SaErrorT oh_dequeue_session_event(SaHpiSessionIdT sid,
				  SaHpiTimeoutT timeout,
				  struct oh_event *event)
{
	struct oh_session *session = NULL;
	struct oh_event *devent = NULL;
	GTimeVal gfinaltime;
	GAsyncQueue *eventq = NULL;

	if (sid < 1 || (event == NULL))
		return SA_ERR_HPI_INVALID_PARAMS;

	g_static_rec_mutex_lock(&oh_sessions.lock);	/* Locked session table */
	session = g_hash_table_lookup(oh_sessions.table, &sid);
	if (!session) {
		g_static_rec_mutex_unlock(&oh_sessions.lock);
		return SA_ERR_HPI_INVALID_SESSION;
	}
	/* Take a ref on the queue while still holding the table lock so the
	 * queue cannot be destroyed under us once the lock is dropped. */
	eventq = session->eventq;
	g_async_queue_ref(eventq);
	g_static_rec_mutex_unlock(&oh_sessions.lock);

	if (timeout == SAHPI_TIMEOUT_IMMEDIATE) {
		devent = g_async_queue_try_pop(eventq);
	} else if (timeout == SAHPI_TIMEOUT_BLOCK) {
		devent = g_async_queue_pop(eventq);	/* FIXME: Need to time this. */
	} else {
		/* HPI timeout is in nanoseconds; g_time_val_add takes
		 * microseconds, hence the /1000. */
		g_get_current_time(&gfinaltime);
		g_time_val_add(&gfinaltime, (glong) (timeout / 1000));
		devent = g_async_queue_timed_pop(eventq, &gfinaltime);
	}
	g_async_queue_unref(eventq);

	if (devent) {
		/* Shallow copy into the caller's buffer; the queued copy is
		 * then freed (ownership transfers to @event). */
		memcpy(event, devent, sizeof(struct oh_event));
		g_free(devent);
		return SA_OK;
	} else {
		memset(event, 0, sizeof(struct oh_event));
		return SA_ERR_HPI_TIMEOUT;
	}
}
/**
 * Thread procedure of ZBlobSystem.
 *
 * @param[in] self this
 *
 * Performs the swapping/storage maintenance tasks described in the spec.
 * Pops allocation-request blobs from req_queue (with a periodic timeout
 * used to emit usage reports), approves or defers them, and wakes
 * deferred blobs on the waiting list whenever memory is freed.
 *
 * @returns Currently just self
 **/
static gpointer
z_blob_system_threadproc(ZBlobSystem *self)
{
	ZBlob *blob;
	GList *cur, *del;
	gssize blob_alloc_req;
	GTimeVal next_time, now;
	glong interval = 300;   /* 5 min. */

	z_enter();
	g_assert(self);
	/* Tell the spawning thread we are up; it waits on cond_thread_started. */
	g_mutex_lock(self->mtx_blobsys);
	g_cond_signal(self->cond_thread_started);
	g_mutex_unlock(self->mtx_blobsys);
	g_get_current_time(&next_time);
	next_time.tv_sec += interval;
	while (1)
	{
		blob = g_async_queue_timed_pop(self->req_queue, &next_time);  /* blocks until there is a requesting blob in the queue */
		if (blob == NULL)
		{
			/* Timed out with no request: emit a periodic usage report
			 * and re-arm the deadline. */
			g_get_current_time(&next_time);
			next_time.tv_sec += interval;
			z_blob_system_report_usage(self);
			continue;
		}
		/* Also report if a request arrived but the report deadline has
		 * already passed. NOTE(review): next_time is not re-armed on
		 * this path — presumably the next timeout pop resets it; confirm. */
		g_get_current_time(&now);
		if (now.tv_sec > next_time.tv_sec)
		{
			z_blob_system_report_usage(self);
		}
		/* Sentinel pointer values steer the loop: KILL ends the thread,
		 * MEM_FREED triggers re-evaluation of deferred requests. */
		if (blob == (ZBlob*)Z_BLOB_THREAD_KILL)
			break;
		g_mutex_lock(self->mtx_blobsys);
		if (blob == (ZBlob*)Z_BLOB_MEM_FREED)
		{
			/* check the waiting queue - it is enough to check on successful negative alloc requests,
			 * because this is the only case when memory is freed up */
			cur = self->waiting_list;
			while (cur)
			{
				blob = (ZBlob*) cur->data;
				del = NULL;
				blob->approved = z_blob_check_alloc(blob);
				if (blob->approved)
				{
					del = cur;
					z_blob_signal_ready(blob);
				}
				/* Advance BEFORE deleting so the iterator stays valid. */
				cur = cur->next;
				if (del)
					self->waiting_list = g_list_delete_link(self->waiting_list, del);
			}
			/* try to swap in blobs - makes sence only on negative alloc reqs, too */
			z_blob_system_swap_in(self);
		}
		else
		{
			blob_alloc_req = blob->alloc_req;
			blob->approved = z_blob_check_alloc(blob);
			if (!blob->approved)        /* In case of denial, move the blob to the waiting queue */
			{
				z_log(NULL, CORE_INFO, 4, "Blob storage is full, adding allocate request to the waiting list; size='%" G_GSIZE_FORMAT "'", blob_alloc_req);
				self->waiting_list = g_list_append(self->waiting_list, blob);
			}
			else                        /* send back the result to the blob */
			{
				z_blob_signal_ready(blob);
			}
		}
		g_mutex_unlock(self->mtx_blobsys);
	}
	z_leave();
	g_thread_exit(self);
	/* Unreachable after g_thread_exit; kept to satisfy the signature. */
	z_return(self);
}
/* Command-client thread: connects to the local Dropbox daemon over a Unix
 * domain socket (~/.dropbox/command_socket) and services requests popped
 * from dcc->command_queue. Reconnects with a 1 s back-off on failure and
 * reports connect/disconnect state to the main loop via g_idle_add().
 *
 * FIX: the three GTimeVal calls below had been corrupted by an HTML-entity
 * mangling ("&gtv" -> ">v"), which did not compile; restored to &gtv. */
static gpointer
dropbox_command_client_thread(DropboxCommandClient *dcc) {
	struct sockaddr_un addr;
	socklen_t addr_len;
	int connection_attempts = 1;

	/* intialize address structure */
	addr.sun_family = AF_UNIX;
	g_snprintf(addr.sun_path,
		   sizeof(addr.sun_path),
		   "%s/.dropbox/command_socket",
		   g_get_home_dir());
	addr_len = sizeof(addr) - sizeof(addr.sun_path) + strlen(addr.sun_path);

	while (1) {
		GIOChannel *chan = NULL;
		GError *gerr = NULL;
		int sock;
		gboolean failflag = TRUE;

		/* Connect with timeouts; the do/while(0) gives a single bail-out
		 * point (break) for every failure mode. */
		do {
			int flags;

			if (0 > (sock = socket(PF_UNIX, SOCK_STREAM, 0))) {
				/* WTF */
				break;
			}

			/* set timeout on socket, to protect against bad servers */
			{
				struct timeval tv = {3, 0};
				if (0 > setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(struct timeval)) ||
				    0 > setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(struct timeval))) {
					/* debug("setsockopt failed"); */
					break;
				}
			}

			/* set native non-blocking, for connect timeout */
			{
				if ((flags = fcntl(sock, F_GETFL, 0)) < 0 ||
				    fcntl(sock, F_SETFL, flags | O_NONBLOCK) < 0) {
					/* debug("fcntl failed"); */
					break;
				}
			}

			/* if there was an error we have to try again later */
			if (connect(sock, (struct sockaddr *) &addr, addr_len) < 0) {
				if (errno == EINPROGRESS) {
					fd_set writers;
					struct timeval tv = {1, 0};

					FD_ZERO(&writers);
					FD_SET(sock, &writers);

					/* if nothing was ready after 3 seconds, fail out homie */
					if (select(sock+1, NULL, &writers, NULL, &tv) == 0) {
						/* debug("connection timeout"); */
						break;
					}

					if (connect(sock, (struct sockaddr *) &addr, addr_len) < 0) {
						/* debug("couldn't connect to command server after 1 second"); */
						break;
					}
				}
				/* errno != EINPROGRESS */
				else {
					/* debug("bad connection"); */
					break;
				}
			}

			/* set back to blocking */
			if (fcntl(sock, F_SETFL, flags) < 0) {
				/* debug("fcntl2 failed"); */
				break;
			}

			failflag = FALSE;
		} while (0);

		if (failflag) {
			/* Report the failed attempt to the GTK main loop, close the
			 * socket if it was opened, back off 1 s, then retry. */
			ConnectionAttempt *ca = g_new(ConnectionAttempt, 1);
			ca->dcc = dcc;
			ca->connect_attempt = connection_attempts;
			g_idle_add((GSourceFunc) on_connection_attempt, ca);
			if (sock >= 0) {
				close(sock);
			}
			g_usleep(G_USEC_PER_SEC);
			connection_attempts++;
			continue;
		}
		else {
			connection_attempts = 0;
		}

		/* connected */
		debug("command client connected");

		chan = g_io_channel_unix_new(sock);
		g_io_channel_set_close_on_unref(chan, TRUE);
		g_io_channel_set_line_term(chan, "\n", -1);

#define SET_CONNECTED_STATE(s)     {			\
      g_mutex_lock(dcc->command_connected_mutex);	\
      dcc->command_connected = s;			\
      g_mutex_unlock(dcc->command_connected_mutex);	\
    }

		SET_CONNECTED_STATE(TRUE);

		g_idle_add((GSourceFunc) on_connect, dcc);

		while (1) {
			DropboxCommand *dc;

			/* Poll the queue every 100 ms so the connection can be
			 * health-checked while idle. */
			while (1) {
				GTimeVal gtv;

				g_get_current_time(&gtv);
				g_time_val_add(&gtv, G_USEC_PER_SEC / 10);
				/* get a request from caja */
				dc = g_async_queue_timed_pop(dcc->command_queue, &gtv);
				if (dc != NULL) {
					break;
				}
				else {
					if (check_connection(chan) == FALSE) {
						goto BADCONNECTION;
					}
				}
			}

			/* this pointer should be unique: the thread function's own
			 * address is used as an in-band "reset" sentinel. */
			if ((gpointer (*)(DropboxCommandClient *data)) dc == &dropbox_command_client_thread) {
				debug("got a reset request");
				goto BADCONNECTION;
			}

			switch (dc->request_type) {
			case GET_FILE_INFO: {
				debug("doing file info command");
				do_file_info_command(chan, (DropboxFileInfoCommand *) dc, &gerr);
			}
				break;
			case GENERAL_COMMAND: {
				debug("doing general command");
				do_general_command(chan, (DropboxGeneralCommand *) dc, &gerr);
			}
				break;
			default:
				g_assert_not_reached();
				break;
			}

			debug("done.");

			if (gerr != NULL) {
				//	debug("COMMAND ERROR*****************************");
				/* mark this request as never to be completed */
				end_request(dc);

				debug("command error: %s", gerr->message);

				g_error_free(gerr);
			BADCONNECTION:
				/* grab all the rest of the data off the async queue and mark it
				   never to be completed, who knows how long we'll be disconnected */
				while ((dc = g_async_queue_try_pop(dcc->command_queue)) != NULL) {
					end_request(dc);
				}

				g_io_channel_unref(chan);

				SET_CONNECTED_STATE(FALSE);

				/* call the disconnect handler */
				g_idle_add((GSourceFunc) on_disconnect, dcc);

				break;
			}
		}

#undef SET_CONNECTED_STATE
	}
	return NULL;
}
/* Packet delivery thread: drains dpackets from jcr->dqueue, coalesces them
 * into a single buffer (up to the configured send-buffer size), and writes
 * the buffer to the GIOChannel. Exits when a 5 s pop times out while the
 * stream is no longer connected. @a is unused. */
void jcr_queue_deliver(void *a)
{
	extern jcr_instance jcr;
	GIOStatus rc = G_IO_STATUS_NORMAL;
	GString *buffer;
	gsize bytes;
	int left, len, pkts;
	dpacket d;
	GTimeVal timeout;
	/* Coalescing buffer size from config, defaulting to 8192 bytes. */
	int buf_size = j_atoi(xmlnode_get_data(xmlnode_get_tag(jcr->config,"send-buffer")), 8192);

	log_warn(JDBG, "packet delivery thread starting.");

	buffer = g_string_new(NULL);
	while(TRUE)
	{
		g_string_set_size(buffer, 0);
		pkts = 0;
		/* Wait up to 5 s for the first packet; on timeout, keep looping
		 * while connected, otherwise shut the thread down. */
		g_get_current_time(&timeout);
		g_time_val_add(&timeout, (5 * G_USEC_PER_SEC));
		d = (dpacket)g_async_queue_timed_pop(jcr->dqueue, &timeout);
		if (d == NULL)
		{
			if (jcr->stream_state == _STREAM_CONNECTED)
				continue;
			else
				break;
		}
		g_string_append(buffer, xmlnode2str(d->x));
		xmlnode_free(d->x);
		d = NULL;
		left = len = buffer->len;
		pkts++;
		/* Opportunistically batch any further queued packets into the
		 * same write, up to buf_size. */
		while ((g_async_queue_length(jcr->dqueue) > 0) && (buffer->len < buf_size))
		{
			d = (dpacket)g_async_queue_pop(jcr->dqueue);
			g_string_append(buffer, xmlnode2str(d->x));
			xmlnode_free(d->x);
			d = NULL;
			left = len = buffer->len;
			pkts++;
		}
		//	log_debug(JDBG, "%d '%s'", len, buf);
		/* Write until the whole buffer is out or the channel errors.
		 * NOTE(review): rc is never reset to G_IO_STATUS_NORMAL after a
		 * failure, so one bad write silences all future deliveries —
		 * looks like a latent bug; confirm intent. */
		while ((left > 0) && (rc == G_IO_STATUS_NORMAL))
		{
			rc = g_io_channel_write_chars(jcr->gio, (buffer->str+(len - left)), left, &bytes, NULL);
			left = left - bytes;
			if (rc != G_IO_STATUS_NORMAL)
			{
				log_warn(JDBG, "Send packet failed, dropping packet");
			}
			log_debug(JDBG, "wrote %d packets of %d bytes", pkts, bytes);
			//	  fprintf(stderr, "wrote %d packets of %d bytes\n", pkts, bytes);
			if (left==0){
				//queue is empty, flushing the socket
				g_io_channel_flush(jcr->gio, NULL);
			}
		}
	}
	log_warn(JDBG, "packet delivery thread exiting.");
	log_warn(JDBG, " Last DvryQ Buffer='%.*s'", buffer->len, buffer->str);
	g_string_free(buffer, TRUE);
}
/* Park an unused worker thread on the global unused_thread_queue until a
 * pool claims it. Returns the pool to work for, or NULL when the thread
 * should terminate (too many unused threads, idle timeout expired, or a
 * kill request arrived). Wakeup markers pushed onto the queue carry
 * limit-change notifications; the serial-number protocol below ensures each
 * waiting thread applies a change exactly once and relays markers it has
 * already seen. */
static GRealThreadPool*
g_thread_pool_wait_for_new_pool (void)
{
	GRealThreadPool *pool;
	gint local_wakeup_thread_serial;
	guint local_max_unused_threads;
	gint local_max_idle_time;
	gint last_wakeup_thread_serial;
	gboolean have_relayed_thread_marker = FALSE;

	/* Snapshot the tunables; they are re-read when a wakeup marker with a
	 * new serial arrives. */
	local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
	local_max_idle_time = g_atomic_int_get (&max_idle_time);
	last_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);

	g_atomic_int_inc (&unused_threads);

	do
	{
		if (g_atomic_int_get (&unused_threads) >= local_max_unused_threads)
		{
			/* If this is a superfluous thread, stop it. */
			pool = NULL;
		}
		else if (local_max_idle_time > 0)
		{
			/* If a maximal idle time is given, wait for the given time. */
			GTimeVal end_time;

			g_get_current_time (&end_time);
			g_time_val_add (&end_time, local_max_idle_time * 1000);

			DEBUG_MSG (("thread %p waiting in global pool for %f seconds.",
			            g_thread_self (), local_max_idle_time / 1000.0));

			pool = g_async_queue_timed_pop (unused_thread_queue, &end_time);
		}
		else
		{
			/* If no maximal idle time is given, wait indefinitely. */
			DEBUG_MSG (("thread %p waiting in global pool.", g_thread_self ()));
			pool = g_async_queue_pop (unused_thread_queue);
		}

		if (pool == wakeup_thread_marker)
		{
			local_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);
			if (last_wakeup_thread_serial == local_wakeup_thread_serial)
			{
				if (!have_relayed_thread_marker)
				{
					/* If this wakeup marker has been received for
					 * the second time, relay it. */
					DEBUG_MSG (("thread %p relaying wakeup message to "
					            "waiting thread with lower serial.",
					            g_thread_self ()));

					g_async_queue_push (unused_thread_queue, wakeup_thread_marker);
					have_relayed_thread_marker = TRUE;

					/* If a wakeup marker has been relayed, this thread
					 * will get out of the way for 100 microseconds to
					 * avoid receiving this marker again. */
					g_usleep (100);
				}
			}
			else
			{
				/* New serial: either honor a pending kill request or
				 * refresh the cached limits and keep waiting. */
				if (g_atomic_int_exchange_and_add (&kill_unused_threads, -1) > 0)
				{
					pool = NULL;
					break;
				}

				DEBUG_MSG (("thread %p updating to new limits.",
				            g_thread_self ()));

				local_max_unused_threads = g_atomic_int_get (&max_unused_threads);
				local_max_idle_time = g_atomic_int_get (&max_idle_time);
				last_wakeup_thread_serial = local_wakeup_thread_serial;

				have_relayed_thread_marker = FALSE;
			}
		}
	} while (pool == wakeup_thread_marker);

	g_atomic_int_add (&unused_threads, -1);

	return pool;
}
/*!
 \brief Queries the ECU for the details of a single location ID.
 Sends a REQUEST_RETRIEVE_LOCATION_ID_DETAILS packet for \a loc_id and
 waits up to 500 ms for the matching response on a private queue, then
 unpacks the 12-byte big-endian payload into a Location_Details struct.
 \param loc_id location ID to query
 \returns newly allocated Location_Details (caller frees), or NULL on
 write failure or timeout
 */
G_MODULE_EXPORT Location_Details *request_location_id_details(guint16 loc_id)
{
	GAsyncQueue *queue = NULL;
	FreeEMS_Packet *packet = NULL;
	GTimeVal tval;
	Serial_Params *serial_params = NULL;
	guint8 *buf = NULL;
	Location_Details *details = NULL;
	/* Raw packet */
	guint8 pkt[LOC_ID_DETAILS_REQ_PKT_LEN];
	gint i = 0;
	gint h = 0;
	gint l = 0;
	guint8 sum = 0;
	gint tmit_len = 0;

	serial_params = DATA_GET(global_data,"serial_params");
	g_return_val_if_fail(serial_params,NULL);

	/* Build the request: header, 16-bit payload ID, 16-bit location ID,
	 * trailing checksum over everything before it. */
	pkt[HEADER_IDX] = 0;
	pkt[H_PAYLOAD_IDX] = (REQUEST_RETRIEVE_LOCATION_ID_DETAILS & 0xff00 ) >> 8;
	pkt[L_PAYLOAD_IDX] = (REQUEST_RETRIEVE_LOCATION_ID_DETAILS & 0x00ff );
	pkt[L_PAYLOAD_IDX+1] = (loc_id & 0xff00) >> 8;	/* H location bits */
	pkt[L_PAYLOAD_IDX+2] = (loc_id & 0x00ff);	/* L location bits */
	for (i=0;i<LOC_ID_DETAILS_REQ_PKT_LEN-1;i++)
		sum += pkt[i];
	pkt[LOC_ID_DETAILS_REQ_PKT_LEN-1] = sum;
	buf = finalize_packet((guint8 *)&pkt,LOC_ID_DETAILS_REQ_PKT_LEN,&tmit_len);

	/* Register the response queue before writing so the reply cannot be
	 * dispatched before we are listening. */
	queue = g_async_queue_new();
	register_packet_queue(PAYLOAD_ID,queue,RESPONSE_RETRIEVE_LOCATION_ID_DETAILS);
	if (!write_wrapper_f(serial_params->fd,buf, tmit_len, NULL))
	{
		deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_RETRIEVE_LOCATION_ID_DETAILS);
		g_free(buf);
		g_async_queue_unref(queue);
		return NULL;
	}
	g_free(buf);
	g_get_current_time(&tval);
	g_time_val_add(&tval,500000);
	packet = g_async_queue_timed_pop(queue,&tval);
	deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_RETRIEVE_LOCATION_ID_DETAILS);
	g_async_queue_unref(queue);
	if (packet)
	{
		/* Response payload is 6 big-endian 16-bit fields = 12 bytes. */
		if (packet->payload_length != 12)
			printf("ERROR in locationID details response!\n");
		details = g_new0(Location_Details, 1);
		h = packet->data[packet->payload_base_offset];
		l = packet->data[packet->payload_base_offset+1];
		details->flags = (h << 8) + l;
		h = packet->data[packet->payload_base_offset+2];
		l = packet->data[packet->payload_base_offset+3];
		details->parent = (h << 8) + l;
		details->ram_page = packet->data[packet->payload_base_offset+4];
		details->flash_page = packet->data[packet->payload_base_offset+5];
		h = packet->data[packet->payload_base_offset+6];
		l = packet->data[packet->payload_base_offset+7];
		details->ram_address = (h << 8) + l;
		h = packet->data[packet->payload_base_offset+8];
		l = packet->data[packet->payload_base_offset+9];
		details->flash_address = (h << 8) + l;
		h = packet->data[packet->payload_base_offset+10];
		l = packet->data[packet->payload_base_offset+11];
		details->length = (h << 8) + l;
		freeems_packet_cleanup(packet);
	}
	return details;
}
/*!
 \brief Queries the ECU for a list of location IDs (indexable, in-RAM
 blocks). Sends a REQUEST_RETRIEVE_LIST_OF_LOCATION_IDS packet and waits
 up to 500 ms for the response, then unpacks the payload of big-endian
 16-bit IDs into a GList.
 \param len if non-NULL, receives the raw payload length in bytes
 \returns GList of location IDs (as GINT_TO_POINTER), or NULL on failure
 */
G_MODULE_EXPORT GList *request_location_ids(gint * len)
{
	GAsyncQueue *queue = NULL;
	FreeEMS_Packet *packet = NULL;
	GTimeVal tval;
	GList *list = NULL;
	Serial_Params *serial_params = NULL;
	guint8 *buf = NULL;
	/* Raw packet */
	guint8 pkt[LOC_ID_LIST_REQ_PKT_LEN];
	gint i = 0;
	gint h = 0;
	gint l = 0;
	guint8 sum = 0;
	gint tmit_len = 0;
	guint8 flag = BLOCK_BITS_AND;
	guint16 bits = 0;

	serial_params = DATA_GET(global_data,"serial_params");
	g_return_val_if_fail(serial_params,NULL);

	/* Build the request: header, 16-bit payload ID, AND/OR flag, 16-bit
	 * filter bits, trailing checksum over everything before it. */
	pkt[HEADER_IDX] = 0;
	pkt[H_PAYLOAD_IDX] = (REQUEST_RETRIEVE_LIST_OF_LOCATION_IDS & 0xff00 ) >> 8;
	pkt[L_PAYLOAD_IDX] = (REQUEST_RETRIEVE_LIST_OF_LOCATION_IDS & 0x00ff );
	pkt[L_PAYLOAD_IDX+1] = flag;	/* AND/OR */
	bits |= BLOCK_IS_INDEXABLE | BLOCK_IN_RAM;
	pkt[L_PAYLOAD_IDX+2] = (bits & 0xff00) >> 8;	/* H bits */
	pkt[L_PAYLOAD_IDX+3] = (bits & 0x00ff);	/* L bits */
	for (i=0;i<LOC_ID_LIST_REQ_PKT_LEN-1;i++)
		sum += pkt[i];
	pkt[LOC_ID_LIST_REQ_PKT_LEN-1] = sum;
	buf = finalize_packet((guint8 *)&pkt,LOC_ID_LIST_REQ_PKT_LEN,&tmit_len);

	/* Register the response queue before writing so the reply cannot be
	 * dispatched before we are listening. */
	queue = g_async_queue_new();
	register_packet_queue(PAYLOAD_ID,queue,RESPONSE_RETRIEVE_LIST_OF_LOCATION_IDS);
	if (!write_wrapper_f(serial_params->fd,buf, tmit_len, NULL))
	{
		deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_RETRIEVE_LIST_OF_LOCATION_IDS);
		g_free(buf);
		g_async_queue_unref(queue);
		return NULL;
	}
	g_free(buf);
	g_get_current_time(&tval);
	g_time_val_add(&tval,500000);
	packet = g_async_queue_timed_pop(queue,&tval);
	deregister_packet_queue(PAYLOAD_ID,queue,RESPONSE_RETRIEVE_LIST_OF_LOCATION_IDS);
	g_async_queue_unref(queue);
	if (packet)
	{
		/* Consume the payload two bytes at a time. The bound i+1 <
		 * payload_length (instead of i < payload_length with an inner
		 * i++) fixes a one-byte out-of-bounds read when the payload
		 * length is odd; even-length behavior is unchanged. */
		for (i=0;i+1<packet->payload_length;i+=2)
		{
			h = packet->data[packet->payload_base_offset+i];
			l = packet->data[packet->payload_base_offset+i+1];
			list = g_list_append(list,GINT_TO_POINTER((h << 8) + l));
		}
		if (len)
			*len = packet->payload_length;
		freeems_packet_cleanup(packet);
	}
	return list;
}
/**
 * Thread which process addresses on tls push queue (tls_push_queue member
 * of ::nuauthdatas) which need an authentication.
 *
 * Lock is only needed when modifications are done, because when this thread
 * work (push mode) it's the only one who can modify the hash.
 *
 * Use a switch:
 *  - #WARN_MESSAGE: call warn_clients() (and may call ip_authentication_workers())
 *  - #INSERT_MESSAGE: call add_client()
 *
 * The loop runs for as long as @mutex can be trylocked (the parent holds
 * the mutex to request shutdown — NOTE(review): confirm that convention
 * against the caller). Returns NULL after cleanup.
 */
void *push_worker(GMutex * mutex)
{
	struct msg_addr_set *global_msg = g_new0(struct msg_addr_set, 1);
	struct nu_srv_message *msg = g_new0(struct nu_srv_message, 1);
	struct internal_message *message;
	GTimeVal tv;

	/* Pre-built "packet required" message reused for every warn. */
	msg->type = SRV_REQUIRED_PACKET;
	msg->option = 0;
	msg->length = htons(4);
	global_msg->msg = msg;

	g_async_queue_ref(nuauthdatas->tls_push_queue);

	/* wait for message */
	while (g_mutex_trylock(mutex)) {
		g_mutex_unlock(mutex);

		/* wait a message during POP_DELAY */
		g_get_current_time(&tv);
		g_time_val_add(&tv, POP_DELAY);
		message = g_async_queue_timed_pop(nuauthdatas->tls_push_queue, &tv);
		if (message == NULL)
			continue;

		switch (message->type) {
		case WARN_MESSAGE:
			global_msg->addr =
			    (((auth_pckt_t *) message->datas)->header).saddr;
			global_msg->found = FALSE;
			/* search in client array */
			warn_clients(global_msg, NULL, NULL);
			/* do we have found something */
			if (!ipv6_equal(&global_msg->addr, &in6addr_any)) {
				if (global_msg->found == FALSE) {
					/* if we do ip authentication send request to pool */
					if (nuauthconf->do_ip_authentication) {
						/* ownership of message->datas passes
						 * to the worker pool here */
						thread_pool_push
						    (nuauthdatas->ip_authentication_workers,
						     message->datas, NULL);
					} else {
						g_free(message->datas);
					}
				} else {
					/* free header */
					g_free(message->datas);
				}
			}
			break;
		case INSERT_MESSAGE:
			{
				struct tls_insert_data *data = message->datas;
				if (data->data) {
					add_client(data->socket, data->data);
				}
				g_free(data);
			}
			break;
		default:
			g_message("lost");
		}
		g_free(message);
	}

	g_free(msg);
	g_free(global_msg);
	g_async_queue_unref(nuauthdatas->tls_push_queue);
	return NULL;
}
/**
 * oh_dequeue_session_event
 * @sid: session id to dequeue an event from (must be >= 1)
 * @timeout: SAHPI_TIMEOUT_IMMEDIATE (try-pop), SAHPI_TIMEOUT_BLOCK
 *           (block, re-checking subscription every 5 s), or a relative
 *           timeout in nanoseconds
 * @event: out parameter; filled on success, zeroed on timeout
 * @eventq_status: optional out parameter; receives (and clears) the
 *                 session's event queue status flags
 *
 * Pops one event off the session's event queue, honoring HPI timeout
 * semantics and validating the session's subscription state per spec.
 *
 * Returns: SA_OK on success; SA_ERR_HPI_INVALID_PARAMS /
 * SA_ERR_HPI_INVALID_SESSION / SA_ERR_HPI_INVALID_REQUEST on validation
 * failure; SA_ERR_HPI_TIMEOUT if no event arrived in time.
 **/
SaErrorT oh_dequeue_session_event(SaHpiSessionIdT sid,
				  SaHpiTimeoutT timeout,
				  struct oh_event * event,
				  SaHpiEvtQueueStatusT * eventq_status)
{
	struct oh_session *session = NULL;
	struct oh_event *devent = NULL;
	GTimeVal gfinaltime;
	GAsyncQueue *eventq = NULL;
	SaHpiBoolT subscribed;
	SaErrorT invalid;

	if (sid < 1 || (event == NULL))
		return SA_ERR_HPI_INVALID_PARAMS;

	g_static_rec_mutex_lock(&oh_sessions.lock);	/* Locked session table */
	session = g_hash_table_lookup(oh_sessions.table, &sid);
	if (!session) {
		g_static_rec_mutex_unlock(&oh_sessions.lock);
		return SA_ERR_HPI_INVALID_SESSION;
	}

	/* Hand the queue-status flags to the caller and clear them while the
	 * table lock is still held. */
	if (eventq_status) {
		*eventq_status = session->eventq_status;
	}
	session->eventq_status = 0;
	/* Ref the queue under the lock so it outlives the session lookup. */
	eventq = session->eventq;
	g_async_queue_ref(eventq);
	g_static_rec_mutex_unlock(&oh_sessions.lock);

	if (timeout == SAHPI_TIMEOUT_IMMEDIATE) {
		devent = g_async_queue_try_pop(eventq);
	} else if (timeout == SAHPI_TIMEOUT_BLOCK) {
		/* "Block" is implemented as repeated 5 s waits so the session
		 * subscription can be re-validated between waits. */
		while (devent == NULL) {
			g_get_current_time(&gfinaltime);
			g_time_val_add(&gfinaltime, 5000000L);
			devent = g_async_queue_timed_pop(eventq, &gfinaltime);
			/* compliance with spec page 63 */
			invalid = oh_get_session_subscription(sid, &subscribed);
			/* Is the session still open? or still subscribed? */
			if (invalid || !subscribed) {
				g_async_queue_unref(eventq);
				oh_event_free(devent, FALSE);
				return invalid ? SA_ERR_HPI_INVALID_SESSION :
				    SA_ERR_HPI_INVALID_REQUEST;
			}
		}
	} else {
		/* HPI timeout is in nanoseconds; g_time_val_add wants
		 * microseconds, hence /1000. */
		g_get_current_time(&gfinaltime);
		g_time_val_add(&gfinaltime, (glong) (timeout / 1000));
		devent = g_async_queue_timed_pop(eventq, &gfinaltime);
		invalid = oh_get_session_subscription(sid, &subscribed);
		if (invalid || !subscribed) {
			g_async_queue_unref(eventq);
			oh_event_free(devent, FALSE);
			return invalid ?
			    SA_ERR_HPI_INVALID_SESSION :
			    SA_ERR_HPI_INVALID_REQUEST;
		}
	}
	g_async_queue_unref(eventq);

	if (devent) {
		/* Shallow copy into the caller's buffer; ownership of the
		 * event contents transfers to @event. */
		memcpy(event, devent, sizeof(struct oh_event));
		g_free(devent);
		return SA_OK;
	} else {
		memset(event, 0, sizeof(struct oh_event));
		return SA_ERR_HPI_TIMEOUT;
	}
}
/* Main loop of the database thread: pops serialized queries off db_queue
 * and executes them against @db, batching consecutive queries into a
 * transaction that is committed after DB_FLUSH_TIMEOUT or on a flush
 * condition. Query flags (DBF_*) steer the state machine:
 *   - DBF_SINGLE: run outside any transaction;
 *   - DBF_NEXT:   the following query belongs to the same chain and must
 *                 see this one's error state;
 *   - DBF_LAST:   commit the open transaction before reporting the result;
 *   - DBF_END:    shut the thread down;
 *   - DBF_NOCACHE: bypass the statement cache.
 * Returns when a DBF_END query is received. */
static void db_queue_process(sqlite3 *db) {
	GTimeVal trans_end = {}; // tv_sec = 0 if no transaction is active
	gboolean donext = FALSE;    // next query is NEXT-chained to the previous one
	gboolean errtrans = FALSE;  // current transaction aborted; fail chained queries
	GAsyncQueue *res;
	gint64 lastid;
	int r;

	while(1) {
		/* Pop policy: chained queries must not block (try_pop); with an
		 * open transaction wait only until its flush deadline; otherwise
		 * block indefinitely. */
		char *q =   donext          ? g_async_queue_try_pop(db_queue)
		          : trans_end.tv_sec ? g_async_queue_timed_pop(db_queue, &trans_end)
		          :                    g_async_queue_pop(db_queue);

		int flags = q ? darray_get_int32(q) : 0;
		gboolean nocache = flags & DBF_NOCACHE ? TRUE : FALSE;

		// Commit state if we need to
		if(!q || flags & DBF_SINGLE || flags & DBF_END) {
			g_warn_if_fail(!donext);
			if(trans_end.tv_sec)
				db_queue_process_commit(db);
			trans_end.tv_sec = 0;
			donext = errtrans = FALSE;
		}

		// If this was a timeout, wait for next query
		if(!q)
			continue;

		// if this is an END, quit.
		if(flags & DBF_END) {
			g_debug("db: Shutting down.");
			g_free(q);
			break;
		}

		// handle SINGLE
		if(flags & DBF_SINGLE) {
			r = db_queue_process_one(db, q, nocache, FALSE, &res, &lastid);
			db_queue_item_final(res, r, lastid);
			g_free(q);
			continue;
		}

		// report error to NEXT-chained queries if the transaction has been aborted.
		if(errtrans) {
			g_warn_if_fail(donext);
			db_queue_item_error(q);
			donext = flags & DBF_NEXT ? TRUE : FALSE;
			/* Chain ended: clear the error state and transaction. */
			if(!donext) {
				errtrans = FALSE;
				trans_end.tv_sec = 0;
			}
			g_free(q);
			continue;
		}

		// handle LAST queries
		if(flags & DBF_LAST) {
			r = db_queue_process_one(db, q, nocache, trans_end.tv_sec?TRUE:FALSE, &res, &lastid);
			// Commit first, then send back the final result
			if(trans_end.tv_sec) {
				if(r == SQLITE_DONE)
					r = db_queue_process_commit(db);
				if(r != SQLITE_DONE)
					db_queue_process_rollback(db);
			}
			trans_end.tv_sec = 0;
			donext = FALSE;
			db_queue_item_final(res, r, lastid);
			g_free(q);
			continue;
		}

		// start a new transaction for normal/NEXT queries
		if(!trans_end.tv_sec) {
			g_get_current_time(&trans_end);
			g_time_val_add(&trans_end, DB_FLUSH_TIMEOUT);
			r = db_queue_process_begin(db);
			if(r != SQLITE_DONE) {
				/* BEGIN failed: poison the chain if one follows,
				 * otherwise drop the transaction entirely. */
				if(flags & DBF_NEXT)
					donext = errtrans = TRUE;
				else
					trans_end.tv_sec = 0;
				db_queue_item_error(q);
				g_free(q);
				continue;
			}
		}

		// handle normal/NEXT queries
		r = db_queue_process_one(db, q, nocache, TRUE, &res, &lastid);
		db_queue_item_final(res, r, lastid);
		g_free(q);

		// Rollback and update state on error
		if(r != SQLITE_DONE) {
			db_queue_process_rollback(db);
			if(flags & DBF_NEXT)
				errtrans = TRUE;
			else
				trans_end.tv_sec = 0;
		}
	}
}