/****************************************************************************** Description.: this is the main worker thread it loops forever, grabs a fresh frame and stores it to file Input Value.: Return Value: ******************************************************************************/ void *worker_thread(void *arg) { int ok = 1; int frame_size = 0; unsigned char *tmp_framebuffer = NULL; struct sockaddr_in addr; int sd; bzero(&addr, sizeof(addr)); /* set cleanup handler to cleanup allocated ressources */ pthread_cleanup_push(worker_cleanup, NULL); // set TCP server data structures --------------------------- if(port <= 0) { OPRINT("a valid TCP port must be provided\n"); return NULL; } addr.sin_addr.s_addr = inet_addr(server); addr.sin_family = AF_INET; addr.sin_port = htons(port); // ----------------------------------------------------------- struct timeval imageProcessingStart; struct timeval imageProcessingStop; struct timeval socketStart; struct timeval socketStop; while (ok >= 0 && !pglobal->stop) { //DBG("waiting for fresh frame\n"); gettimeofday(&imageProcessingStart, NULL); pthread_mutex_lock(&pglobal->in[input_number].db); pthread_cond_wait(&pglobal->in[input_number].db_update, &pglobal->in[input_number].db); /* read buffer */ frame_size = pglobal->in[input_number].size; /* check if buffer for frame is large enough, increase it if necessary */ if(frame_size > max_frame_size) { DBG("increasing buffer size to %d\n", frame_size); max_frame_size = frame_size + (1 << 16); if((tmp_framebuffer = realloc(frame, max_frame_size)) == NULL) { pthread_mutex_unlock(&pglobal->in[input_number].db); LOG("not enough memory\n"); return NULL; } frame = tmp_framebuffer; } /* copy frame to our local buffer now */ memcpy(frame, pglobal->in[input_number].buf, frame_size); /* allow others to access the global buffer again */ pthread_mutex_unlock(&pglobal->in[input_number].db); gettimeofday(&imageProcessingStop, NULL); printDuration(&imageProcessingStart, &imageProcessingStop, 
"Image"); /* Send image */ gettimeofday(&socketStart, NULL); DBG("Create connection to %s:%d\n", server, port); sd = socket(AF_INET, SOCK_STREAM, 0); // Test usleep(200 * 1000); if (connect(sd , (struct sockaddr*) &addr , sizeof(addr)) == 0) { DBG("Connection to %s:%d established\n", server, port); if (!send(sd, frame, frame_size, 0)) { perror("Image was not send"); } DBG("Closing connection to %s:%d\n", server, port); close(sd); } else { perror("connect"); } gettimeofday(&socketStop, NULL); printDuration(&socketStart, &socketStop, "Socket"); } DBG("Ending TCP worker thread\n"); /* cleanup now */ pthread_cleanup_pop(1); return NULL; }
void *channel_pth(void *item) { uint32_t i,ret,dglen; uint32_t fd; uint32_t data_per_sec = 0; struct timeval vtime; struct msg_s2c_st *msg2c = NULL; struct sockaddr_in his_addr; struct channel_item *ptr = item; glob_t res; char patstr[PATSTRSIZE]; sigset_t set,oset; /* block sigals */ sigfillset(&set); pthread_sigmask(SIG_UNBLOCK,&set,&oset); pthread_sigmask(SIG_BLOCK,&set,NULL); /* set address */ his_addr.sin_family = AF_INET; his_addr.sin_port = htons(args.recieve_port); inet_pton(AF_INET,args.multicast_group,&his_addr.sin_addr); /* start to send data */ memset(patstr,0,PATSTRSIZE); strcat(patstr,ptr->path); strcat(patstr,PATTERN); if(glob(patstr,0,NULL,&res)!=0){ syslog(LOG_ERR,"%s\n","no mp3 item found."); pthread_exit(NULL); } pthread_cleanup_push(glob_free,&res); if(((msg2c=malloc(sizeof(*msg2c)+SBUFSIZE-1))==NULL)){ syslog(LOG_ERR,"%s\n","failed to allocate memory."); globfree(&res); pthread_exit(NULL); } pthread_cleanup_push(do_free,msg2c); for(i=0;i<res.gl_pathc;){ if((fd=open(res.gl_pathv[i],O_RDONLY))<0){ syslog(LOG_ERR,"failed to open %s.\n",ptr->path); free(msg2c); globfree(&res); pthread_exit(NULL); } pthread_cleanup_push(do_close,(void*)fd); while(1){ if(data_per_sec<32768){ memset(msg2c->data,0,SBUFSIZE); ret=read(fd,msg2c->data,SBUFSIZE); data_per_sec+=ret; if(ret==0){ break; } msg2c->channel_id = htons(ptr->id); dglen = ret + sizeof(msg2c->channel_id); while(sendto(sd, msg2c, dglen, 0, (void*)&his_addr, sizeof(his_addr))<0) { if(errno==EINTR){ continue; } syslog(LOG_ERR,"%s\n","failed to send message."); free(msg2c); globfree(&res); pthread_exit(NULL); } }else { /* init time */ vtime.tv_sec = 1; vtime.tv_usec = 0; select(0,NULL,NULL,NULL,&vtime); data_per_sec = 0; } } pthread_cleanup_pop(1); i++; if(i==res.gl_pathc){ i = 0; } } pthread_sigmask(SIG_SETMASK,&oset,NULL); pthread_cleanup_pop(1); pthread_cleanup_pop(1); syslog(LOG_ERR,"i=%d\t%s\n",i,"exception exit."); pthread_exit(NULL); }
/*
 * Reader thread for one STAPI demux device.
 *
 * Blocks on the device's signal handle waiting for a filled section buffer,
 * reads matched sections, and dispatches each to dvbapi_process_input() for
 * every demux/filter whose fd matches.  Runs until aborted by the driver or
 * until too many consecutive errors occur.
 *
 * sparam: struct read_thread_param* carrying the device index and client.
 * Returns: never returns normally; exits via pthread_exit().
 */
void *stapi_read_thread(void *sparam)
{
    int32_t dev_index, ErrorCode, i, j, CRCValid;
    uint32_t QueryBufferHandle = 0, DataSize = 0;
    uchar buf[BUFFLEN];

    struct read_thread_param *para = sparam;
    dev_index = para->id;

    /* associate the owning client with this thread's TLS slot */
    pthread_setspecific(getclient, para->cli);
    /* async cancel: thread may be killed mid-call by the stopper */
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
    pthread_cleanup_push(stapi_cleanup_thread, (void*) dev_index);

    int32_t error_count = 0;

    while (1) {
        QueryBufferHandle = 0;
        /* wait (1000 ms) for a buffer to be signalled on this device */
        ErrorCode = oscam_stapi_SignalWaitBuffer(dev_list[dev_index].SignalHandle, &QueryBufferHandle, 1000);

        /* error codes below are vendor-defined (see the case comments) */
        switch (ErrorCode) {
            case 0: // NO_ERROR:
                break;
            case 852042: // ERROR_SIGNAL_ABORTED
                cs_log("Caught abort signal");
                pthread_exit(NULL);     /* runs stapi_cleanup_thread */
                break;
            case 11: // ERROR_TIMEOUT:
                //cs_log("timeout %d", dev_index);
                //TODO: if pidindex == -1 try next
                continue;
                break;
            default:
                /* if a buffer was handed to us despite the error, flush it
                 * so the driver doesn't stall */
                if (QueryBufferHandle != 0) {
                    cs_log("SignalWaitBuffer error: %d", ErrorCode);
                    oscam_stapi_BufferFlush(QueryBufferHandle);
                    continue;
                }
                cs_log("SignalWaitBuffer: index %d ErrorCode: %d - QueryBuffer: %x", dev_index, ErrorCode, QueryBufferHandle);
                error_count++;
                /* bail out after 10 consecutive hard errors */
                if (error_count>10) {
                    cs_log("Too many errors in reader thread %d, quitting.", dev_index);
                    pthread_exit(NULL);
                }
                continue;
                break;
        }

        uint32_t NumFilterMatches = 0;
        int32_t demux_id = 0, filter_num = 0;
        DataSize = 0;
        uint32_t k;

        uint32_t MatchedFilterList[10];
        /* pull the section data and the list of filters it matched */
        ErrorCode = oscam_stapi_BufferReadSection(QueryBufferHandle, MatchedFilterList, 10, &NumFilterMatches, &CRCValid, buf, BUFFLEN, &DataSize);

        if (ErrorCode != 0) {
            cs_log("BufferRead: index: %d ErrorCode: %d", dev_index, ErrorCode);
            cs_sleepms(1000);   /* back off before retrying */
            continue;
        }

        if (DataSize<=0)
            continue;           /* empty section: nothing to dispatch */

        pthread_mutex_lock(&filter_lock); // don't use cs_lock() here; multiple threads using same s_client struct
        /* map each matched filter handle back to its (demux, filter) slot */
        for(k=0;k<NumFilterMatches;k++) {
            for (i=0;i<MAX_DEMUX;i++) {
                for (j=0;j<MAX_FILTER;j++) {
                    if (dev_list[dev_index].demux_fd[i][j].fd == MatchedFilterList[k]) {
                        demux_id=i;
                        filter_num=j;

                        dvbapi_process_input(demux_id, filter_num, buf, DataSize);
                    }
                }
            }
        }
        pthread_mutex_unlock(&filter_lock);
    }
    /* never reached; lexically pairs with the push above */
    pthread_cleanup_pop(0);
}
/*
 * Thread body demonstrating that a handler registered with
 * pthread_cleanup_push() fires on pthread_exit(): cleanup1 is pushed with
 * the thread argument, then the thread terminates immediately.  The pop(0)
 * and return are never reached at run time, but the pop is lexically
 * required to pair with the push.
 */
static void *thread_func_exit(void *arg)
{
    pthread_cleanup_push(cleanup1, arg);
    pthread_exit(NULL);
    pthread_cleanup_pop(0);
    return NULL;
}
void *rs_start_accept_thread(void *data) { int err, cli_fd; socklen_t socklen; rs_master_info_t *mi; struct sockaddr_in cli_addr; rs_request_dump_t *rd; mi = (rs_master_info_t *) data; pthread_cleanup_push(rs_free_accept_thread, mi); if(mi == NULL) { rs_log_err(0, "accept thread can not get master info struct"); goto free; } rs_memzero(&cli_addr, sizeof(cli_addr)); for( ;; ) { cli_fd = accept(mi->svr_fd, (struct sockaddr *) &cli_addr, &socklen); if(cli_fd == -1) { if(rs_errno == EINTR) { continue; } rs_log_err(rs_errno, "accept() failed"); goto free; } /* register slave */ rd = rs_get_request_dump(mi->req_dump_info); if(rd == NULL) { rs_log_err(0, "no more free request_dump struct"); rs_close(cli_fd); cli_fd = -1; continue; } rd->cli_fd = cli_fd; /* init ring buffer */ if(rs_init_ring_buffer2(&(rd->ring_buf), RS_RING_BUFFER_NUM) != RS_OK) { goto free; } /* init slab */ if(rs_init_slab(&(rd->slab), NULL, mi->slab_init_size, mi->slab_factor , mi->slab_mem_size, RS_SLAB_PREALLOC) != RS_OK) { goto free; } /* create dump thread */ if((err = pthread_create(&(rd->dump_thread), &(mi->req_dump_info->thread_attr), rs_start_dump_thread, (void *) rd)) != 0) { rs_log_err(err, "pthread_create() failed, req_dump thread"); goto free; } } free: pthread_cleanup_pop(1); return NULL; }
/*
 * Loops for up to connection->max_attempts (0 == unlimited) trying to fetch
 * the URL's info.  Marks the connection as running for the duration, sleeps
 * connection->retry_delay between attempts, and returns the final uerr_t.
 * HOK / NEWLOCATION / HTTPNSFOD return immediately; any other error retries.
 */
uerr_t http_get_url_info_loop(connection_t * connection)
{
  /* mark the connection busy under its access mutex */
  pthread_mutex_lock(&connection->access_mutex);
  connection->running = TRUE;
  pthread_mutex_unlock(&connection->access_mutex);

  assert(connection->attempts >= 0);

  do {
    /* delay before every retry except the first attempt and redirects */
    if (connection->attempts > 0 && connection->err != NEWLOCATION) {
      connection_show_message(connection,
                              _("Retrying... Attempt %d in %d seconds"),
                              connection->attempts,
                              connection->retry_delay.tv_sec);
      /* NOTE(review): only tv_sec is honoured here; retry_delay.tv_usec,
       * if ever set, is silently dropped — confirm that is intended */
      delay_ms(connection->retry_delay.tv_sec * 1000);
    }

    /*Push the handler which will cleanup any sockets that are left open
      if this thread is cancelled inside proz_http_get_url_info() */
    pthread_cleanup_push(cleanup_socks, (void *) connection);
    connection->err = proz_http_get_url_info(connection);
    /*pop the handler (0 = do not run it on the normal path) */
    pthread_cleanup_pop(0);

    connection->attempts++;

    switch (connection->err) {
    case HOK:
      connection_show_message(connection, _("Successfully got info"));
      pthread_mutex_lock(&connection->access_mutex);
      connection->running = FALSE;
      pthread_mutex_unlock(&connection->access_mutex);
      return connection->err;
      break;
    case NEWLOCATION:
      /* redirect: return at once; caller follows the new location.
       * NOTE(review): running is deliberately left TRUE here — presumably
       * because the transfer continues at the new URL; verify with callers */
      return connection->err;
      break;
    case HTTPNSFOD:
      connection_show_message(connection, _("File not found!"));
      pthread_mutex_lock(&connection->access_mutex);
      connection->running = FALSE;
      pthread_mutex_unlock(&connection->access_mutex);
      return connection->err;
      break;
    default:
      /* transient error: report and fall through to retry */
      connection_show_message(connection, proz_strerror(connection->err));
      break;
    }
  } while ((connection->attempts < connection->max_attempts)
           || connection->max_attempts == 0);

  /* all attempts exhausted */
  connection_show_message(connection,
                          _("I have tried %d attempt(s) and have failed, aborting"),
                          connection->attempts);
  pthread_mutex_lock(&connection->access_mutex);
  connection->running = FALSE;
  pthread_mutex_unlock(&connection->access_mutex);

  return connection->err;
}
void vegas_pfb_thread(void *_args) { /* Get args */ struct vegas_thread_args *args = (struct vegas_thread_args *)_args; int rv; /* Set cpu affinity */ cpu_set_t cpuset, cpuset_orig; sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig); //CPU_ZERO(&cpuset); CPU_CLR(13, &cpuset); CPU_SET(11, &cpuset); rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); if (rv<0) { vegas_error("vegas_pfb_thread", "Error setting cpu affinity."); perror("sched_setaffinity"); } /* Set priority */ rv = setpriority(PRIO_PROCESS, 0, args->priority); if (rv<0) { vegas_error("vegas_pfb_thread", "Error setting priority level."); perror("set_priority"); } /* Attach to status shared mem area */ struct vegas_status st; rv = vegas_status_attach(&st); if (rv!=VEGAS_OK) { vegas_error("vegas_pfb_thread", "Error attaching to status shared memory."); pthread_exit(NULL); } pthread_cleanup_push((void *)vegas_status_detach, &st); pthread_cleanup_push((void *)set_exit_status, &st); pthread_cleanup_push((void *)vegas_thread_set_finished, args); /* Init status */ vegas_status_lock_safe(&st); hputs(st.buf, STATUS_KEY, "init"); vegas_status_unlock_safe(&st); /* Init structs */ struct vegas_params gp; struct sdfits sf; pthread_cleanup_push((void *)vegas_free_sdfits, &sf); /* Attach to databuf shared mem */ struct vegas_databuf *db_in, *db_out; db_in = vegas_databuf_attach(args->input_buffer); if (db_in==NULL) { char msg[256]; sprintf(msg, "Error attaching to databuf(%d) shared memory.", args->input_buffer); vegas_error("vegas_pfb_thread", msg); pthread_exit(NULL); } pthread_cleanup_push((void *)vegas_databuf_detach, db_in); db_out = vegas_databuf_attach(args->output_buffer); if (db_out==NULL) { char msg[256]; sprintf(msg, "Error attaching to databuf(%d) shared memory.", args->output_buffer); vegas_error("vegas_pfb_thread", msg); pthread_exit(NULL); } pthread_cleanup_push((void *)vegas_databuf_detach, db_out); /* Loop */ char *hdr_in = NULL; int curblock_in=0; int first=1; int acc_len = 0; int nchan = 0; int 
nsubband = 0; signal(SIGINT,cc); vegas_status_lock_safe(&st); if (hgeti4(st.buf, "NCHAN", &nchan)==0) { fprintf(stderr, "ERROR: %s not in status shm!\n", "NCHAN"); } if (hgeti4(st.buf, "NSUBBAND", &nsubband)==0) { fprintf(stderr, "ERROR: %s not in status shm!\n", "NSUBBAND"); } vegas_status_unlock_safe(&st); if (EXIT_SUCCESS != init_gpu(db_in->block_size, db_out->block_size, nsubband, nchan)) { (void) fprintf(stderr, "ERROR: GPU initialisation failed!\n"); run = 0; } while (run) { /* Note waiting status */ vegas_status_lock_safe(&st); hputs(st.buf, STATUS_KEY, "waiting"); vegas_status_unlock_safe(&st); /* Wait for buf to have data */ rv = vegas_databuf_wait_filled(db_in, curblock_in); if (rv!=0) continue; /* Note waiting status, current input block */ vegas_status_lock_safe(&st); hputs(st.buf, STATUS_KEY, "processing"); hputi4(st.buf, "PFBBLKIN", curblock_in); vegas_status_unlock_safe(&st); hdr_in = vegas_databuf_header(db_in, curblock_in); /* Get params */ if (first) { vegas_read_obs_params(hdr_in, &gp, &sf); /* Read required exposure from status shared memory, and calculate corresponding accumulation length */ acc_len = (abs(sf.hdr.chan_bw) * sf.hdr.hwexposr); } vegas_read_subint_params(hdr_in, &gp, &sf); /* Call PFB function */ do_pfb(db_in, curblock_in, db_out, first, st, acc_len); /* Mark input block as free */ vegas_databuf_set_free(db_in, curblock_in); /* Go to next input block */ curblock_in = (curblock_in + 1) % db_in->n_block; /* Check for cancel */ pthread_testcancel(); if (first) { first=0; } } run=0; //cudaThreadExit(); pthread_exit(NULL); cleanup_gpu(); pthread_cleanup_pop(0); /* Closes vegas_databuf_detach(out) */ pthread_cleanup_pop(0); /* Closes vegas_databuf_detach(in) */ pthread_cleanup_pop(0); /* Closes vegas_free_sdfits */ pthread_cleanup_pop(0); /* Closes vegas_thread_set_finished */ pthread_cleanup_pop(0); /* Closes set_exit_status */ pthread_cleanup_pop(0); /* Closes vegas_status_detach */ }
/*
 * Fragment pre-processing thread: pulls file_buffers off to_process_frag,
 * computes checksums, and for fragment-only files performs a duplicate
 * check against previously seen files, pre-reading fragment blocks into the
 * cache so the main thread does not stall on disk I/O.  Results are handed
 * to the main thread in order via seq_queue_put().
 *
 * destination_file: path of the output filesystem (opened read-only here so
 * existing fragments can be read back for duplicate comparison).
 */
void *frag_thrd(void *destination_file)
{
	sigset_t sigmask, old_mask;
	char *data_buffer;
	int fd;

	/* defer termination signals; they are handled by the main thread */
	sigemptyset(&sigmask);
	sigaddset(&sigmask, SIGINT);
	sigaddset(&sigmask, SIGTERM);
	sigaddset(&sigmask, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask);

	fd = open(destination_file, O_RDONLY);
	if(fd == -1)
		BAD_ERROR("frag_thrd: can't open destination for reading\n");

	/* scratch buffer for reading compressed fragment blocks off disk */
	data_buffer = malloc(SQUASHFS_FILE_MAX_SIZE);
	if(data_buffer == NULL)
		MEM_ERROR();

	/* if cancelled while holding dup_mutex, release it */
	pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex);

	while(1) {
		struct file_buffer *file_buffer = queue_get(to_process_frag);
		struct file_buffer *buffer;
		int sparse = checksum_sparse(file_buffer);
		struct file_info *dupl_ptr;
		long long file_size;
		unsigned short checksum;
		char flag;
		int res;

		/* an entirely sparse fragment is stored as zero bytes */
		if(sparse_files && sparse) {
			file_buffer->c_byte = 0;
			file_buffer->fragment = FALSE;
		} else
			file_buffer->c_byte = file_buffer->size;

		/*
		 * Speculatively pull into the fragment cache any fragment blocks
		 * which contain fragments which *this* fragment may be
		 * a duplicate.
		 *
		 * By ensuring the fragment block is in cache ahead of time
		 * should eliminate the parallelisation stall when the
		 * main thread needs to read the fragment block to do a
		 * duplicate check on it.
		 *
		 * If this is a fragment belonging to a larger file
		 * (with additional blocks) then ignore it.  Here we're
		 * interested in the "low hanging fruit" of files which
		 * consist of only a fragment
		 */
		if(file_buffer->file_size != file_buffer->size) {
			seq_queue_put(to_main, file_buffer);
			continue;
		}

		file_size = file_buffer->file_size;

		/* candidate list is hashed by file size */
		pthread_mutex_lock(&dup_mutex);
		dupl_ptr = dupl[DUP_HASH(file_size)];
		pthread_mutex_unlock(&dup_mutex);

		file_buffer->dupl_start = dupl_ptr;
		file_buffer->duplicate = FALSE;

		for(; dupl_ptr; dupl_ptr = dupl_ptr->next) {
			/* only candidates that are also fragment-only files of
			 * exactly the same size can be duplicates */
			if(file_size != dupl_ptr->file_size ||
					file_size != dupl_ptr->fragment->size)
				continue;

			pthread_mutex_lock(&dup_mutex);
			flag = dupl_ptr->have_frag_checksum;
			checksum = dupl_ptr->fragment_checksum;
			pthread_mutex_unlock(&dup_mutex);

			/*
			 * If we have the checksum and it matches then
			 * read in the fragment block.
			 *
			 * If we *don't* have the checksum, then we are
			 * appending, and the fragment block is on the
			 * "old" filesystem.  Read it in and checksum
			 * the entire fragment buffer
			 */
			if(!flag) {
				buffer = get_fragment_cksum(dupl_ptr,
					data_buffer, fd, &checksum);
				if(checksum != file_buffer->checksum) {
					cache_block_put(buffer);
					continue;
				}
			} else if(checksum == file_buffer->checksum)
				buffer = get_fragment(dupl_ptr->fragment,
					data_buffer, fd);
			else
				continue;

			/* checksums match: confirm with a byte comparison */
			res = memcmp(file_buffer->data, buffer->data +
				dupl_ptr->fragment->offset, file_size);
			cache_block_put(buffer);

			if(res == 0) {
				/* genuine duplicate: hand the main thread a
				 * plain malloc'd copy marked as duplicate and
				 * release the cache block */
				struct file_buffer *dup = malloc(sizeof(*dup));
				if(dup == NULL)
					MEM_ERROR();
				memcpy(dup, file_buffer, sizeof(*dup));
				cache_block_put(file_buffer);
				dup->dupl_start = dupl_ptr;
				dup->duplicate = TRUE;
				file_buffer = dup;
				break;
			}
		}

		seq_queue_put(to_main, file_buffer);
	}

	/* never reached; pairs lexically with the push above */
	pthread_cleanup_pop(0);
}
static struct file_buffer *get_fragment(struct fragment *fragment, char *data_buffer, int fd) { struct squashfs_fragment_entry *disk_fragment; struct file_buffer *buffer, *compressed_buffer; long long start_block; int res, size, index = fragment->index; char locked; /* * Lookup fragment block in cache. * If the fragment block doesn't exist, then get the compressed version * from the writer cache or off disk, and decompress it. * * This routine has two things which complicate the code: * * 1. Multiple threads can simultaneously lookup/create the * same buffer. This means a buffer needs to be "locked" * when it is being filled in, to prevent other threads from * using it when it is not ready. This is because we now do * fragment duplicate checking in parallel. * 2. We have two caches which need to be checked for the * presence of fragment blocks: the normal fragment cache * and a "reserve" cache. The reserve cache is used to * prevent an unnecessary pipeline stall when the fragment cache * is full of fragments waiting to be compressed. */ pthread_cleanup_push((void *) pthread_mutex_unlock, &dup_mutex); pthread_mutex_lock(&dup_mutex); again: buffer = cache_lookup_nowait(fragment_buffer, index, &locked); if(buffer) { pthread_mutex_unlock(&dup_mutex); if(locked) /* got a buffer being filled in. Wait for it */ cache_wait_unlock(buffer); goto finished; } /* not in fragment cache, is it in the reserve cache? */ buffer = cache_lookup_nowait(reserve_cache, index, &locked); if(buffer) { pthread_mutex_unlock(&dup_mutex); if(locked) /* got a buffer being filled in. 
Wait for it */ cache_wait_unlock(buffer); goto finished; } /* in neither cache, try to get it from the fragment cache */ buffer = cache_get_nowait(fragment_buffer, index); if(!buffer) { /* * no room, get it from the reserve cache, this is * dimensioned so it will always have space (no more than * processors + 1 can have an outstanding reserve buffer) */ buffer = cache_get_nowait(reserve_cache, index); if(!buffer) { /* failsafe */ ERROR("no space in reserve cache\n"); goto again; } } pthread_mutex_unlock(&dup_mutex); compressed_buffer = cache_lookup(fwriter_buffer, index); pthread_cleanup_push((void *) pthread_mutex_unlock, &fragment_mutex); pthread_mutex_lock(&fragment_mutex); disk_fragment = &fragment_table[index]; size = SQUASHFS_COMPRESSED_SIZE_BLOCK(disk_fragment->size); start_block = disk_fragment->start_block; pthread_cleanup_pop(1); if(SQUASHFS_COMPRESSED_BLOCK(disk_fragment->size)) { int error; char *data; if(compressed_buffer) data = compressed_buffer->data; else { res = read_filesystem(fd, start_block, size, data_buffer); if(res == 0) { ERROR("Failed to read fragment from output" " filesystem\n"); BAD_ERROR("Output filesystem corrupted?\n"); } data = data_buffer; } res = compressor_uncompress(comp, buffer->data, data, size, block_size, &error); // CJH: Decompression errors are displayed elsewhere //if(res == -1) // BAD_ERROR("%s uncompress failed with error code %d\n", // comp->name, error); } else if(compressed_buffer) memcpy(buffer->data, compressed_buffer->data, size); else { res = read_filesystem(fd, start_block, size, buffer->data); if(res == 0) { ERROR("Failed to read fragment from output " "filesystem\n"); BAD_ERROR("Output filesystem corrupted?\n"); } } cache_unlock(buffer); cache_block_put(compressed_buffer); finished: pthread_cleanup_pop(0); return buffer; }
/*
 * Same as http_xml_send_xmlrpc() but with user-defined URL path.
 *
 * Builds an XML-RPC request for `methodName` with `nparams` parameters,
 * sends it via http_xml_send(), and decodes the reply into `rep` (either as
 * the compact `rep_type` or as an exploded xmlrpc value when rep_type is
 * NULL).  A fault in the reply is copied to *faultp when non-NULL.
 *
 * Returns 0 on success, -1 on error, -2 when the server returned a fault.
 * A cleanup handler frees the context if the calling thread is cancelled
 * inside http_xml_send().
 */
int
http_xml_send_xmlrpc2(struct http_client *client, struct in_addr ip,
	u_int16_t port, int https, const char *username, const char *password,
	const char *urlpath, const char *methodName, u_int nparams,
	const struct structs_type **ptypes, const void **pdatas,
	const struct structs_type *rep_type, void *rep,
	struct xmlrpc_compact_fault *faultp, structs_xmllog_t *rlogger)
{
	const struct structs_type *const xreq_type = &structs_type_xmlrpc_request;
	const struct structs_type *const xrep_type = &structs_type_xmlrpc_response;
	const struct structs_type *const ftype = &structs_type_xmlrpc_compact_fault;
	struct http_xml_send_xmlrpc_ctx *ctx;
	struct xmlrpc_compact_fault fault;
	char ebuf[128];
	int ret = -1;
	int r;

	/* Get context */
	if ((ctx = MALLOC(MEM_TYPE, sizeof(*ctx))) == NULL) {
		alogf(LOG_ERR, "%s: %m", "malloc");
		return (-1);
	}
	memset(ctx, 0, sizeof(*ctx));
	/* handler frees ctx (and its xreq/xrep) on cancellation or at pop(1) */
	pthread_cleanup_push(http_xml_send_xmlrpc_cleanup, ctx);

	/* Build XML-RPC request and reply */
	if ((ctx->xreq = structs_xmlrpc_build_request(MEM_TYPE, methodName,
	    nparams, ptypes, pdatas)) == NULL) {
		alogf(LOG_ERR, "%s: %m", "structs_xmlrpc_build_request");
		goto fail;
	}
	if ((ctx->xrep = MALLOC(MEM_TYPE, xrep_type->size)) == NULL) {
		alogf(LOG_ERR, "%s: %m", "malloc");
		goto fail;
	}
	if (structs_init(xrep_type, NULL, ctx->xrep) == -1) {
		alogf(LOG_ERR, "%s: %m", "structs_init");
		FREE(MEM_TYPE, ctx->xrep);
		ctx->xrep = NULL;	/* avoid double free in cleanup */
		goto fail;
	}

#ifdef XML_RPC_DEBUG
	printf("%s: sending this XML-RPC request:\n", __FUNCTION__);
	(void)structs_xml_output(xreq_type, XML_RPC_REQUEST_TAG,
	    NULL, ctx->xreq, stdout, NULL, 0);
#endif

	/* Send request and get reply; note: we could get canceled here. */
	r = http_xml_send(client, ip, port, https, urlpath, username,
	    password, XML_RPC_REQUEST_TAG, NULL, xreq_type, ctx->xreq,
	    STRUCTS_XML_FULL, XML_RPC_REPLY_TAG, NULL, NULL, xrep_type,
	    ctx->xrep, 0, rlogger);

#ifdef XML_RPC_DEBUG
	printf("%s: got this XML-RPC reply (error=%s):\n",
	    __FUNCTION__, r == -1 ? strerror(errno) : "none");
	(void)structs_xml_output(xrep_type, XML_RPC_REPLY_TAG,
	    NULL, ctx->xrep, stdout, NULL, 0);
#endif

	/* Check error */
	if (r == -1)
		goto fail;

	/* Check for fault */
	if (structs_init(ftype, NULL, &fault) == -1) {
		alogf(LOG_ERR, "%s: %m", "structs_init");
		goto fail;
	}
	if (structs_xmlrpc2struct(xrep_type, ctx->xrep,
	    "fault.value", ftype, &fault, NULL, NULL, 0) == 0) {
		/* server returned a fault: hand it to the caller if wanted */
		if (faultp != NULL)
			*faultp = fault;
		else
			structs_free(ftype, NULL, &fault);
		ret = -2;		/* -2 indicates fault */
		goto fail;
	}
	structs_free(ftype, NULL, &fault);

	/* Extract response (if desired) */
	if (rep != NULL) {
		if (rep_type != NULL) {		/* return compact type */
			if (structs_xmlrpc2struct(xrep_type, ctx->xrep,
			    "params.0.value", rep_type, rep, NULL,
			    ebuf, sizeof(ebuf)) == -1) {
				(*rlogger)(LOG_ERR, "error decoding XML-RPC"
				    " response: %s", ebuf);
				goto fail;
			}
		} else {			/* return exploded type */
			if (structs_get(&structs_type_xmlrpc_value,
			    "params.0.value", ctx->xrep, rep) == -1) {
				alogf(LOG_ERR, "structs_get: %m%s", "");
				goto fail;
			}
		}
	}

	/* OK */
	ret = 0;

fail:;
	/* Done: pop(1) runs the cleanup handler, releasing ctx */
	pthread_cleanup_pop(1);
	return (ret);
}
/*
 * function handle_requests_loop(): infinite loop of requests handling
 * algorithm: forever, if there are requests to handle, take the first
 *            and handle it. Then wait on the given condition variable,
 *            and when it is signaled, re-do the loop.
 *            increases number of pending requests by one.
 * input:     pointer to a struct handler_thread_params (thread id, mutex,
 *            condition variable and request list).
 * output:    none; exits via pthread_exit() once done_creating_requests is
 *            set and the queue is drained.
 */
void* handle_requests_loop(void* thread_params)
{
    int rc;                         /* return code of pthreads functions.  */
    struct request* a_request;      /* pointer to a request.               */
    struct handler_thread_params *data;
                                    /* hadler thread's parameters          */

    /* sanity check -make sure data isn't NULL */
    data = (struct handler_thread_params*)thread_params;
    assert(data);

    printf("Starting thread '%d'\n", data->thread_id);
    fflush(stdout);

    /* set my cancel state to 'enabled', and cancel type to 'defered'. */
    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);

    /* set thread cleanup handler */
    pthread_cleanup_push(cleanup_free_mutex, (void*)data->request_mutex);

    /* lock the mutex, to access the requests list exclusively. */
    rc = pthread_mutex_lock(data->request_mutex);

#ifdef DEBUG
    printf("thread '%d' after pthread_mutex_lock\n", data->thread_id);
    fflush(stdout);
#endif /* DEBUG */

    /* do forever.... */
    while (1) {
        int num_requests = get_requests_number(data->requests);

#ifdef DEBUG
        printf("thread '%d', num_requests =  %d\n",
               data->thread_id, num_requests);
        fflush(stdout);
#endif /* DEBUG */

        if (num_requests > 0) { /* a request is pending */
            a_request = get_request(data->requests);
            if (a_request) { /* got a request - handle it and free it */
                /* NOTE: the mutex stays locked while the request is handled,
                 * so requests are processed one at a time per thread */
                handle_request(a_request, data->thread_id);
                free(a_request);
            }
        }
        else {
            /* the thread checks the flag before waiting            */
            /* on the condition variable.                           */
            /* if no new requests are going to be generated, exit.  */
            if (done_creating_requests) {
                /* NOTE(review): the mutex is unlocked manually here, and
                 * pthread_exit() will then run cleanup_free_mutex — if that
                 * handler unlocks the mutex again this is a double unlock;
                 * verify what cleanup_free_mutex actually does */
                pthread_mutex_unlock(data->request_mutex);
                printf("thread '%d' exiting\n", data->thread_id);
                fflush(stdout);
                pthread_exit(NULL);
            }
            /* wait for a request to arrive. note the mutex will be  */
            /* unlocked here, thus allowing other threads access to  */
            /* requests list.                                        */
#ifdef DEBUG
            printf("thread '%d' before pthread_cond_wait\n", data->thread_id);
            fflush(stdout);
#endif /* DEBUG */
            rc = pthread_cond_wait(data->got_request,
                                   data->request_mutex);
            /* and after we return from pthread_cond_wait, the mutex  */
            /* is locked again, so we don't need to lock it ourselves */
#ifdef DEBUG
            printf("thread '%d' after pthread_cond_wait\n", data->thread_id);
            fflush(stdout);
#endif /* DEBUG */
        }
    }

    /* remove thread cleanup handler. never reached, but we must use */
    /* it here, according to pthread_cleanup_push's manual page.     */
    pthread_cleanup_pop(0);
}
/*
 * Send a copy of the message, wait for a reply, and return the reply.
 *
 * The "reply" should already be initialized.
 *
 * This properly handles the calling thread's being canceled: a cleanup
 * handler (http_xml_send_cleanup) releases the HTTP client connection.
 * Uses POST when a payload is supplied, GET otherwise; optional HTTP basic
 * auth when both username and password are given.
 *
 * Returns 0 on success, -1 on error (with a message logged via alogf).
 */
int
http_xml_send(struct http_client *client, struct in_addr ip,
	u_int16_t port, int https, const char *urlpath, const char *username,
	const char *password, const char *ptag, const char *pattrs,
	const struct structs_type *ptype, const void *payload, int pflags,
	const char *rtag, char **rattrsp, const char *rattrs_mtype,
	const struct structs_type *rtype, void *reply, int rflags,
	structs_xmllog_t *rlogger)
{
	struct http_client_connection *cc;
	struct http_request *req;
	struct http_response *resp;
	int ret = -1;
	u_int code;
	FILE *fp;

	/* Get HTTP connection */
	if ((cc = http_client_connect(client, ip, port, https)) == NULL) {
		alogf(LOG_ERR, "can't %s for %s:%u: %m",
		    "get HTTP client" _ inet_ntoa(ip) _ port);
		return -1;
	}

	/* Push cleanup hook */
	pthread_cleanup_push(http_xml_send_cleanup, cc);

	/* Set up request */
	req = http_client_get_request(cc);
	/* POST when there is a payload to send, plain GET otherwise */
	if (http_request_set_method(req,
	    payload != NULL ? HTTP_METHOD_POST : HTTP_METHOD_GET) == -1) {
		alogf(LOG_ERR, "can't %s for %s:%u: %m",
		    "set method" _ inet_ntoa(ip) _ port);
		goto fail;
	}
	if (http_request_set_path(req, urlpath) == -1) {
		alogf(LOG_ERR, "can't %s for %s:%u: %m",
		    "set path" _ inet_ntoa(ip) _ port);
		goto fail;
	}
	if (http_request_set_header(req, 0, "Content-Type",
	    "text/xml") == -1) {
		alogf(LOG_ERR, "can't %s for %s:%u: %m",
		    "set content-type" _ inet_ntoa(ip) _ port);
		goto fail;
	}
	/* optional HTTP basic authentication */
	if (username != NULL && password != NULL) {
		char *auth;

		if ((auth = http_request_encode_basic_auth(TYPED_MEM_TEMP,
		    username, password)) == NULL) {
			alogf(LOG_ERR, "can't %s for %s:%u: %m",
			    "encode authorization" _ inet_ntoa(ip) _ port);
			goto fail;
		}
		if (http_request_set_header(req, 0, "Authorization",
		    "Basic %s", auth) == -1) {
			alogf(LOG_ERR, "can't %s for %s:%u: %m",
			    "set authorization header" _ inet_ntoa(ip) _ port);
			FREE(TYPED_MEM_TEMP, auth);
			goto fail;
		}
		FREE(TYPED_MEM_TEMP, auth);
	}

	/* Write XML data to HTTP client output stream */
	if (payload != NULL) {
		if ((fp = http_request_get_output(req, 1)) == NULL) {
			alogf(LOG_ERR, "can't %s for %s:%u: %m",
			    "get output" _ inet_ntoa(ip) _ port);
			goto fail;
		}
		if (structs_xml_output(ptype, ptag, pattrs,
		    payload, fp, NULL, pflags) == -1) {
			alogf(LOG_ERR, "can't %s for %s:%u: %m",
			    "write XML" _ inet_ntoa(ip) _ port);
			goto fail;
		}
	}

	/* Get response */
	if ((resp = http_client_get_response(cc)) == NULL) {
		alogf(LOG_ERR, "can't %s for %s:%u: %m",
		    "get response" _ inet_ntoa(ip) _ port);
		goto fail;
	}
	if ((code = http_response_get_code(resp)) != HTTP_STATUS_OK) {
		alogf(LOG_ERR, "rec'd HTTP error code %d from"
		    "http%s://%s:%u%s: %s", code _ https ? "s" : ""
		    _ inet_ntoa(ip) _ port _ urlpath _
		    http_response_status_msg(code));
		goto fail;
	}

	/* Read XML reply from client input stream */
	if ((fp = http_response_get_input(resp)) == NULL) {
		alogf(LOG_ERR, "can't %s for %s:%u: %m",
		    "get input" _ inet_ntoa(ip) _ port);
		goto fail;
	}
	if (structs_xml_input(rtype, rtag, rattrsp, rattrs_mtype,
	    fp, reply, rflags, rlogger) == -1) {
		alogf(LOG_ERR, "can't %s for %s:%u: %m",
		    "read XML reply" _ inet_ntoa(ip) _ port);
		goto fail;
	}

	/* OK */
	ret = 0;

fail:;
	/* Done: pop(1) runs http_xml_send_cleanup, releasing the connection */
	pthread_cleanup_pop(1);
	return (ret);
}
/****************************************************************************** Description.: this is the main worker thread it loops forever, grabs a fresh frame, decompressed the JPEG and displays the decoded data using SDL Input Value.: Return Value: ******************************************************************************/ void *worker_thread(void *arg) { int frame_size = 0, firstrun = 1; SDL_Surface *screen = NULL, *image = NULL; decompressed_image rgbimage; /* initialze the buffer for the decompressed image */ rgbimage.buffersize = 0; rgbimage.buffer = NULL; /* initialze the SDL video subsystem */ if(SDL_Init(SDL_INIT_VIDEO) < 0) { fprintf(stderr, "Couldn't initialize SDL: %s\n", SDL_GetError()); exit(EXIT_FAILURE); } /* just allocate a large buffer for the JPEGs */ if((frame = malloc(4096 * 1024)) == NULL) { OPRINT("not enough memory for worker thread\n"); exit(EXIT_FAILURE); } /* set cleanup handler to cleanup allocated ressources */ pthread_cleanup_push(worker_cleanup, NULL); while(!pglobal->stop) { DBG("waiting for fresh frame\n"); pthread_cond_wait(&pglobal->in[plugin_number].db_update, &pglobal->in[plugin_number].db); /* read buffer */ frame_size = pglobal->in[plugin_number].size; memcpy(frame, pglobal->in[plugin_number].buf, frame_size); pthread_mutex_unlock(&pglobal->in[plugin_number].db); /* decompress the JPEG and store results in memory */ if(decompress_jpeg(frame, frame_size, &rgbimage)) { DBG("could not properly decompress JPEG data\n"); continue; } if(firstrun) { /* create the primary surface (the visible window) */ screen = SDL_SetVideoMode(rgbimage.width, rgbimage.height, 0, SDL_ANYFORMAT | SDL_HWSURFACE); SDL_WM_SetCaption("MJPG-Streamer Viewer", NULL); /* create a SDL surface to display the data */ image = SDL_AllocSurface(SDL_SWSURFACE, rgbimage.width, rgbimage.height, 24, #if SDL_BYTEORDER == SDL_LIL_ENDIAN 0x0000FF, 0x00FF00, 0xFF0000, #else 0xFF0000, 0x00FF00, 0x0000FF, #endif 0); /* copy the decoded data across */ 
memcpy(image->pixels, rgbimage.buffer, rgbimage.width * rgbimage.height * 3); free(rgbimage.buffer); /* now, that we know the dimensions, we can directly copy to the right surface */ rgbimage.buffer = image->pixels; rgbimage.buffersize = rgbimage.width * rgbimage.height * 3; firstrun = 0; } /* copy the image to the primary surface */ SDL_BlitSurface(image, NULL, screen, NULL); /* redraw the whole surface */ SDL_Flip(screen); } pthread_cleanup_pop(1); /* get rid of the image */ SDL_FreeSurface(image); return NULL; }
/*
 * Trampoline executed as the entry point of every instrumented child thread
 * (v is the pcontech_thread_create handed over by the parent).  It records
 * the thread-create event (parent id, clock skew, start TSC), sets up this
 * thread's local trace buffer, registers the cleanup handler, and only then
 * invokes the user's real start routine.
 */
void* __ctInitThread(void* v)//pcontech_thread_create ptc
{
    void* (*f)(void*);          /* user's start routine */
    void (*g)(void*);           /* cleanup handler */
    void* a;                    /* user's argument (reused for the result) */
    unsigned int p;             /* parent contech thread id */
    long long skew;
    ct_tsc_t start;
    pcontech_thread_create ptc = (pcontech_thread_create) v;
    f = ptc->func;
    a = ptc->arg;
    p = ptc->parent_ctid;
    start = rdtsc();
    //printf("Thread %u alive\n", ptc->child_ctid);

    // HACK -- REMOVE!!!
#if 0
    printf("Hello!\n");
    fflush(stdout);
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(24 - (ptc->child_ctid % 16), &cpuset);
    int r = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    printf("%d - %d - %llx\n", ptc->child_ctid, r, cpuset);
#endif

    //
    // Now compute the skew: spin until the parent has published its TSC
    // sample through the shared create struct, then adopt it
    //
#ifdef CT_CLOCK_SKEW
    skew = 0;
    while (ptc->parent_skew == 0)
    {
        ptc->child_skew = rdtsc();
    }
    skew = ptc->parent_skew;
#else
    skew = 1;
#endif

    /* adopt the id assigned by the parent and set up this thread's buffer */
    __ctThreadLocalNumber = ptc->child_ctid;//__sync_fetch_and_add(&__ctThreadGlobalNumber, 1);
    __ctAllocateLocalBuffer();
    __ctThreadInfoList = NULL;

    /* create struct is fully consumed; release it */
    free(ptc);

    __ctStoreThreadCreate(p, skew, start);
    /* outside a region of interest: flush rather than accumulate */
    if (__ctIsROIEnabled == true && __ctIsROIActive == false)
    {
        __ctQueueBuffer(false);
    }

    /* make sure the trace buffer is flushed however the thread exits */
    g = __ctCleanupThread;
    pthread_cleanup_push(g, (void*)p);

#ifdef CT_OVERHEAD_TRACK
    {
        ct_tsc_t end = rdtsc();
        //__ctTotalThreadOverhead += (end - start);
    }
#endif

    /* run the user's thread function; pop(1) runs the cleanup handler */
    a = f(a);
    pthread_cleanup_pop(1);
    return a;
}
/* Worker loop for one nslcd server thread: owns a single LDAP session and
 * accepts/handles client connections on the shared server socket until
 * cancelled.  The cleanup handler releases the LDAP session on cancellation. */
static void *worker(void UNUSED(*arg))
{
    MYLDAP_SESSION *session;
    int csock;
    int j;
    struct sockaddr_storage addr;
    socklen_t alen;
    fd_set fds;
    struct timeval tv;
    /* create a new LDAP session */
    session = myldap_create_session();
    /* clean up the session if we're done */
    pthread_cleanup_push(worker_cleanup, session);
    /* start waiting for incoming connections */
    while (1)
    {
        /* time out connection to LDAP server if needed */
        myldap_session_check(session);
        /* set up the set of fds to wait on */
        FD_ZERO(&fds);
        FD_SET(nslcd_serversocket, &fds);
        /* set up our timeout value */
        tv.tv_sec = nslcd_cfg->idle_timelimit;
        tv.tv_usec = 0;
        /* wait for a new connection; no timeout if idle_timelimit is unset */
        j = select(nslcd_serversocket + 1, &fds, NULL, NULL,
                   nslcd_cfg->idle_timelimit > 0 ? &tv : NULL);
        /* check result of select() */
        if (j < 0)
        {
            /* EINTR is routine (signals); anything else is a real error,
               but in both cases we just retry the loop */
            if (errno == EINTR)
                log_log(LOG_DEBUG, "select() failed (ignored): %s", strerror(errno));
            else
                log_log(LOG_ERR, "select() failed: %s", strerror(errno));
            continue;
        }
        /* see if our file descriptor is actually ready */
        if (!FD_ISSET(nslcd_serversocket, &fds))
            continue;
        /* wait for a new connection */
        alen = (socklen_t)sizeof(struct sockaddr_storage);
        csock = accept(nslcd_serversocket, (struct sockaddr *)&addr, &alen);
        if (csock < 0)
        {
            if ((errno == EINTR) || (errno == EAGAIN) || (errno == EWOULDBLOCK))
                log_log(LOG_DEBUG, "accept() failed (ignored): %s", strerror(errno));
            else
                log_log(LOG_ERR, "accept() failed: %s", strerror(errno));
            continue;
        }
        /* make sure O_NONBLOCK is not inherited */
        if ((j = fcntl(csock, F_GETFL, 0)) < 0)
        {
            log_log(LOG_ERR, "fctnl(F_GETFL) failed: %s", strerror(errno));
            if (close(csock))
                log_log(LOG_WARNING, "problem closing socket: %s", strerror(errno));
            continue;
        }
        if (fcntl(csock, F_SETFL, j & ~O_NONBLOCK) < 0)
        {
            log_log(LOG_ERR, "fctnl(F_SETFL,~O_NONBLOCK) failed: %s", strerror(errno));
            if (close(csock))
                log_log(LOG_WARNING, "problem closing socket: %s", strerror(errno));
            continue;
        }
        /* indicate new connection to logging module (generates unique id) */
        log_newsession();
        /* handle the connection; handleconnection presumably closes csock --
           TODO confirm ownership against its definition */
        handleconnection(csock, session);
        /* indicate end of session in log messages */
        log_clearsession();
    }
    pthread_cleanup_pop(1);
    return NULL;
}
/**
 * WaitForMultipleObjectsEx:
 * @numobjects: The number of objects in @handles. The maximum allowed
 * is %IO_LAYER_MAXIMUM_WAIT_OBJECTS.
 * @handles: An array of object handles. Duplicates are not allowed.
 * @waitall: If %TRUE, this function waits until all of the handles
 * are signalled. If %FALSE, this function returns when any object is
 * signalled.
 * @timeout: The maximum time in milliseconds to wait for.
 * @alertable: if TRUE, the wait can be interrupted by an APC call
 *
 * This function returns when either one or more of @handles is
 * signalled, or @timeout ms elapses. If @timeout is zero, the state
 * of each item of @handles is tested and the function returns
 * immediately. If @timeout is %INFINITE, the function waits forever.
 *
 * Return value: %WAIT_OBJECT_0 to %WAIT_OBJECT_0 + @numobjects - 1 -
 * if @waitall is %TRUE, indicates that all objects are signalled. If
 * @waitall is %FALSE, the return value minus %WAIT_OBJECT_0 indicates
 * the first index into @handles of the objects that are signalled.
 * %WAIT_ABANDONED_0 to %WAIT_ABANDONED_0 + @numobjects - 1 - if
 * @waitall is %TRUE, indicates that all objects are signalled, and at
 * least one object is an abandoned mutex object (See
 * WaitForSingleObject() for a description of abandoned mutexes.) If
 * @waitall is %FALSE, the return value minus %WAIT_ABANDONED_0
 * indicates the first index into @handles of an abandoned mutex.
 * %WAIT_TIMEOUT - The @timeout interval elapsed and no objects in
 * @handles are signalled. %WAIT_FAILED - an error occurred.
 * %WAIT_IO_COMPLETION - the wait was ended by an APC.
 */
guint32 WaitForMultipleObjectsEx(guint32 numobjects, gpointer *handles,
                                 gboolean waitall, guint32 timeout,
                                 gboolean alertable)
{
    GHashTable *dups;
    gboolean duplicate = FALSE, bogustype = FALSE, done;
    guint32 count, lowest;
    struct timespec abstime;
    guint i;
    guint32 ret;
    int thr_ret;
    gpointer current_thread = GetCurrentThread ();

    if (numobjects > IO_LAYER_MAXIMUM_WAIT_OBJECTS) {
#ifdef DEBUG
        g_message ("%s: Too many handles: %d", __func__, numobjects);
#endif
        return(WAIT_FAILED);
    }

    /* single-handle wait is delegated to the simpler implementation */
    if (numobjects == 1) {
        return WaitForSingleObjectEx (handles [0], timeout, alertable);
    }

    /* Check for duplicates (and waitability) in one pass over the handles */
    dups = g_hash_table_new (g_direct_hash, g_direct_equal);
    for (i = 0; i < numobjects; i++) {
        gpointer exists = g_hash_table_lookup (dups, handles[i]);
        if (exists != NULL) {
#ifdef DEBUG
            g_message ("%s: Handle %p duplicated", __func__, handles[i]);
#endif
            duplicate = TRUE;
            break;
        }

        if (_wapi_handle_test_capabilities (handles[i], WAPI_HANDLE_CAP_WAIT) == FALSE) {
#ifdef DEBUG
            g_message ("%s: Handle %p can't be waited for", __func__, handles[i]);
#endif
            bogustype = TRUE;
        }

        g_hash_table_insert (dups, handles[i], handles[i]);
    }
    g_hash_table_destroy (dups);

    if (duplicate == TRUE) {
#ifdef DEBUG
        g_message ("%s: Returning due to duplicates", __func__);
#endif
        return(WAIT_FAILED);
    }

    if (bogustype == TRUE) {
#ifdef DEBUG
        g_message ("%s: Returning due to bogus type", __func__);
#endif
        return(WAIT_FAILED);
    }

    /* fast path: maybe everything we need is already signalled */
    done = test_and_own (numobjects, handles, waitall, &count, &lowest);
    if (done == TRUE) {
        return(WAIT_OBJECT_0+lowest);
    }

    /* Have to wait for some or all handles to become signalled */

    /* convert the relative timeout to an absolute deadline once, up front */
    if(timeout!=INFINITE) {
        _wapi_calc_timeout (&abstime, timeout);
    }

    if (alertable && _wapi_thread_apc_pending (current_thread)) {
        _wapi_thread_dispatch_apc_queue (current_thread);
        return WAIT_IO_COMPLETION;
    }

    while(1) {
        /* Prod all special-wait handles that aren't already
         * signalled */
        for (i = 0; i < numobjects; i++) {
            if (_wapi_handle_test_capabilities (handles[i], WAPI_HANDLE_CAP_SPECIAL_WAIT) == TRUE &&
                _wapi_handle_issignalled (handles[i]) == FALSE) {
                _wapi_handle_ops_special_wait (handles[i], 0);
            }
        }

        /* Check before waiting on the condition, just in case */
        done = test_and_own (numobjects, handles, waitall, &count, &lowest);
        if (done == TRUE) {
            return(WAIT_OBJECT_0 + lowest);
        }

#ifdef DEBUG
        g_message ("%s: locking signal mutex", __func__);
#endif

        /* cleanup handler guarantees the signal mutex is released if the
         * thread is cancelled while holding it */
        pthread_cleanup_push ((void(*)(void *))_wapi_handle_unlock_signal_mutex, NULL);
        thr_ret = _wapi_handle_lock_signal_mutex ();
        g_assert (thr_ret == 0);

        if (timeout == INFINITE) {
            ret = _wapi_handle_wait_signal ();
        } else {
            ret = _wapi_handle_timedwait_signal (&abstime);
        }

#ifdef DEBUG
        g_message ("%s: unlocking signal mutex", __func__);
#endif
        thr_ret = _wapi_handle_unlock_signal_mutex (NULL);
        g_assert (thr_ret == 0);
        pthread_cleanup_pop (0);  /* pop without running: already unlocked */

        if (alertable && _wapi_thread_apc_pending (current_thread)) {
            _wapi_thread_dispatch_apc_queue (current_thread);
            return WAIT_IO_COMPLETION;
        }

        /* Check if everything is signalled, as we can't
         * guarantee to notice a shared signal even if the
         * wait timed out */
        done = test_and_own (numobjects, handles, waitall, &count, &lowest);
        if (done == TRUE) {
            return(WAIT_OBJECT_0+lowest);
        } else if (ret != 0) {
            /* Didn't get all handles, and there was a
             * timeout or other error */
#ifdef DEBUG
            g_message ("%s: wait returned error: %s", __func__, strerror (ret));
#endif

            if(ret==ETIMEDOUT) {
                return(WAIT_TIMEOUT);
            } else {
                return(WAIT_FAILED);
            }
        }
    }
}
/* The function that does the work of calling the extension's callbacks and also managing the permessagedata structures */ void fd_hook_call(enum fd_hook_type type, struct msg * msg, struct fd_peer * peer, void * other, struct fd_msg_pmdl * pmdl) { struct fd_list * li; ASSERT(type <= HOOK_LAST); int call_default = 0; /* lock the list of hooks for this type */ CHECK_POSIX_DO( pthread_rwlock_rdlock(&HS_array[type].rwlock), ); pthread_cleanup_push( fd_cleanup_rwlock, &HS_array[type].rwlock ); if (FD_IS_LIST_EMPTY(&HS_array[type].sentinel)) { call_default = 1; } else { /* for each registered hook */ for (li = HS_array[type].sentinel.next; li != &HS_array[type].sentinel; li = li->next) { struct fd_hook_hdl * h = (struct fd_hook_hdl *)li->o; struct fd_hook_permsgdata * pmd = NULL; /* do we need to handle pmd ? */ if (h->data_hdl && pmdl) { pmd = get_or_create_pmd(pmdl, h->data_hdl); } /* Now, call this callback */ (*h->fd_hook_cb)(type, msg, &peer->p_hdr, other, pmd, h->regdata); } } pthread_cleanup_pop(0); /* done */ CHECK_POSIX_DO( pthread_rwlock_unlock(&HS_array[type].rwlock), ); if (call_default) { CHECK_POSIX_DO( pthread_mutex_lock(&hook_default_mtx), ); pthread_cleanup_push( fd_cleanup_mutex, &hook_default_mtx ); /* There was no registered handler, default behavior for this hook */ switch (type) { case HOOK_DATA_RECEIVED: { struct fd_cnx_rcvdata *rcv_data = other; LOG_A("RCV: %zd bytes", rcv_data->length); break; } case HOOK_MESSAGE_RECEIVED: { CHECK_MALLOC_DO(fd_msg_dump_summary(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_D("RCV from '%s': %s", peer ? 
peer->p_hdr.info.pi_diamid : "<unknown>", hook_default_buf); break; } case HOOK_MESSAGE_LOCAL: { CHECK_MALLOC_DO(fd_msg_dump_full(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_A("Handled to framework for sending: %s", hook_default_buf); break; } case HOOK_MESSAGE_PRE_SEND: { CHECK_MALLOC_DO(fd_msg_dump_summary(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_D("GOING TO SEND TO '%s': %s", peer ? peer->p_hdr.info.pi_diamid : "<unknown>", hook_default_buf); break; } case HOOK_MESSAGE_SENT: { CHECK_MALLOC_DO(fd_msg_dump_summary(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_D("SENT to '%s': %s", peer ? peer->p_hdr.info.pi_diamid : "<unknown>", hook_default_buf); break; } case HOOK_MESSAGE_FAILOVER: { CHECK_MALLOC_DO(fd_msg_dump_summary(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_D("Failing over message sent to '%s': %s", peer ? peer->p_hdr.info.pi_diamid : "<unknown>", hook_default_buf); break; } case HOOK_MESSAGE_PARSING_ERROR: { if (msg) { DiamId_t id = NULL; if (!fd_msg_source_get( msg, &id, NULL )) id = (DiamId_t)"<error getting source>"; if (!id) id = (DiamId_t)"<local>"; CHECK_MALLOC_DO(fd_msg_dump_treeview(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_E("Parsing error: '%s' for the following message received from '%s':", (char *)other, (char *)id); LOG_SPLIT(FD_LOG_ERROR, " ", hook_default_buf, NULL); } else { struct fd_cnx_rcvdata *rcv_data = other; CHECK_MALLOC_DO(fd_dump_extend_hexdump(&hook_default_buf, &hook_default_len, NULL, rcv_data->buffer, rcv_data->length, 0, 0), break); LOG_E("Parsing error: cannot parse %zdB buffer from '%s': %s", rcv_data->length, peer ? 
peer->p_hdr.info.pi_diamid : "<unknown>", hook_default_buf); } break; } case HOOK_MESSAGE_ROUTING_ERROR: { CHECK_MALLOC_DO(fd_msg_dump_treeview(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_E("Routing error: '%s' for the following message:", (char *)other); LOG_SPLIT(FD_LOG_ERROR, " ", hook_default_buf, NULL); break; } case HOOK_MESSAGE_ROUTING_FORWARD: { CHECK_MALLOC_DO(fd_msg_dump_summary(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_D("FORWARDING: %s", hook_default_buf); break; } case HOOK_MESSAGE_ROUTING_LOCAL: { CHECK_MALLOC_DO(fd_msg_dump_summary(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_D("DISPATCHING: %s", hook_default_buf); break; } case HOOK_MESSAGE_DROPPED: { CHECK_MALLOC_DO(fd_msg_dump_treeview(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_E("Message discarded ('%s'):", (char *)other); LOG_SPLIT(FD_LOG_ERROR, " ", hook_default_buf, NULL); break; } case HOOK_PEER_CONNECT_FAILED: { if (msg) { CHECK_MALLOC_DO(fd_msg_dump_full(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); LOG_N("Connection to '%s' failed: '%s'; CER/CEA dump:", peer ? peer->p_hdr.info.pi_diamid : "<unknown>", (char *)other); LOG_SPLIT(FD_LOG_NOTICE, " ", hook_default_buf, NULL); } else { LOG_D("Connection to '%s' failed: %s", peer ? peer->p_hdr.info.pi_diamid : "<unknown>", (char *)other); } break; } case HOOK_PEER_CONNECT_SUCCESS: { DiamId_t id = NULL; if ((!fd_msg_source_get( msg, &id, NULL )) && (id == NULL)) { /* The CEA is locally issued */ fd_msg_answ_getq(msg, &msg); /* We dump the CER in that case */ } CHECK_MALLOC_DO(fd_msg_dump_full(&hook_default_buf, &hook_default_len, NULL, msg, NULL, 0, 1), break); char protobuf[40]; if (peer) { CHECK_FCT_DO(fd_peer_cnx_proto_info(&peer->p_hdr, protobuf, sizeof(protobuf)), break ); } else { protobuf[0] = '-'; protobuf[1] = '\0'; } LOG_N("Connected to '%s' (%s), remote capabilities: ", peer ? 
peer->p_hdr.info.pi_diamid : "<unknown>", protobuf); LOG_SPLIT(FD_LOG_NOTICE, " ", hook_default_buf, NULL); break; }
/**
 * WaitForSingleObjectEx:
 * @handle: an object to wait for
 * @timeout: the maximum time in milliseconds to wait for
 * @alertable: if TRUE, the wait can be interrupted by an APC call
 *
 * This function returns when either @handle is signalled, or @timeout
 * ms elapses. If @timeout is zero, the object's state is tested and
 * the function returns immediately. If @timeout is %INFINITE, the
 * function waits forever.
 *
 * Return value: %WAIT_ABANDONED - @handle is a mutex that was not
 * released by the owning thread when it exited. Ownership of the
 * mutex object is granted to the calling thread and the mutex is set
 * to nonsignalled. %WAIT_OBJECT_0 - The state of @handle is
 * signalled. %WAIT_TIMEOUT - The @timeout interval elapsed and
 * @handle's state is still not signalled. %WAIT_FAILED - an error
 * occurred. %WAIT_IO_COMPLETION - the wait was ended by an APC.
 */
guint32 WaitForSingleObjectEx(gpointer handle, guint32 timeout,
                              gboolean alertable)
{
    guint32 ret, waited;
    struct timespec abstime;
    int thr_ret;
    gboolean apc_pending = FALSE;
    gpointer current_thread = GetCurrentThread ();

    if (_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_WAIT) == FALSE) {
#ifdef DEBUG
        g_message ("%s: handle %p can't be waited for", __func__, handle);
#endif

        return(WAIT_FAILED);
    }

    /* special-wait handles (e.g. those with their own wait implementation)
       are delegated entirely to the handle's own op */
    if (_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_SPECIAL_WAIT) == TRUE) {
#ifdef DEBUG
        g_message ("%s: handle %p has special wait", __func__, handle);
#endif

        ret = _wapi_handle_ops_special_wait (handle, timeout);

        if (alertable && _wapi_thread_apc_pending (current_thread)) {
            apc_pending = TRUE;
            ret = WAIT_IO_COMPLETION;
        }

        goto check_pending;
    }

#ifdef DEBUG
    g_message ("%s: locking handle %p", __func__, handle);
#endif

    /* cleanup handler releases the handle lock on cancellation; from here
       every exit path goes through `done:` which unlocks explicitly */
    pthread_cleanup_push ((void(*)(void *))_wapi_handle_unlock_handle, handle);
    thr_ret = _wapi_handle_lock_handle (handle);
    g_assert (thr_ret == 0);

    if (_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_OWN) == TRUE) {
        if (own_if_owned (handle) == TRUE) {
#ifdef DEBUG
            g_message ("%s: handle %p already owned", __func__, handle);
#endif
            ret = WAIT_OBJECT_0;
            goto done;
        }
    }

    if (alertable && _wapi_thread_apc_pending (current_thread)) {
        apc_pending = TRUE;
        ret = WAIT_IO_COMPLETION;
        goto done;
    }

    if (own_if_signalled (handle) == TRUE) {
#ifdef DEBUG
        g_message ("%s: handle %p already signalled", __func__, handle);
#endif

        ret=WAIT_OBJECT_0;
        goto done;
    }

    /* Have to wait for it */
    if (timeout != INFINITE) {
        _wapi_calc_timeout (&abstime, timeout);
    }

    do {
        /* Check before waiting on the condition, just in case */
        if (own_if_signalled (handle)) {
#ifdef DEBUG
            g_message ("%s: handle %p signalled", __func__, handle);
#endif

            ret = WAIT_OBJECT_0;
            goto done;
        }

        if (timeout == INFINITE) {
            waited = _wapi_handle_wait_signal_handle (handle);
        } else {
            waited = _wapi_handle_timedwait_signal_handle (handle, &abstime);
        }

        if (alertable)
            apc_pending = _wapi_thread_apc_pending (current_thread);

        if(waited==0 && !apc_pending) {
            /* Condition was signalled, so hopefully
             * handle is signalled now. (It might not be
             * if someone else got in before us.) */
            if (own_if_signalled (handle)) {
#ifdef DEBUG
                g_message ("%s: handle %p signalled", __func__, handle);
#endif

                ret=WAIT_OBJECT_0;
                goto done;
            }

            /* Better luck next time */
        }
    } while(waited == 0 && !apc_pending);

    /* Timeout or other error */
#ifdef DEBUG
    g_message ("%s: wait on handle %p error: %s", __func__, handle, strerror (waited));
#endif

    ret = WAIT_TIMEOUT;

done:

#ifdef DEBUG
    g_message ("%s: unlocking handle %p", __func__, handle);
#endif

    thr_ret = _wapi_handle_unlock_handle (handle);
    g_assert (thr_ret == 0);
    pthread_cleanup_pop (0);  /* pop without running: already unlocked */

check_pending:
    if (apc_pending) {
        _wapi_thread_dispatch_apc_queue (current_thread);
        ret = WAIT_IO_COMPLETION;
    }

    return(ret);
}
/* Acquire `rwlock` for exclusive (write) access, lazily initialising it if it
 * is a static PTHREAD_RWLOCK_INITIALIZER.  Blocks until the readers that were
 * active at the time of the call have completed.  Returns 0 on success or an
 * errno-style error code. */
int
pthread_rwlock_wrlock (pthread_rwlock_t * rwlock)
{
    int result;
    pthread_rwlock_t rwl;

    if (rwlock == NULL || *rwlock == NULL)
    {
        return EINVAL;
    }

    /*
     * We do a quick check to see if we need to do more work
     * to initialise a static rwlock. We check
     * again inside the guarded section of ptw32_rwlock_check_need_init()
     * to avoid race conditions.
     */
    if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
    {
        result = ptw32_rwlock_check_need_init (rwlock);

        if (result != 0 && result != EBUSY)
        {
            return result;
        }
    }

    rwl = *rwlock;

    if (rwl->nMagic != PTW32_RWLOCK_MAGIC)
    {
        return EINVAL;
    }

    /* Serialise writers (and block new lock attempts) first, then take the
       mutex guarding the reader-completion accounting. */
    if ((result = pthread_mutex_lock (&(rwl->mtxExclusiveAccess))) != 0)
    {
        return result;
    }

    if ((result = pthread_mutex_lock (&(rwl->mtxSharedAccessCompleted))) != 0)
    {
        (void) pthread_mutex_unlock (&(rwl->mtxExclusiveAccess));
        return result;
    }

    if (rwl->nExclusiveAccessCount == 0)
    {
        /* fold already-completed readers into the active-reader count */
        if (rwl->nCompletedSharedAccessCount > 0)
        {
            rwl->nSharedAccessCount -= rwl->nCompletedSharedAccessCount;
            rwl->nCompletedSharedAccessCount = 0;
        }

        if (rwl->nSharedAccessCount > 0)
        {
            /* negative value marks "readers still draining"; the wait below
               continues while it remains negative */
            rwl->nCompletedSharedAccessCount = -rwl->nSharedAccessCount;

            /*
             * This routine may be a cancelation point
             * according to POSIX 1003.1j section 18.1.2.
             */
#if defined(_MSC_VER) && _MSC_VER < 1400
#pragma inline_depth(0)
#endif
            pthread_cleanup_push (ptw32_rwlock_cancelwrwait, (void *) rwl);

            do
            {
                result = pthread_cond_wait (&(rwl->cndSharedAccessCompleted),
                                            &(rwl->mtxSharedAccessCompleted));
            }
            while (result == 0 && rwl->nCompletedSharedAccessCount < 0);

            /* run the cancel handler only if the wait failed, to restore state */
            pthread_cleanup_pop ((result != 0) ? 1 : 0);
#if defined(_MSC_VER) && _MSC_VER < 1400
#pragma inline_depth()
#endif

            if (result == 0)
            {
                rwl->nSharedAccessCount = 0;
            }
        }
    }

    if (result == 0)
    {
        rwl->nExclusiveAccessCount++;
    }

    return result;
}
/* Read an input report from the device, waiting up to `milliseconds`
 * (-1 = block forever, 0 = non-blocking poll).  Returns the number of bytes
 * copied into `data`, 0 on timeout, or -1 on error/disconnect.  dev->mutex
 * serialises access to the input_reports queue filled by the read thread. */
int HID_API_EXPORT hid_read_timeout(hid_device *dev, unsigned char *data, size_t length, int milliseconds)
{
    int bytes_read = -1;

#if 0
    int transferred;
    int res = libusb_interrupt_transfer(dev->device_handle, dev->input_endpoint, data, length, &transferred, 5000);
    LOG("transferred: %d\n", transferred);
    return transferred;
#endif

    pthread_mutex_lock(&dev->mutex);
    /* release the mutex if the thread is cancelled while waiting below */
    pthread_cleanup_push(&cleanup_mutex, dev);

    /* There's an input report queued up. Return it. */
    if (dev->input_reports) {
        /* Return the first one */
        bytes_read = return_data(dev, data, length);
        goto ret;
    }

    if (dev->shutdown_thread) {
        /* This means the device has been disconnected.
           An error code of -1 should be returned. */
        bytes_read = -1;
        goto ret;
    }

    if (milliseconds == -1) {
        /* Blocking */
        while (!dev->input_reports && !dev->shutdown_thread) {
            pthread_cond_wait(&dev->condition, &dev->mutex);
        }
        if (dev->input_reports) {
            bytes_read = return_data(dev, data, length);
        }
    }
    else if (milliseconds > 0) {
        /* Non-blocking, but called with timeout. */
        int res;
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        /* build an absolute deadline, normalising tv_nsec into [0, 1e9) */
        ts.tv_sec += milliseconds / 1000;
        ts.tv_nsec += (milliseconds % 1000) * 1000000;
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }

        while (!dev->input_reports && !dev->shutdown_thread) {
            res = pthread_cond_timedwait(&dev->condition, &dev->mutex, &ts);
            if (res == 0) {
                if (dev->input_reports) {
                    bytes_read = return_data(dev, data, length);
                    break;
                }

                /* If we're here, there was a spurious wake up
                   or the read thread was shutdown. Run the
                   loop again (ie: don't break). */
            }
            else if (res == ETIMEDOUT) {
                /* Timed out. */
                bytes_read = 0;
                break;
            }
            else {
                /* Error. */
                bytes_read = -1;
                break;
            }
        }
    }
    else {
        /* Purely non-blocking */
        bytes_read = 0;
    }

ret:
    pthread_mutex_unlock(&dev->mutex);
    pthread_cleanup_pop(0);  /* pop without running: mutex already released */

    return bytes_read;
}
/* Hashpipe thread entry point: receives UDP packets and verifies their CRC32,
 * publishing packet/error counters and per-stage timing statistics to the
 * shared status buffer.  Holds off in a NETHOLD loop until released via the
 * status buffer, and exits when run_threads() goes false or on cancellation. */
static void *run(hashpipe_thread_args_t * args)
{
    // Local aliases to shorten access to args fields
    // Our output buffer happens to be a paper_input_databuf
    hashpipe_status_t st = args->st;
    const char * status_key = args->thread_desc->skey;

    st_p = &st; // allow global (this source file) access to the status buffer

    // Get inital value for crc32 function
    uint32_t init_crc = crc32(0,0,0);

    // Flag that holds off the crc thread
    int holdoff = 1;

    // Force ourself into the hold off state
    hashpipe_status_lock_safe(&st);
    hputi4(st.buf, "NETHOLD", 1);
    hashpipe_status_unlock_safe(&st);

    while(holdoff) {
        // We're not in any hurry to startup
        sleep(1);
        hashpipe_status_lock_safe(&st);
        // Look for NETHOLD value
        hgeti4(st.buf, "NETHOLD", &holdoff);
        if(!holdoff) {
            // Done holding, so delete the key
            hdel(st.buf, "NETHOLD");
        }
        hashpipe_status_unlock_safe(&st);
    }

    /* Read network params */
    struct hashpipe_udp_params up = {
        .bindhost = "0.0.0.0",
        .bindport = 8511,
        .packet_size = 8200
    };
    hashpipe_status_lock_safe(&st);
    // Get info from status buffer if present (no change if not present)
    hgets(st.buf, "BINDHOST", 80, up.bindhost);
    hgeti4(st.buf, "BINDPORT", &up.bindport);
    // Store bind host/port info etc in status buffer
    hputs(st.buf, "BINDHOST", up.bindhost);
    hputi4(st.buf, "BINDPORT", up.bindport);
    hputu4(st.buf, "CRCPKOK", 0);
    hputu4(st.buf, "CRCPKERR", 0);
    hputs(st.buf, status_key, "running");
    hashpipe_status_unlock_safe(&st);

    struct hashpipe_udp_packet p;

    /* Give all the threads a chance to start before opening network socket */
    sleep(1);

    /* Set up UDP socket */
    int rv = hashpipe_udp_init(&up);
    if (rv!=HASHPIPE_OK) {
        hashpipe_error("paper_crc_thread", "Error opening UDP socket.");
        pthread_exit(NULL);
    }
    /* ensure the socket is closed even if the thread is cancelled */
    pthread_cleanup_push((void *)hashpipe_udp_close, &up);

    /* Main loop */
    uint64_t packet_count = 0;
    uint64_t good_count = 0;
    uint64_t error_count = 0;
    uint64_t elapsed_wait_ns = 0;
    uint64_t elapsed_recv_ns = 0;
    uint64_t elapsed_proc_ns = 0;
    float ns_per_wait = 0.0;
    float ns_per_recv = 0.0;
    float ns_per_proc = 0.0;
    struct timespec start, stop;
    struct timespec recv_start, recv_stop;
    packet_header_t hdr;

    while (run_threads()) {

        /* Read packet; retry while the non-blocking recv reports no data */
        clock_gettime(CLOCK_MONOTONIC, &recv_start);
        do {
            clock_gettime(CLOCK_MONOTONIC, &start);
            p.packet_size = recv(up.sock, p.data, HASHPIPE_MAX_PACKET_SIZE, 0);
            clock_gettime(CLOCK_MONOTONIC, &recv_stop);
        } while (p.packet_size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK) && run_threads());

        // Break out of loop if stopping
        if(!run_threads()) break;

        // Increment packet count
        packet_count++;

        // Check CRC (a packet whose trailing CRC matches yields this residue)
        if(crc32(init_crc, (/*const?*/ uint8_t *)p.data, p.packet_size) == 0xffffffff) {
            // CRC OK! Increment good counter
            good_count++;
        } else {
            // CRC error! Increment error counter
            error_count++;

            // Log message
            get_header(&p, &hdr);
            hashpipe_warn("paper_crc", "CRC error mcnt %llu ; fid %u ; xid %u",
                    hdr.mcnt, hdr.fid, hdr.xid);
        }

        clock_gettime(CLOCK_MONOTONIC, &stop);
        elapsed_wait_ns += ELAPSED_NS(recv_start, start);
        elapsed_recv_ns += ELAPSED_NS(start, recv_stop);
        elapsed_proc_ns += ELAPSED_NS(recv_stop, stop);

        if(packet_count % 1000 == 0) {
            // Compute stats
            get_header(&p, &hdr);
            ns_per_wait = (float)elapsed_wait_ns / packet_count;
            ns_per_recv = (float)elapsed_recv_ns / packet_count;
            ns_per_proc = (float)elapsed_proc_ns / packet_count;

            // Update status
            hashpipe_status_lock_busywait_safe(&st);
            hputu8(st.buf, "CRCMCNT", hdr.mcnt);
            // Gbps = bits_per_packet / ns_per_packet
            // (N_BYTES_PER_PACKET excludes header, so +8 for the header)
            hputr4(st.buf, "CRCGBPS", 8*(N_BYTES_PER_PACKET+8)/(ns_per_recv+ns_per_proc));
            hputr4(st.buf, "CRCWATNS", ns_per_wait);
            hputr4(st.buf, "CRCRECNS", ns_per_recv);
            hputr4(st.buf, "CRCPRCNS", ns_per_proc);
            // TODO Provide some way to recognize request to zero out the
            // CRCERR and CRCOK fields.
            hputu8(st.buf, "CRCPKOK", good_count);
            hputu8(st.buf, "CRCPKERR", error_count);
            hashpipe_status_unlock_safe(&st);

            // Start new average
            elapsed_wait_ns = 0;
            elapsed_recv_ns = 0;
            elapsed_proc_ns = 0;
            packet_count = 0;
        }

        /* Will exit if thread has been cancelled */
        pthread_testcancel();
    }

    /* Have to close all push's */
    pthread_cleanup_pop(1); /* Closes push(hashpipe_udp_close) */

    return NULL;
}

/* Thread descriptor registered with hashpipe.
   NOTE: this initializer continues beyond this chunk of the file. */
static hashpipe_thread_desc_t crc_thread = {
    name: "paper_crc_thread",
    skey: "CRCSTAT",
    init: NULL,
    run: run,
    ibuf_desc: {NULL},
/* pthread entry wrapper for DirectThread: stores the thread object in TLS,
 * publishes the tid, runs init handlers, signals the creating thread that
 * initialisation has completed, then invokes the thread's main routine.
 * The cleanup handler runs on both normal return and cancellation. */
static void *
direct_thread_main( void *arg )
{
    void         *ret;
    DirectThread *thread = arg;

    pthread_setspecific( thread_key, thread );

    D_DEBUG_AT( Direct_ThreadInit, "%s( %p )\n", __FUNCTION__, arg );

    D_DEBUG_AT( Direct_ThreadInit, " -> starting...\n" );

    D_MAGIC_ASSERT( thread, DirectThread );

    pthread_cleanup_push( direct_thread_cleanup, thread );

    thread->tid = direct_gettid();

    D_DEBUG_AT( Direct_ThreadInit, " -> tid %d\n", thread->tid );

    __D_direct_thread_call_init_handlers( thread );

    /* Have all signals handled by the main thread. */
    if (direct_config->thread_block_signals)
        direct_signals_block_all();

    /* Lock the thread mutex. */
    D_DEBUG_AT( Direct_ThreadInit, " -> locking...\n" );
    direct_mutex_lock( &thread->lock );

    /* Indicate that our initialization has completed. */
    D_ASSERT( !thread->init );
    thread->init = true;

    D_DEBUG_AT( Direct_ThreadInit, " -> signalling...\n" );
    direct_waitqueue_signal( &thread->cond );

    /* Unlock the thread mutex. */
    D_DEBUG_AT( Direct_ThreadInit, " -> unlocking...\n" );
    direct_mutex_unlock( &thread->lock );

    if (thread->joining) {
        D_DEBUG_AT( Direct_Thread, " -> Being joined before entering main routine!\n" );
        /* NOTE(review): this plain return exits between pthread_cleanup_push
         * and pthread_cleanup_pop, which POSIX leaves undefined; it skips
         * direct_thread_cleanup here -- confirm this is intentional. */
        return NULL;
    }

    D_MAGIC_ASSERT( thread, DirectThread );

    /* Call main routine. */
    D_DEBUG_AT( Direct_ThreadInit, " -> running...\n" );
    ret = thread->main( thread, thread->arg );

    D_DEBUG_AT( Direct_Thread, " -> Returning %p from '%s' (%s, %d)...\n",
                ret, thread->name, direct_thread_type_name(thread->type), thread->tid );

    D_MAGIC_ASSERT( thread, DirectThread );

    pthread_cleanup_pop( 1 );  /* pop and run direct_thread_cleanup */

    return ret;
}
/* Test thread that returns normally (rather than calling pthread_exit)
 * between pthread_cleanup_push and pthread_cleanup_pop.
 * NOTE(review): returning from within a push/pop pair is undefined per
 * POSIX; this looks like a deliberate conformance/negative test that the
 * cleanup2 handler is NOT run on a plain return -- confirm against the
 * test harness before "fixing". */
static void *thread_func_noexit(void *arg)
{
    pthread_cleanup_push(&cleanup2, arg);
    return NULL;  /* leaves the cleanup scope without popping */
    pthread_cleanup_pop(0);  /* unreachable; textually required to balance the push macro */
    return NULL;
}
/*
 *
 * The main worker-thread for the i2c-dev device
 * Enters an endless loop that waits for commands and
 * executes them.
 *
 * Currently we only support GA-devices
 *
 */
void *thr_sendrec_I2C_DEV(void *v)
{
    char msg[1000];
    ga_state_t gatmp;
    int last_cancel_state, last_cancel_type;

    /* per-thread context; freed by the cleanup routine registered below */
    bus_thread_t *btd = (bus_thread_t *) malloc(sizeof(bus_thread_t));
    if (btd == NULL)
        pthread_exit((void *) 1);
    btd->bus = (bus_t) v;
    btd->fd = -1;

    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &last_cancel_state);
    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &last_cancel_type);

    /*register cleanup routine */
    pthread_cleanup_push((void *) end_bus_thread, (void *) btd);

    syslog_bus(btd->bus, DBG_INFO, "i2c-dev bus started (device = %s).",
               buses[btd->bus].device.file.path);

    I2CDEV_DATA *data = buses[btd->bus].driverdata;
    int ga_reset_devices = data->ga_reset_devices;

    /* watchdog values appear to mark progress stages for an external
       monitor -- TODO confirm their meaning against the watchdog code */
    buses[btd->bus].watchdog = 1;

    /* command processing starts here */
    while (true) {

        /* process POWER changes */
        if (buses[btd->bus].power_changed == 1) {
            /* dummy select, power state is directly read by select_bus() */
            select_bus(0, btd->bus);
            buses[btd->bus].power_changed = 0;
            infoPower(btd->bus, msg);
            enqueueInfoMessage(msg);
            if ((ga_reset_devices == 1) && (buses[btd->bus].power_state == 1)) {
                reset_ga(btd->bus);
            }
        }

        /* do nothing, if power is off */
        if (buses[btd->bus].power_state == 0) {
            if (usleep(1000) == -1) {
                syslog_bus(btd->bus, DBG_ERROR,
                           "usleep() failed: %s (errno = %d)\n",
                           strerror(errno), errno);
            }
            continue;
        }

        buses[btd->bus].watchdog = 4;

        /* process GA commands */
        if (!queue_GA_isempty(btd->bus)) {
            dequeueNextGA(btd->bus, &gatmp);
            handle_i2c_set_ga(btd->bus, &gatmp);
            setGA(btd->bus, gatmp.id, gatmp);
            select_bus(0, btd->bus);
            buses[btd->bus].watchdog = 6;
        }

        /* throttle the polling loop; usleep() is also a cancellation point */
        if (usleep(1000) == -1) {
            syslog_bus(btd->bus, DBG_ERROR,
                       "usleep() failed: %s (errno = %d)\n",
                       strerror(errno), errno);
        }
    }

    /*run the cleanup routine */
    pthread_cleanup_pop(1);
    return NULL;
}
void * snd_record(void* voidargs) { struct snd_record_args* args = (snd_record_args*) voidargs; *args->returnObj.state = 0; long loops; int rc; int size; snd_pcm_t *handle; snd_pcm_hw_params_t *params; unsigned int val; int dir; snd_pcm_uframes_t frames; uint16_t *buffer; int* signalNewState = args->signalNewState; bool continuous_capture = args->duration <= 0 ? true : false; /* Open PCM device for recording (capture). */ rc = snd_pcm_open(&handle, (const char*) args->dev_name.c_str(), SND_PCM_STREAM_CAPTURE, 0); pthread_cleanup_push(deallocate_srarg, voidargs); if (rc < 0) { fp_err(FPOL_PCM, "unable to open pcm device: %s", snd_strerror(rc)); fp_debug(FPOL_PCM, "dev_name: %s", (char*) args->dev_name.c_str()); args->returnObj.errorcode = 1; //pthread_cleanup_pop(1); //return NULL; pthread_exit(NULL); } /* Allocate a hardware parameters object. */ snd_pcm_hw_params_alloca(¶ms); /* Fill it in with default values. */ snd_pcm_hw_params_any(handle, params); /* Set the desired hardware parameters. */ /* Interleaved mode */ snd_pcm_hw_params_set_access(handle, params, SND_PCM_ACCESS_RW_INTERLEAVED); /* Signed 16-bit little-endian format */ snd_pcm_hw_params_set_format(handle, params, SND_PCM_FORMAT_S16_LE); /* Two channels (stereo) */ snd_pcm_hw_params_set_channels(handle, params, 2); /* 44100 samples/second sampling rate (CD quality) */ val = args->samplingFrequency; snd_pcm_hw_params_set_rate_near(handle, params, &val, &dir); /* Set period size to 32 frames. 
*/ frames = 1152; //32; // snd_pcm_hw_params_set_period_size_near(handle, params, &frames, &dir); /* Write the parameters to the driver */ rc = snd_pcm_hw_params(handle, params); if (rc < 0) { fprintf(stderr, "unable to set hw parameters: %s\n", snd_strerror(rc)); std::cout << "\n" << getTime() << " snd_record: unable to set hw parameters: " << snd_strerror(rc) << "\n"; pthread_exit(NULL); } /* Use a buffer large enough to hold one period */ snd_pcm_hw_params_get_period_size(params, &frames, &dir); size = frames * 4; /* 2 bytes/sample, 2 channels */ buffer = (uint16_t*) malloc(size); free_pcm_args fpa; fpa.handle = handle; fpa.buffer = buffer; pthread_cleanup_push(&free_pcm, (void*) &fpa); /* We want to loop for 5 seconds */ snd_pcm_hw_params_get_period_time(params, &val, &dir); loops = args->duration * 1000000 / val; *args->returnObj.state = 1; while (*signalNewState >= 0 && (loops > 0 || continuous_capture)) { loops--; rc = snd_pcm_readi(handle, (void**) buffer, frames); if (rc == -EPIPE) { /* EPIPE means overrun */ fprintf(stderr, "overrun occurred\n"); snd_pcm_prepare(handle); } else if (rc < 0) { args->returnObj.error = std::string("error from read: ") + std::string(snd_strerror(rc)); std::cout << "\n" << getTime() << " snd_record(): error from read: " << snd_strerror(rc) << "\n"; break; } else if (rc != (int) frames) { fprintf(stderr, "short read, read %d frames\n", rc); } (*(args->periodbuffer))[*args->periodbufferfloat].initferryperiod(size, 2); memcpy((*args->periodbuffer)[*args->periodbufferfloat].period, buffer, (*args->periodbuffer)[*args->periodbufferfloat].length); (*args->periodbufferfloat)++; (*args->periodbufferfloat) %= *args->periodbufferlength; // if (rc != size) { // fprintf(stderr, "short write: wrote %d bytes\n", rc); // } } snd_pcm_drain(handle); snd_pcm_close(handle); free(buffer); pthread_cleanup_pop(0); *args->returnObj.state = -1; pthread_cleanup_pop(0); return NULL; }
/* Watch thread: blocks on the inotify fd in handle->fd, translates raw inotify
 * events into mp_inotify_event codes and delivers them to handle->callback
 * (serialized by mp_noti_lock).  Exits only via pthread_exit/cancellation.
 */
static gpointer _mp_app_inotify_watch_thread(gpointer user_data)
{
    mp_inotify_t *handle = (mp_inotify_t *) user_data;
    int oldtype = 0;

    mp_retvm_if(handle == NULL, NULL, "handle is NULL");
    DEBUG_TRACE("Create _mp_app_inotify_watch_thread!!! ");
    pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);

    while (1) {
        ssize_t len = 0;
        ssize_t i = 0;
        char event_buff[MP_EVENT_BUF_LEN] = { 0, };

        if (handle->fd < 0) {
            ERROR_TRACE("fd is not a vaild one");
            pthread_exit(NULL);
        }

        len = read(handle->fd, event_buff, sizeof(event_buff) - 1);
        if (len <= 0 || len > (ssize_t) (sizeof(event_buff) - 1)) {
            /* BUGFIX: this branch had an empty body; make the intent —
             * skip a failed/over-long read and retry — explicit. */
            continue;
        }

        /* Walk the variable-length event records packed into event_buff. */
        while (i < len) {
            struct inotify_event *pevent = (struct inotify_event *)&event_buff[i];
            mp_inotify_event s_event = MP_INOTI_NONE;

            if (pevent->len && strncmp(pevent->name, ".", 1) == 0) {
                /* hidden files (leading '.') are ignored */
                s_event = MP_INOTI_NONE;
            } else if (pevent->mask & IN_ISDIR) { /* directory: all ignored */
                /*
                   if (pevent->mask & IN_DELETE_SELF) s_event = MP_INOTI_DELETE_SELF;
                   if (pevent->mask & IN_MOVE_SELF)   s_event = MP_INOTI_MOVE_SELF;
                   if (pevent->mask & IN_CREATE)      s_event = MP_INOTI_CREATE;
                   if (pevent->mask & IN_DELETE)      s_event = MP_INOTI_DELETE;
                   if (pevent->mask & IN_MOVED_FROM)  s_event = MP_INOTI_MOVE_OUT;
                   if (pevent->mask & IN_MOVED_TO)    s_event = MP_INOTI_MOVE_IN;
                 */
            } else { /* file */
                if (pevent->mask & IN_CREATE) {
                    /* defer CREATE until the writer closes the file */
                    s_event = MP_INOTI_NONE;
                    handle->prev_event = IN_CREATE;
                }
                if (pevent->mask & IN_CLOSE_WRITE) {
                    if (handle->prev_event == IN_CREATE) {
                        s_event = MP_INOTI_CREATE;
                    }
                    handle->prev_event = MP_INOTI_NONE;
                }
                if (pevent->mask & IN_DELETE)
                    s_event = MP_INOTI_DELETE;
                if (pevent->mask & IN_MODIFY) {
                    s_event = MP_INOTI_MODIFY;
                }
                if (pevent->mask & IN_MOVED_TO) {
                    /* NOTE(review): IN_MOVED_TO usually means "moved INTO the
                     * watched dir"; mapping it to MP_INOTI_MOVE_OUT looks
                     * inverted — confirm against the consumers. */
                    s_event = MP_INOTI_MOVE_OUT;
                }
            }

            if (s_event != MP_INOTI_NONE) {
                /* cleanup handler releases the lock if we are cancelled
                 * while the callback runs */
                pthread_cleanup_push(_mp_app_inotify_thread_clean_up,
                                     (void *)&mp_noti_lock);
                pthread_mutex_lock(&mp_noti_lock);
                if (handle->callback) {
                    handle->callback(s_event,
                                     (pevent->len) ? pevent->name : NULL,
                                     handle->u_data);
                }
                pthread_mutex_unlock(&mp_noti_lock);
                pthread_cleanup_pop(0);
            }

            i += sizeof(struct inotify_event) + pevent->len;
            if (i >= MP_EVENT_BUF_LEN)
                break;
        }
    }

    DEBUG_TRACE("end _mp_app_inotify_watch_thread!!! ");
    return NULL;
}
int sem_timedwait (sem_t * sem, const struct timespec *abstime)
/*
 * ------------------------------------------------------
 * DOCPUBLIC
 *      This function waits on a semaphore possibly until
 *      'abstime' time.
 *
 * PARAMETERS
 *      sem
 *              pointer to an instance of sem_t
 *
 *      abstime
 *              pointer to an instance of struct timespec
 *
 * DESCRIPTION
 *      This function waits on a semaphore. If the
 *      semaphore value is greater than zero, it decreases
 *      its value by one. If the semaphore value is zero, then
 *      the calling thread (or process) is blocked until it can
 *      successfully decrease the value or until interrupted by
 *      a signal.
 *
 *      If 'abstime' is a NULL pointer then this function will
 *      block until it can successfully decrease the value or
 *      until interrupted by a signal.
 *
 * RESULTS
 *              0               successfully decreased semaphore,
 *              -1              failed, error in errno
 * ERRNO
 *              EINVAL          'sem' is not a valid semaphore,
 *              ENOSYS          semaphores are not supported,
 *              EINTR           the function was interrupted by a signal,
 *              EDEADLK         a deadlock condition was detected.
 *              ETIMEDOUT       abstime elapsed before success.
 *
 * ------------------------------------------------------
 */
{
  int result = 0;

  if (sem == NULL)
    {
      result = EINVAL;
    }
  else
    {
      /* BUGFIX: the original dereferenced 'sem' BEFORE the NULL check
       * ('sem_t s = *sem;' above the if), which is undefined behavior and
       * crashes on a NULL argument.  Dereference only once validated. */
      sem_t s = *sem;
      DWORD milliseconds;

      if (abstime == NULL)
	{
	  milliseconds = INFINITE;
	}
      else
	{
	  /* Calculate timeout as milliseconds from current system time. */
	  milliseconds = ptw32_relmillisecs (abstime);
	}

      pthread_testcancel();

      if ((result = pthread_mutex_lock (&s->lock)) == 0)
	{
	  int v = --s->value;
	  (void) pthread_mutex_unlock (&s->lock);

	  if (v < 0)
	    {
#ifdef NEED_SEM
	      int timedout;
#endif
	      sem_timedwait_cleanup_args_t cleanup_args;

	      cleanup_args.sem = s;
	      cleanup_args.resultPtr = &result;

#ifdef _MSC_VER
#pragma inline_depth(0)
#endif
	      /* Must wait; the cleanup handler undoes the decrement if we
	       * time out or are cancelled while blocked. */
	      pthread_cleanup_push(ptw32_sem_timedwait_cleanup, (void *) &cleanup_args);
#ifdef NEED_SEM
	      timedout =
#endif
	      result = pthreadCancelableTimedWait (s->sem, milliseconds);
	      /* pop(result): run the cleanup handler only on failure */
	      pthread_cleanup_pop(result);
#ifdef _MSC_VER
#pragma inline_depth()
#endif

#ifdef NEED_SEM
	      if (!timedout && pthread_mutex_lock (&s->lock) == 0)
		{
		  if (s->leftToUnblock > 0)
		    {
		      --s->leftToUnblock;
		      SetEvent(s->sem);
		    }
		  (void) pthread_mutex_unlock (&s->lock);
		}
#endif /* NEED_SEM */
	    }
	}
    }

  if (result != 0)
    {
      errno = result;
      return -1;
    }

  return 0;
}				/* sem_timedwait */
/* this function deals with the incoming client request */ void *tcpsocket(void *arg) { int n; int status = 0, verbose = 0; int oldcancelstate, oldcanceltype; #ifdef ENABLE_POLLING struct pollfd fds; #endif /* these are used for communication over the TCP socket */ int client = 0; message_t *request = NULL, *response = NULL; threadlocal_t threadlocal; threadlocal.message = NULL; threadlocal.fd = -1; /* the connection to the client has been made by the server */ client = (int)arg; /* this will be closed at cleanup */ threadlocal.fd = client; pthread_cleanup_push(cleanup_tcpsocket, &threadlocal); /* this is for debugging */ pthread_mutex_lock(&mutexsocketcount); socketcount++; pthread_mutex_unlock(&mutexsocketcount); if (verbose>1) fprintf(stderr, "tcpsocket: client = %d, socketcount = %d, threadcount = %d\n", client, socketcount, threadcount); /* keep processing messages untill the connection is closed */ while (1) { int swap = 0; UINT16_T reqCommand; UINT32_T respBufSize; request = (message_t*)malloc(sizeof(message_t)); DIE_BAD_MALLOC(request); request->def = (messagedef_t*)malloc(sizeof(messagedef_t)); DIE_BAD_MALLOC(request->def); request->buf = NULL; #ifdef ENABLE_POLLING /* wait for data to become available or until the connection is closed */ /* thohar: i think this is not neccessary as we dont need a timeout. 
*/ /* roboos: we need it to detect when the socket is closed by the client */ while (1) { fds.fd = client; fds.events = POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI | POLLOUT | POLLWRNORM | POLLWRBAND | POLLERR | POLLNVAL; fds.revents = 0; if (poll(&fds, 1, 1)==-1) { perror("poll"); goto cleanup; } if (fds.revents & POLLHUP) goto cleanup; /* the connection has been closed */ else if (fds.revents & POLLERR) goto cleanup; /* the connection has been closed */ else if (fds.revents & POLLIN) break; /* data is available, process the message */ else usleep(POLLSLEEP); /* wait for data or closed connection */ } #endif if ((n = bufread(client, request->def, sizeof(messagedef_t))) != sizeof(messagedef_t)) { if (verbose>0) fprintf(stderr, "tcpsocket: packet size = %d, should be %d\n", n, sizeof(messagedef_t)); goto cleanup; } if (request->def->version==VERSION_OE) { swap = 1; ft_swap16(2, &request->def->version); /* version + command */ ft_swap32(1, &request->def->bufsize); reqCommand = request->def->command; } if (request->def->version!=VERSION) { if (verbose>0) fprintf(stderr, "tcpsocket: incorrect request version\n"); goto cleanup; } if (request->def->bufsize>0) { request->buf = malloc(request->def->bufsize); DIE_BAD_MALLOC(request->buf); if ((n = bufread(client, request->buf, request->def->bufsize)) != request->def->bufsize) { if (verbose>0) fprintf(stderr, "tcpsocket: read size = %d, should be %d\n", n, request->def->bufsize); goto cleanup; } } if (swap && request->def->bufsize > 0) ft_swap_buf_to_native(reqCommand, request->def->bufsize, request->buf); if (verbose>1) print_request(request->def); if (verbose>1) print_buf(request->buf, request->def->bufsize); if ((status = dmarequest(request, &response)) != 0) { if (verbose>0) fprintf(stderr, "tcpsocket: an unexpected error occurred\n"); goto cleanup; } DIE_BAD_MALLOC(response); DIE_BAD_MALLOC(response->def); if (verbose>1) print_response(response->def); if (verbose>1) print_buf(request->buf, request->def->bufsize); 
respBufSize = response->def->bufsize; if (swap) ft_swap_from_native(reqCommand, response); /* we don't need the request anymore */ cleanup_message(&request); request = NULL; /* merge response->def and response->buf if they are small, so we can send it in one go over TCP */ if (respBufSize + sizeof(messagedef_t) <= MERGE_THRESHOLD) { int msize = respBufSize + sizeof(messagedef_t); void *merged = NULL; append(&merged, 0, response->def, sizeof(messagedef_t)); DIE_BAD_MALLOC(merged); append(&merged, sizeof(messagedef_t), response->buf, respBufSize); DIE_BAD_MALLOC(merged); if ((n=bufwrite(client, merged, msize) != msize)) { if (verbose>0) fprintf(stderr, "tcpsocket: write size = %d, should be %d\n", n, msize); FREE(merged); goto cleanup; } FREE(merged); } else { if ((n = bufwrite(client, response->def, sizeof(messagedef_t)))!=sizeof(messagedef_t)) { if (verbose>0) fprintf(stderr, "tcpsocket: write size = %d, should be %d\n", n, sizeof(messagedef_t)); goto cleanup; } if ((n = bufwrite(client, response->buf, respBufSize))!=respBufSize) { if (verbose>0) fprintf(stderr, "tcpsocket: write size = %d, should be %d\n", n, respBufSize); goto cleanup; } } cleanup_message(&response); response = NULL; } /* while (1) */ cleanup: printf(""); /* otherwise the pthread_cleanup_pop won't compile */ if (response!=NULL) cleanup_message(&response); response = NULL; /* SK: prevent double free in following pthread_cleanup_pop */ pthread_cleanup_pop(1); /* this is for debugging */ pthread_mutex_lock(&mutexsocketcount); socketcount--; pthread_mutex_unlock(&mutexsocketcount); /* this is for debugging */ pthread_mutex_lock(&mutexthreadcount); threadcount--; pthread_mutex_unlock(&mutexthreadcount); pthread_exit(NULL); return NULL; }
U_EXPORT int sem_timedwait ( sem_t *sem, const struct timespec *abs_timeout) { int result = 0; /* Code returned by this routine 0 or -1 */ /* "Under no circumstances shall the function fail if the semaphore * can be locked immediately". So we try to get it quickly to see if we * can avoid all the timeout overheads. */ if (sem_trywait(sem) == 0) { /* Yes, got it immediately. */ result = 0; } else { /* No, we've got to do it with a sem_wait() call and a thread to run * the timeout. First, work out the time from now to the specified * timeout, which we will pass to the timeout thread in a way that can * be used to pass to nanosleep(). So we need this in seconds and * nanoseconds. Along the way, we check for an invalid passed time, * and for one that's already expired. */ if ((abs_timeout->tv_nsec < 0) || (abs_timeout->tv_nsec > 1000000000)) { /* Passed time is invalid */ result = -1; errno = EINVAL; } else { struct timeval currentTime; /* Time now */ long secsToWait,nsecsToWait; /* Seconds and nsec to delay */ gettimeofday (¤tTime,NULL); secsToWait = abs_timeout->tv_sec - currentTime.tv_sec; nsecsToWait = (abs_timeout->tv_nsec - (currentTime.tv_usec * 1000)); while (nsecsToWait < 0) { nsecsToWait += 1000000000; secsToWait--; } if ((secsToWait < 0) || ((secsToWait == 0) && (nsecsToWait < 0))) { /* Time has passed. Report an immediate timeout. */ result = -1; errno = ETIMEDOUT; } else { /* We're going to have to do a sem_wait() with a timeout thread. * The thread will wait the specified time, then will issue a * SIGUSR2 signal that will interrupt the sem_wait() call. * We pass the thread the id of the current thread, the delay, * and the address of a flag to set on a timeout, so we can * distinguish an interrupt caused by the timeout thread from * one caused by some other signal. 
*/ volatile short timedOut; /* Flag to set on timeout */ timeoutDetails details; /* All the stuff the thread must know */ struct sigaction oldSignalAction; /* Current signal setting */ pthread_t timeoutThread; /* Id of timeout thread */ cleanupDetails cleaningDetails; /* What the cleanup routine needs */ int oldCancelState; /* Previous cancellation state */ int ignoreCancelState; /* Used in call, but ignored */ int createStatus; /* Status of pthread_create() call */ /* If the current thread is cancelled (and CML does do this) * we don't want to leave our timer thread running - if we've * started the thread we want to make sure we join it in order * to release its resources. So we set a cleanup handler to * do this. We pass it the address of the structure that will * hold all it needs to know. While we set all this up, * we prevent ourselves being cancelled, so all this data is * coherent. */ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE,&oldCancelState); timeoutThread = (pthread_t) 0; cleaningDetails.timedOutShort = &timedOut; cleaningDetails.threadIdAddr = &timeoutThread; cleaningDetails.sigHandlerAddr = &oldSignalAction; pthread_cleanup_push (timeoutThreadCleanup,&cleaningDetails); /* Set up the details for the thread. Clear the timeout flag, * record the current SIGUSR2 action settings so we can restore * them later. */ details.delay.tv_sec = secsToWait; details.delay.tv_nsec = nsecsToWait; details.callingThread = pthread_self(); details.timedOutShort = &timedOut; timedOut = FALSE; sigaction (SIGUSR2,NULL,&oldSignalAction); /* Start up the timeout thread. Once we've done that, we can * restore the previous cancellation state. */ createStatus = pthread_create(&timeoutThread,NULL, timeoutThreadMain, (void*)&details); pthread_setcancelstate (oldCancelState,&ignoreCancelState); if (createStatus < 0) { /* Failed to create thread. errno will already be set properly */ result = -1; } else { /* Thread created OK. This is where we wait for the semaphore. 
*/ if (sem_wait(sem) == 0) { /* Got the semaphore OK. We return zero, and all's well. */ result = 0; } else { /* If we got a -1 error from sem_wait(), it may be because * it was interrupted by a timeout, or failed for some * other reason. We check for the expected timeout * condition, which is an 'interrupted' status and the * timeout flag set by the timeout thread. We report that as * a timeout error. Anything else is some other error and * errno is already set properly. */ result = -1; if (errno == EINTR) { if (timedOut) errno = ETIMEDOUT; } } } /* The cleanup routine - timeoutThreadCleanup() - packages up * any tidying up that is needed, including joining with the * timer thread. This will be called if the current thread is * cancelled, but we need it to happen anyway, so we set the * execute flag true here as we remove it from the list of * cleanup routines to be called. So normally, this line amounts * to calling timeoutThreadCleanup(). */ pthread_cleanup_pop (TRUE); } } } return (result); }
/*main thread routine for this bus*/ void *thr_sendrec_DCCAR(void *v) { int addr, ctr; struct timeval akt_time, cmp_time; ga_state_t gatmp; int last_cancel_state, last_cancel_type; bus_thread_t *btd = (bus_thread_t *) malloc(sizeof(bus_thread_t)); if (btd == NULL) pthread_exit((void *) 1); btd->bus = (bus_t) v; btd->fd = -1; pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &last_cancel_state); pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &last_cancel_type); /*register cleanup routine */ pthread_cleanup_push((void *) end_bus_thread, (void *) btd); syslog_bus(btd->bus, DBG_INFO, "DC-Car bus started (device = %s).", buses[btd->bus].device.file.path); /*enter endless loop to process work tasks */ while (true) { buses[btd->bus].watchdog = 1; /*POWER action arrived */ if (buses[btd->bus].power_changed == 1) handle_power_command(btd->bus); /* loop shortcut to prevent processing of GA, GL (and FB) * without power on; arriving commands will flood the command * queue */ if (buses[btd->bus].power_state == 0) { /* wait 1 ms */ if (usleep(1000) == -1) { syslog_bus(btd->bus, DBG_ERROR, "usleep() failed: %s (errno = %d)\n", strerror(errno), errno); } continue; } /*GL action arrived */ if (!queue_GL_isempty(btd->bus)) handle_gl_command(btd->bus); /*FB action arrived */ /* currently nothing to do here */ buses[btd->bus].watchdog++; /* busy wait and continue loop */ /* wait 1 ms */ if (usleep(1000) == -1) { syslog_bus(btd->bus, DBG_ERROR, "usleep() failed: %s (errno = %d)\n", strerror(errno), errno); } } /*run the cleanup routine */ pthread_cleanup_pop(1); return NULL; }