Example #1
static void *read_thread(void *param)
{
	hid_device *dev = param;
	unsigned char *buf;
	const size_t length = dev->input_ep_max_packet_size;

	/* Set up the transfer object. */
	buf = malloc(length);
	dev->transfer = libusb_alloc_transfer(0);
	libusb_fill_interrupt_transfer(dev->transfer,
		dev->device_handle,
		dev->input_endpoint,
		buf,
		length,
		read_callback,
		dev,
		5000/*timeout*/);

	/* Make the first submission. Further submissions are made
	   from inside read_callback() */
	libusb_submit_transfer(dev->transfer);

	/* Notify the main thread that the read thread is up and running. */
	pthread_barrier_wait(&dev->barrier);

	/* Handle all the events. */
	while (!dev->shutdown_thread) {
		int res;
		res = libusb_handle_events(usb_context);
		if (res < 0) {
			/* There was an error. */
			LOG("read_thread(): libusb reports error # %d\n", res);

			/* Break out of this loop only on fatal error.*/
			if (res != LIBUSB_ERROR_BUSY &&
			    res != LIBUSB_ERROR_TIMEOUT &&
			    res != LIBUSB_ERROR_OVERFLOW &&
			    res != LIBUSB_ERROR_INTERRUPTED) {
				break;
			}
		}
	}

	/* Cancel any transfer that may be pending. This call will fail
	   if no transfers are pending, but that's OK. */
	libusb_cancel_transfer(dev->transfer);

	while (!dev->cancelled)
		libusb_handle_events_completed(usb_context, &dev->cancelled);

	/* Now that the read thread is stopping, wake any threads which are
	   waiting on data (in hid_read_timeout()). Do this under a mutex to
	   make sure that a thread which is about to go to sleep waiting on
	   the condition actually will go to sleep before the condition is
	   signaled. */
	pthread_mutex_lock(&dev->mutex);
	pthread_cond_broadcast(&dev->condition);
	pthread_mutex_unlock(&dev->mutex);

	/* The dev->transfer->buffer and dev->transfer objects are cleaned up
	   in hid_close(). They are not cleaned up here because this thread
	   could end either due to a disconnect or due to a user
	   call to hid_close(). In both cases the objects can be safely
	   cleaned up after the call to pthread_join() (in hid_close()), but
	   since hid_close() calls libusb_cancel_transfer() on these objects,
	   they can not be cleaned up here. */

	return NULL;
}
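
The loop above depends on read_callback() to keep the transfer chain alive and to acknowledge the final cancellation. A minimal sketch of such a callback follows (hypothetical, not hidapi's exact code; it assumes the same hid_device fields used above and elides the actual queueing of received data):

static void read_callback(struct libusb_transfer *transfer)
{
	hid_device *dev = transfer->user_data;

	if (transfer->status == LIBUSB_TRANSFER_COMPLETED) {
		/* ... append transfer->buffer to the device's input queue and
		   signal dev->condition so hid_read_timeout() wakes up ... */
	}
	else if (transfer->status == LIBUSB_TRANSFER_CANCELLED) {
		/* Let read_thread() know its libusb_cancel_transfer() finished. */
		dev->cancelled = 1;
		return; /* a cancelled transfer must not be resubmitted */
	}

	/* Resubmit so that reads continue until shutdown. */
	if (libusb_submit_transfer(transfer) < 0)
		dev->shutdown_thread = 1;
}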
Example #2
static
VOID
ProcessRunnable(
    PKQUEUE_THREAD pThread,
    PKQUEUE_COMMANDS pCommands,
    PRING pRunnable,
    PRING pTimed,
    PRING pWaiting,
    LONG64 llNow
    )
{
    ULONG ulTicks = MAX_TICKS;
    PLW_TASK pTask = NULL;
    PLW_TASK_GROUP pGroup = NULL;
    PRING pRing = NULL;
    PRING pNext = NULL;
    
    /* We are guaranteed to run each task at least once.  If tasks remain
       on the runnable list by yielding, we will continue to run them
       all in a round robin until our ticks are depleted. */
    while (ulTicks && !RingIsEmpty(pRunnable))
    {
        for (pRing = pRunnable->pNext; pRing != pRunnable; pRing = pNext)
        {
            pNext = pRing->pNext;
            
            pTask = LW_STRUCT_FROM_FIELD(pRing, KQUEUE_TASK, QueueRing);
            
            RunTask(pTask, llNow);

            if (ulTicks)
            {
                ulTicks--;
            }
            
            if (pTask->EventWait != LW_TASK_EVENT_COMPLETE)
            {
                if (pTask->EventWait & LW_TASK_EVENT_YIELD)
                {
                    /* Task is yielding.  Set the YIELD flag and
                       leave it on the runnable list for the next iteration. */
                    pTask->EventArgs |= LW_TASK_EVENT_YIELD;
                }   
                else 
                {
                    /* Task is still waiting on events, update kqueue */
                    UpdateEventWait(pCommands, pTask);

                    if (pTask->EventWait & LW_TASK_EVENT_TIME)
                    {
                        /* If the task is waiting for a timeout, 
                           insert it into the timed queue */
                        RingRemove(&pTask->QueueRing);
                        InsertTimedQueue(pTimed, pTask);
                    }
                    else
                    {
                        /* Otherwise, put it in the generic waiting queue */
                        RingRemove(&pTask->QueueRing);
                        RingEnqueue(pWaiting, &pTask->QueueRing);
                    }
                }
            }
            else
            {
                /* Task is complete */
                RingRemove(&pTask->QueueRing);

                /* Remove any associated events from the kqueue */
                if (pTask->Fd >= 0)
                {
                    (void) LwRtlSetTaskFd(pTask, pTask->Fd, 0);
                }

                /* Unsubscribe task from any UNIX signals */
                if (pTask->pUnixSignal)
                {
                    RegisterTaskUnixSignal(pTask, 0, FALSE);
                }

                LOCK_POOL(pThread->pPool);
                pThread->ulLoad--;
                UNLOCK_POOL(pThread->pPool);
                
                pGroup = pTask->pGroup;

                /* If task was in a task group, remove it and notify anyone waiting
                   on the group */
                if (pGroup)
                {
                    LOCK_GROUP(pGroup);
                    pTask->pGroup = NULL;
                    RingRemove(&pTask->GroupRing);
                    pthread_cond_broadcast(&pGroup->Event);
                    UNLOCK_GROUP(pGroup);
                }
                
                LOCK_THREAD(pThread);
                if (--pTask->ulRefCount)
                {
                    /* The task still has a reference, so mark it as completed
                       and notify anyone waiting on it */
                    pTask->EventSignal = TASK_COMPLETE_MASK;
                    pthread_cond_broadcast(&pThread->Event);
                    UNLOCK_THREAD(pThread);
                }
                else
                {
                    /* We held the last reference to the task, so delete it */
                    RingRemove(&pTask->SignalRing);
                    UNLOCK_THREAD(pThread);
                    TaskDelete(pTask);
                }
            }
        }
    }

    /* Update kevent commands for yielding tasks */
    for (pRing = pRunnable->pNext; pRing != pRunnable; pRing = pRing->pNext)
    {
        pTask = LW_STRUCT_FROM_FIELD(pRing, KQUEUE_TASK, QueueRing);

        if (pTask->EventArgs & LW_TASK_EVENT_YIELD)
        {
            UpdateEventWait(pCommands, pTask);
        }
    }
}
Example #3
//int ThreadPool::AddWork( ThreadPoolWorker *worker, void *arg )
//int ThreadPool::AddWork( ThreadPoolWorker *worker, Data *data )
int ThreadPool::AddWork( void (*workerThreadFunction)(void *), void *arg )
{
    int             rtn;
    ThreadPoolWork *workp;

    if( (rtn = pthread_mutex_lock(&queueLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_lock %s", strerror(rtn) );
        return( 1 );
    }

    // No space and this caller doesn't want to wait.
    if( currentQueueSize == maxQueueSize )
    {
        if( doNotBlockWhenFull )
        {
            if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
            {
                fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
                return( 2 );
            }

            // User did not want to wait to add the work.
            return( -1 );
        }
        else
        {
            while( (currentQueueSize == maxQueueSize) && (! (shutdown || queueClosed)) )
            {
                if( (rtn = pthread_cond_wait(&queueNotFull, &queueLock)) != 0 )
                {
                    fprintf( stderr, "pthread_cond_wait %s", strerror(rtn) );
                    pthread_mutex_unlock( &queueLock );  // don't return with the lock held
                    return( 3 );
                }
            }
        }
    }

    // the pool is in the process of being destroyed.
    if( shutdown || queueClosed )
    {
        if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
        {
            fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
            return( 4 );
        }

        return( -1 );
    }

    // Allocate work structure.
    if( (workp = (ThreadPoolWork *)malloc(sizeof(ThreadPoolWork))) == NULL )
    {
        fprintf( stderr, "ThreadPoolWork Malloc failed" );
        pthread_mutex_unlock( &queueLock );  // don't return with the lock held
        return( 5 );
    }
    workp->routine = workerThreadFunction;
    workp->arg     = arg;
    workp->next    = NULL;

    if( currentQueueSize == 0 )
    {
        queueTail = queueHead = workp;
    }
    else
    {
        queueTail->next = workp;
        queueTail       = workp;
    }

    currentQueueSize++;

    if( (rtn = pthread_cond_broadcast(&queueNotEmpty)) != 0 )
    {
        fprintf( stderr, "pthread_cond_broadcast %s", strerror(rtn) );
        pthread_mutex_unlock( &queueLock );  // don't return with the lock held
        return( 6 );
    }
    if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
        return( 7 );
    }

    return( 0 );
}
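
AddWork() is only the producer half of the handshake. A hedged sketch of the consumer loop it pairs with might look like this (workerLoop is a hypothetical member; only the fields and condition variables referenced above are assumed to exist):

// Hypothetical consumer: block on queueNotEmpty, pop one item, and wake
// any producer blocked on queueNotFull before running the work routine.
void ThreadPool::workerLoop()
{
    ThreadPoolWork *workp;

    for( ;; )
    {
        pthread_mutex_lock( &queueLock );

        while( (currentQueueSize == 0) && !shutdown )
            pthread_cond_wait( &queueNotEmpty, &queueLock );

        if( shutdown )
        {
            pthread_mutex_unlock( &queueLock );
            return;
        }

        workp     = queueHead;
        queueHead = workp->next;
        if( --currentQueueSize == 0 )
            queueTail = NULL;

        pthread_cond_broadcast( &queueNotFull );
        pthread_mutex_unlock( &queueLock );

        workp->routine( workp->arg );
        free( workp );
    }
}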
Example #4
void startServer(char * addr, recordid hash) {
  struct addrinfo hints;
  struct addrinfo *result, *rp;
  int sfd, s;
  struct sockaddr peer_addr;
  socklen_t peer_addr_len;
  //  ssize_t nread;
  //char buf[BUF_SIZE];

  memset(&hints, 0, sizeof(struct addrinfo));
  hints.ai_family = AF_INET; //AF_UNSPEC;    /* IPv4 only (AF_UNSPEC would also allow IPv6) */
  hints.ai_socktype = SOCK_STREAM; /* Stream (TCP) socket */
  hints.ai_flags = AI_PASSIVE;    /* For wildcard IP address */
  hints.ai_protocol = 0;          /* Any protocol */

    printf("Listening on socket\n");

  s = getaddrinfo("127.0.0.1", addr, &hints, &result);
  if (s != 0) {
    fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(s));
    exit(EXIT_FAILURE);
  }

  /* getaddrinfo() returns a list of address structures.
     Try each address until we successfully bind().
     If socket(2) (or bind(2)) fails, we (close the socket
     and) try the next address. */

  for (rp = result; rp != NULL; rp = rp->ai_next) {
    sfd = socket(rp->ai_family, rp->ai_socktype,
		 rp->ai_protocol);
    if (sfd == -1)
      continue;

    if (bind(sfd, rp->ai_addr, rp->ai_addrlen) == 0)
      break;                  /* Success */

    close(sfd);
  }

  if (rp == NULL) {               /* No address succeeded */
    perror("Could not bind\n");
    exit(EXIT_FAILURE);
  }

  freeaddrinfo(result);           /* No longer needed */

  int err = listen(sfd, MAX_CONN_QUEUE);
  if(err == -1) {
    perror("Couldn't listen()");
    return;
  }

  printf("Spawning servers.\n"); fflush(NULL);

  for(int i = 0; i < THREAD_POOL; i++) {
    thread_arg * arg = malloc(sizeof(thread_arg));
    arg->id = i;
    arg->hash = hash;
    interpreterStates[i] = INTERPRETER_IDLE;
    interpreterConnections[i] = 0;
    pthread_create(&interpreters[i], 0, interpreterThread, arg);
  }

  printf("Ready for connections.\n"); fflush(NULL);

  /* Accept connections and hand each one off to an interpreter thread */

  for (;;) {
    peer_addr_len = sizeof(peer_addr); /* must be initialized before accept() */
    int fd = accept(sfd, &peer_addr, &peer_addr_len);
    if(fd == -1) {
      perror("Error accepting connection");
    } else {
      FILE * sock = fdopen(fd, "w+");

      pthread_mutex_lock(&interpreter_mut);

      //      int ret = openInterpreter(sock, sock, hash);
      //      fclose(sock);
      //      if(ret) {
      while(nextSocket) {
	pthread_cond_wait(&nextSocket_cond, &interpreter_mut);
      }

      nextSocket = sock;
      pthread_cond_signal(&interpreter_cond);

      pthread_mutex_unlock(&interpreter_mut);
      if(shuttingdown) {
	break;
      }

    }
  }

  pthread_cond_broadcast(&interpreter_cond);
  for(int i = 0; i < THREAD_POOL; i++) {
    pthread_join(interpreters[i],0);
  }
  close(sfd);

}
Example #5
MU_TEST(stress, parallel)
{
    Data data;
    pthread_t threads[NUM_THREADS];
    int i;
    LWMsgContext* context = NULL;
    LWMsgProtocol* protocol = NULL;
    LWMsgPeer* client = NULL;
    LWMsgPeer* server = NULL;
    CounterRequest request;
    CounterReply* reply;
    LWMsgCall* call;
    LWMsgParams in = LWMSG_PARAMS_INITIALIZER;
    LWMsgParams out = LWMSG_PARAMS_INITIALIZER;
    LWMsgTime timeout = {1, 0};

    MU_TRY(lwmsg_context_new(NULL, &context));
    lwmsg_context_set_log_function(context, lwmsg_test_log_function, NULL);

    MU_TRY(lwmsg_protocol_new(context, &protocol));
    MU_TRY(lwmsg_protocol_add_protocol_spec(protocol, counterprotocol_spec));

    MU_TRY(lwmsg_peer_new(context, protocol, &server));
    MU_TRY(lwmsg_peer_add_dispatch_spec(server, counter_dispatch));
    MU_TRY(lwmsg_peer_add_listen_endpoint(server, LWMSG_CONNECTION_MODE_LOCAL, TEST_ENDPOINT, 0600));
    MU_TRY(lwmsg_peer_set_max_listen_clients(server, MAX_CLIENTS));
    MU_TRY(lwmsg_peer_set_timeout(server, LWMSG_TIMEOUT_IDLE, &timeout));
    MU_TRY(lwmsg_peer_start_listen(server));

    MU_TRY(lwmsg_peer_new(context, protocol, &client));
    MU_TRY(lwmsg_peer_add_connect_endpoint(client, LWMSG_CONNECTION_MODE_LOCAL, TEST_ENDPOINT));

    request.counter = 0;

    MU_TRY(lwmsg_peer_acquire_call(client, &call));
    in.tag = COUNTER_OPEN;
    in.data = &request;

    MU_TRY(lwmsg_call_dispatch(call, &in, &out, NULL, NULL));
    
    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, out.tag, COUNTER_OPEN_SUCCESS);
    lwmsg_call_release(call);

    data.client = client;
    data.handle = out.data;
    data.iters = NUM_ITERS;
    data.go = 0;
    
    pthread_mutex_init(&data.lock, NULL);
    pthread_cond_init(&data.event, NULL);

    pthread_mutex_lock(&data.lock);
    for (i = 0; i < NUM_THREADS; i++)
    {
        pthread_create(&threads[i], NULL, add_thread, &data);
    }
    data.go = 1;
    pthread_cond_broadcast(&data.event);
    pthread_mutex_unlock(&data.lock);

    for (i = 0; i < NUM_THREADS; i++)
    {
        pthread_join(threads[i], NULL);
    }

    MU_TRY(lwmsg_peer_acquire_call(client, &call));
    in.tag = COUNTER_READ;
    in.data = data.handle;

    MU_TRY(lwmsg_call_dispatch(call, &in, &out, NULL, NULL));
    
    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, out.tag, COUNTER_READ_SUCCESS);
    reply = out.data;

    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, reply->counter, NUM_THREADS * NUM_ITERS);

    lwmsg_call_destroy_params(call, &out);
    lwmsg_call_release(call);
    
    MU_TRY(lwmsg_peer_acquire_call(client, &call));
    in.tag = COUNTER_CLOSE;
    in.data = data.handle;

    MU_TRY(lwmsg_call_dispatch(call, &in, &out, NULL, NULL));
    
    MU_ASSERT_EQUAL(MU_TYPE_INTEGER, out.tag, COUNTER_CLOSE_SUCCESS);

    lwmsg_call_destroy_params(call, &out);
    lwmsg_call_release(call);

    MU_TRY(lwmsg_peer_disconnect(client));
    lwmsg_peer_delete(client);

    MU_TRY(lwmsg_peer_stop_listen(server));
    lwmsg_peer_delete(server);

    pthread_mutex_destroy(&data.lock);
    pthread_cond_destroy(&data.event);
}
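
The test gates its workers on data.go so that all NUM_THREADS hit the counter at once. A sketch of what such an add_thread worker might look like (hypothetical: the COUNTER_ADD tag and any Data fields beyond those set above are assumptions, not the suite's actual code):

/* Hypothetical worker: wait for the go flag under data->lock, then issue
   data->iters COUNTER_ADD calls against the shared handle. */
static void *add_thread(void *arg)
{
    Data *data = arg;
    LWMsgCall *call;
    LWMsgParams in = LWMSG_PARAMS_INITIALIZER;
    LWMsgParams out = LWMSG_PARAMS_INITIALIZER;
    int i;

    pthread_mutex_lock(&data->lock);
    while (!data->go)
    {
        pthread_cond_wait(&data->event, &data->lock);
    }
    pthread_mutex_unlock(&data->lock);

    for (i = 0; i < data->iters; i++)
    {
        MU_TRY(lwmsg_peer_acquire_call(data->client, &call));
        in.tag = COUNTER_ADD;
        in.data = data->handle;
        MU_TRY(lwmsg_call_dispatch(call, &in, &out, NULL, NULL));
        lwmsg_call_destroy_params(call, &out);
        lwmsg_call_release(call);
    }

    return NULL;
}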
Example #6
static void
get_keys (void)
{
  int search;
  int c, quit = 1, scrll, offset, ok_mouse;
  int *scroll_ptr, *offset_ptr;
  int exp_size = DASH_EXPANDED - DASH_NON_DATA;
  MEVENT event;

  char buf[LINE_BUFFER];
  FILE *fp = NULL;
  unsigned long long size1 = 0, size2 = 0;

  if (!logger->piping)
    size1 = file_size (conf.ifile);
  while (quit) {
    c = wgetch (stdscr);
    switch (c) {
    case 'q':  /* quit */
      if (!scrolling.expanded) {
        quit = 0;
        break;
      }
      collapse_current_module ();
      break;
    case KEY_F (1):
    case '?':
    case 'h':
      load_help_popup (main_win);
      render_screens ();
      break;
    case 49:   /* 1 */
      /* reset expanded module */
      set_module_to (&scrolling, VISITORS);
      break;
    case 50:   /* 2 */
      /* reset expanded module */
      set_module_to (&scrolling, REQUESTS);
      break;
    case 51:   /* 3 */
      /* reset expanded module */
      set_module_to (&scrolling, REQUESTS_STATIC);
      break;
    case 52:   /* 4 */
      /* reset expanded module */
      set_module_to (&scrolling, NOT_FOUND);
      break;
    case 53:   /* 5 */
      /* reset expanded module */
      set_module_to (&scrolling, HOSTS);
      break;
    case 54:   /* 6 */
      /* reset expanded module */
      set_module_to (&scrolling, OS);
      break;
    case 55:   /* 7 */
      /* reset expanded module */
      set_module_to (&scrolling, BROWSERS);
      break;
    case 56:   /* 8 */
      /* reset expanded module */
      set_module_to (&scrolling, REFERRERS);
      break;
    case 57:   /* 9 */
      /* reset expanded module */
      set_module_to (&scrolling, REFERRING_SITES);
      break;
    case 48:   /* 0 */
      /* reset expanded module */
      set_module_to (&scrolling, KEYPHRASES);
      break;
    case 33:   /* Shift+1 */
      /* reset expanded module */
#ifdef HAVE_LIBGEOIP
      set_module_to (&scrolling, GEO_LOCATION);
#else
      set_module_to (&scrolling, STATUS_CODES);
#endif
      break;
#ifdef HAVE_LIBGEOIP
    case 64:   /* Shift+2 */
      /* reset expanded module */
      set_module_to (&scrolling, STATUS_CODES);
      break;
#endif
    case 9:    /* TAB */
      /* reset expanded module */
      collapse_current_module ();
      scrolling.current++;
      if (scrolling.current == TOTAL_MODULES)
        scrolling.current = 0;
      render_screens ();
      break;
    case 353:  /* Shift TAB */
      /* reset expanded module */
      collapse_current_module ();
      if (scrolling.current == 0)
        scrolling.current = TOTAL_MODULES - 1;
      else
        scrolling.current--;
      render_screens ();
      break;
    case 'g':  /* g = top */
      if (!scrolling.expanded)
        scrolling.dash = 0;
      else {
        scrolling.module[scrolling.current].scroll = 0;
        scrolling.module[scrolling.current].offset = 0;
      }
      display_content (main_win, logger, dash, &scrolling);
      break;
    case 'G':  /* G = down */
      if (!scrolling.expanded)
        scrolling.dash = dash->total_alloc - real_size_y;
      else {
        scrll = offset = 0;
        scrll = dash->module[scrolling.current].idx_data - 1;
        if (scrll >= exp_size && scrll >= offset + exp_size)
          offset = scrll < exp_size - 1 ? 0 : scrll - exp_size + 1;
        scrolling.module[scrolling.current].scroll = scrll;
        scrolling.module[scrolling.current].offset = offset;
      }
      display_content (main_win, logger, dash, &scrolling);
      break;
      /* expand dashboard module */
    case KEY_RIGHT:
    case 0x0a:
    case 0x0d:
    case 32:   /* SPACE */
    case 79:   /* O */
    case 111:  /* o */
    case KEY_ENTER:
      if (scrolling.expanded && scrolling.current == HOSTS) {
        /* make sure we have a valid IP */
        int sel = scrolling.module[scrolling.current].scroll;
        if (!invalid_ipaddr (dash->module[HOSTS].data[sel].data))
          load_agent_list (main_win, dash->module[HOSTS].data[sel].data);
        break;
      }
      if (scrolling.expanded)
        break;
      reset_scroll_offsets (&scrolling);
      scrolling.expanded = 1;

      free_holder_by_module (&holder, scrolling.current);
      free_dashboard (dash);
      allocate_holder_by_module (scrolling.current);
      allocate_data ();

      display_content (main_win, logger, dash, &scrolling);
      break;
    case KEY_DOWN:     /* scroll main dashboard */
      if ((scrolling.dash + real_size_y) < (unsigned) dash->total_alloc) {
        scrolling.dash++;
        display_content (main_win, logger, dash, &scrolling);
      }
      break;
    case KEY_MOUSE:    /* handles mouse events */
      ok_mouse = getmouse (&event);
      if (conf.mouse_support && ok_mouse == OK) {
        if (event.bstate & BUTTON1_CLICKED) {
          /* ignore header/footer clicks */
          if (event.y < MAX_HEIGHT_HEADER || event.y == LINES - 1)
            break;

          if (set_module_from_mouse_event (&scrolling, dash, event.y))
            break;
          reset_scroll_offsets (&scrolling);
          scrolling.expanded = 1;

          free_holder_by_module (&holder, scrolling.current);
          free_dashboard (dash);
          allocate_holder_by_module (scrolling.current);
          allocate_data ();

          render_screens ();
        }
      }
      break;
    case 106:  /* j - DOWN expanded module */
      scroll_ptr = &scrolling.module[scrolling.current].scroll;
      offset_ptr = &scrolling.module[scrolling.current].offset;

      if (!scrolling.expanded)
        break;
      if (*scroll_ptr >= dash->module[scrolling.current].idx_data - 1)
        break;
      ++(*scroll_ptr);
      if (*scroll_ptr >= exp_size && *scroll_ptr >= *offset_ptr + exp_size)
        ++(*offset_ptr);
      display_content (main_win, logger, dash, &scrolling);
      break;
      /* scroll up main_win */
    case KEY_UP:
      if (scrolling.dash > 0) {
        scrolling.dash--;
        display_content (main_win, logger, dash, &scrolling);
      }
      break;
    case 2:    /* ^ b - page up */
    case 339:  /* ^ PG UP */
      scroll_ptr = &scrolling.module[scrolling.current].scroll;
      offset_ptr = &scrolling.module[scrolling.current].offset;

      if (!scrolling.expanded)
        break;
      /* decrease scroll and offset by exp_size */
      *scroll_ptr -= exp_size;
      if (*scroll_ptr < 0)
        *scroll_ptr = 0;

      if (*scroll_ptr < *offset_ptr)
        *offset_ptr -= exp_size;
      if (*offset_ptr <= 0)
        *offset_ptr = 0;
      display_content (main_win, logger, dash, &scrolling);
      break;
    case 6:    /* ^ f - page down */
    case 338:  /* ^ PG DOWN */
      scroll_ptr = &scrolling.module[scrolling.current].scroll;
      offset_ptr = &scrolling.module[scrolling.current].offset;

      if (!scrolling.expanded)
        break;

      *scroll_ptr += exp_size;
      if (*scroll_ptr >= dash->module[scrolling.current].idx_data - 1)
        *scroll_ptr = dash->module[scrolling.current].idx_data - 1;
      if (*scroll_ptr >= exp_size && *scroll_ptr >= *offset_ptr + exp_size)
        *offset_ptr += exp_size;
      if (*offset_ptr + exp_size >=
          dash->module[scrolling.current].idx_data - 1)
        *offset_ptr = dash->module[scrolling.current].idx_data - exp_size;
      if (*scroll_ptr < exp_size - 1)
        *offset_ptr = 0;

      display_content (main_win, logger, dash, &scrolling);
      break;
    case 107:  /* k - UP expanded module */
      scroll_ptr = &scrolling.module[scrolling.current].scroll;
      offset_ptr = &scrolling.module[scrolling.current].offset;

      if (!scrolling.expanded)
        break;
      if (*scroll_ptr <= 0)
        break;
      --(*scroll_ptr);
      if (*scroll_ptr < *offset_ptr)
        --(*offset_ptr);
      display_content (main_win, logger, dash, &scrolling);
      break;
    case 'n':
      pthread_mutex_lock (&gdns_thread.mutex);
      search = perform_next_find (holder, &scrolling);
      pthread_mutex_unlock (&gdns_thread.mutex);
      if (search == 0) {
        free_dashboard (dash);
        allocate_data ();
        render_screens ();
      }
      break;
    case '/':
      if (render_find_dialog (main_win, &scrolling))
        break;
      pthread_mutex_lock (&gdns_thread.mutex);
      search = perform_next_find (holder, &scrolling);
      pthread_mutex_unlock (&gdns_thread.mutex);
      if (search == 0) {
        free_dashboard (dash);
        allocate_data ();
        render_screens ();
      }
      break;
    case 99:   /* c */
      if (conf.no_color)
        break;
      load_schemes_win (main_win);
      free_dashboard (dash);
      allocate_data ();
      render_screens ();
      break;
    case 115:  /* s */
      load_sort_win (main_win, scrolling.current,
                     &module_sort[scrolling.current]);
      pthread_mutex_lock (&gdns_thread.mutex);
      free_holder (&holder);
      pthread_cond_broadcast (&gdns_thread.not_empty);
      pthread_mutex_unlock (&gdns_thread.mutex);
      free_dashboard (dash);
      allocate_holder ();
      allocate_data ();
      render_screens ();
      break;
    case 269:
    case KEY_RESIZE:
      endwin ();
      refresh ();
      werase (header_win);
      werase (main_win);
      werase (stdscr);
      term_size (main_win);
      refresh ();
      render_screens ();
      break;
    default:
      if (logger->piping)
        break;
      size2 = file_size (conf.ifile);

      /* file has changed */
      if (size2 != size1) {
        if (!(fp = fopen (conf.ifile, "r")))
          FATAL ("Unable to read log file %s.", strerror (errno));
        if (!fseeko (fp, size1, SEEK_SET))
          while (fgets (buf, LINE_BUFFER, fp) != NULL)
            parse_log (&logger, buf, -1);
        fclose (fp);

        size1 = size2;
        pthread_mutex_lock (&gdns_thread.mutex);
        free_holder (&holder);
        pthread_cond_broadcast (&gdns_thread.not_empty);
        pthread_mutex_unlock (&gdns_thread.mutex);

        free_dashboard (dash);
        allocate_holder ();
        allocate_data ();

        term_size (main_win);
        render_screens ();
        usleep (200000);        /* 0.2 seconds */
      }
      break;
    }
  }
}
Example #7
void * jpeg_highlighter_algorithm(JOB_ARG *job)
{
	GimpRunMode mode = GIMP_RUN_NONINTERACTIVE;
	int num_return_vals;
	gint32 layer, temp_layer;
	char temp_file_name[256];
	int ii;

	printf("inside %s thread %d\n", jpeg_plugin.name, job->thread);

// We really only want to run this once, but the plugin doesn't know how many threads will get kicked off because it's dynamic.
	if(job->thread == 0)
	{

		sleep(1);


		sprintf(temp_file_name,"%stemp.jpg",job->file_name);


//		mkstemp(file_name);
		printf("using filename %s\n", temp_file_name);


		printf("saving jpeg at %f compression\n", jpeg_compress);
		gimp_progress_set_text("waiting for jpeg save\n");

		gimp_run_procedure("file-jpeg-save",&num_return_vals, GIMP_PDB_INT32, mode, GIMP_PDB_IMAGE, job->image_id , GIMP_PDB_DRAWABLE, job->drawable->drawable_id, GIMP_PDB_STRING, temp_file_name, GIMP_PDB_STRING, "temp", GIMP_PDB_FLOAT, jpeg_compress, GIMP_PDB_FLOAT, 0.0, GIMP_PDB_INT32, 0, GIMP_PDB_INT32, 0, GIMP_PDB_STRING,"created with Koi", GIMP_PDB_INT32, 0, GIMP_PDB_INT32, 1, GIMP_PDB_INT32, 0, GIMP_PDB_INT32, 1, GIMP_PDB_END);
//		for(ii = 0; ii < SLEEP_TIME; ii++)
//		{
//			job->progress = ((float)ii/SLEEP_TIME) * 4;
			sleep(1);
//		}



		printf("saved jpeg\n");
//		sleep(1);
		// reload our saved image and suck a layer off of it to subtract against our original image
		temp_layer = gimp_file_load_layer(mode, job->image_id, temp_file_name);

		printf("loaded new layer %d in image %d\n", temp_layer, job->image_id);

		//gimp_layer_add_alpha(temp_layer);

		gimp_layer_set_mode(temp_layer, 8); // 8 = GIMP_SUBTRACT_MODE in the legacy layer-mode enum (assumption)

		printf("set layer mode %d\n", temp_layer);

		/* Add the new layer to this image as the top layer */
		if (gimp_image_add_layer(job->image_id, temp_layer, -1) != TRUE)
		{
			printf("failed to create layer\n");
			return NULL; /* this function returns void *, so a bare return is invalid */
		}


		printf("set layer as top\n");

		layer = gimp_image_get_active_layer(job->image_id);
		if (layer == -1)
		{
			printf("failed to get active layer\n");
			return NULL;
		}

		gimp_image_merge_down(job->image_id, layer, 2);

		printf("merged layers\n");
		job->drawable->drawable_id = gimp_image_get_active_drawable(job->image_id);
//		printf("get active drawable\n");
//		gimp_brightness_contrast(job->drawable->drawable_id, 126, 125);
//		printf("adjust contrast\n");
//
//		printf("Jpeg threshold: %d\n",jpeg_threshold);
//
//		//I should have this subtract against an edge detection layer and then threshold it
//
//		gimp_threshold(job->drawable->drawable_id, jpeg_threshold,255 );
//		printf("threshold\n");

//			if(! gimp_drawable_has_alpha (job->drawable->drawable_id))
//			{
//				 /* some filtermacros do not work with layer that do not have an alpha channel
//				 * and cause gimp to fail on attempt to call gimp_pixel_rgn_init
//				  * with both dirty and shadow flag set to TRUE
//				  * in this situation GIMP displays the error message
//				  *    "expected tile ack and received: 5"
//				  *    and causes the called plug-in to exit immediate without success
//				  * Therfore always add an alpha channel before calling a filtermacro.
//				  */
//				  gimp_layer_add_alpha(layer);
//				  printf("adding alpha channel\n");
//		   }

		remove(temp_file_name);

		sleep(1);

//		job->progress = 4;

		/* Clear the flag under the mutex before broadcasting; broadcasting
		   first can let a waiter wake, see jpeg_wait still set, and block
		   forever with no further wakeup. */
		pthread_mutex_lock(&jpeg_mutex);
		printf("got lock\n");
		jpeg_wait = 0;
		pthread_cond_broadcast(&jpeg_cond);
		pthread_mutex_unlock(&jpeg_mutex);

		printf("drawable ID after jpeg %d\n",gimp_image_get_active_drawable(job->image_id));

	}
	else
	{
		printf("thread %d waiting\n", job->thread);
		pthread_mutex_lock(&jpeg_mutex);
		while (jpeg_wait)
		{
			pthread_cond_wait(&jpeg_cond, &jpeg_mutex);
		}
		pthread_mutex_unlock(&jpeg_mutex);
	}


	job->progress = 1;

	return NULL;
}
Example #8
 void ConditionSys::broadcast() { pthread_cond_broadcast((pthread_cond_t*)cond); }
Example #9
static void *
event_dispatch_epoll_worker (void *data)
{
        struct epoll_event  event;
        int                 ret = -1;
        struct event_thread_data *ev_data = data;
	struct event_pool  *event_pool;
        int                 myindex = -1;
        int                 timetodie = 0;

        GF_VALIDATE_OR_GOTO ("event", ev_data, out);

        event_pool = ev_data->event_pool;
        myindex = ev_data->event_index;

        GF_VALIDATE_OR_GOTO ("event", event_pool, out);

        gf_msg ("epoll", GF_LOG_INFO, 0, LG_MSG_STARTED_EPOLL_THREAD, "Started"
                " thread with index %d", myindex);

        pthread_mutex_lock (&event_pool->mutex);
        {
                event_pool->activethreadcount++;
        }
        pthread_mutex_unlock (&event_pool->mutex);

	for (;;) {
                if (event_pool->eventthreadcount < myindex) {
                        /* ...time to die, thread count was decreased below
                         * this threads index */
                        /* Start with extra safety at this point, reducing
                         * lock contention in normal case when threads are not
                         * reconfigured always */
                        pthread_mutex_lock (&event_pool->mutex);
                        {
                                if (event_pool->eventthreadcount <
                                    myindex) {
                                        /* if found true in critical section,
                                         * die */
                                        event_pool->pollers[myindex - 1] = 0;
                                        event_pool->activethreadcount--;
                                        timetodie = 1;
                                        pthread_cond_broadcast (&event_pool->cond);
                                }
                        }
                        pthread_mutex_unlock (&event_pool->mutex);
                        if (timetodie) {
                                gf_msg ("epoll", GF_LOG_INFO, 0,
                                        LG_MSG_EXITED_EPOLL_THREAD, "Exited "
                                        "thread with index %d", myindex);
                                goto out;
                        }
                }

                ret = epoll_wait (event_pool->fd, &event, 1, -1);

                if (ret == 0)
                        /* timeout */
                        continue;

                if (ret == -1 && errno == EINTR)
                        /* sys call */
                        continue;

		ret = event_dispatch_epoll_handler (event_pool, &event);
        }
out:
        if (ev_data)
                GF_FREE (ev_data);
        return NULL;
}
Example #10
 void broadcast() {
   // keep the call outside assert() so it still runs when NDEBUG is defined
   int rc = pthread_cond_broadcast(&native_);
   assert(rc == 0);
 }
Example #11
/* Post to all threads */
static void bsem_post_all(bsem *bsem_p) {
	pthread_mutex_lock(&bsem_p->mutex);
	bsem_p->v = 1;
	pthread_cond_broadcast(&bsem_p->cond);
	pthread_mutex_unlock(&bsem_p->mutex);
}
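
A post is only half of this binary-semaphore pattern: the matching wait must re-check v in a loop under the same mutex before consuming it. A typical counterpart looks like the following sketch (the one-shot reset of v to 0 is an assumption about the rest of the code):

/* Block until some thread posts, then consume the post by resetting v. */
static void bsem_wait(bsem *bsem_p) {
	pthread_mutex_lock(&bsem_p->mutex);
	while (bsem_p->v != 1) {
		pthread_cond_wait(&bsem_p->cond, &bsem_p->mutex);
	}
	bsem_p->v = 0;
	pthread_mutex_unlock(&bsem_p->mutex);
}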
Example #12
      /* Recursive helper function.
      * Returns a pointer to the node if found.
      * Stores an optional pointer to the
      * parent, or what should be the parent if not found.
      *
      * If it returns a node, there will be a lock on that node
      */
      struct trie_node *
      _delete (struct trie_node *node, const char *string,
        size_t strlen) {
          int keylen, cmp;
          int rc;
          // First things first, check if we are NULL

          if (node == NULL) return NULL;


          // lock the node here,
          rc = pthread_mutex_lock(&(node->lock));
          assert(rc == 0);


          assert(node->strlen < 64);

          // See if this key is a substring of the string passed in
          cmp = compare_keys (node->key, node->strlen, string, strlen, &keylen);
          if (cmp == 0) {
            // Yes, either quit, or recur on the children

            // If this key is longer than our search string, the key isn't here
            if (node->strlen > keylen)
            {
              // unlock node
              rc = pthread_mutex_unlock(&(node->lock));
              assert(rc == 0);

              return NULL;
            }
            else if (strlen > keylen)
            {
              // found won't be unlocked by this method
              struct trie_node *found =  _delete(node->children, string, strlen - keylen);

              if (found)
              {
                if(found->waiting > 0 && allow_squatting)
                {
                  pthread_cond_broadcast(&(found->c));
                  rc = pthread_mutex_unlock(&(found->lock));
                  assert(rc == 0);

                }
                else
                {
                  /* If the node doesn't have children, delete it.
                  * Otherwise, keep it around to find the kids */
                  if (found->children == NULL && found->ip4_address == 0)
                  {
                    // shouldn't need to lock found->next because, as long as the parent is locked,
                    // nothing bad should be able to happen to it (check what insert does)
                    assert(node->children == found);
                    node->children = found->next;
                    // unlock found
                    rc = pthread_mutex_unlock(&(found->lock));
                    assert(rc == 0);


                    free(found);

                  }
                  else
                  {
                    rc = pthread_mutex_unlock(&(found->lock));
                    assert(rc == 0);

                  }
                }

                // unlock found

                // TODO get root lock
                /* Delete the root node if we empty the tree */
                if (node == root && node->children == NULL && node->ip4_address == 0)
                {
                  if(node->waiting > 0 && allow_squatting)
                  {
                    pthread_cond_broadcast(&(node->c));
                    rc = pthread_mutex_unlock(&(node->lock));
                    assert(rc == 0);

                  }
                  else
                  {
                    root = node->next;

                    rc = pthread_mutex_unlock(&(node->lock));
                    assert(rc == 0);


                    free(node);
                  }
                }

                // don't unlock node, we are still doing operations on it
                return node; /* Recursively delete needless interior nodes */
              }
              else
              {
                rc = pthread_mutex_unlock(&(node->lock));
                assert(rc == 0);

                return NULL;
              }
            }
            else
            {
              assert (strlen == keylen);

              /* We found it! Clear the ip4 address and return. */
              if (node->ip4_address)
              {
                node->ip4_address = 0;

                //TODO lock root
                /* Delete the root node if we empty the tree */
                if (node == root && node->children == NULL && node->ip4_address == 0)
                {

                  root = node->next;


                  rc = pthread_mutex_unlock(&(node->lock));
                  assert(rc == 0);


                  // don't free if waiting > 0

                  free(node);
                  return (struct trie_node *) 0x100100; /* XXX: Don't use this pointer for anything except
                  * comparison with NULL, since the memory is freed.
                  * Return a "poison" pointer that will probably
                  * segfault if used.
                  */
                }

                // don't unlock node, we are still doing operations on it
                return node;
              }
              else
              {
                /* Just an interior node with no value */
                rc = pthread_mutex_unlock(&(node->lock));
                assert(rc == 0);

                return NULL;
              }
            }
          }
          else if (cmp < 0) {
            // No, look right (the node's key is "less" than  the search key)

            // found should be locked
            struct trie_node *found = _delete(node->next, string, strlen);

            if (found)
            {
              if(found->waiting > 0 && allow_squatting)
              {
                pthread_cond_broadcast(&(found->c));
                rc = pthread_mutex_unlock(&(found->lock));
                assert(rc == 0);

              }
              else
              {
                /* If the node doesn't have children, delete it.
                * Otherwise, keep it around to find the kids */
                if (found->children == NULL && found->ip4_address == 0)
                {
                  assert(node->next == found);
                  node->next = found->next;
                  rc = pthread_mutex_unlock(&(found->lock));
                  assert(rc == 0);


                  free(found);
                }
                else
                {
                  rc = pthread_mutex_unlock(&(found->lock));
                  assert(rc == 0);

                }
              }

              return node; /* Recursively delete needless interior nodes */
            }

            rc = pthread_mutex_unlock(&(node->lock));
            assert(rc == 0);

            return NULL;
          }
          else
          {
            // Quit early
            rc = pthread_mutex_unlock(&(node->lock));
            assert(rc == 0);

            return NULL;
          }

        }
Example #13
void silc_cond_broadcast(SilcCond cond)
{
#ifdef SILC_THREADS
  pthread_cond_broadcast(&cond->cond);
#endif /* SILC_THREADS */
}
Example #14
int main(int argc, char *argv[]) {

    /* read command line arguments */
    int c;
    char *err = NULL;
    char *p, *q;

    int auth_mode = AUTH_OFF;
    char *auth_username = NULL;
    char *auth_password = NULL;
    char *auth_realm = NULL;

    opterr = 0;
    while ((c = getopt(argc, argv, "a:c:dhlm:p:qs:t:")) != -1) {
        switch (c) {
            case 'a': /* authentication */
                if (!strcmp(optarg, "basic")) {
                    auth_mode = AUTH_BASIC;
                }
                break;

            case 'c': /* credentials */
                p = q = optarg;
                while (*q && *q != ':') {
                    q++;
                }
                auth_username = strndup(p, q - p);

                if (!*q) {
                    ERROR("invalid credentials");
                    return -1;
                }
                p = q = q + 1;
                while (*q && *q != ':') {
                    q++;
                }
                auth_password = strndup(p, q - p);

                if (!*q) {
                    ERROR("invalid credentials");
                    return -1;
                }
                p = q = q + 1;
                while (*q && *q != ':') {
                    q++;
                }
                auth_realm = strndup(p, q - p);

                break;

            case 'd': /* debug */
                log_level = 2;
                break;

            case 'h': /* help */
                print_help();
                return 0;

            case 'l': /* listen on localhost */
                listen_localhost = 1;
                break;

            case 'm': /* max clients */
                max_clients = strtol(optarg, &err, 10);
                if (*err != 0) {
                    ERROR("invalid clients number \"%s\"", optarg);
                    return -1;
                }
                break;

            case 'p': /* tcp port */
                tcp_port = strtol(optarg, &err, 10);
                if (*err != 0) {
                    ERROR("invalid port \"%s\"", optarg);
                    return -1;
                }
                break;

            case 'q': /* quiet */
                log_level = 0;
                break;

            case 's': /* input separator */
                input_separator = strdup(optarg);
                break;

            case 't': /* client timeout */
                client_timeout = strtol(optarg, &err, 10);
                if (*err != 0) {
                    ERROR("invalid client timeout \"%s\"", optarg);
                    return -1;
                }
                break;

            case '?':
                ERROR("unknown or incomplete option \"-%c\"", optopt);
                return -1;

            default:
                print_help();
                return -1;
        }
    }

    if (auth_mode) {
        if (!auth_username || !auth_password || !auth_realm) {
            ERROR("credentials are required when using authentication");
            return -1;
        }

        set_auth(auth_mode, auth_username, auth_password, auth_realm);
    }

    if (!tcp_port) {
        tcp_port = DEF_TCP_PORT;
    }

    INFO("streamEye %s", STREAM_EYE_VERSION);
    INFO("hello!");

    if (input_separator && strlen(input_separator) < 4) {
        INFO("the input separator supplied is very likely to appear in the actual frame data (consider a longer one)");
    }

    /* signals */
    DEBUG("installing signal handlers");
    struct sigaction act;
    act.sa_handler = bye_handler;
    act.sa_flags = 0;
    sigemptyset(&act.sa_mask);

    if (sigaction(SIGINT, &act, NULL) < 0) {
        ERRNO("sigaction() failed");
        return -1;
    }
    if (sigaction(SIGTERM, &act, NULL) < 0) {
        ERRNO("sigaction() failed");
        return -1;
    }
    if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) {
        ERRNO("signal() failed");
        return -1;
    }

    /* threading */
    DEBUG("initializing thread synchronization");
    if (pthread_cond_init(&jpeg_cond, NULL)) {
        ERROR("pthread_cond_init() failed");
        return -1;
    }
    if (pthread_mutex_init(&jpeg_mutex, NULL)) {
        ERROR("pthread_mutex_init() failed");
        return -1;
    }
    if (pthread_mutex_init(&clients_mutex, NULL)) {
        ERROR("pthread_mutex_init() failed");
        return -1;
    }

    /* tcp server */
    DEBUG("starting server");
    int socket_fd = init_server();
    if (socket_fd < 0) {
        ERROR("failed to start server");
        return -1;
    }

    INFO("listening on %s:%d", listen_localhost ? "127.0.0.1" : "0.0.0.0", tcp_port);

    /* main loop */
    char input_buf[INPUT_BUF_LEN];
    char *sep = NULL;
    int size, rem_len = 0, i;

    double now, min_client_frame_int;
    double frame_int_adj;
    double frame_int = 0;
    double last_frame_time = get_now();

    int auto_separator = 0;
    int input_separator_len;
    if (!input_separator) {
        auto_separator = 1;
        input_separator_len = 4; /* strlen(JPEG_START) + strlen(JPEG_END) */
        input_separator = malloc(input_separator_len + 1);
        snprintf(input_separator, input_separator_len + 1, "%s%s", JPEG_END, JPEG_START);
    }
    else {
        input_separator_len = strlen(input_separator);
    }

    while (running) {
        size = read(STDIN_FILENO, input_buf, INPUT_BUF_LEN);
        if (size < 0) {
            if (errno == EINTR) {
                break;
            }

            ERRNO("input: read() failed");
            return -1;
        }
        else if (size == 0) {
            DEBUG("input: end of stream");
            running = 0;
            break;
        }

        if (size > JPEG_BUF_LEN - 1 - jpeg_size) {
            ERROR("input: jpeg size too large, discarding buffer");
            jpeg_size = 0;
            continue;
        }

        if (pthread_mutex_lock(&jpeg_mutex)) {
            ERROR("pthread_mutex_lock() failed");
            return -1;
        }

        /* clear the ready flag for all clients,
         * as we start building the next frame */
        for (i = 0; i < num_clients; i++) {
            clients[i]->jpeg_ready = 0;
        }

        if (rem_len) {
            /* copy the remainder of data from the previous iteration back to the jpeg buffer */
            memmove(jpeg_buf, sep + (auto_separator ? 2 /* strlen(JPEG_END) */ : input_separator_len), rem_len);
            jpeg_size = rem_len;
        }

        memcpy(jpeg_buf + jpeg_size, input_buf, size);
        jpeg_size += size;

        /* look behind at most 2 * INPUT_BUF_LEN for a separator */
        sep = (char *) memmem(jpeg_buf + jpeg_size - MIN(2 * INPUT_BUF_LEN, jpeg_size), MIN(2 * INPUT_BUF_LEN, jpeg_size),
                input_separator, input_separator_len);

        if (sep) { /* found a separator, jpeg frame is ready */
            if (auto_separator) {
                rem_len = jpeg_size - (sep - jpeg_buf) - 2 /* strlen(JPEG_START) */;
                jpeg_size = sep - jpeg_buf + 2 /* strlen(JPEG_END) */;
            }
            else {
                rem_len = jpeg_size - (sep - jpeg_buf) - input_separator_len;
                jpeg_size = sep - jpeg_buf;
            }

            DEBUG("input: jpeg buffer ready with %d bytes", jpeg_size);

            /* set the ready flag and notify all client threads about it */
            for (i = 0; i < num_clients; i++) {
                clients[i]->jpeg_ready = 1;
            }
            if (pthread_cond_broadcast(&jpeg_cond)) {
                ERROR("pthread_cond_broadcast() failed");
                return -1;
            }

            now = get_now();
            frame_int = frame_int * 0.7 + (now - last_frame_time) * 0.3;
            last_frame_time = now;
        }
        else {
            rem_len = 0;
        }

        if (pthread_mutex_unlock(&jpeg_mutex)) {
            ERROR("pthread_mutex_unlock() failed");
            return -1;
        }

        if (sep) {
            DEBUG("current fps: %.01lf", 1 / frame_int);

            if (num_clients) {
                min_client_frame_int = clients[0]->frame_int;
                for (i = 0; i < num_clients; i++) {
                    if (clients[i]->frame_int < min_client_frame_int) {
                        min_client_frame_int = clients[i]->frame_int;
                    }
                }

                frame_int_adj = (min_client_frame_int - frame_int) * 1000000;
                if (frame_int_adj > 0) {
                    DEBUG("input frame int.: %.0lf us, client frame int.: %.0lf us, frame int. adjustment: %.0lf us",
                            frame_int * 1000000, min_client_frame_int * 1000000, frame_int_adj);

                    /* sleep between 1000 and 50000 us, depending on the frame interval adjustment */
                    usleep(MAX(1000, MIN(4 * frame_int_adj, 50000)));
                }
            }

            /* check for incoming clients;
             * placing this code inside the if (sep) will simply
             * reduce the number of times we check for incoming clients,
             * with no particular relation to the frame separator we've just found */
            client_t *client = NULL;

            if (!max_clients || num_clients < max_clients) {
                client = wait_for_client(socket_fd);
            }

            if (client) {
                if (pthread_create(&client->thread, NULL, (void *(*) (void *)) handle_client, client)) {
                    ERROR("pthread_create() failed");
                    return -1;
                }

                if (pthread_mutex_lock(&clients_mutex)) {
                    ERROR("pthread_mutex_lock() failed");
                    return -1;
                }

                clients = realloc(clients, sizeof(client_t *) * (num_clients + 1));
                clients[num_clients++] = client;

                DEBUG("current clients: %d", num_clients);

                if (pthread_mutex_unlock(&clients_mutex)) {
                    ERROR("pthread_mutex_unlock() failed");
                    return -1;
                }
            }
        }
    }
    
    running = 0;

    DEBUG("closing server");
    close(socket_fd);

    DEBUG("waiting for clients to finish");
    for (i = 0; i < num_clients; i++) {
        clients[i]->jpeg_ready = 1;
    }
    if (pthread_cond_broadcast(&jpeg_cond)) {
        ERROR("pthread_cond_broadcast() failed");
        return -1;
    }

    for (i = 0; i < num_clients; i++) {
        pthread_join(clients[i]->thread, NULL);
    }

    if (pthread_mutex_destroy(&clients_mutex)) {
        ERROR("pthread_mutex_destroy() failed");
        return -1;
    }
    if (pthread_mutex_destroy(&jpeg_mutex)) {
        ERROR("pthread_mutex_destroy() failed");
        return -1;
    }
    if (pthread_cond_destroy(&jpeg_cond)) {
        ERROR("pthread_cond_destroy() failed");
        return -1;
    }

    INFO("bye!");

    return 0;
}
Example #15
void printData(void *args)
{
	printThreadArgs *threadArgs = (printThreadArgs *)args;
	
	pthread_mutex_t *mtx;
	pthread_cond_t *cnd;

	//pthread_cond_t cnd;
	//pthread_mutex_t mtx;

        //memcpy(&mtx,&threadArgs->mutex,sizeof(pthread_mutex_t));
        //memcpy(&cnd,&threadArgs->cond,sizeof(pthread_cond_t));

	mtx = &threadArgs->mutex;
	cnd = &threadArgs->cond;
	
	int eof = 0;

	do
	{
		printf("R_PRINT : WAITING FOR THE LOCK\n");
       		pthread_mutex_lock(mtx);
		printf("R_PRINT : WAITING FOR THE COND\n");
		while(client_wndw.CWP<client_wndw.wndw_size)
		pthread_cond_wait(cnd,mtx);


		if(client_wndw.CRP == -1 && client_wndw.CWP>0)
		{
			printf("R_PRINT : GOT THE LOCK\n");
			printf("client_wndw.CRP : %d\n",client_wndw.CRP);
			printf("client_wndw.CWP : %d\n",client_wndw.CWP);

			while(client_wndw.CRP != client_wndw.CWP-1)
			{
				client_wndw.CRP = (client_wndw.CRP + 1);
				fprintf(file,"%s",client_wndw.windElems[client_wndw.CRP].data);
				fputs(client_wndw.windElems[client_wndw.CRP].data,stdout);
				if(client_wndw.windElems[client_wndw.CRP].header.type == 104)
				{
					eof = 1;
					fclose(file);
					break;
	
				}
	
			}	
		}

		if( (client_wndw.CRP == client_wndw.CWP-1) || (eof==1))
		{
			client_wndw.CRP=client_wndw.CWP-1;
			printf("SIGNALLED BACK TO R_READ\n");
			pthread_cond_broadcast(cnd);
		}

		pthread_mutex_unlock(mtx);

		sleep(1);

	}while(eof==0);
       	//pthread_mutex_unlock(mtx);
	
}
Example #16
void SipperProxyQueue::stopQueue(void)
{
   int oldstate;
   pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
   pthread_mutex_lock(&tEventQueueMutex);

   if(bQueueStopped)
   {
      pthread_mutex_unlock(&tEventQueueMutex);
      pthread_setcancelstate(oldstate, NULL);
      return;
   }

   bQueueStopped = true;

   pthread_cond_broadcast(&waitingFeederCond);
   pthread_cond_broadcast(&waitingConsumerCond);

   for(unsigned int idx = 0; idx < MAX_QUEUE_THR; idx++)
   {
      if(feederData[idx].count)
      {
         pthread_cond_signal(&feederData[idx].condition);
      }

      if(consumerData[idx].count)
      {
         pthread_cond_signal(&consumerData[idx].condition);
      }
   }

   while(ptHeadPtr != NULL)
   {
      t_EventQueueNodePtr currNode = ptHeadPtr;
      ptHeadPtr  = ptHeadPtr->ptNextNode;
      iQueueCount--;

      if(_cleanupFunc != NULL)
      {
         _cleanupFunc(currNode->_queueData);
      }

      if(iFreeNodes < highWaterMark)
      {
         currNode->ptNextNode = ptFreeList;
         ptFreeList = currNode;

         iFreeNodes++;
      }
      else
      {
         delete currNode;
      }

      if(ptHeadPtr == NULL)
      {
         ptTailPtr = NULL;
      }
   }

   pthread_mutex_unlock(&tEventQueueMutex);
   pthread_setcancelstate(oldstate, NULL);
}
Example #17
int
main()
{
  int failed = 0;
  int i;
  pthread_t t[NUMTHREADS + 1];

  struct _timeb currSysTime;
  const DWORD NANOSEC_PER_MILLISEC = 1000000;

  cvthing.shared = 0;

  assert((t[0] = pthread_self()).p != NULL);

  assert(cvthing.notbusy == PTHREAD_COND_INITIALIZER);

  assert(cvthing.lock == PTHREAD_MUTEX_INITIALIZER);

  assert(pthread_mutex_lock(&start_flag) == 0);

  _ftime(&currSysTime);

  abstime.tv_sec = currSysTime.time;
  abstime.tv_nsec = NANOSEC_PER_MILLISEC * currSysTime.millitm;

  abstime.tv_sec += 10;

  assert((t[0] = pthread_self()).p != NULL);

  awoken = 0;

  for (i = 1; i <= NUMTHREADS; i++)
    {
      threadbag[i].started = 0;
      threadbag[i].threadnum = i;
      assert(pthread_create(&t[i], NULL, mythread, (void *) &threadbag[i]) == 0);
    }

  /*
   * Code to control or manipulate child threads should probably go here.
   */

  assert(pthread_mutex_unlock(&start_flag) == 0);

  /*
   * Give threads time to start.
   */
  Sleep(1000);

  /*
   * Cancel one of the threads.
   */
  assert(pthread_cancel(t[1]) == 0);
  assert(pthread_join(t[1], NULL) == 0);

  assert(pthread_mutex_lock(&cvthing.lock) == 0);

  cvthing.shared++;

  /*
   * Signal all remaining waiting threads.
   */
  assert(pthread_cond_broadcast(&cvthing.notbusy) == 0);

  assert(pthread_mutex_unlock(&cvthing.lock) == 0);

  /*
   * Wait for all threads to complete.
   */
  for (i = 2; i <= NUMTHREADS; i++)
    assert(pthread_join(t[i], NULL) == 0);

  /* 
   * Cleanup the CV.
   */
  
  assert(pthread_mutex_destroy(&cvthing.lock) == 0);

  assert(cvthing.lock == NULL);

  assert(pthread_cond_destroy(&cvthing.notbusy) == 0);

  assert(cvthing.notbusy == NULL);

  /*
   * Standard check that all threads started.
   */
  for (i = 1; i <= NUMTHREADS; i++)
    { 
      failed = !threadbag[i].started;

      if (failed)
	{
	  fprintf(stderr, "Thread %d: started %d\n", i, threadbag[i].started);
	}
    }

  assert(!failed);

  /*
   * Check any results here.
   */

  assert(awoken == (NUMTHREADS - 1));

  /*
   * Success.
   */
  return 0;
}
Example #18
unsigned int SipperProxyQueue::eventDequeueBlk(SipperProxyQueueData *outdata, unsigned int count, 
                                      unsigned int timeout, bool blockFlag)
{
   int oldstate;

   pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
   pthread_mutex_lock(&tEventQueueMutex);

   while(sleepingConsumers == MAX_QUEUE_THR && bQueueStopped == false)
   {
      waitingConsumers++;
      pthread_cond_wait(&waitingConsumerCond, &tEventQueueMutex);
      waitingConsumers--;
   }

   unsigned int ret = 0;
   t_EventQueueNodePtr currNode = NULL;

   int consumerIdx = -1;

   int alreadyTimedOut = 0; 
   int timeCalculated = 0;
   struct timespec WaitTime;

   do
   {
      //Logically this if condition should not be there. Even when the queue is
      //stopped, dequeue should still succeed while there are messages held in
      //the queue. This condition can leak memory, as consumers can't get the
      //messages. The only reason I can think of for adding it is that during
      //shutdown we want the consumer to come out quickly - Suriya.
      //A CleanupFunction registration has now been added to take care of the
      //memory leak caused by this condition.
      if(bQueueStopped == true)
      {
         break;
      }

      for(; ret < count; ret++)
      {
         if(ptHeadPtr != NULL)
         {
            currNode = ptHeadPtr;
            ptHeadPtr  = ptHeadPtr->ptNextNode;
            iQueueCount--;

            outdata[ret] = currNode->_queueData;

            if(iFreeNodes < highWaterMark)
            {
               currNode->ptNextNode = ptFreeList;
               ptFreeList = currNode;

               iFreeNodes++;
            }
            else
            {
               delete currNode;
            }

            if(ptHeadPtr == NULL)
            {
               ptTailPtr = NULL;
            }
         }
         else
         {
            break;
         }
      }

      if(ret == count || blockFlag == false)
      {
         break;
      }

      if(bQueueStopped == true)
      {
         pthread_cond_broadcast(&waitingConsumerCond);
         break;
      }

      if(consumerIdx == -1)
      {
         consumerIdx = _getFreeThr(consumerData);
      }

      if((count - ret) >= highWaterMark)
      {
         consumerData[consumerIdx].count = highWaterMark;
      }
      else
      {
         consumerData[consumerIdx].count = count - ret;
      }

      if(minConsumer != -1)
      {
         if(consumerData[minConsumer].count > consumerData[consumerIdx].count)
         {
            minConsumer = consumerIdx;
         }
      }
      else
      {
         minConsumer = consumerIdx;
      }

      //Queue is Empty.

      if(sleepingFeeders)
      {
         int locid = _calculateMax(feederData);

         pthread_cond_signal(&feederData[locid].condition);
      }

      if(alreadyTimedOut)
      {
         break;
      }

      sleepingConsumers++;

      if(timeout == 0)
      {
         pthread_cond_wait(&consumerData[consumerIdx].condition, 
                           &tEventQueueMutex);
      }
      else
      {
         if(timeCalculated == 0)
         {
            struct timeval currTime;

            gettimeofday(&currTime, NULL);

            WaitTime.tv_sec = currTime.tv_sec;
            WaitTime.tv_nsec = (currTime.tv_usec * 1000);

            WaitTime.tv_sec += (timeout / 1000);
            long millisec = timeout % 1000;
            WaitTime.tv_nsec += (millisec * 1000000);

            if(WaitTime.tv_nsec >= 1000000000L)
            {
               WaitTime.tv_nsec -= 1000000000L;
               WaitTime.tv_sec++;
            }

            timeCalculated = 1;
         }

         if(pthread_cond_timedwait(&consumerData[consumerIdx].condition,
                                &tEventQueueMutex, &WaitTime) == ETIMEDOUT)
         {
            alreadyTimedOut = 1;
         }
      }

      sleepingConsumers--;
   }while(1);

   if(consumerIdx != -1)
   {
      consumerData[consumerIdx].count = 0;

      if(minConsumer == consumerIdx)
      {
         minConsumer = _calculateMin(consumerData);
      }
   }

   if(sleepingFeeders)
   {
      if((feederData[minFeeder].count <= iFreeNodes) ||
         (iQueueCount <= lowWaterMark))
      {
         pthread_cond_signal(&feederData[minFeeder].condition);
      }
   }

   if(sleepingConsumers)
   {
      if(consumerData[minConsumer].count <= iQueueCount)
      {
         pthread_cond_signal(&consumerData[minConsumer].condition);
      }
   }

   if(waitingConsumers)
   {
      pthread_cond_signal(&waitingConsumerCond);
   }

   pthread_mutex_unlock(&tEventQueueMutex);
   pthread_setcancelstate(oldstate, NULL);
   return ret;
}
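One caveat with the deadline computed above: it is derived from gettimeofday(), so pthread_cond_timedwait() measures the timeout against the wall clock, and a clock step (e.g. an NTP adjustment) can shorten or lengthen the wait. Where pthread_condattr_setclock() is available, a hedged sketch of a monotonic-clock variant, using the same millisecond `timeout` convention as the code above:

/* Sketch: condition variable whose timedwait runs on CLOCK_MONOTONIC. */
pthread_condattr_t ca;
pthread_cond_t cond;
struct timespec deadline;

pthread_condattr_init(&ca);
pthread_condattr_setclock(&ca, CLOCK_MONOTONIC);
pthread_cond_init(&cond, &ca);
pthread_condattr_destroy(&ca);

clock_gettime(CLOCK_MONOTONIC, &deadline);
deadline.tv_sec  += timeout / 1000;               /* timeout in milliseconds */
deadline.tv_nsec += (timeout % 1000) * 1000000L;
if (deadline.tv_nsec >= 1000000000L)
{
   deadline.tv_nsec -= 1000000000L;
   deadline.tv_sec++;
}
/* pthread_cond_timedwait(&cond, &mtx, &deadline) now times out on the monotonic clock. */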
Example #19
0
static int cond_lock_broadcast(cond_lock_t *cond)
{
    return pthread_cond_broadcast(&cond->cond);
}
Example #20
0
static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	const struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; /* 1 second */

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE) {
			pthread_mutex_lock(&vcpu->state_sleep_mtx);
			vcpu_unlock(vcpu);
			pthread_cond_timedwait_relative_np(&vcpu->state_sleep_cnd,
				&vcpu->state_sleep_mtx, &ts);
			vcpu_lock(vcpu);
			pthread_mutex_unlock(&vcpu->state_sleep_mtx);
			//msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
		}
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;

	if (newstate == VCPU_IDLE)
		pthread_cond_broadcast(&vcpu->state_sleep_cnd);
		//wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		xhyve_abort("Error %d setting state to %d\n", error, newstate);
}
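Given the transition table above, an ioctl-style operation brackets its work between a freeze and an unfreeze; the return to VCPU_IDLE is what triggers the broadcast that releases any waiters parked in vcpu_set_state_locked(). An illustrative sketch (not from the source), reusing vcpu_set_state()/vcpu_require_state() as they are called above:

/* Hedged sketch: the IDLE -> FROZEN -> IDLE bracket around an operation. */
int error = vcpu_set_state(vm, vcpuid, VCPU_FROZEN, true);  /* from_idle: waits until IDLE */
if (error == 0) {
	/* ... exclusive access to the vcpu here ... */
	vcpu_require_state(vm, vcpuid, VCPU_IDLE);          /* broadcasts state_sleep_cnd */
}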
Example #21
0
int main(int argc,char** argv)
{
	int ret;
	int fd = -1;
	signal(SIGPIPE,signalproc);//ignore the SIGPIPE signal; this is required
#ifdef QSTP //=========================== QSTP-only section =============================================================
	int isPushAudio = 0;
	//Third-party vendors first obtain a token and the push server IP/port from the backend server, then compose the connect URL; to target a specific push server, set its IP and port in the URL's ip/port fields.
	//Example push URL using a push server assigned by Lingyang scheduling:
	char* url = "topvdn://183.57.151.111:1935?protocolType=2&connectType=1&token=1003791_3222536192_1493481600_b118f1066b417b6751a4804253d6d236&mode=2";

	if(argc < 2)
	{
		printf("usage:./%s [videoName] [audioName]\n",argv[0]);
		return 0;
	}
	
	if(argc > 2)
	{
		isPushAudio = 1;
	}
	while(1)
	{
		fd = LY_connectToRelayServer(url);
		if(fd < 0)
		{
			printf("connect to lingyang cloud relay server failed,retry...\n");
			usleep(300*1000);
			continue;
		}
		break;
	}
	
	videoName = argv[1];
	if(argc > 2)
	{
		audioName = argv[2];
	}

	if(startPushMedia(fd,isPushAudio) != 0)
	{
		printf("start push failed\n");
		return -1;
	}
	while(1)
	{
		sleep(3);
	}
//====================================== section including the cloud platform ====================================================//
#elif defined(LY_PLATFORM)
	if(argc < 2)
	{
		printf("Usage:%s <videoFileName> [audioFileName]\n",argv[0]);
		return -1;
	}
	videoName = argv[1];
	if(argc > 2)
		audioName = argv[2];
	//step1. Obtain the token and configString from the backend; third-party integrators implement this according to their own business logic.
	//Lingyang example:
	char* deviceToken = "1003791_3222536192_1493481600_b118f1066b417b6751a4804253d6d236";
	char* configStr = "[Config]\r\nIsDebug=0\r\nLocalBasePort=8200\r\nIsCaptureDev=1\r\nIsPlayDev=1\r\n"
	                  "UdpSendInterval=2\r\nConnectTimeout=10000\r\nTransferTimeout=10000\r\n"
	                  "[Tracker]\r\nCount=3\r\nIP1=121.42.156.148\r\nPort1=80\r\n"
	                  "IP2=182.254.149.39\r\nPort2=80\r\nIP3=203.195.157.248\r\nPort3=80\r\n"
	                  "[LogServer]\r\nCount=1\r\nIP1=120.26.74.53\r\nPort1=80\r\n";
	
	//step2. Start the Lingyang cloud service
	ret = startLyCloudService(deviceToken,configStr);
	if(ret != 0)
	{
		printf("!!!start ly cloud service failed\n");
		return 0;
	}
	
	//step3. Implement your own business logic based on the Lingyang cloud callback messages
#if 1
	//Because there is no interaction with a real backend server here, simulate the backend's messages
	sleep(3);
	pthread_mutex_lock(&endPointConfig.mutex);
	endPointConfig.audioFlag = 1;//audio on/off switch
	endPointConfig.type = 2;//work mode
	endPointConfig.trans_config.qstpOpenFlag = 1;
	endPointConfig.typeChanged = 1;
	pthread_cond_broadcast(&endPointConfig.cond);
	pthread_mutex_unlock(&endPointConfig.mutex);
	sleep(3);
	//startPushAudio(endPointConfig.fds[0]);
	sleep(10);
	printf("main stop push audio\n");
	//stopPushAudio(endPointConfig.fds[0])
#endif
	while(1)
	{
		//keep alive
		sleep(1);
	}
	//step4. Stop the Lingyang cloud service
	stopLyCloudService();
#endif
	printf("app exit\n");
	return 0;
}
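The simulated backend block above publishes new settings under endPointConfig.mutex and then broadcasts on endPointConfig.cond. A hypothetical consumer loop on the other side of that condvar would look roughly like this (the field usage is assumed from the names; the real consumer is not shown):

/* Hedged sketch of the presumed consumer of endPointConfig. */
pthread_mutex_lock(&endPointConfig.mutex);
while (!endPointConfig.typeChanged)
	pthread_cond_wait(&endPointConfig.cond, &endPointConfig.mutex);
endPointConfig.typeChanged = 0;
/* ... apply endPointConfig.type and endPointConfig.audioFlag ... */
pthread_mutex_unlock(&endPointConfig.mutex);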
Example #22
0
int start_command(JNIEnv *env, jclass clazz __attribute__((unused)), jstring jhandler, jstring jcmd, jobjectArray jenv) {
  const char *utf;
  handler *h;
  int id;
  struct cmd_start_info *start_info;
  message *m;
  child_node *c;
  
  id = -1;
  m=NULL;
  c=NULL;
  utf=NULL;
  
  if(!authenticated()) {
    LOGE("%s: not authenticated", __func__);
    return -1;
  }
  
  if(!jhandler) {
    LOGE("%s: handler cannot be null", __func__);
    return -1;
  }
  
  utf = (*env)->GetStringUTFChars(env, jhandler, NULL);
  
  if(!utf) {
    LOGE("%s: cannot get handler name", __func__);
    goto jni_error;
  }
  
  h = get_handler_by_name(utf);
  
  if(!h) {
    LOGE("%s: handler \"%s\" not found", __func__, utf);
    goto exit;
  }
  
  (*env)->ReleaseStringUTFChars(env, jhandler, utf);
  utf=NULL;
  
  m = create_message(get_sequence(&ctrl_seq, &ctrl_seq_lock),
                      sizeof(struct cmd_start_info), CTRL_ID);
  
  if(!m) {
    LOGE("%s: cannot create messages", __func__);
    goto exit;
  }
  
  start_info = (struct cmd_start_info *) m->data;
  start_info->cmd_action = CMD_START;
  start_info->hid = h->id;
  
  if(jcmd && parse_cmd(env, h, jcmd, m)) {
    LOGE("%s: cannot parse command", __func__);
    goto exit;
  }
  
  if(jenv && parse_env(env, jenv, m)) {
    LOGE("%s: cannot parse environment", __func__);
    goto exit;
  }
  
  // create child
  
  c = create_child(m->head.seq);
  
  if(!c) {
    LOGE("%s: cannot craete child", __func__);
    goto exit;
  }
  
  c->handler = h;
  
  // add child to list
  
  pthread_mutex_lock(&(children.control.mutex));
  list_add(&(children.list), (node *) c);
  pthread_mutex_unlock(&(children.control.mutex));
  
  // send message to cSploitd
  
  pthread_mutex_lock(&write_lock);
  // OPTIMIZATION: use id to store return value for later check
  id = send_message(sockfd, m);
  pthread_mutex_unlock(&write_lock);
  
  if(id) {
    LOGE("%s: cannot send messages", __func__);
    // mark it as failed
    c->id = CTRL_ID;
    c->seq = 0;
  }
  
  id=-1;
  
  // wait for CMD_STARTED or CMD_FAIL
  
  pthread_mutex_lock(&(children.control.mutex));
  
  while(c->seq && children.control.active)
    pthread_cond_wait(&(children.control.cond), &(children.control.mutex));
  
  if(c->id == CTRL_ID || c->seq) { // command failed
    list_del(&(children.list), (node *) c);
  } else {
    id = c->id;
  }
  
  c->pending = 0;
  
  pthread_mutex_unlock(&(children.control.mutex));
  
  pthread_cond_broadcast(&(children.control.cond));
  
  if(id != -1) {
    LOGI("%s: child #%d started", __func__, id);
  } else if(c->seq) {
    LOGW("%s: pending child cancelled", __func__);
  } else {
    LOGW("%s: cannot start command", __func__);
  }
  
  goto exit;
  
  jni_error:
  if((*env)->ExceptionCheck(env)) {
    (*env)->ExceptionDescribe(env);
    (*env)->ExceptionClear(env);
  }
  
  exit:
  
  if(m)
    free_message(m);
  
  if(utf)
    (*env)->ReleaseStringUTFChars(env, jhandler, utf);
  
  return id;
}
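start_command() blocks on children.control.cond until the reader side clears c->seq. A minimal sketch of the reply path it presumably pairs with; everything beyond the names already used above (`started`, `child_id`) is an assumption:

/* Hedged sketch: on receiving CMD_STARTED or CMD_FAIL for a pending child,
   the reader thread resolves it and wakes every thread blocked above. */
pthread_mutex_lock(&(children.control.mutex));
c->id  = started ? child_id : CTRL_ID;  /* CTRL_ID marks failure */
c->seq = 0;                             /* releases the wait loop in start_command() */
pthread_mutex_unlock(&(children.control.mutex));
pthread_cond_broadcast(&(children.control.cond));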
Example #23
0
static int
do_test (void)
{
  struct
  {
    pthread_mutex_t m;
    pthread_cond_t c;
    int var;
  } *p = mmap (NULL, sizeof (*p), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
  if (p == MAP_FAILED)
    {
      printf ("initial mmap failed: %m\n");
      return 1;
    }

  pthread_mutexattr_t ma;
  if (pthread_mutexattr_init (&ma) != 0)
    {
      puts ("mutexattr_init failed");
      return 1;
    }
  if (pthread_mutexattr_setpshared (&ma, PTHREAD_PROCESS_SHARED) != 0)
    {
      puts ("mutexattr_setpshared failed");
      return 1;
    }
  if (pthread_mutex_init (&p->m, &ma) != 0)
    {
      puts ("mutex_init failed");
      return 1;
    }
  if (pthread_mutexattr_destroy (&ma) != 0)
    {
      puts ("mutexattr_destroy failed");
      return 1;
    }

  pthread_condattr_t ca;
  if (pthread_condattr_init (&ca) != 0)
    {
      puts ("condattr_init failed");
      return 1;
    }
  if (pthread_condattr_setpshared (&ca, PTHREAD_PROCESS_SHARED) != 0)
    {
      puts ("condattr_setpshared failed");
      return 1;
    }
  if (pthread_cond_init (&p->c, &ca) != 0)
    {
      puts ("mutex_init failed");
      return 1;
    }
  if (pthread_condattr_destroy (&ca) != 0)
    {
      puts ("condattr_destroy failed");
      return 1;
    }

  if (pthread_mutex_lock (&p->m) != 0)
    {
      puts ("initial mutex_lock failed");
      return 1;
    }

  p->var = 42;

  pid_t pid = fork ();
  if (pid == -1)
    {
      printf ("fork failed: %m\n");
      return 1;
    }

  if (pid == 0)
    {
      void *oldp = p;
      p = mmap (NULL, sizeof (*p), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

      if (p == oldp)
	{
	  puts ("child: mapped to same address");
	  kill (getppid (), SIGKILL);
	  exit (1);
	}

      munmap (oldp, sizeof (*p));

      if (pthread_mutex_lock (&p->m) != 0)
	{
	  puts ("child: mutex_lock failed");
	  kill (getppid (), SIGKILL);
	  exit (1);
	}

      p->var = 0;

#ifndef USE_COND_SIGNAL
      if (pthread_cond_broadcast (&p->c) != 0)
	{
	  puts ("child: cond_broadcast failed");
	  kill (getppid (), SIGKILL);
	  exit (1);
	}
#else
      if (pthread_cond_signal (&p->c) != 0)
	{
	  puts ("child: cond_signal failed");
	  kill (getppid (), SIGKILL);
	  exit (1);
	}
#endif

      if (pthread_mutex_unlock (&p->m) != 0)
	{
	  puts ("child: mutex_unlock failed");
	  kill (getppid (), SIGKILL);
	  exit (1);
	}

      exit (0);
    }

  do
    pthread_cond_wait (&p->c, &p->m);
  while (p->var != 0);

  if (TEMP_FAILURE_RETRY (waitpid (pid, NULL, 0)) != pid)
    {
      printf ("waitpid failed: %m\n");
      kill (pid, SIGKILL);
      return 1;
    }

  return 0;
}
Example #24
0
File: 2-1.c Project: jiezh/h5vcc
int main (int argc, char * argv[])
{
	int ret;
	
	pthread_mutexattr_t ma;
	pthread_condattr_t ca;
	
	int scenar;
	
	pid_t p_child[NTHREADS];
	pthread_t t_child[NTHREADS];
	int ch;
	pid_t pid;
	int status;
	
	testdata_t alternativ;
	
	output_init();
	
/**********
 * Allocate space for the testdata structure
 */
	/* Cannot mmap a file, we use an alternative method */
	td = &alternativ;
	#if VERBOSE > 0
	output("Testdata allocated in the process memory.\n");
	#endif
	
	/* Do the test for each test scenario */
	for (scenar=0; scenar < NSCENAR; scenar++)
	{
		/* set / reset everything */
		ret = pthread_mutexattr_init(&ma);
		if (ret != 0)  {  UNRESOLVED(ret, "[parent] Unable to initialize the mutex attribute object");  }
		ret = pthread_condattr_init(&ca);
		if (ret != 0)  {  UNRESOLVED(ret, "[parent] Unable to initialize the cond attribute object");  }
		
		#ifndef WITHOUT_XOPEN
		/* Set the mutex type */
		ret = pthread_mutexattr_settype(&ma, scenarii[scenar].m_type);
		if (ret != 0)  {  UNRESOLVED(ret, "[parent] Unable to set mutex type");  }
		#endif
		
	/* Proceed to testing */
		/* initialize the mutex */
		ret = pthread_mutex_init(&td->mtx1, &ma);
		if (ret != 0)  {  UNRESOLVED(ret, "Mutex init failed");  }
		
		ret = pthread_mutex_init(&td->mtx2, &ma);
		if (ret != 0)  {  UNRESOLVED(ret, "Mutex init failed");  }
		
		ret = pthread_mutex_lock(&td->mtx2);
		if (ret != 0)  {  UNRESOLVED(ret, "Mutex lock failed");  }
		
		/* initialize the condvar */
		ret = pthread_cond_init(&td->cnd, &ca);
		if (ret != 0)  {  UNRESOLVED(ret, "Cond init failed");  }
		
		#if VERBOSE > 2
		output("[parent] Starting 1st pass of test %s\n", scenarii[scenar].descr);
		#endif
		
		td->count1=0;
		td->count2=0;
		td->predicate1=0;
		td->predicate2=0;
		
		/* Create all the children */
		for (ch=0; ch < NTHREADS; ch++)
		{
			ret = pthread_create(&t_child[ch], NULL, child, NULL);
			if (ret != 0)  {  UNRESOLVED(ret, "Failed to create a child thread");  }
		}
		#if VERBOSE > 4
		output("[parent] All children are running\n");
		#endif
		
		/* Make sure all children are waiting */
		ret = pthread_mutex_lock(&td->mtx1);
		if (ret != 0) {  UNRESOLVED_KILLALL(ret, "Failed to lock mutex", p_child);  }
		ch = td->count1;
		while (ch < NTHREADS)
		{
			ret = pthread_mutex_unlock(&td->mtx1);
			if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to unlock mutex",p_child);  }
			sched_yield();
			ret = pthread_mutex_lock(&td->mtx1);
			if (ret != 0) {  UNRESOLVED_KILLALL(ret, "Failed to lock mutex",p_child);  }
			ch = td->count1;
		}
		
		#if VERBOSE > 4
		output("[parent] All children are waiting\n");
		#endif
		
		/* Wakeup the children */
		td->predicate1=1;
		ret = pthread_cond_broadcast(&td->cnd);
		if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to signal the condition.", p_child);  }
		
		ret = pthread_mutex_unlock(&td->mtx1);
		if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to unlock mutex",p_child);  }

		/* Destroy the condvar (this must be safe) */
		do {
			ret = pthread_cond_destroy(&td->cnd);
			usleep(10 * 1000);
		} while (ret == EBUSY);

		if (ret != 0)  {  FAILED_KILLALL("Unable to destroy the cond while no thread is blocked inside", p_child);  }
		
		/* Reuse the cond memory */
		memset(&td->cnd, 0xFF, sizeof(pthread_cond_t));
		
		#if VERBOSE > 4
		output("[parent] Condition was broadcasted, and condvar destroyed.\n");
		#endif
		
		/* Make sure all children have exited the first wait */
		ret = pthread_mutex_lock(&td->mtx1);
		if (ret != 0) {  UNRESOLVED_KILLALL(ret, "Failed to lock mutex",p_child);  }
		ch = td->count1;
		while (ch > 0)
		{
			ret = pthread_mutex_unlock(&td->mtx1);
			if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to unlock mutex",p_child);  }
			sched_yield();
			ret = pthread_mutex_lock(&td->mtx1);
			if (ret != 0) {  UNRESOLVED_KILLALL(ret, "Failed to lock mutex",p_child);  }
			ch = td->count1;
		}

		ret = pthread_mutex_unlock(&td->mtx1);
		if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to unlock mutex",p_child);  }
		
	/* Go toward the 2nd pass */
		/* Now, all children are waiting to lock the 2nd mutex, which we own here. */
		/* reinitialize the condvar */
		ret = pthread_cond_init(&td->cnd, &ca);
		if (ret != 0)  {  UNRESOLVED(ret, "Cond init failed");  }
		
		#if VERBOSE > 2
		output("[parent] Starting 2nd pass of test %s\n", scenarii[scenar].descr);
		#endif
		
		/* Make sure all children are waiting */
		ch = td->count2;
		while (ch < NTHREADS)
		{
			ret = pthread_mutex_unlock(&td->mtx2);
			if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to unlock mutex",p_child);  }
			sched_yield();
			ret = pthread_mutex_lock(&td->mtx2);
			if (ret != 0) {  UNRESOLVED_KILLALL(ret, "Failed to lock mutex",p_child);  }
			ch = td->count2;
		}
		
		#if VERBOSE > 4
		output("[parent] All children are waiting\n");
		#endif
		
		/* Wakeup the children */
		td->predicate2=1;
		ret = pthread_cond_broadcast(&td->cnd);
		if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to signal the condition.", p_child);  }
		
		/* Allow the children to terminate */
		ret = pthread_mutex_unlock(&td->mtx2);
		if (ret != 0)  {  UNRESOLVED_KILLALL(ret, "Failed to unlock mutex",p_child);  }

		/* Destroy the condvar (this must be safe) */
		ret = pthread_cond_destroy(&td->cnd);
		if (ret != 0)  {  FAILED_KILLALL("Unable to destroy the cond while no thread is blocked inside", p_child);  }
		
		/* Reuse the cond memory */
		memset(&td->cnd, 0x00, sizeof(pthread_cond_t));
		
		#if VERBOSE > 4
		output("[parent] Condition was broadcasted, and condvar destroyed.\n");
		#endif
		
		#if VERBOSE > 4
		output("[parent] Joining the children\n");
		#endif

		/* join the children */
		for (ch=(NTHREADS - 1); ch >= 0 ; ch--)
		{
			ret = pthread_join(t_child[ch], NULL);
			if (ret != 0)  {  UNRESOLVED(ret, "Failed to join a child thread");  }
		}
		if (ret != 0)
		{
			output_fini();
			exit(ret);
		}
		#if VERBOSE > 4
		output("[parent] All children terminated\n");
		#endif
		
		/* Destroy the datas */
#if 0
// This seems wrong, as it was just destroyed and zeroed out above.
		ret = pthread_cond_destroy(&td->cnd);
		if (ret != 0)  {  UNRESOLVED(ret, "Failed to destroy the condvar");  }
#endif
		
		ret = pthread_mutex_destroy(&td->mtx1);
		if (ret != 0)  {  UNRESOLVED(ret, "Failed to destroy the mutex");  }

		ret = pthread_mutex_destroy(&td->mtx2);
		if (ret != 0)  {  UNRESOLVED(ret, "Failed to destroy the mutex");  }

		/* Destroy the attributes */
		ret = pthread_condattr_destroy(&ca);
		if (ret != 0)  {  UNRESOLVED(ret, "Failed to destroy the cond var attribute object");  }
		
		ret = pthread_mutexattr_destroy(&ma);
		if (ret != 0)  {  UNRESOLVED(ret, "Failed to destroy the mutex attribute object");  }
		
		
	}
	
	/* exit */
	PASSED;
	return 0;
}
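What this test exercises is the POSIX guarantee that a condition variable may be destroyed as soon as no thread is blocked on it, even immediately after the broadcast that woke the waiters. The minimal pattern, stripped of the scaffolding above (mtx, cnd, and predicate stand in for td->mtx1, td->cnd, and td->predicate1):

/* Safe destruction after broadcast: once the predicate is true,
   waiters re-check it and never block on cnd again. */
pthread_mutex_lock(&mtx);
predicate = 1;
pthread_cond_broadcast(&cnd);
pthread_mutex_unlock(&mtx);

pthread_cond_destroy(&cnd);   /* legal: no thread can still be blocked on cnd */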
Example #25
0
void *dictionary_reader(void *thread_arg) { // password reader from dictionary
	FILE *fp;
	char *line = NULL;
	size_t len = 0;
	ssize_t read = -1;
	struct timespec ts;
	struct thread_data *t_data = (struct thread_data *) thread_arg;
	if (t_data->verbose > 1) printf("Reader(%ld): starting!\n", syscall(SYS_gettid));
	fp = fopen(shared_data.dict_path,"r"); //open the dictionary file
	if (!fp) // check that the file was opened correctly
		printf("\nReader: Failed to open dictionary. Path: %s\n", shared_data.dict_path);
	else
		read = getline(&line, &len, fp);
	pthread_rwlock_wrlock(&c_proc_lock);
	pthread_rwlock_wrlock(&c_read_lock);
	shared_data.pass_read_c = shared_data.pass_proced_c = 0;
	pthread_rwlock_unlock(&c_read_lock);
	pthread_rwlock_unlock(&c_proc_lock);
	pthread_rwlock_rdlock(&pass_lock);
	while (read > -1 && shared_data.password == NULL) { //read a line from the dictionary
		pthread_rwlock_unlock(&pass_lock);			
		if (t_data->verbose > 2) printf("Reader: %s", line);
		pthread_rwlock_wrlock(&c_read_lock);
		shared_data.pass_read_c++;
		pthread_rwlock_unlock(&c_read_lock);
		pthread_mutex_lock(&mtx_reader);
		while (shared_data.line != NULL) {
			pthread_cond_signal(&cnd_p_pass);		
			pthread_cond_wait(&cnd_r_pass, &mtx_reader);
			pthread_rwlock_rdlock(&pass_lock);
			if (shared_data.password != NULL) {
				pthread_mutex_unlock(&mtx_reader);	
				goto exit;
			}
			pthread_rwlock_unlock(&pass_lock);		
		}
		shared_data.line = line;
		shared_data.read = read;
		line = NULL;
		pthread_cond_signal(&cnd_p_pass);	//WTF
		pthread_mutex_unlock(&mtx_reader);
		len = 0;
		read = getline(&line, &len, fp);
		pthread_rwlock_rdlock(&pass_lock);
	}
	exit:
	pthread_rwlock_rdlock(&c_proc_lock);
	pthread_rwlock_rdlock(&c_read_lock);
	while (shared_data.password == NULL && shared_data.pass_read_c != shared_data.pass_proced_c) {
		if (t_data->verbose > 2) printf("Reader: waiting for comparers!\n");
		pthread_rwlock_unlock(&c_read_lock);
		pthread_rwlock_unlock(&c_proc_lock);		
		pthread_rwlock_unlock(&pass_lock);
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;
		pthread_mutex_lock(&mtx_reader);
		pthread_cond_broadcast(&cnd_p_pass);
		pthread_cond_timedwait(&cnd_r_pass, &mtx_reader, &ts);
		pthread_mutex_unlock(&mtx_reader);
		pthread_rwlock_rdlock(&pass_lock);
		pthread_rwlock_rdlock(&c_proc_lock);
		pthread_rwlock_rdlock(&c_read_lock);
	}
	pthread_rwlock_unlock(&c_read_lock);
	pthread_rwlock_unlock(&c_proc_lock);
	pthread_rwlock_unlock(&pass_lock);	
	if (fp != NULL) fclose(fp);
	free(line);
	if (read == -1) {
		pthread_rwlock_wrlock(&pass_lock);
		shared_data.password = "";
		pthread_rwlock_unlock(&pass_lock);
	}
	pthread_mutex_lock(&mtx_comparer);
	pthread_cond_broadcast(&done);
	pthread_mutex_unlock(&mtx_comparer);
	if (t_data->verbose > 1) printf("Reader: exiting!\n");	
	pthread_exit(NULL);
}
Example #26
0
	void wakeup()
	{
		pthread_mutex_lock( &m_mutex );
		pthread_cond_broadcast( &m_condition );
		pthread_mutex_unlock( &m_mutex );
	}
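Broadcasting while holding m_mutex guarantees that a thread which has just checked its predicate but not yet blocked cannot miss the wakeup. The matching wait side, sketched under the assumption of a boolean member m_ready guarded by the same mutex (not part of the original snippet):

	/* Hedged sketch of the matching wait. */
	void wait()
	{
		pthread_mutex_lock( &m_mutex );
		while( !m_ready )
			pthread_cond_wait( &m_condition, &m_mutex );
		pthread_mutex_unlock( &m_mutex );
	}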
Example #27
0
//Add to the priority queue. Due to threading, it must wait for all popping functions to end to ensure that the lowest priority item is always the one removed.
//NOTE: it is probably true that if you did not care so much about always popping the top-priority item, you could allow add and pop operations to run concurrently for increased performance.
queue_t *addToQueue(void* data, int priority, queue_t *queue, int tid)
{
    int child;
    int parent;
    int i;
    //pthread_mutex_t* parentLock;
    //pthread_mutex_t* childLock;
    queue_item_t* heap;

    //check if the item about to be added will fit
    pthread_mutex_lock(&queue->itemCountLock);

    //check for popper exclusion
    pthread_mutex_lock(&queue->accessorLock);
    while(queue->accessors < 0)
    {
        pthread_cond_wait(&queue->accessorCond, &queue->accessorLock);
    }
    queue->accessors++;
    pthread_mutex_unlock(&queue->accessorLock);


    //assign child location and give lock
    child = queue->itemCount++;
    //childLock = &queue->heap[child].itemLock;
    pthread_mutex_lock(&queue->heap[child].itemLock);                      //because adding and popping is mutually exclusive, locking the itemCount and giving the child it's lock before unlocking ensures no children get created before this child is locke


    if (queue->itemCount >= queue->heapSize)
    {
        //this process waits for accessors to = 0 (all other threads leave)
        pthread_mutex_lock(&queue->accessorLock);
        while(queue->accessors != 1)
        {
            pthread_cond_wait(&queue->accessorCond, &queue->accessorLock);
        }
        pthread_mutex_unlock(&queue->accessorLock);

        if((heap = realloc(queue->heap, sizeof(queue_item_t) * (queue->heapSize * 2 + 1))))        //could do this math faster
        {   //success
            queue->heap = heap;

            //initialize mutexes
            for (i = queue->heapSize, queue->heapSize = queue->heapSize * 2 + 1; i < queue->heapSize; i++)
            {
                queue->heap[i].itemLock = (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER;
            }

            pthread_cond_broadcast(&queue->resizeCond);
        }
        else
        {   //failure: roll back the reservation and release every lock we hold
            //(note: `heap` is NULL here, so the child's lock must be reached
            //through queue->heap)
            queue->itemCount--;
            pthread_mutex_unlock(&queue->heap[child].itemLock);
            pthread_mutex_unlock(&queue->itemCountLock);

            //remove itself from the accessor count
            pthread_mutex_lock(&queue->accessorLock);
            queue->accessors--;
            pthread_mutex_unlock(&queue->accessorLock);

            pthread_cond_signal(&queue->accessorCond);
            return NULL;
        }
    }
    else
    {
        heap = queue->heap;
    }
    pthread_mutex_unlock(&queue->itemCountLock);

    //create child
    heap[child].data = data;
    heap[child].priority = priority;

    while (child > 0)
    {
        parent = (child + child%2 - 2)/2;
        //parentLock = &heap[parent].itemLock;

        pthread_mutex_lock(&heap[parent].itemLock);

        if(heap[parent].priority > heap[child].priority)
        {
            swap(heap, parent, child);
            pthread_mutex_unlock(&heap[child].itemLock);
            child = parent;

        }
        else
        {
            pthread_mutex_unlock(&heap[parent].itemLock);
            break;
        }
    }
    pthread_mutex_unlock(&queue->heap[child].itemLock);

    //remove itself from the accessor Count
    pthread_mutex_lock(&queue->accessorLock);
    queue->accessors--;
    pthread_mutex_unlock(&queue->accessorLock);

    //done; signal waiting threads. NOTE: this might need to be a broadcast
    pthread_cond_signal(&queue->accessorCond);
    return queue;
}
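Judging by the `accessors < 0` check at the top of addToQueue(), the accessors counter is positive while adders run and negative while pops are in progress. A hypothetical popper prologue/epilogue consistent with that convention (the real pop function is not shown in this example):

/* Hedged sketch of the assumed popper-side protocol. */
pthread_mutex_lock(&queue->accessorLock);
while (queue->accessors > 0)                        /* wait for adders to drain */
    pthread_cond_wait(&queue->accessorCond, &queue->accessorLock);
queue->accessors--;                                 /* negative while pops run */
pthread_mutex_unlock(&queue->accessorLock);

/* ... remove the heap root and sift down ... */

pthread_mutex_lock(&queue->accessorLock);
queue->accessors++;
pthread_mutex_unlock(&queue->accessorLock);
pthread_cond_broadcast(&queue->accessorCond);       /* adders may be waiting */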
Example #28
0
void reliableRecvUDP(void *args)
{

/* static LFR = 0, LFA = 0;
*  When called, this function sends back UP TO n_bytes of data to the calling program.
*  It maintains a window of m messages and keeps filling it as the server sends datagrams.
*  Each message in the window can be up to x bytes.
*  If the requested n_bytes exceeds what the window can hold, the function returns m*x bytes,
*  so it has to be called in a loop until all the data has been received.
*  It peeks at the messages on the socket, finds the in-order LFR, and sends an ACK to the server.
*  It then reads the data, adding messages with sequence numbers in the range LFR - LFA and discarding the rest.
*  It keeps looping until the whole window is filled or n_bytes have been read.
*  If the connection is reset midway, it returns -1; otherwise it returns bytes_read.
*  reliableRecvUDP makes sure the returned bytes are in order - FLOW CONTROL.
*  reliableRecvUDP also sends the ACK packets to the server.
*/

	recvThreadArgs *threadArgs = (recvThreadArgs *)args;

	pthread_mutex_t *mtx;
	pthread_cond_t *cnd;
	peerInfo peer;
	fd_set rset;
	int maxfdp;
	int rc;

	int flags;
	int bytes_recvd,packets_recvd,packets_drpd;



	//memcpy(&mtx,&threadArgs->mutex,sizeof(pthread_mutex_t));
	//memcpy(&cnd,&threadArgs->cond,sizeof(pthread_cond_t));
	
	mtx = &threadArgs->mutex;
	cnd = &threadArgs->cond;

	memcpy(&peer,&threadArgs->peer,sizeof(peerInfo));

	struct sockaddr_in sa;
	memcpy(&sa,&peer.saddr,sizeof(sa));

	//printf("R-RECV : SERVER INFO \n");
	//printf("\tSERV SOCK FD : %d\n\tSERV PORT  : %d\n",peer.sockfd,ntohs(sa.sin_port));


	//printf("peer.sockfd : %d\n",peer.sockfd);


	tcpPacket tpkt;
	int closed = 1;
	pthread_mutex_lock(mtx);
	do
	{
		FD_ZERO(&rset);
		FD_SET(peer.sockfd,&rset);
		maxfdp = peer.sockfd + 1;
		//printf("R-RECV : Waiting in select\n");
		rc = select(maxfdp,&rset,NULL,NULL,NULL);

		//printf("R-RECV : SELECT RETURNED : %d\n",rc);

		if(rc == 0)
			printf("TIME OUT\n");
		else if (rc < 0)
		{
			printf("ERROR : BREAKING OUT\n");
			break;
		}
		else
		{
			if(FD_ISSET(peer.sockfd,&rset))
			{
				//peek at the message...
				flags = 0;
				//printf("R-RECV : GOING TO READ DATA\n");
				//printf("\tCURRENT WINDOW WRITE POINTER : %d\n",client_wndw.CWP);
				//printf("\tCURRENT WINDOW READ POINTER  : %d\n",client_wndw.CRP);
				if(client_wndw.CWP <= client_wndw.wndw_size)
				{

					//bytes_recvd = readData(peer,client_wndw.windElems[client_wndw.CWP],sizeof(tcpPacket),flags);
					bytes_recvd = readData(peer,&tpkt,sizeof(tcpPacket),flags);

					//if(tcpPacket.header.seqNo == client_wndw.LFR+1)	

					memcpy((client_wndw.windElems+client_wndw.CWP),&tpkt,sizeof(tcpPacket));
					//printf("%d, %lld\n",client_wndw.CWP,tpkt.header.seqNo);
					
					client_wndw.CWP++;
					//printf("R-RECV : Data recvd from server of size : %d\n",bytes_recvd);
					printf("RECVD : P-SEQ_NO  : %lld",client_wndw.windElems[client_wndw.CWP-1].header.seqNo);
					//printf("\tP-MSG_LEN : %d",client_wndw.windElems[client_wndw.CWP-1].header.msglen);
					//printf("\tP-MSG_TYPE: %d\n",client_wndw.windElems[client_wndw.CWP-1].header.type);
					file_transfer_status = client_wndw.windElems[client_wndw.CWP-1].header.type;


					client_wndw.LFR = client_wndw.windElems[client_wndw.CWP-1].header.seqNo;
					client_wndw.LFA = client_wndw.LFR+1;
					client_wndw.cwndw_size = client_wndw.wndw_size - client_wndw.CWP;
					tcpHeader ackPacket;
					ackPacket.seqNo = client_wndw.LFA-1;
					ackPacket.ackNo = client_wndw.LFA;
					ackPacket.type = ACK;
					ackPacket.msglen = 0;
					ackPacket.cwndw_size = client_wndw.cwndw_size-1;
					flags = 0;
					if ( file_transfer_status == 104)
						client_wndw.CWP = client_wndw.wndw_size;
					if(client_wndw.CWP == client_wndw.wndw_size)
					{
					#if 1
						pthread_cond_broadcast(cnd);
						//pthread_cond_signal(cnd);
		        			pthread_mutex_unlock(mtx);

						printf("R_READ : SIGNALLED TO R_PRINT\n");
						sleep(3);	
						pthread_mutex_lock(mtx);
						printf("R_READ : GOT THE CONTROL BACK\n");

						while(client_wndw.CRP==0)
							pthread_cond_wait(cnd,mtx);

					#endif
						//send an ack here...
						client_wndw.CWP = 0;
						client_wndw.CRP = -1;
                                       		ackPacket.cwndw_size = 10;
                                       		flags = 0;
						//printf("after reseting client_wndw.CRP : %d\n",client_wndw.CRP);
					}
					printf("\tSENT : P-ACK_NO : %lld\n",ackPacket.ackNo);
					sendData(peer,&ackPacket,sizeof(tcpHeader),flags);
					
						
				}
				if(file_transfer_status == 104)
				{
					printf("-----------------------PASSIVE CLOSE---------------------\n");
					passiveClose(peer);
					file_transfer_status = 1;
				}
				
			}
		}


	}while(file_transfer_status!=closed);

	printf("FILE TRANSFER COMPLETE...\n");
	//pthread_mutex_unlock(&mtx);
	pthread_mutex_unlock(mtx);
	//pthread_mutex_lock(&mtx);
	//pthread_cond_wait(&cnd,&mtx);
	
	


}
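The ACK built above is cumulative: seqNo echoes the last frame received in order (LFR), ackNo advertises the next expected sequence number (LFA), and cwndw_size advertises the remaining window space. Reduced to its essentials, under the tcpHeader layout used above:

/* Sketch of the cumulative-ACK construction. */
tcpHeader ack;
ack.seqNo      = client_wndw.LFR;        /* last frame received in order */
ack.ackNo      = client_wndw.LFR + 1;    /* next sequence number expected (LFA) */
ack.type       = ACK;
ack.msglen     = 0;                      /* header-only packet */
ack.cwndw_size = client_wndw.cwndw_size; /* advertised remaining window */
sendData(peer, &ack, sizeof(tcpHeader), 0);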
Example #29
0
/***********************************************************************
return:
0 if everything worked;
1 if an error occurred (the failing call is reported on stderr).
*/
int ThreadPool::Destroy( int finish )
{
    int             rtn;
    ThreadPoolWork *cur_nodep;

    if( (rtn = pthread_mutex_lock(&queueLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_lock %s", strerror(rtn) );
        return( 1 );
    }

    // Is a shutdown already in progress?
    if( queueClosed || shutdown )
    {
        if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
        {
            fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
            return( 1 );
        }
        return( 0 );
    }

    queueClosed = 1;

    // If the finish flag is set, wait for workers to drain queue.
    if( finish )
    {
        while( currentQueueSize )
        {
            if( (rtn = pthread_cond_wait(&queueEmpty, &queueLock)) != 0 )
            {
                fprintf( stderr, "pthread_cond_wait %s", strerror(rtn) );
                return( 1 );
            }
        }
    }

    shutdown = 1;

    if( (rtn = pthread_mutex_unlock(&queueLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_unlock %s", strerror(rtn) );
        return( 1 );
    }

    // Wake up any workers so they recheck shutdown flag.
    if( (rtn = pthread_cond_broadcast(&queueNotEmpty)) != 0 )
    {
        fprintf( stderr, "pthread_cond_broadcast %s", strerror(rtn) );
        return( 1 );
    }
    if( (rtn = pthread_cond_broadcast(&queueNotFull)) != 0 )
    {
        fprintf( stderr, "pthread_cond_broadcast %s", strerror(rtn) );
        return( 1 );
    }

    // Wait for workers to exit.
    for(int i=0; i < numberThreads ;i++)
    {
        if( (rtn = pthread_join(threads[i], NULL)) != 0 )
        {
            fprintf( stderr, "pthread_join %s", strerror(rtn) );
            return( 1 );
        }
    }

    // Now free pool structures.
    free( threads );
    while( queueHead != NULL )
    {
        cur_nodep = queueHead;          // free the current node, not its successor
        queueHead = queueHead->next;
        free( cur_nodep );
    }

    if( (rtn = pthread_mutex_destroy(&queueLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_destroy %s", strerror(rtn) );
    }
    /*
    if( (rtn = pthread_mutex_destroy(&totalTimeLock)) != 0 )
    {
        fprintf( stderr, "pthread_mutex_destroy %s", strerror(rtn) );
    }
    */
    if( (rtn = pthread_cond_destroy(&queueNotEmpty)) != 0 )
    {
        fprintf( stderr, "pthread_cond_destroy %s", strerror(rtn) );
    }
    if( (rtn = pthread_cond_destroy(&queueNotFull)) != 0 )
    {
        fprintf( stderr, "pthread_cond_destroy %s", strerror(rtn) );
    }
    if( (rtn = pthread_cond_destroy(&queueEmpty)) != 0 )
    {
        fprintf( stderr, "pthread_cond_destroy %s", strerror(rtn) );
    }

//fprintf( stderr, "Thread wait time: %e\n", GetTime() );

    delete this;

    return( 0 );
}
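For the two broadcasts in Destroy() to shut workers down cleanly, the worker loop must treat the shutdown flag as part of its wait predicate. A sketch of the assumed worker side (the actual worker function is not part of this example):

// Hedged sketch of the presumed worker loop this Destroy() pairs with.
pthread_mutex_lock( &queueLock );
while( currentQueueSize == 0 && !shutdown )
    pthread_cond_wait( &queueNotEmpty, &queueLock );

if( shutdown )
{
    pthread_mutex_unlock( &queueLock );
    pthread_exit( NULL );               // flag re-checked after the broadcast
}
// ... dequeue work; signal queueNotFull / queueEmpty as appropriate ...
pthread_mutex_unlock( &queueLock );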
Example #30
0
void ODCondition::Broadcast()
{
   pthread_cond_broadcast(condition);
}