Example #1
void local_actor::quit(uint32_t reason) {
  CAF_LOG_TRACE("reason = " << reason << ", class "
                            << detail::demangle(typeid(*this)));
  planned_exit_reason(reason);
  if (is_blocking()) {
    throw actor_exited(reason);
  }
}
Example #2
TEST(mock_MeterS0, basic_blocking_no_send_zero)
{
	mock_S0hwif *hwif = new mock_S0hwif();
	std::list<Option> opt;
	opt.push_back(Option("send_zero", false));

	EXPECT_CALL(*hwif, _open()).Times(1).WillRepeatedly(Return(true));
	EXPECT_CALL(*hwif, _close()).Times(1).WillOnce(Return(true));
	EXPECT_CALL(*hwif, is_blocking()).Times(2).WillRepeatedly(Return(true));
	MeterS0 m(opt, hwif);
	ASSERT_EQ(SUCCESS, m.open());
	m.close(); // this might be called and should not cause problems
}
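For reference, a minimal sketch of what the mock_S0hwif class used in these tests might look like. This is an assumption: the real hwif interface that MeterS0 expects (here called S0hwif) may declare additional methods and different signatures; only the methods exercised by the EXPECT_CALLs above are mocked.

#include <gmock/gmock.h>

// Hypothetical mock; S0hwif is assumed to be the abstract hwif interface
// consumed by MeterS0, with these methods declared virtual.
class mock_S0hwif : public S0hwif {
public:
	MOCK_METHOD0(_open, bool());
	MOCK_METHOD0(_close, bool());
	MOCK_METHOD0(is_blocking, bool());
	MOCK_METHOD0(status, int());
};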
Example #3
int
_ofp_select(int nfds, ofp_fd_set *readfds, ofp_fd_set *writefds,
	    ofp_fd_set *exceptfds, struct ofp_timeval *timeout,
	    int (*sleeper)(void *channel, odp_rwlock_t *mtx, int priority,
			   const char *wmesg, uint32_t timeout))
{
	(void)writefds;
	(void)exceptfds;

	if (is_blocking(timeout) && none_of_ready(nfds, readfds, is_readable))
		sleeper(NULL, NULL, 0, "select", to_usec(timeout));

	return set_ready_fds(nfds, readfds, is_readable);
}
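The is_blocking() helper used above is not part of the snippet. A plausible minimal sketch, assuming the usual select() timeout semantics (a NULL timeout waits indefinitely and a non-zero timeout allows waiting, so only an all-zero timeout is a pure poll):

/* Sketch only: field names are assumed to mirror struct timeval. */
static int is_blocking(const struct ofp_timeval *timeout)
{
	return timeout == NULL ||
	       timeout->tv_sec != 0 || timeout->tv_usec != 0;
}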
Example #4
TEST(mock_MeterS0, basic_non_blocking_read)
{
	mock_S0hwif *hwif = new mock_S0hwif();
	std::list<Option> opt;
	opt.push_back(Option("send_zero", true));

	EXPECT_CALL(*hwif, _open()).Times(1).WillRepeatedly(Return(true));
	EXPECT_CALL(*hwif, _close()).Times(1).WillOnce(Return(true));
	EXPECT_CALL(*hwif, is_blocking()).Times(2).WillRepeatedly(Return(false));
	EXPECT_CALL(*hwif, status()).Times(AtLeast(1)).WillRepeatedly(Return(0));
	MeterS0 m(opt, hwif);
	ASSERT_EQ(SUCCESS, m.open());
	std::vector<Reading> rds(4);
	ASSERT_EQ(m.read(rds, 4), 4);

	m.close(); // this might be called and should not cause problems
}
Example #5
JOB* create_job(const char* command)
{
	JOB* job = NULL;
	char* cleancmd, ***iter;

	if (command == NULL)
		return NULL;

	job = (JOB*) malloc(sizeof(JOB));
	error(job == NULL, NULL);

	job->name = (char*) malloc(sizeof(char)*(strlen(command)+1));
	error(job->name == NULL, (free(job), NULL));
	strcpy(job->name, command);

	job->inputfd = get_io_redir_file(command, SLSH_INPUT);
	job->outputfd = get_io_redir_file(command, SLSH_OUTPUT);

	job->blocking = is_blocking(command);

	cleancmd = clean_command(command);
	error(cleancmd == NULL, (destroy_job(&job), NULL));

	job->cmd = make_cmd_array(cleancmd);
	free(cleancmd);
	error(job->cmd == NULL, (destroy_job(&job), NULL));

	job->ncmd = 0;
	iter = job->cmd;
	while (*iter != NULL)
	{
		job->ncmd++;
		iter++;
	}

	job->run_count = 0;
	
	job->pgid = 0;

	job->pid = (pid_t*) calloc(job->ncmd, sizeof(pid_t));
	error(job->pid == NULL, (destroy_job(&job), NULL));

	job->lastmodified = -1;
	return job;
}
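The error() macro used throughout create_job() is not shown. Judging from its call sites, it returns its second argument from the enclosing function when the condition holds (the second argument is sometimes a comma expression that frees resources first). A minimal reconstruction under that assumption, ignoring any logging the real macro may do:

#define error(cond, ret)		\
	do {				\
		if (cond)		\
			return (ret);	\
	} while (0)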
Example #6
void
pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category) 
{
  struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];
  struct pb_SubTimer *curr = (subtimerlist != NULL) ? subtimerlist->current : NULL;
  
  if (timers->current != pb_TimerID_NONE) {
    if (!is_async(timers->current) ) {
      if (timers->current != category) {
        if (curr != NULL) {
          pb_StopTimerAndSubTimer(&timers->timers[timers->current], &curr->timer);
        } else {
          pb_StopTimer(&timers->timers[timers->current]);
        }
      } else {
        if (curr != NULL) {
          pb_StopTimer(&curr->timer);
        }
      }
    } else {
      insert_submarker(timers, label, category);
      if (!is_async(category)) { // if switching to async too, keep driver going
        pb_StopTimer(&timers->timers[pb_TimerID_DRIVER]);
      }
    }
  }

  pb_Timestamp currentTime = get_time();

  /* The only cases we check for asynchronous task completion are
   * when an overlapping CPU operation completes, or the next 
   * segment blocks on completion of previous async operations */
  if( asyncs_outstanding(timers) && 
      (!is_async(timers->current) || is_blocking(category) ) ) {

    struct pb_async_time_marker_list * last_event = get_last_async(timers);
    /* cudaSuccess if completed */
    cudaError_t async_done = cudaEventQuery(*((cudaEvent_t *)last_event->marker));

    if(is_blocking(category)) {
      /* Async operations completed after previous CPU operations: 
       * overlapped time is the total CPU time since this set of async 
       * operations were first issued */
       
      // timer to switch to is COPY or NONE 
      // if it hasn't already finished, then just take now and use that as the elapsed time in OVERLAP
      // anything happening after now isn't OVERLAP because everything is being stopped to wait for synchronization
      // it seems that the extra sync wall time isn't being recorded anywhere
      if(async_done != cudaSuccess) 
        accumulate_time(&(timers->timers[pb_TimerID_OVERLAP].elapsed), 
	                  timers->async_begin,currentTime);

      /* Wait on async operation completion */
      cudaEventSynchronize(*((cudaEvent_t *)last_event->marker));
      pb_Timestamp total_async_time = record_async_times(timers);

      /* Async operations completed before previous CPU operations: 
       * overlapped time is the total async time */
       // If it did finish, then accumulate all the async time that did happen into OVERLAP
       // the immediately preceding EventSynchronize theoretically didn't have any effect since it was already completed.
      if(async_done == cudaSuccess)
        timers->timers[pb_TimerID_OVERLAP].elapsed += total_async_time;

    } else 
    /* implies (!is_async(timers->current) && asyncs_outstanding(timers)) */
    // i.e. Current Not Async (not KERNEL/COPY_ASYNC) but there are outstanding
    // so something is deeper in stack
    if(async_done == cudaSuccess) {
      /* Async operations completed before previous CPU operations: 
       * overlapped time is the total async time */
      timers->timers[pb_TimerID_OVERLAP].elapsed += record_async_times(timers);
    }   
    // else, this isn't blocking, so just check the next time around
  }
  
  subtimerlist = timers->sub_timer_list[category];
  struct pb_SubTimer *subtimer = NULL;
  
  if (label != NULL) {  
    subtimer = subtimerlist->subtimer_list;
    while (subtimer != NULL) {
      if (strcmp(subtimer->label, label) == 0) {
        break;
      } else {
        subtimer = subtimer->next;
      }
    }
  }

  /* Start the new timer */
  if (category != pb_TimerID_NONE) {
    if(!is_async(category)) {
    
      if (subtimerlist != NULL) {
        subtimerlist->current = subtimer;
      }
    
      if (category != timers->current && subtimer != NULL) {
        pb_StartTimerAndSubTimer(&timers->timers[category], &subtimer->timer);
      } else if (subtimer != NULL) {
        pb_StartTimer(&subtimer->timer);
      } else {
        pb_StartTimer(&timers->timers[category]);
      }            
    } else {
      if (subtimerlist != NULL) {
        subtimerlist->current = subtimer;
      }
    
      // toSwitchTo Is Async (KERNEL/COPY_ASYNC)
      if (!asyncs_outstanding(timers)) {
        /* No asyncs outstanding, insert a fresh async marker */
        insert_submarker(timers, label, category);
        timers->async_begin = currentTime;
      } else if(!is_async(timers->current)) {
        /* Previous asyncs still in flight, but a previous SwitchTo
         * already marked the end of the most recent async operation, 
         * so we can rename that marker as the beginning of this async 
         * operation */
                  
        struct pb_async_time_marker_list * last_event = get_last_async(timers);
        last_event->timerID = category;
        last_event->label = label;
      } // else, marker for switchToThis was already inserted
      
      //toSwitchto is already asynchronous, but if current/prev state is async too, then DRIVER is already running
      if (!is_async(timers->current)) {
        pb_StartTimer(&timers->timers[pb_TimerID_DRIVER]);
      }
    }
  }
  
  timers->current = category;  
}
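The is_async() and is_blocking() predicates used by the timer-switching code above are not shown. Based on the inline comments (async categories are KERNEL/COPY_ASYNC; blocking categories are COPY or NONE), they are presumably small classifiers along these lines, though the actual Parboil helpers may be implemented differently:

/* Sketch inferred from the comments above. */
static int is_async(enum pb_TimerID timer)
{
  return timer == pb_TimerID_KERNEL || timer == pb_TimerID_COPY_ASYNC;
}

static int is_blocking(enum pb_TimerID timer)
{
  return timer == pb_TimerID_COPY || timer == pb_TimerID_NONE;
}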
Example #7
void
pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer)
{
  /* Stop the currently running timer */
  if (timers->current != pb_TimerID_NONE) {
    struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];
    struct pb_SubTimer *currSubTimer = (subtimerlist != NULL) ? subtimerlist->current : NULL;
  
    if (!is_async(timers->current) ) {
      if (timers->current != timer) {
        if (currSubTimer != NULL) {
          pb_StopTimerAndSubTimer(&timers->timers[timers->current], &currSubTimer->timer);
        } else {
          pb_StopTimer(&timers->timers[timers->current]);
        }
      } else {
        if (currSubTimer != NULL) {
          pb_StopTimer(&currSubTimer->timer);
        }
      }
    } else {
      insert_marker(timers, timer);
      if (!is_async(timer)) { // if switching to async too, keep driver going
        pb_StopTimer(&timers->timers[pb_TimerID_DRIVER]);
      }
    }
  }
  
  pb_Timestamp currentTime = get_time();

  /* The only cases we check for asynchronous task completion are
   * when an overlapping CPU operation completes, or the next 
   * segment blocks on completion of previous async operations */
  if( asyncs_outstanding(timers) && 
      (!is_async(timers->current) || is_blocking(timer) ) ) {

    struct pb_async_time_marker_list * last_event = get_last_async(timers);
    /* cudaSuccess if completed */
    cudaError_t async_done = cudaEventQuery(*((cudaEvent_t *)last_event->marker));

    if(is_blocking(timer)) {
      /* Async operations completed after previous CPU operations: 
       * overlapped time is the total CPU time since this set of async 
       * operations were first issued */
       
      // timer to switch to is COPY or NONE 
      if(async_done != cudaSuccess) 
        accumulate_time(&(timers->timers[pb_TimerID_OVERLAP].elapsed), 
	                  timers->async_begin,currentTime);

      /* Wait on async operation completion */
      cudaEventSynchronize(*((cudaEvent_t *)last_event->marker));
      pb_Timestamp total_async_time = record_async_times(timers);

      /* Async operations completed before previous CPU operations: 
       * overlapped time is the total async time */
      if(async_done == cudaSuccess)
        timers->timers[pb_TimerID_OVERLAP].elapsed += total_async_time;

    } else 
    /* implies (!is_async(timers->current) && asyncs_outstanding(timers)) */
    // i.e. Current Not Async (not KERNEL/COPY_ASYNC) but there are outstanding
    // so something is deeper in stack
    if(async_done == cudaSuccess) {
      /* Async operations completed before previous CPU operations: 
       * overlapped time is the total async time */
      timers->timers[pb_TimerID_OVERLAP].elapsed += record_async_times(timers);
    }   
  }

  /* Start the new timer */
  if (timer != pb_TimerID_NONE) {
    if(!is_async(timer)) {
      pb_StartTimer(&timers->timers[timer]);
    } else {
      // toSwitchTo Is Async (KERNEL/COPY_ASYNC)
      if (!asyncs_outstanding(timers)) {
        /* No asyncs outstanding, insert a fresh async marker */
      
        insert_marker(timers, timer);
        timers->async_begin = currentTime;
      } else if(!is_async(timers->current)) {
        /* Previous asyncs still in flight, but a previous SwitchTo
         * already marked the end of the most recent async operation, 
         * so we can rename that marker as the beginning of this async 
         * operation */
         
        struct pb_async_time_marker_list * last_event = get_last_async(timers);
        last_event->label = NULL;
        last_event->timerID = timer;
      }
      if (!is_async(timers->current)) {
        pb_StartTimer(&timers->timers[pb_TimerID_DRIVER]);
      }
    }
  }
  timers->current = timer;

}
Example #8
void set_jvmci_compiler_thread(CompilerThread* t) {
  assert(is_blocking(), "must be");
  assert((t == NULL) != (_jvmci_compiler_thread == NULL), "must be");
  _jvmci_compiler_thread = t;
}
Example #9
int query_blocking(string dir) {
   return is_blocking(dir);
}
Example #10
blocking_actor::blocking_actor() {
  is_blocking(true);
}
Example #11
int main(int argc, char *argv[])
{
    int source_port;
    char *destination_address;
    int destination_port;
    int server_sockfd = -1, client_sockfd = -1, target_sockfd = -1, admin_sockfd = -1, admin_client_sockfd = -1;
    char *lock_filename;
    
    /* Check parameters */
    
    if(argc != 5)
    {
	print_usage();
	_exit(1);
    }
    
    /* Get parameter values */
    source_port = atoi(argv[1]);
    destination_address = strdup(argv[2]);
    destination_port = atoi(argv[3]);
    lock_filename = strdup(argv[4]);
    
    /* Create signal handlers */
    signal(SIGINT, cleanup); /* Event handler for interruption */
    signal(SIGCHLD, sigreap); /* Event handler when a child terminates */
    
    /* Create server socket */
    server_sockfd = create_server_socket(source_port);
    set_nonblock(server_sockfd);
    
    /* Create admin socket */
    admin_sockfd = create_admin_socket("/tmp/disnix-tcp-proxy.sock");
    set_nonblock(admin_sockfd);
    
    /* Main loop */ 
       
    while(TRUE)
    {
	int status;
	
	/* Create admin client socket if there is an incoming connection */
	if((admin_client_sockfd = wait_for_connection(admin_sockfd)) >= 0)
	{
	    char msg[BUFFER_SIZE];
		
	    printf("Admin connection from client\n");
		
	    sprintf(msg, "%d", num_of_connections);
	    if(send(admin_client_sockfd, msg, strlen(msg), 0) < 0)
	        fprintf(stderr, "Error sending message to admin client: %s\n", strerror(errno));
		
	    close(admin_client_sockfd);
	    admin_client_sockfd = -1;    
	}
    
	/* If we want to block do not accept any incoming client connections */
        if(is_blocking(lock_filename))
	    continue;
	    
	/* Create client if there is an incoming connection */
	if((client_sockfd = wait_for_connection(server_sockfd)) < 0)
	    continue;
	    
	/* Connect to the remote host */
	if((target_sockfd = open_remote_host(destination_address, destination_port)) < 0)
	{
	    close(client_sockfd);
	    client_sockfd = -1;
	    continue;
	}
	
	/* Fork a new process for each incoming client */
	status = fork();
	    
	if(status == 0)
	{
	    printf("Connection from client\n");
	    close(server_sockfd);
	    close(admin_sockfd);
	    do_proxy(client_sockfd, target_sockfd);
	    abort();
	}
	else if(status == -1)
	    fprintf(stderr, "Error in forking process\n");
	else
	    num_of_connections++;
	    
	/* Close the connections to the remote host and client */
	close(client_sockfd);
	client_sockfd = -1;
	close(target_sockfd);
	target_sockfd = -1;
    }
    
    return 0;
}
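The is_blocking(lock_filename) call in the main loop above is presumably a simple lock-file test: while the lock file exists, the proxy stops accepting new client connections. A minimal sketch under that assumption:

#include <unistd.h>

/* Sketch only: treat the lock file's existence as the blocking signal. */
static int is_blocking(const char *lock_filename)
{
    return access(lock_filename, F_OK) == 0;
}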
Example #12
File: mcs.c Project: pacinodev/moxi
SOCKET mcs_connect(const char *hostname, int portnum,
                   int *errno_out, bool blocking) {
    SOCKET ret = INVALID_SOCKET;
    struct addrinfo *ai   = NULL;
    struct addrinfo *next = NULL;
    struct addrinfo hints;
    char port[50];
    int error;

    if (errno_out != NULL) {
        *errno_out = -1;
    }

    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_PASSIVE;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_family = AF_UNSPEC;

    snprintf(port, sizeof(port), "%d", portnum);

    error = getaddrinfo(hostname, port, &hints, &ai);
    if (error != 0) {
#if 0
        if (error != EAI_SYSTEM) {
            /* settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL, */
            /*                                 "getaddrinfo(): %s\n", gai_strerror(error)); */
        } else {
            /* settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL, */
            /*                                 "getaddrinfo(): %s\n", strerror(error)); */
        }
#endif
        return INVALID_SOCKET;
    }

    for (next = ai; next; next = next->ai_next) {
        SOCKET sock = socket(next->ai_family, next->ai_socktype, next->ai_protocol);
        if (sock == INVALID_SOCKET) {
            /* settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL, */
            /*                                 "Failed to create socket: %s\n", */
            /*                                 strerror(errno)); */
            continue;
        }

        /* If the caller wants non-blocking, set the sock options */
        /* now so even the connect() becomes non-blocking. */

        if (!blocking && (mcs_set_sock_opt(sock) != MCS_SUCCESS)) {
            closesocket(sock);
            continue;
        }

        if (connect(sock, next->ai_addr, (socklen_t)next->ai_addrlen) == SOCKET_ERROR) {
#ifdef WIN32
            DWORD errno_last = WSAGetLastError();
#else
            int errno_last = errno;
#endif
            if (errno_out != NULL) {
                *errno_out = errno_last;
            }

            if (!blocking && (is_in_progress(errno_last) ||
                              is_blocking(errno_last))) {
                ret = sock;
                break;
            }

            /* settings.extensions.logger->log(EXTENSION_LOG_WARNING, NULL, */
            /*                                 "Failed to connect socket: %s\n", */
            /*                                 strerror(errno)); */
            closesocket(sock);
            continue;
        }

        if (mcs_set_sock_opt(sock) == MCS_SUCCESS) {
            ret = sock;
            break;
        }

        closesocket(sock);
    }

    freeaddrinfo(ai);

    return ret;
}
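The is_in_progress() and is_blocking() predicates used by mcs_connect() are not shown. They presumably classify the error code from a non-blocking connect(); a sketch assuming POSIX error codes (on WIN32 the corresponding WSA* codes from WSAGetLastError() would be checked instead):

#include <errno.h>
#include <stdbool.h>

/* Sketch only; the helpers in moxi may classify the codes differently. */
static bool is_in_progress(int err)
{
    return err == EINPROGRESS;                  /* connect() still underway */
}

static bool is_blocking(int err)
{
    return err == EWOULDBLOCK || err == EAGAIN; /* call would have blocked */
}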