Example 1
0
/* osi_TimedSleep
 * 
 * Arguments:
 * event - event to sleep on
 * ams --- max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 if timeout and EINTR if signalled.
 *
 * NOTE(review): the EINTR check below is commented out, so as written
 * this function always returns 0 no matter how the sleep ended; the
 * header comment above overstates the contract.
 */
static int
osi_TimedSleep(char *event, afs_int32 ams, int aintok)
{
    int code = 0;
    struct afs_event *evp;
    int ticks;

    /* Convert the millisecond argument into scheduler ticks
     * (afs_hz ticks per second). */
    ticks = (ams * afs_hz) / 1000;


    /* Look up the event record keyed by the caller-supplied address. */
    evp = afs_getevent(event);

    /* Queue this thread on the event's condition BEFORE dropping the AFS
     * global lock: assert_wait must precede thread_set_timeout and
     * thread_block, or a wakeup between the calls could be lost. */
    assert_wait((vm_offset_t) (&evp->cond), aintok);
    AFS_GUNLOCK();
    thread_set_timeout(ticks);
    thread_block();
    AFS_GLOCK();
    /*    if (current_thread()->wait_result != THREAD_AWAKENED)
     * code = EINTR; */

    /* Release the event record obtained from afs_getevent above. */
    relevent(evp);
    return code;
}
Example 2
0
/*
 * task_swapper
 *
 * Kernel daemon loop that balances task swap-out against swap-in based
 * on memory pressure.  Never returns.  Each quick pass may swap one
 * task out (pick_outtask/task_swapout) and/or one task in
 * (pick_intask/task_swapin); after MAX_LOOP quick passes it blocks on
 * &swapped_tasks, optionally with a timeout, and resets its locals.
 *
 * local_page_free_avg is a scaled (AVE_SCALE) local estimate of the
 * free-page average, adjusted optimistically after each swap by the
 * task's resident set size so repeated decisions within one burst see
 * the expected effect of earlier ones.
 */
void
task_swapper(void)
{
	task_t	outtask, intask;
	int timeout;
	int loopcnt = 0;
	boolean_t start_swapping;
	boolean_t stop_swapping;
	int local_page_free_avg;
	extern int hz;

	/* This daemon itself must never be swapped or lose its stack. */
	thread_swappable(current_act(), FALSE);
	stack_privilege(current_thread());

	spllo();

	for (;;) {
	local_page_free_avg = vm_page_free_avg;
	while (TRUE) {
#if	0
		if (task_swap_debug)
			printf("task_swapper: top of loop; cnt = %d\n",loopcnt);
#endif
		intask = pick_intask();

		/* Decide swapping direction from the paging/grab-rate
		 * averages maintained elsewhere by compute_vm_averages. */
		start_swapping = ((vm_pageout_rate_avg > swap_start_pageout_rate) ||
				  (vm_grab_rate_avg > max_grab_rate));
		stop_swapping = (vm_pageout_rate_avg < swap_stop_pageout_rate);

		/*
		 * If a lot of paging is going on, or another task should come
		 * in but memory is tight, find something to swap out and start
		 * it.  Don't swap any task out if task swapping is disabled.
		 * vm_page_queue_free_lock protects the vm globals.
		 */
		outtask = TASK_NULL;
		if (start_swapping ||
		    (!stop_swapping && intask &&
		     ((local_page_free_avg / AVE_SCALE) < vm_page_free_target))
		   ) {
			if (task_swap_enable &&
			    (outtask = pick_outtask()) &&
			    (task_swapout(outtask) == KERN_SUCCESS)) {
				unsigned long rss;
#if	TASK_SW_DEBUG
				if (task_swap_debug)
				    print_pid(outtask, local_page_free_avg / AVE_SCALE,
					      vm_page_free_target, "<",
					      "out");
#endif
				/* Credit the local estimate with the pages the
				 * swap-out should eventually free. */
				rss = outtask->swap_rss;
				if (outtask->swap_nswap == 1)
					rss /= 2; /* divide by 2 if never out */
				local_page_free_avg += (rss/short_avg_interval) * AVE_SCALE;
			}
			/* pick_outtask returned a referenced task even if the
			 * swapout failed; drop that reference here. */
			if (outtask != TASK_NULL)
				task_deallocate(outtask);
		}

		/*
		 * If there is an eligible task to bring in and there are at
		 * least vm_page_free_target free pages, swap it in.  If task
		 * swapping has been disabled, bring the task in anyway.
		 */
		if (intask && ((local_page_free_avg / AVE_SCALE) >=
							vm_page_free_target ||
				stop_swapping || !task_swap_enable)) {
			if (task_swapin(intask, FALSE) == KERN_SUCCESS) {
				unsigned long rss;
#if	TASK_SW_DEBUG
				if (task_swap_debug)
				    print_pid(intask, local_page_free_avg / AVE_SCALE,
					      vm_page_free_target, ">=",
					      "in");
#endif
				/* Debit the local estimate by the pages the
				 * incoming task is expected to consume. */
				rss = intask->swap_rss;
				if (intask->swap_nswap == 1)
					rss /= 2; /* divide by 2 if never out */
				local_page_free_avg -= (rss/short_avg_interval) * AVE_SCALE;
			}
		}
		/*
		 * XXX
		 * Here we have to decide whether to continue swapping
		 * in and/or out before sleeping.  The decision should
		 * be made based on the previous action (swapin/out) and
		 * current system parameters, such as paging rates and
		 * demand.
		 * The function, compute_vm_averages, which does these
		 * calculations, depends on being called every second,
		 * so we can't just do the same thing.
		 */
		if (++loopcnt < MAX_LOOP)
			continue;

		/*
		 * Arrange to be awakened if paging is still heavy or there are
		 * any tasks partially or completely swapped out.  (Otherwise,
		 * the wakeup will come from the external trigger(s).)
		 */
		timeout = 0;
		if (start_swapping)
			timeout = task_swap_cycle_time;
		else {
			/* The swapped_tasks queue is guarded by the swapper
			 * lock; only peek at it while holding that lock. */
			task_swapper_lock();
			if (!queue_empty(&swapped_tasks))
				timeout = min_swap_time;
			task_swapper_unlock();
		}
		/* assert_wait must precede thread_set_timeout/thread_block
		 * so a wakeup posted in between is not lost. */
		assert_wait((event_t)&swapped_tasks, FALSE);
		if (timeout) {
			if (task_swap_debug)
				printf("task_swapper: set timeout of %d\n",
								timeout);
			thread_set_timeout(timeout*hz);
		}
		if (task_swap_debug)
			printf("task_swapper: blocking\n");
		thread_block((void (*)(void)) 0);
		/* Cancel the timer in case we were woken before it fired. */
		if (timeout) {
			reset_timeout_check(&current_thread()->timer);
		}
		/* reset locals */
		loopcnt = 0;
		local_page_free_avg = vm_page_free_avg;
	}
	}
}
Example 3
0
/*
 * mk_receive: dequeue the next received buffer on endpoint ep and
 * return it translated into the calling task's mapped buffer window.
 *
 * time_out semantics (as used below): 0 => never block; -1 => block
 * without a timeout; any other value => block with that timeout.
 *
 * Returns NW_BUFFER_ERROR for a bad endpoint or a caller that does not
 * own it, and NULL when time_out == 0 (or no waiter record is free)
 * and nothing is queued.
 *
 * NOTE(review): on the blocking path (assert_wait/thread_block) rc is
 * never assigned before the final "return rc", so the value returned
 * after a block is indeterminate in this function — presumably the
 * mk_return continuation delivers the real result; confirm.
 */
nw_buffer_t mk_receive(nw_ep ep, int time_out) {
  nw_buffer_t rc;
  nw_pv_t pv;
  nw_ecb_t ecb;
  nw_rx_header_t header;
  nw_hecb_t hecb;
  nw_waiter_t w;
  nw_ep_owned_t waited;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BUFFER_ERROR;
  } else {
    /* Walk the endpoint's owner list looking for the calling task. */
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_BUFFER_ERROR;
    } else {
      ecb = &ect[ep];
      header = ecb->rx_first;
      if (header != NULL) {
	/* Translate the buffer pointer from the kernel's window
	 * (ecb->buf_start) into the owner's window (pv->buf_start). */
	rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
			     pv->buf_start);
	/* Unlink the header from the rx queue and recycle it. */
	ecb->rx_first = header->next;
	if (ecb->rx_first == NULL)
	  ecb->rx_last = NULL;
	nc_rx_header_deallocate(header);
      } else if (time_out != 0 && nw_free_waiter != NULL &&
		 (time_out == -1 || nw_free_waited != NULL)) {
	/* Nothing queued and blocking is allowed: take a waiter record
	 * from the free list and append it to this endpoint's waiters. */
	w = nw_free_waiter;
	nw_free_waiter = w->next;
	w->waiter = current_thread();
	w->next = NULL;
	hecb = &hect[ep];
	if (hecb->rx_last == NULL)
	  hecb->rx_first = hecb->rx_last = w;
	else
	  hecb->rx_last = hecb->rx_last->next = w;
	/* assert_wait before releasing the lock so a wakeup posted in
	 * between cannot be lost. */
	assert_wait(0, TRUE);
	if (time_out != -1) {
	  /* Finite timeout: also record which endpoint we waited on so
	   * the timeout path can clean up, and arm the thread timer. */
	  waited = nw_free_waited;
	  nw_free_waited = waited->next;
	  waited->ep = ep;
	  waited->next = NULL;
	  current_thread()->nw_ep_waited = waited;
	  current_thread()->wait_result = NULL;
	  if (!current_thread()->timer.set) 
	    thread_set_timeout(time_out);
	} else {
	  current_thread()->nw_ep_waited = NULL;
	}
	simple_unlock(&nw_simple_lock);
	/* Block; mk_return is the continuation run on wakeup. */
	thread_block(mk_return);
      } else {
	/* Non-blocking call (or no free waiter record): nothing ready. */
	rc = NULL;
      }
    }
  }
  nw_unlock();
  return rc;
}
Example 4
0
/*
 * mk_rpc: send msg (possibly a chain linked by buf_next) on endpoint
 * ep as an RPC and, if a reply is immediately available, return it
 * translated into the caller's buffer window; otherwise optionally
 * block waiting for the reply.
 *
 * time_out semantics (as used below): 0 => never block; -1 => block
 * without a timeout; any other value => block with that timeout.
 *
 * Returns NW_BUFFER_ERROR on a bad endpoint/non-owner caller, a
 * malformed buffer chain, header-allocation failure, or a bad address;
 * NULL when there is no immediate reply and we did not (or could not)
 * block.
 *
 * NOTE(review): as in mk_receive, the blocking path falls through to
 * "return rc" with rc already holding NULL from the device rpc call;
 * the real reply is presumably delivered via the mk_return
 * continuation — confirm.
 */
nw_buffer_t mk_rpc(nw_ep ep, nw_buffer_t msg, nw_options options,
		   int time_out) {
  nw_buffer_t rc;
  nw_result nrc;
  nw_ep sender;
  int dev;
  nw_pv_t pv;
  nw_ecb_t ecb;
  nw_tx_header_t header, first_header, previous_header;
  nw_hecb_t hecb;
  nw_waiter_t w;
  nw_ep_owned_t waited;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BUFFER_ERROR;
  } else {
    /* Walk the endpoint's owner list looking for the calling task. */
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_BUFFER_ERROR;
    } else {
      ecb = &ect[ep];
      if (ecb->state == NW_INEXISTENT ||
	  (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
	rc = NW_BUFFER_ERROR;
      } else {
	/* Build a chain of tx headers, one per buffer in the msg chain,
	 * validating each user-supplied buffer as we go. */
	first_header = header = nc_tx_header_allocate();
	previous_header = NULL;
	rc = NULL;
	while (header != NULL) {
	  /* Validate: buffer lies inside the caller's mapped window, is
	   * 4-byte aligned, is marked used, and its block fits inside it. */
	  if ((char *) msg < pv->buf_start ||
	      (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	      ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
	      (msg->block_length & 0x3) || !msg->buf_used ||
	      (char *) msg + msg->buf_length > pv->buf_end ||
	      msg->block_offset + msg->block_length > msg->buf_length) {
	    rc = NW_BUFFER_ERROR;
	    break;
	  } else {
	    /* First header carries the peer address: from the open
	     * connection for SEQ_PACKET, else from the buffer itself. */
	    if (previous_header == NULL) {
	      if (ecb->protocol == NW_SEQ_PACKET)
		header->peer = ecb->conn->peer;
	      else
		header->peer = msg->peer;
	    } else {
	      previous_header->next = header;
	    }
	    /* Translate the buffer pointer from the caller's window into
	     * the kernel's window. */
	    header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
					    ecb->buf_start);
	    header->block = (char *) header->buffer + msg->block_offset;
	    /* A NULL buffer pointer tells the driver not to deallocate. */
	    if (!msg->block_deallocate)
	      header->buffer = NULL;
	    header->msg_length = 0;
	    header->block_length = msg->block_length;
	    /* Total message length is accumulated on the first header. */
	    first_header->msg_length += header->block_length;
	    header->next = NULL;
	    if (msg->buf_next == NULL)
	      break;
	    msg = msg->buf_next;
	    previous_header = header;
	    header = nc_tx_header_allocate();
	  }
	}
	if (header == NULL) {
	  /* Header pool exhausted mid-chain: release what we built. */
	  nc_tx_header_deallocate(first_header);
	  rc = NW_BUFFER_ERROR;
	} else if (rc != NW_BUFFER_ERROR) {
	  dev = NW_DEVICE(first_header->peer.rem_addr_1);
	  if (ecb->protocol != NW_DATAGRAM ||
	      devct[dev].type != NW_CONNECTION_ORIENTED) {
	    sender = first_header->peer.local_ep;
	    nrc = NW_SUCCESS;
	  } else {
	    /* Datagram over a connection-oriented device: find (or
	     * lazily create and open) the line endpoint for this peer. */
	    sender = nc_line_lookup(&first_header->peer);
	    if (sender == -1) {
	      nrc = NW_BAD_ADDRESS;
	    } else if (sender > 0) {
	      nrc = NW_SUCCESS;
	    } else {
	      nrc = mk_endpoint_allocate_internal(&sender, NW_LINE,
						  NW_AUTO_ACCEPT, 0, TRUE);
	      if (nrc == NW_SUCCESS) {
		nrc = mk_connection_open_internal(sender,
			                first_header->peer.rem_addr_1,
					first_header->peer.rem_addr_2,
					MASTER_LINE_EP);
		if (nrc == NW_SUCCESS) 
		  nc_line_update(&first_header->peer, sender);
	      }
	    }
	  }
	  if (nrc == NW_SUCCESS) {
	    first_header->sender = sender;
	    first_header->options = options;
	    /* Hand the chain to the device's rpc entry point. */
	    rc = (*(devct[dev].entry->rpc)) (sender, first_header, options);
	    if (rc != NULL && rc != NW_BUFFER_ERROR) {
	      /* Immediate reply: translate it into the caller's window. */
	      rc = (nw_buffer_t) ((char *) rc - ecb->buf_start +
				  pv->buf_start);
	    } else if (rc == NULL && time_out != 0 && nw_free_waiter != NULL &&
		       (time_out == -1 || nw_free_waited != NULL)) {
	      /* No reply yet and blocking is allowed: enqueue ourselves
	       * as an rx waiter exactly as mk_receive does. */
	      w = nw_free_waiter;
	      nw_free_waiter = w->next;
	      w->waiter = current_thread();
	      w->next = NULL;
	      hecb = &hect[ep];
	      if (hecb->rx_last == NULL)
		hecb->rx_first = hecb->rx_last = w;
	      else
		hecb->rx_last = hecb->rx_last->next = w;
	      /* assert_wait before unlocking so a wakeup is not lost. */
	      assert_wait(0, TRUE);
	      if (time_out != -1) {
		waited = nw_free_waited;
		nw_free_waited = waited->next;
		waited->ep = ep;
		waited->next = NULL;
		current_thread()->nw_ep_waited = waited;
		current_thread()->wait_result = NULL;
		if (!current_thread()->timer.set) 
		  thread_set_timeout(time_out);
	      } else {
		current_thread()->nw_ep_waited = NULL;
	      }
	      simple_unlock(&nw_simple_lock);
	      /* Block; mk_return is the continuation run on wakeup. */
	      thread_block(mk_return);
	    }
	  }
	}
      }
    }
  }
  nw_unlock();
  return rc;
}
Example 5
0
/*
 * mk_select: scan the nep endpoints listed in user array epp and return
 * the first queued buffer found, translated into the caller's window.
 * If none is ready and time_out allows, register the calling thread as
 * a waiter on EVERY listed endpoint and block.
 *
 * time_out semantics (as used below): 0 => never block; -1 => block
 * without a timeout; any other value => block with that timeout.
 *
 * Returns NW_BUFFER_ERROR for an unreadable epp array, a bad/unowned
 * endpoint, or insufficient free waiter records; NULL when
 * time_out == 0 and nothing is ready.
 *
 * NOTE(review): on the blocking path rc is never assigned before the
 * final "return rc", so the post-block return value is indeterminate
 * here — presumably mk_return supplies the real result; confirm.
 * Also note the signed loop index i is compared against the unsigned
 * nep throughout.
 */
nw_buffer_t mk_select(u_int nep, nw_ep_t epp, int time_out) {
  nw_buffer_t rc;
  nw_pv_t pv;
  int i;
  nw_ep ep;
  nw_ecb_t ecb;
  nw_rx_header_t header;
  nw_hecb_t hecb;
  nw_waiter_t w, w_next;
  nw_ep_owned_t waited;

  /* The endpoint array lives in user space; verify it is readable
   * before touching it. */
  if (invalid_user_access(current_task()->map, (vm_offset_t) epp,
			  (vm_offset_t) epp + nep*sizeof(nw_ep) - 1,
			  VM_PROT_READ)) {
    rc = NW_BUFFER_ERROR;
  } else {
    nw_lock();
    /* Pass 1: look for any endpoint with a buffer already queued.
     * Any break leaves i < nep, which also skips the blocking phase. */
    for (i = 0; i < nep; i++) {
      ep = epp[i];
      if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
	rc = NW_BUFFER_ERROR;
	break;
      } else {
	/* The caller must be among the endpoint's owners. */
	while (pv != NULL && pv->owner != current_task())
	  pv = pv->next;
	if (pv == NULL) {
	  rc = NW_BUFFER_ERROR;
	  break;
	} else {
	  ecb = &ect[ep];
	  header = ecb->rx_first;
	  if (header != NULL) {
	    /* Found one: translate into the owner's window, unlink,
	     * recycle the header, and stop scanning. */
	    rc = (nw_buffer_t) ((char *) header->buffer - ecb->buf_start +
				 pv->buf_start);
	    ecb->rx_first = header->next;
	    if (ecb->rx_first == NULL)
	      ecb->rx_last = NULL;
	    nc_rx_header_deallocate(header);
	    break;
	  }
	}
      }
    }
    if (i == nep) {
      /* Nothing ready on any endpoint. */
      if (time_out == 0) {
	rc = NULL;
      } else {
	/* Reserve nep records from each free list by walking them;
	 * w/waited remember the heads we are claiming. */
	w = nw_free_waiter;
	waited = nw_free_waited;
	i = 0;
	while (i < nep &&
	       nw_free_waiter != NULL && nw_free_waited != NULL) {
	  nw_free_waiter = nw_free_waiter->next;
	  nw_free_waited = nw_free_waited->next;
	  i++;
	}
	if (i < nep) {
	  /* Not enough records: put the claimed heads back untouched. */
	  nw_free_waiter = w;
	  nw_free_waited = waited;
	  rc = NW_BUFFER_ERROR;
	} else {
	  /* Distribute one waiter/waited record per endpoint, queueing
	   * this thread on each endpoint's rx waiter list. */
	  current_thread()->nw_ep_waited = waited;
	  for (i = 0; i < nep; i++) {
	    ep = epp[i];
	    waited->ep = ep;
	    if (i < nep-1)
	      waited = waited->next;
	    else
	      waited->next = NULL;
	    w->waiter = current_thread();
	    w_next = w->next;
	    w->next = NULL;
	    hecb = &hect[ep];
	    if (hecb->rx_last == NULL)
	      hecb->rx_first = hecb->rx_last = w;
	    else
	      hecb->rx_last = hecb->rx_last->next = w;
	    w = w_next;
	  }
	  /* assert_wait before unlocking so a wakeup is not lost. */
	  assert_wait(0, TRUE);
	  if (time_out != -1) {
	    current_thread()->wait_result = NULL;
	    if (!current_thread()->timer.set) 
	      thread_set_timeout(time_out);
	  }
	  simple_unlock(&nw_simple_lock);
	  /* Block; mk_return is the continuation run on wakeup. */
	  thread_block(mk_return);
	}
      }
    }
    nw_unlock();
  }
  return rc;
}