Example #1
/**
 * Try to get "desired_ntokens" tokens from the given file stream
 * f.  Store them (destructively) in the given list "tokens" of
 * dynamically created extstring_t objects.  Store in
 * "gotten_ntokens" the actual number of tokens that we got.
 */
int
bebop_get_tokens (list_t* tokens, 
		 int* gotten_ntokens, 
		 FILE* f,
		 const int desired_ntokens,
		 const char delimiters[],
		 const int ndelimiters)
{
  list_t current = *tokens;
  list_t outlist = *tokens;

  if (desired_ntokens < 0)
    {
      bebop_error ("bebop_get_tokens", 
		  "desired_ntokens = %d < 0", 
		  desired_ntokens);
      return -1;
    }
  for (*gotten_ntokens = 0; 
       *gotten_ntokens < desired_ntokens;
       (*gotten_ntokens)++)
    {
      extstring_t* token = NULL;
      int result = 0;

      if (list_empty_p (current))
	  /* We're out of nodes to reuse, so we have to create a new
	     extstring_t */
	token = extstring_create (0);
      else
	token = list_car (current);

      /* Try to get the next token */
      result = bebop_next_token (token, f, delimiters, ndelimiters);
      if (result != 0)
	{
	  /* Only destroy the token if we just created it above; a
	     token reused from the list still belongs to the list. */
	  if (list_empty_p (current))
	    extstring_destroy (token);
	  break;
	}
      else
	{
	  if (list_empty_p (current))
	    outlist = list_append_node (outlist, token);
	  else
	    {
	      list_set_car (current, token);
	      current = list_cdr (current);
	    }
	}
    }
  /* Hand the (possibly extended) token list back to the caller. */
  *tokens = outlist;
  return 0;
}
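
A minimal usage sketch (not part of the example above): the list_create()
constructor and the delimiter set below are assumptions; only
bebop_get_tokens() itself is taken from the code above.

#include <stdio.h>

void
read_some_tokens (FILE* f)
{
  list_t tokens = list_create ();   /* hypothetical constructor */
  int ntokens = 0;
  const char delimiters[] = {' ', '\t', '\n'};

  /* Ask for at most 10 tokens; ntokens reports how many we got. */
  if (bebop_get_tokens (&tokens, &ntokens, f, 10, delimiters, 3) == 0)
    printf ("got %d token(s)\n", ntokens);
}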
Example #2
/* If no request is currently being processed and there are new requests
   in the queue, process the first one.  This can be called from an
   interrupt or from the normal kernel context. */
void do_request(blkreq_t *req)
{
    fd_dev_t *dev;
    u_long track, sect, cyl, head, big_sect, sects;
    u_long flags;
    int i;

    save_flags(flags);

    /* This label is used to eliminate tail-recursion. */
top:

    cli();
    if(current_req != NULL)
    {
	if(req != NULL)
	    append_node(&fd_reqs, &req->node);
	load_flags(flags);
	return;
    }
    for(i = 0; i < 2; i++)
    {
	if(fd_devs[i].recalibrate)
	{
	    fdc_recal(&fd_devs[i]);
	    if(req != NULL)
		append_node(&fd_reqs, &req->node);
	    load_flags(flags);
	    return;
	}
    }
    if(req == NULL)
    {
	if(!list_empty_p(&fd_reqs))
	{
	    req = (blkreq_t *)fd_reqs.head;
	    remove_node(&req->node);
	}
	else
	{
	    load_flags(flags);
	    return;
	}
    }
    current_req = req;
#if 0
    req->retries = 0;
#endif
    load_flags(flags);

    dev = REQ_FD_DEV(req);

    DB(("fd:do_request: req=%p drive=%d block=%d nblocks=%d cmd=%d buf=%p\n",
	req, dev->drvno, req->block, req->nblocks, req->command, req->buf));

    switch(req->command)
    {
    case FD_CMD_SEEK:	/* We wanna MOVE DA HEAD! */
        /* Do da seek. */
	if(fdc_seek(dev, req->block) == FALSE)
	{
		handle_error("FD_CMD_SEEK, seek");
		goto top;
	}

	/* Then Sense Interrupt Status */
	if(fdc_sense() == FALSE)
	{
		handle_error("FD_CMD_SEEK, fdc_sense");
		goto top;
	}

	/* and now we have to Read the ID */
	if(fdc_read_id(dev) == FALSE)
	{
		handle_error("FD_CMD_SEEK, read_id");
		goto top;
	}

        fd_end_request(0);
	req = NULL;
        goto top;

    case FD_CMD_TIMER:
	fd_end_request(0);
	req = NULL;
	goto top;
    }

    if(req->block >= dev->total_blocks)
    {
	kprintf("fd: Device %s (%p) doesn't have a block %d!\n",
		dev->name, dev, req->block);
	fd_end_request(-1);
	req = NULL;
	goto top;
    }

    big_sect = req->block;
    sects = req->nblocks;

    track = big_sect / dev->disk_p->sectors;
    sect = big_sect % dev->disk_p->sectors + 1;
    head = track % dev->disk_p->heads;
    cyl = track / dev->disk_p->heads;
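    /* Worked example (hypothetical 1.44MB geometry: sectors = 18,
       heads = 2): block 612 gives track = 612/18 = 34,
       sect = 612%18 + 1 = 1, head = 34%2 = 0, cyl = 34/2 = 17. */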

    DB(("fd:do_request: cyl=%d sect=%d head=%d sects=%d\n", cyl, sect, head, sects));

    switch(req->command)
    {
    case FD_CMD_READ:	/* We wanna READ the floppy! */

#if 0
        fd_end_request(0);
	req = NULL;
        goto top;
#endif

	/* We need to seek to the right cylinder. */
	if(fdc_seek(dev, cyl) == FALSE)
	{
		handle_error("FD_CMD_READ, seek");
		goto top;
	}

	/* Then Sense Interrupt Status */
	if(fdc_sense() == FALSE)
	{
		handle_error("FD_CMD_READ, fdc_sense");
		goto top;
	}

	/* and now we have to Read the ID */
	if(fdc_read_id(dev) == FALSE)
	{
		handle_error("FD_CMD_READ, read_id");
		goto top;
	}

#define TPA(XX) ((u_long)TO_PHYSICAL(XX))

	/* Tell the DMA controller what to do, and hope for the best!
	   This should probably move inside fdc, in fdc_read(). */
	DMAbuf.Buffer = track_buf;
	DMAbuf.Page = (u_int8)((TPA(track_buf) >> 16) & 0xff);
	DMAbuf.Offset = (u_int16)(TPA(track_buf) & 0xffff);
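	/* Example (hypothetical address): if TPA(track_buf) were 0x2f3a8,
	   Page would be 0x02 and Offset 0xf3a8; the ISA DMA controller
	   sees memory as a 16-bit offset within a 64K page selected by
	   the page register. */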
	DMAbuf.Len = (u_int16)(dev->disk_p->sectors * dev->disk_p->heads *
		FD_SECTSIZ) - 1;
	DMAbuf.Chan = FLOPPY_DMA;
	kernel->setup_dma(&DMAbuf, DMA_READ);

	/* Now we issue a read command. */
	if(fdc_read(dev, cyl) == FALSE)
	{
		handle_error("FD_CMD_READ, read");
		goto top;
	}

	break;

    case FD_CMD_WRITE:	/* We wanna WRITE it too! */

        fd_end_request(0);
	req = NULL;
        goto top;

    default:
	kprintf("fd:do_request: Unknown command in fd_req, %d\n",
		req->command);
	fd_end_request(-1);
	req = NULL;
	goto top;
    }
}
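
A hedged sketch of how a caller might hand a read request to do_request().
The submit_read() wrapper and its initialization are assumptions here,
though command, block, nblocks and buf are all fields referenced by
do_request() above.

static void
submit_read(blkreq_t *req, void *buf, u_long block, u_long nblocks)
{
    req->command = FD_CMD_READ;
    req->block = block;
    req->nblocks = nblocks;
    req->buf = buf;
    /* do_request() queues the request itself if the controller is
       busy, so the caller just hands it over. */
    do_request(req);
}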
Example #3
/* The scheduler; if possible, switch to the next task in the run queue.

   Note that the only reason to *ever* call this function is when the current
   task has suspended itself and needs to actually stop executing. Otherwise
   just set the `need_resched' flag to TRUE and the scheduler will be called
   as soon as is safe.

   Never ever *ever* call this from an interrupt handler! It should be safe
   to be called from an exception handler though.

   Also note that there are no `sliding' priority levels; tasks with high
   priority levels can totally block lower-priority tasks.  */
void
schedule(void)
{
    u_long flags;
    save_flags(flags);
    cli();

#ifdef PARANOID
    if(intr_nest_count != 0)
	kprintf("schedule: Oops, being called with intr_nest_count=%d\n",
		intr_nest_count);
#endif

    /* First reclaim any dead processes.. */
    while(zombies)
    {
	struct task *zombie = (struct task *)zombie_tasks.head;
	remove_node(&zombie->node);
	reclaim_task(zombie);
	zombies--;
    }

    if((current_task->forbid_count > 0)
       && (current_task->flags & TASK_RUNNING))
    {
	/* Non-preemptible task. */
	load_flags(flags);
	return;
    }

    need_resched = kernel_module.need_resched = FALSE;

    /* Now do the scheduling.. */
    if(current_task->flags & TASK_RUNNING)
    {
	/* Task is still runnable so put it onto the end of the run
	   queue (paying attention to priority levels). */
	remove_node(&current_task->node);
	enqueue_task(&running_tasks, current_task);
    }
    if(!list_empty_p(&running_tasks))
    {
	struct task *next = (struct task *)running_tasks.head;
	if(next->time_left <= 0)
	    next->time_left = next->quantum;
	if(current_task != next)
	{
	    current_task->cpu_time += timer_ticks - current_task->last_sched;
	    if(current_task->flags & TASK_ZOMBIE)
	    {
		append_node(&zombie_tasks, &current_task->node);
		zombies++;
	    }
	    next->sched_count++;
	    next->last_sched = timer_ticks;
	    current_task = next;
	    kernel_module.current_task = next;
	    switch_to_task(next);
#if 1
	    /* Currently we don't handle the math coprocessor *at all*;
	       clearing this flag simply stops us getting DNA
	       (device-not-available) exceptions. */
	    asm volatile ("clts");
#endif
	}
    }

    load_flags(flags);
}
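
The header comment distinguishes two ways to trigger a reschedule; this
sketch illustrates both. wait_for_event() and timer_intr_hook() are
hypothetical names, but need_resched, current_task, TASK_RUNNING and the
flag macros all appear in the code above.

/* A task suspending itself: clear TASK_RUNNING, then call schedule()
   directly so that we actually stop executing. */
static void
wait_for_event(void)
{
    u_long flags;
    save_flags(flags);
    cli();
    current_task->flags &= ~TASK_RUNNING;
    schedule();
    load_flags(flags);
}

/* From interrupt context: never call schedule(); just set the flag
   and the scheduler will run as soon as it is safe. */
static void
timer_intr_hook(void)
{
    need_resched = TRUE;
}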