/* Remove the task TASK from the scheduler's data structures. */
void remove_task(struct task *task)
{
    u_long flags;
    save_flags(flags);
    cli();
    task->flags |= TASK_ZOMBIE;
    task->flags &= ~TASK_RUNNING;
    remove_node(&task->node);
    load_flags(flags);
}
/* Add DELTA to the kernel's break address, returning the old break
   address. Note that DELTA may be negative if you want. Returns -1 if
   no more memory is available. */
void *kernel_sbrk(long delta)
{
#ifndef SBRK_DOESNT_ALLOC
    u_char *old = kernel_brk, *ptr;
    u_long flags;
    save_flags(flags);
    cli();
    kernel_brk += round_to(delta, 4);
    if((u_long)kernel_brk < (PHYS_MAP_ADDR - KERNEL_BASE_ADDR))
    {
        ptr = (u_char *)round_to((u_long)old, PAGE_SIZE);
        while(ptr < kernel_brk)
        {
            page *p = alloc_page();
            if(p == NULL)
                goto error;
            map_page(logical_kernel_pd, p, TO_LINEAR(ptr), PTE_PRESENT);
            ptr += PAGE_SIZE;
        }
        load_flags(flags);
        return old;
    }
error:
    kernel_brk = old;
    load_flags(flags);
    return (void *)-1;
#else
    /* Don't need to map in any pages or anything; let the page-fault
       handler do that. Should really release any unneeded pages if
       DELTA is negative. */
    register void *ptr = kernel_brk;
    kernel_brk += round_to(delta, 4);
    if((u_long)kernel_brk < (PHYS_MAP_ADDR - KERNEL_BASE_ADDR))
        return ptr;
    kernel_brk = ptr;
    return (void *)-1;
#endif
}
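/* kernel_sbrk() relies on a round_to() helper that isn't defined in this
   listing. A minimal sketch of what it presumably does, assuming it rounds
   VALUE up to the next multiple of a power-of-two ALIGN (the project's real
   definition may be a macro and may treat negative deltas differently): */
static inline u_long round_to_sketch(u_long value, u_long align)
{
    /* Add align-1, then clear the low bits; only valid for power-of-two align. */
    return (value + align - 1) & ~(align - 1);
}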
/* If the task TASK is running, set its state to suspended and put it
   into the list of suspended tasks. This function may be called from
   interrupts. */
void suspend_task(struct task *task)
{
    if(task->flags & TASK_RUNNING)
    {
        u_long flags;
        save_flags(flags);
        cli();
        remove_node(&task->node);
        append_node(&suspended_tasks, &task->node);
        task->flags &= ~TASK_RUNNING;
        need_resched = kernel_module.need_resched = TRUE;
        load_flags(flags);
    }
}
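/* A hypothetical blocking pattern built from these primitives, for
   illustration only: a task suspends itself and then calls schedule()
   (see below) so it really stops executing; some other context later
   calls wake_task() on it. `event_ready' and wait_for_event_sketch()
   are invented names, not part of the kernel. */
static volatile int event_ready;

static void wait_for_event_sketch(void)
{
    while(!event_ready)
    {
        suspend_task(current_task);     /* leave the run queue */
        schedule();                     /* actually switch away until woken */
    }
}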
void load_config(const char *dir)
{
  load_flags(dir);
  switch(slurp("key",&key,512)) {
    case -1:
      strerr_die4sys(111,FATAL,ERR_READ,dir,"/key: ");
    case 0:
      strerr_die4x(100,FATAL,dir,"/key",ERR_NOEXIST);
  }
  /* There are problems with using getconf_line to fetch the ezmlmrc
   * pointer, since the alt location for "ezmlmrc" turns out to be the
   * whole ezmlmrc file itself. */
  switch (slurp("ezmlmrc",&ezmlmrc,64)) {
    case -1:
      strerr_die4sys(111,FATAL,ERR_READ,dir,"/ezmlmrc: ");
    case 0:
      ezmlmrc.len = 0;
  }
  ezmlmrc.len = byte_chr(ezmlmrc.s,ezmlmrc.len,'\n');
  getconf_line(&outhost,"outhost",1,dir);
  getconf_line(&outlocal,"outlocal",1,dir);
  if (!stralloc_copy(&local,&outlocal)) die_nomem();
  getconf_line(&listid,"listid",0,dir);
  if (getconf_line(&charset,"charset",0,dir)) {
    if (charset.len >= 2 && charset.s[charset.len - 2] == ':') {
      if (charset.s[charset.len - 1] == 'B'
          || charset.s[charset.len - 1] == 'Q') {
        flagcd = charset.s[charset.len - 1];
        charset.s[charset.len - 2] = '\0';
      }
    }
  }
  else if (!stralloc_copys(&charset,TXT_DEF_CHARSET)) die_nomem();
  if (!stralloc_0(&charset)) die_nomem();
  // FIXME: need to handle escapes in mailinglist
  getconf_line(&mailinglist,"mailinglist",1,dir);
}
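/* For illustration: with the parsing above, a "charset" config file
   containing "iso-8859-1:Q" leaves charset as the C string "iso-8859-1"
   (the ':' is overwritten with '\0' and stralloc_0() terminates it) and
   sets flagcd = 'Q'; 'B' and 'Q' presumably select base64 vs.
   quoted-printable encoding later on. A value with no ":B"/":Q" suffix is
   used verbatim, and a missing file falls back to TXT_DEF_CHARSET. */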
/* If the task TASK is suspended, change its state to running and move
   it to the end of the run queue. This function may be called from
   interrupts. */
void wake_task(struct task *task)
{
    if((task->flags & (TASK_RUNNING | TASK_FROZEN | TASK_ZOMBIE)) == 0)
    {
        u_long flags;
        save_flags(flags);
        cli();
        remove_node(&task->node);
        task->flags |= TASK_RUNNING;
        enqueue_task(&running_tasks, task);
        if(task->pri > current_task->pri)
        {
            if(intr_nest_count == 0)
                schedule();
            else
                need_resched = kernel_module.need_resched = TRUE;
        }
        load_flags(flags);
    }
}
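/* The interrupt-side counterpart of the earlier sketch (again hypothetical):
   an IRQ handler marks the event and wakes the sleeping task. wake_task()
   itself decides, via intr_nest_count, whether to reschedule immediately or
   just set need_resched, so the handler doesn't have to care. `sleeper' and
   event_irq_sketch() are invented names. */
static struct task *sleeper;

static void event_irq_sketch(void)
{
    event_ready = TRUE;
    if(sleeper != NULL)
        wake_task(sleeper);
}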
static void load_config(void)
{
  load_flags();
  key.len = 0;
  switch(slurp("key",&key,512)) {
    case -1:
      strerr_die2sys(111,FATAL,MSG1(ERR_READ,"key"));
    case 0:
      strerr_die4x(100,FATAL,listdir,"/key",MSG(ERR_NOEXIST));
  }
  /* There are problems with using getconf_line to fetch the ezmlmrc
   * pointer, since the alt location for "ezmlmrc" turns out to be the
   * whole ezmlmrc file itself. */
  switch (slurp("ezmlmrc",&ezmlmrc,64)) {
    case -1:
      strerr_die2sys(111,FATAL,MSG1(ERR_READ,"ezmlmrc"));
    case 0:
      ezmlmrc.len = 0;
  }
  ezmlmrc.len = byte_chr(ezmlmrc.s,ezmlmrc.len,'\n');
  getconf_line(&outhost,"outhost",1);
  getconf_line(&outlocal,"outlocal",1);
  if (!stralloc_copy(&local,&outlocal)) die_nomem();
  getconf_line(&listid,"listid",0);
  if (getconf_line(&charset,"charset",0)) {
    if (charset.len >= 2 && charset.s[charset.len - 2] == ':') {
      if (charset.s[charset.len - 1] == 'B'
          || charset.s[charset.len - 1] == 'Q') {
        flagcd = charset.s[charset.len - 1];
        charset.s[charset.len - 2] = '\0';
      }
    }
  }
  else if (!stralloc_copys(&charset,TXT_DEF_CHARSET)) die_nomem();
  if (!stralloc_0(&charset)) die_nomem();
}
/* Add the task TASK to the correct queue, running_tasks if it's
   runnable, suspended_tasks otherwise. */
void append_task(struct task *task)
{
    u_long flags;
    save_flags(flags);
    cli();
    if(task->flags & TASK_RUNNING)
    {
        enqueue_task(&running_tasks, task);
        if(current_task->pri < task->pri)
        {
            /* A higher priority task ready to run always gets priority. */
            if(intr_nest_count == 0)
                schedule();
            else
                need_resched = kernel_module.need_resched = TRUE;
        }
    }
    else
        append_node(&suspended_tasks, &task->node);
    load_flags(flags);
}
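/* append_task(), wake_task() and schedule() all take running_tasks.head as
   the highest-priority runnable task, which implies that enqueue_task()
   does a priority-ordered insert rather than a plain append. A sketch of
   that behaviour under assumed list semantics (NULL-terminated `next'
   pointers and an insert_node_before() helper are assumptions of this
   sketch, not definitions taken from the kernel): */
static void enqueue_task_sketch(struct list *queue, struct task *task)
{
    struct task *t = (struct task *)queue->head;

    /* Skip tasks of equal or higher priority so that equal-priority
       tasks round-robin in FIFO order. */
    while(t != NULL && t->pri >= task->pri)
        t = (struct task *)t->node.next;

    if(t == NULL)
        append_node(queue, &task->node);
    else
        insert_node_before(queue, &task->node, &t->node);
}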
/* Clean up after an error. The caller should usually call do_request()
   after this function returns. It can be called from an IRQ handler or
   the normal kernel context. */
void handle_error(const char *from)
{
    u_long flags;
    if(current_req == NULL)
        return;
    save_flags(flags);
    cli();
    kprintf("\nfd: %s (%s): Error (retry number %d)\n", from,
            REQ_FD_DEV(current_req)->name, current_req->retries);
    dump_stat();
    fd_intr = NULL;
    if(current_req->retries++ < MAX_RETRIES)
    {
#if 0
        if((current_req->retries % RESET_FREQ) == 0)
            reset_pending = TRUE;
#endif
        if((current_req->retries % RECAL_FREQ) == 0)
            REQ_FD_DEV(current_req)->recalibrate = TRUE;
        /* Retry the current request; this simply means stacking it on
           the front of the queue and calling do_request(). */
        DB(("fd:handle_error: Retrying request %p\n", current_req));
        prepend_node(&fd_reqs, &current_req->node);
        current_req = NULL;
    }
    else
    {
#if 0
        reset_pending = TRUE;
#endif
        REQ_FD_DEV(current_req)->recalibrate = TRUE;
        DB(("\nfd: handle_error: Request %p has no more retries available.\n",
            current_req));
        fd_end_request(-1);
    }
    load_flags(flags);
}
/* If no request is currently being processed and there are new requests
   in the queue, process the first one. This can be called from an
   interrupt or the normal kernel context. */
void do_request(blkreq_t *req)
{
    fd_dev_t *dev;
    u_long track, sect, cyl, head, big_sect, sects;
    u_long flags;
    int i;

    save_flags(flags);
    /* This label is used to eliminate tail-recursion. */
top:
    cli();

    if(current_req != NULL)
    {
        if(req != NULL)
            append_node(&fd_reqs, &req->node);
        load_flags(flags);
        return;
    }

    for(i = 0; i < 2; i++)
    {
        if(fd_devs[i].recalibrate)
        {
            fdc_recal(&fd_devs[i]);
            if(req != NULL)
                append_node(&fd_reqs, &req->node);
            load_flags(flags);
            return;
        }
    }

    if(req == NULL)
    {
        if(!list_empty_p(&fd_reqs))
        {
            req = (blkreq_t *)fd_reqs.head;
            remove_node(&req->node);
        }
        else
        {
            load_flags(flags);
            return;
        }
    }
    current_req = req;
#if 0
    req->retries = 0;
#endif
    load_flags(flags);

    dev = REQ_FD_DEV(req);

    DB(("fd:do_request: req=%p drive=%d block=%d nblocks=%d cmd=%d buf=%p\n",
        req, dev->drvno, req->block, req->nblocks, req->command, req->buf));

    switch(req->command)
    {
    case FD_CMD_SEEK:   /* We wanna MOVE DA HEAD! */
        /* Do da seek. */
        if(fdc_seek(dev, req->block) == FALSE)
        {
            handle_error("FD_CMD_SEEK, seek");
            goto top;
            break;
        }
        /* Then Sense Interrupt Status */
        if(fdc_sense() == FALSE)
        {
            handle_error("FD_CMD_SEEK, fdc_sense");
            goto top;
            break;
        }
        /* and now we have to Read the ID */
        if(fdc_read_id(dev) == FALSE)
        {
            handle_error("FD_CMD_SEEK, read_id");
            goto top;
            break;
        }
        fd_end_request(0);
        req = NULL;
        goto top;

    case FD_CMD_TIMER:
        fd_end_request(0);
        req = NULL;
        goto top;
    }

    if(req->block >= dev->total_blocks)
    {
        kprintf("fd: Device %s (%p) doesn't have a block %d!\n",
                dev->name, dev, req->block);
        fd_end_request(-1);
        req = NULL;
        goto top;
    }

    big_sect = req->block;
    sects = req->nblocks;

    track = big_sect / dev->disk_p->sectors;
    sect = big_sect % dev->disk_p->sectors + 1;
    head = track % dev->disk_p->heads;
    cyl = track / dev->disk_p->heads;

    DB(("fd:do_request: cyl=%d sect=%d head=%d sects=%d\n",
        cyl, sect, head, sects));

    switch(req->command)
    {
    case FD_CMD_READ:   /* We wanna READ the floppy! */
#if 0
        fd_end_request(0);
        req = NULL;
        goto top;
#endif
        /* We need to seek to the right cylinder. */
        if(fdc_seek(dev, cyl) == FALSE)
        {
            handle_error("FD_CMD_READ, seek");
            goto top;
            break;
        }
        /* Then Sense Interrupt Status */
        if(fdc_sense() == FALSE)
        {
            handle_error("FD_CMD_READ, fdc_sense");
            goto top;
            break;
        }
        /* and now we have to Read the ID */
        if(fdc_read_id(dev) == FALSE)
        {
            handle_error("FD_CMD_READ, read_id");
            goto top;
            break;
        }

#define TPA(XX) ((u_long)TO_PHYSICAL(XX))
        /* Tell the DMA what to do, and hope for the best! */
        /* Should move this inside fdc, in fdc_read() I think */
        DMAbuf.Buffer = track_buf;
        DMAbuf.Page = (u_int8)((TPA(track_buf) >> 16) & 0xff);
        DMAbuf.Offset = (u_int16)(TPA(track_buf) & 0xffff);
        DMAbuf.Len = (u_int16)(dev->disk_p->sectors * dev->disk_p->heads
                               * FD_SECTSIZ) - 1;
        DMAbuf.Chan = FLOPPY_DMA;
        kernel->setup_dma(&DMAbuf, DMA_READ);

        /* Now we issue a read command. */
        if(fdc_read(dev, cyl) == FALSE)
        {
            handle_error("FD_CMD_READ, read");
            goto top;
            break;
        }
        break;

    case FD_CMD_WRITE:  /* We wanna WRITE it too! */
        fd_end_request(0);
        req = NULL;
        goto top;

    default:
        kprintf("fd:do_request: Unknown command in fd_req, %d\n",
                req->command);
        fd_end_request(-1);
        req = NULL;
        goto top;
    }
}
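/* Worked example of the block-to-CHS mapping in do_request(), assuming a
   standard 1.44MB layout (disk_p->sectors = 18, disk_p->heads = 2 -- an
   assumption about the geometry, not something defined in this listing):
   for req->block = 749,
       track = 749 / 18 = 41,    sect = 749 % 18 + 1 = 12,
       head  = 41 % 2   = 1,     cyl  = 41 / 2       = 20,
   i.e. the request starts at cylinder 20, head 1, sector 12. */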
/* The scheduler; if possible, switch to the next task in the run queue.
   Note that the only reason to *ever* call this function is when the
   current task has suspended itself and needs to actually stop
   executing. Otherwise just set the `need_resched' flag to TRUE and the
   scheduler will be called as soon as is safe. Never ever *ever* call
   this from an interrupt handler! It should be safe to be called from
   an exception handler though. Also note that there are no `sliding'
   priority levels; tasks with high priority levels can totally block
   lower-priority tasks. */
void schedule(void)
{
    u_long flags;
    save_flags(flags);
    cli();

#ifdef PARANOID
    if(intr_nest_count != 0)
        kprintf("schedule: Oops, being called with intr_nest_count=%d\n",
                intr_nest_count);
#endif

    /* First reclaim any dead processes.. */
    while(zombies)
    {
        struct task *zombie = (struct task *)zombie_tasks.head;
        remove_node(&zombie->node);
        reclaim_task(zombie);
        zombies--;
    }

    if((current_task->forbid_count > 0)
       && (current_task->flags & TASK_RUNNING))
    {
        /* Non-pre-emptible task. */
        load_flags(flags);
        return;
    }
    need_resched = kernel_module.need_resched = FALSE;

    /* Now do the scheduling.. */
    if(current_task->flags & TASK_RUNNING)
    {
        /* Task is still runnable so put it onto the end of the run
           queue (paying attention to priority levels). */
        remove_node(&current_task->node);
        enqueue_task(&running_tasks, current_task);
    }
    if(!list_empty_p(&running_tasks))
    {
        struct task *next = (struct task *)running_tasks.head;
        if(next->time_left <= 0)
            next->time_left = next->quantum;
        if(current_task != next)
        {
            current_task->cpu_time += timer_ticks - current_task->last_sched;
            if(current_task->flags & TASK_ZOMBIE)
            {
                append_node(&zombie_tasks, &current_task->node);
                zombies++;
            }
            next->sched_count++;
            next->last_sched = timer_ticks;
            current_task = next;
            kernel_module.current_task = next;
            switch_to_task(next);
#if 1
            /* Currently we don't handle the math-copro *at all*; clearing
               this flag simply stops us getting dna exceptions.. */
            asm volatile ("clts");
#endif
        }
    }
    load_flags(flags);
}