Beispiel #1
0
/* Unregister an interrupt handler.  */
PUBLIC void rm_irq_handler( irq_hook_t* hook ) {
    const int irq = hook->irq;
    const int id = hook->id;
    irq_hook_t **entry;

    if( irq < 0 || irq >= NR_IRQ_VECTORS )
        minix_panic("invalid call to rm_irq_handler", irq);

    /* Mark this hook's id active and mask the line at the hardware
     * while the chain is being modified.
     */
    irq_actids[irq] |= id;
    hw_intr_mask(irq);

    /* Walk the handler chain and unlink the hook with the matching id. */
    for( entry = &irq_handlers[irq]; *entry != NULL; entry = &(*entry)->next ) {
        if( (*entry)->id != id )
            continue;
        *entry = (*entry)->next;
        if( irq_handlers[irq] == NULL )
            irq_use &= ~(1 << irq);     /* last handler gone: irq unused */
        if( irq_actids[irq] & id )
            irq_actids[irq] &= ~id;     /* clear the active bit set above */
        return;
    }
    /* Hook not found in the chain: nothing to remove. */
}
Beispiel #2
0
/*===========================================================================*
 *				clock_task				     *
 *===========================================================================*/
PUBLIC void clock_task()
{
/* Entry point of the clock task: loop forever, receiving messages and
 * dispatching on the type.  Only HARD_INT (clock tick) messages are
 * expected; anything else is reported as an error.  The task never replies.
 */
  message m;	/* buffer for the incoming request */
  int r;	/* status returned by receive() */

  init_clock();	/* set up the clock before entering the main loop */

  for (;;) {
	/* Block until the next message arrives. */
	r = receive(ANY, &m);
	if (r != OK)
		minix_panic("receive() failed", r);

	/* Only clock ticks are legal requests. */
	if (m.m_type == HARD_INT) {
		do_clocktick(&m);	/* process the clock tick */
	} else {
		kprintf("CLOCK: illegal request %d from %d.\n",
			m.m_type, m.m_source);
	}
  }
}
Beispiel #3
0
/* Handle a page fault taken while running process 'pr'.
 *
 * A fault in a system process (endpoint <= INIT_PROC_NR) is fatal.  For
 * other processes the fault details are recorded in the process slot,
 * the process is suspended, it is put on the pagefault chain, and VM is
 * notified so it can service the fault.
 *
 * pr         - process that caused the fault
 * trap_errno - CPU-supplied page fault error code
 *
 * (Fix: removed unused locals 's', 'ph' and 'pte'.)
 */
void pagefault(struct proc *pr, int trap_errno)
{
	/* pagefault_count is maintained by the trap entry path; any value
	 * other than 1 here means we faulted while already handling a fault.
	 */
	if(pagefault_count != 1)
		minix_panic("recursive pagefault", pagefault_count);

	/* Don't schedule this process until pagefault is handled. */
	if(RTS_ISSET(pr, PAGEFAULT))
		minix_panic("PAGEFAULT set", pr->p_endpoint);
	RTS_LOCK_SET(pr, PAGEFAULT);

	if(pr->p_endpoint <= INIT_PROC_NR) {
		/* Page fault we can't / don't want to
		 * handle.
		 */
		kprintf("pagefault for process %d ('%s'), pc = 0x%x\n",
			pr->p_endpoint, pr->p_name, pr->p_reg.pc);
		proc_stacktrace(pr);
  		minix_panic("page fault in system process", pr->p_endpoint);

		/* Not reached: minix_panic does not return here. */
		return;
	}

	/* Save pagefault details, suspend process,
	 * add process to pagefault chain,
	 * and tell VM there is a pagefault to be
	 * handled.
	 */
	pr->p_pagefault.pf_virtual = pagefault_cr2;	/* faulting address from CR2 */
	pr->p_pagefault.pf_flags = trap_errno;
	pr->p_nextpagefault = pagefaults;		/* push onto fault chain */
	pagefaults = pr;
	lock_notify(HARDWARE, VM_PROC_NR);

	pagefault_count = 0;

#if 0
	kprintf("pagefault for process %d ('%s'), pc = 0x%x\n",
			pr->p_endpoint, pr->p_name, pr->p_reg.pc);
	proc_stacktrace(pr);
#endif

	return;
}
Beispiel #4
0
/*===========================================================================*
 *			panic                                        *
 *===========================================================================*/
PUBLIC void panic(char *what, char *mess,int nr)
{
  /* Compatibility entry point for library code that wants to panic.
   * In user space a library panic would printf() and exit the process,
   * neither of which applies inside the kernel, so simply forward the
   * message and number to the kernel's own panic routine.  The 'what'
   * prefix argument is not used here.
   */
  minix_panic(mess, nr);
}
Beispiel #5
0
PUBLIC void vm_init(struct proc *newptproc)
{
	/* Switch to the page tables of 'newptproc' and turn on paging.
	 * Calling this while VM is already running is a fatal error.
	 */
	if (vm_running)
		minix_panic("vm_init: vm_running", NO_NUM);

	vm_set_cr3(newptproc);		/* load the process' page directory */
	level0(vm_enable_paging);	/* enable paging at kernel privilege */
	vm_running = 1;			/* record that paging is now active */
}
Beispiel #6
0
/* Register an interrupt handler.
 *
 * Appends 'hook' to the handler chain of 'irq', assigning it the lowest
 * free one-bit id on that chain, and unmasks the irq at the hardware once
 * no ids are marked active.  Re-registering the same hook is a no-op.
 *
 * Fix: the free-id search previously shifted a signed int until it
 * overflowed to zero, which is undefined behavior; the scan now uses an
 * unsigned mask whose wrap to zero is well-defined.
 */
PUBLIC void put_irq_handler( irq_hook_t* hook, int irq, irq_handler_t handler)
{
    int id;
    irq_hook_t **line;
    unsigned long bitmap;
    unsigned mask;

    if( irq < 0 || irq >= NR_IRQ_VECTORS )
        minix_panic("invalid call to put_irq_handler", irq);

    line = &irq_handlers[irq];

    /* Walk the chain: return if this hook is already present, and
     * collect the ids that are in use.
     */
    bitmap = 0;
    while ( *line != NULL ) {
        if(hook == *line) return; /* extra initialization */
        bitmap |= (*line)->id;	/* mark ids in use */
        line = &(*line)->next;
    }

    /* Find the lowest id not in use.  The mask is unsigned so that the
     * terminating wrap to 0 after the top bit is well-defined.
     */
    for (mask = 1; mask != 0; mask <<= 1)
        if (!(bitmap & mask)) break;
    id = (int) mask;

    if(id == 0)
        minix_panic("Too many handlers for irq", irq);

    hook->next = NULL;
    hook->handler = handler;
    hook->irq = irq;
    hook->id = id;
    *line = hook;		/* append at the end of the chain */
    irq_use |= 1 << irq;  /* this does not work for irq >= 32 */

    /* And as last enable the irq at the hardware.
     *
     * Internal this activates the line or source of the given interrupt.
     */
    if((irq_actids[hook->irq] &= ~hook->id) == 0) {
        hw_intr_unmask(hook->irq);
    }
}
Beispiel #7
0
PRIVATE u32_t phys_get32(phys_bytes addr)
{
	/* Read a 32-bit word from physical address 'addr'. */
	u32_t value;
	int rc;

	if(vm_running) {
		/* Paging is enabled: go through the checked copy path. */
		rc = lin_lin_copy(NULL, addr,
			proc_addr(SYSTEM), vir2phys(&value), sizeof(value));
		if(rc != OK)
			minix_panic("lin_lin_copy for phys_get32 failed", rc);
	} else {
		/* Paging not yet enabled: a raw physical copy suffices. */
		phys_copy(addr, vir2phys(&value), sizeof(value));
	}

	return value;
}
Beispiel #8
0
/*===========================================================================*
 *				sys_task				     *
 *===========================================================================*/
PUBLIC void sys_task()
{
/* Main entry point of sys_task.  Get the message and dispatch on type.
 * Fix: removed the unused local 'int n = 0;' and corrected the
 * copy-pasted comment on the ECALLDENIED branch.
 */
  static message m;	/* message buffer for both input and output */
  register int result;	/* result returned by the handler */
  register struct proc *caller_ptr;	/* process slot of the caller */
  int s;		/* lock_send() status */
  int call_nr;		/* call number relative to KERNEL_CALL */

  /* Initialize the system task. */
  initialize();


  while (TRUE) {
      struct proc *restarting;

      /* A kernel call suspended for VM may be ready to restart; if so,
       * vmrestart_check() supplies the saved request in 'm'.
       */
      restarting = vmrestart_check(&m);

      if(!restarting) {
        int r;
	/* Get work. Block and wait until a request message arrives. */
	if((r=receive(ANY, &m)) != OK)
		minix_panic("receive() failed", r);
      } 

      sys_call_code = (unsigned) m.m_type;
      call_nr = sys_call_code - KERNEL_CALL;	
      who_e = m.m_source;
      okendpt(who_e, &who_p);
      caller_ptr = proc_addr(who_p);

      /* See if the caller made a valid request and try to handle it. */
      if (call_nr < 0 || call_nr >= NR_SYS_CALLS) {	/* check call number */
	  kprintf("SYSTEM: illegal request %d from %d.\n",
		call_nr,m.m_source);
	  result = EBADREQUEST;			/* illegal message type */
      } 
      else if (!GET_BIT(priv(caller_ptr)->s_k_call_mask, call_nr)) {
	  result = ECALLDENIED;			/* caller may not use this call */
      }
      else {
          result = (*call_vec[call_nr])(&m); /* handle the system call */
      }

      if(result == VMSUSPEND) {
	/* Special case: message has to be saved for handling
	 * until VM tells us it's allowed. VM has been notified
	 * and we must wait for its reply to restart the call.
	 */
        vmassert(RTS_ISSET(caller_ptr, VMREQUEST));
	vmassert(caller_ptr->p_vmrequest.type == VMSTYPE_KERNELCALL);
	memcpy(&caller_ptr->p_vmrequest.saved.reqmsg, &m, sizeof(m));
      } else if (result != EDONTREPLY) {
	/* Send a reply, unless inhibited by a handler function.
	 * Use the kernel function lock_send() to prevent a system
	 * call trap.
	 */
		if(restarting) {
        		vmassert(!RTS_ISSET(restarting, VMREQUEST));
#if 0
        		vmassert(!RTS_ISSET(restarting, VMREQTARGET));
#endif
		}
		m.m_type = result;		/* report status of call */
		if(WILLRECEIVE(caller_ptr, SYSTEM)) {
		  if (OK != (s=lock_send(m.m_source, &m))) {
			kprintf("SYSTEM, reply to %d failed: %d\n",
			m.m_source, s);
		  }
		} else {
			kprintf("SYSTEM: not replying to %d; not ready\n", 
				caller_ptr->p_endpoint);
		}
	}
  }
}
Beispiel #9
0
/*===========================================================================*
 *				lin_lin_copy				     *
 *===========================================================================*/
/* Copy 'bytes' bytes from linear address 'srclinaddr' (address space of
 * 'srcproc') to linear address 'dstlinaddr' (address space of 'dstproc'),
 * mapping each side in chunks through temporary page directory entries.
 * NOTE(review): the meaning of a NULL proc pointer and the actual chunk
 * clamping are defined by the CREATEPDE macro elsewhere — the inline
 * comment below says "4MB ranges"; confirm against the macro definition.
 * Returns (via NOREC_RETURN) OK on success, or EFAULT_SRC / EFAULT_DST
 * when a page fault was caught inside the source / destination range.
 */
PRIVATE int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr, 
	struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
	u32_t addr;
	int procslot;

	/* Guard against unintended recursion into this routine. */
	NOREC_ENTER(linlincopy);

	/* Sanity checks: paging must be running, spare PDEs must be
	 * available, and the loaded cr3 must match the page-table owner.
	 */
	vmassert(vm_running);
	vmassert(nfreepdes >= 3);

	vmassert(ptproc);
	vmassert(proc_ptr);
	vmassert(read_cr3() == ptproc->p_seg.p_cr3);

	procslot = ptproc->p_nr;

	vmassert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);

	while(bytes > 0) {
		phys_bytes srcptr, dstptr;
		vir_bytes chunk = bytes;	/* may be clamped by CREATEPDE */
		int srcpde, dstpde;
		int srctype, dsttype;

		/* Set up 4MB ranges. */
		inusepde = NOPDE;
		CREATEPDE(srcproc, srcptr, srclinaddr, chunk, bytes, srcpde, srctype);
		CREATEPDE(dstproc, dstptr, dstlinaddr, chunk, bytes, dstpde, dsttype);

		/* Copy pages.  A fault during the copy leaves the faulting
		 * address in 'addr'; 'addr' == 0 means no fault occurred.
		 */
		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);

		DONEPDE(srcpde);
		DONEPDE(dstpde);

		if(addr) {
			/* If addr is nonzero, a page fault was caught. */

			if(addr >= srcptr && addr < (srcptr + chunk)) {
				WIPEPDE(srcpde);
				WIPEPDE(dstpde);
				NOREC_RETURN(linlincopy, EFAULT_SRC);
			}
			if(addr >= dstptr && addr < (dstptr + chunk)) {
				WIPEPDE(srcpde);
				WIPEPDE(dstpde);
				NOREC_RETURN(linlincopy, EFAULT_DST);
			}

			/* The fault lies in neither mapped range. */
			minix_panic("lin_lin_copy fault out of range", NO_NUM);

			/* Not reached. */
			NOREC_RETURN(linlincopy, EFAULT);
		}

		WIPEPDE(srcpde);
		WIPEPDE(dstpde);

		/* Update counter and addresses for next iteration, if any. */
		bytes -= chunk;
		srclinaddr += chunk;
		dstlinaddr += chunk;
	}

	NOREC_RETURN(linlincopy, OK);
}
Beispiel #10
0
/* Build the kernel page directory and page tables in the memory range
 * starting at vm_base, identity-map physical memory up to vm_mem_high,
 * install the new page directory in every process slot, and enable
 * paging.  Idempotent: after the first successful call it returns
 * immediately.
 *
 * Fix: removed the unused local 'u32_t mycr3;' from the cr3 loop and
 * normalized the space-indented loop to the file's tab style.
 */
PUBLIC void vm_init(void)
{
	int o;
	phys_bytes p, pt_size;
	phys_bytes vm_dir_base, vm_pt_base, phys_mem;
	u32_t entry;
	unsigned pages;
	struct proc* rp;
	struct proc *sys = proc_addr(SYSTEM);
	static int init_done = 0;	/* set once paging has been enabled */

	if (!vm_size)
		minix_panic("i386_vm_init: no space for page tables", NO_NUM);

	if(init_done)
		return;

	/* Align page directory */
	o= (vm_base % I386_PAGE_SIZE);
	if (o != 0)
		o= I386_PAGE_SIZE-o;
	vm_dir_base= vm_base+o;

	/* Page tables start after the page directory */
	vm_pt_base= vm_dir_base+I386_PAGE_SIZE;

	pt_size= (vm_base+vm_size)-vm_pt_base;
	pt_size -= (pt_size % I386_PAGE_SIZE);	/* whole pages only */

	/* Compute the number of pages based on vm_mem_high */
	pages= (vm_mem_high-1)/I386_PAGE_SIZE + 1;

	if (pages * I386_VM_PT_ENT_SIZE > pt_size)
		minix_panic("i386_vm_init: page table too small", NO_NUM);

	/* Fill the page tables: identity-map each physical page, leaving
	 * entries beyond vm_mem_high unmapped (entry == 0).
	 */
	for (p= 0; p*I386_VM_PT_ENT_SIZE < pt_size; p++)
	{
		phys_mem= p*I386_PAGE_SIZE;
		entry= phys_mem | I386_VM_USER | I386_VM_WRITE |
			I386_VM_PRESENT;
		if (phys_mem >= vm_mem_high)
			entry= 0;
#if VM_KERN_NOPAGEZERO
		/* Leave the pages at the start of the system process text
		 * and data segments unmapped (see VM_KERN_NOPAGEZERO).
		 */
		if (phys_mem == (sys->p_memmap[T].mem_phys << CLICK_SHIFT) ||
		    phys_mem == (sys->p_memmap[D].mem_phys << CLICK_SHIFT)) {
			entry = 0;
		}
#endif
		phys_put32(vm_pt_base + p*I386_VM_PT_ENT_SIZE, entry);
	}

	/* Fill the page directory: one present entry per page table. */
	for (p= 0; p < I386_VM_DIR_ENTRIES; p++)
	{
		phys_mem= vm_pt_base + p*I386_PAGE_SIZE;
		entry= phys_mem | I386_VM_USER | I386_VM_WRITE |
			I386_VM_PRESENT;
		if (phys_mem >= vm_pt_base + pt_size)
			entry= 0;
		phys_put32(vm_dir_base + p*I386_VM_PT_ENT_SIZE, entry);
	}


	/* Set this cr3 in all currently running processes for
	 * future context switches.
	 */
	for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
		if(isemptyp(rp)) continue;
		rp->p_seg.p_cr3 = vm_dir_base;
	}

	kernel_cr3 = vm_dir_base;

	/* Set this cr3 now (not active until paging enabled). */
	vm_set_cr3(vm_dir_base);

	/* Actually enable paging (activating cr3 load above). */
	level0(vm_enable_paging);

	/* Don't do this init in the future. */
	init_done = 1;
	vm_running = 1;
}