Example #1
/*
 * Read character
 */
getchar() {
	char c;

	if(!standalone) {
		return(unix_read_char(0, &c, 1));
	}
	}
	else {
		while (!char_ready)
			thread_switch(MACH_PORT_NULL,
				      SWITCH_OPTION_DEPRESS, 0);
		char_ready = FALSE;
		return(cur_char);
	}
}
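The standalone branch above just polls char_ready until an interrupt-time callback delivers a character. A minimal sketch of that delivery side, assuming the cur_char/char_ready globals from the snippet; the function name and argument are hypothetical, not taken from the original source:

/* Hypothetical console-input callback for the standalone path above.
 * Whoever receives the keystroke latches it into cur_char and raises
 * char_ready, which ends the polling loop in getchar(). */
void
console_input(char c)
{
	cur_char = c;       /* latch the character for getchar() */
	char_ready = TRUE;  /* getchar()'s while-loop exits on this */
}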
Example #2
File: tty.c Project: DIKU-EDU/kudos
/**
 * Writes len bytes from buffer buf to the tty device
 * pointed to by gcd. Implements write for the gcd interface.
 *
 * @param gcd Pointer to the tty device.
 * @param buf Buffer to be written from.
 * @param len Number of bytes to be written.
 *
 * @return Number of successfully written characters.
 */
static int tty_write(gcd_t *gcd, const void *buf, int len) {
  interrupt_status_t intr_status;
  volatile tty_io_area_t *iobase = (tty_io_area_t *)gcd->device->io_address;
  volatile tty_real_device_t *tty_rd
    = (tty_real_device_t *)gcd->device->real_device;
  int i;

  intr_status = _interrupt_disable();
  spinlock_acquire(tty_rd->slock);

  i = 0;
  while (i < len) {
    while (tty_rd->write_count > 0) {
      /* buffer contains data, so wait until empty. */
      sleepq_add((void *)tty_rd->write_buf);
      spinlock_release(tty_rd->slock);
      thread_switch();
      spinlock_acquire(tty_rd->slock);
    }

    /* Fill internal buffer. */
    while (tty_rd->write_count < TTY_BUF_SIZE  && i < len) {
      int index;
      index = (tty_rd->write_head + tty_rd->write_count) % TTY_BUF_SIZE;
      tty_rd->write_buf[index] = ((char *)buf)[i++];
      tty_rd->write_count++;
    }

    /* If the device is not currently busy, write one character to
       cause an interrupt. Head and count are adjusted so that the
       first character is not written twice. The rest of the buffer is
       written by the interrupt handler.

       If the device is busy, an interrupt will arrive by itself and
       the whole buffer will be written by the interrupt handler.
    */
    if (!TTY_STATUS_WBUSY(iobase->status)) {
      iobase->data = tty_rd->write_buf[tty_rd->write_head];
      tty_rd->write_head = (tty_rd->write_head + 1) % TTY_BUF_SIZE;
      tty_rd->write_count--;
    }

  }

  spinlock_release(tty_rd->slock);
  _interrupt_set_state(intr_status);

  return i;
}
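The comment inside tty_write() defers most of the output to the transmit interrupt handler. A minimal sketch of that side, assuming the same tty_real_device_t fields and the KUDOS/Buenos sleepq_wake() primitive; the project's real handler also covers the receive path and is not reproduced here:

/* Sketch of the transmit half of the tty interrupt handler assumed by
 * tty_write() above. Assumes tty_rd->slock is already held with
 * interrupts disabled when this runs. */
static void tty_write_interrupt_sketch(volatile tty_io_area_t *iobase,
                                       volatile tty_real_device_t *tty_rd)
{
  /* Drain the software buffer while the device can accept data. */
  while (tty_rd->write_count > 0 && !TTY_STATUS_WBUSY(iobase->status)) {
    iobase->data = tty_rd->write_buf[tty_rd->write_head];
    tty_rd->write_head = (tty_rd->write_head + 1) % TTY_BUF_SIZE;
    tty_rd->write_count--;
  }

  /* Buffer empty: wake writers sleeping on write_buf in tty_write(). */
  if (tty_rd->write_count == 0)
    sleepq_wake((void *)tty_rd->write_buf);
}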
Example #3
void do_test(void)
{
	thread_setup(&a_thr, (reg_t) a_thread, SP(a_stack));
	thread_setup(&b_thr, (reg_t) b_thread, SP(b_stack));
	thread_setup(&c_thr, (reg_t) c_thread, SP(c_stack));
	initrd_thrs[INITRD_APPS-1].next = &a_thr;
	a_thr.next = &b_thr;
	b_thr.next = &c_thr;
	c_thr.next = &initrd_thrs[0];

	CAP_TCB(&a_thr, 1) = &initrd_thrs[0];//ipc server /usr/wtest
	CAP_TCB(&b_thr, 1) = &initrd_thrs[0];//ipc server /usr/wtest
	CAP_TCB(&c_thr, 1) = &initrd_thrs[0];//ipc server /usr/wtest
	CAP_TCB(&a_thr, 0) = &idle_thr;//ipc server idle
	CAP_TCB(&b_thr, 0) = &idle_thr;//ipc server idle
	CAP_TCB(&c_thr, 0) = &idle_thr;//ipc server idle
	CAP_TCB(&c_thr, 2) = &initrd_thrs[5];//fault server /dev/ata

	thread_switch(&initrd_thrs[4]);//fault server /dev/fd0
	ad_t vir = 0x80000000, phy = 0x100000;
	printstr("/dev/fd0: server mem v_map to fd0mem...");
	for (int i = 0; i < 0x168; i++) {
		/*printstr("v_map(vir=");
		printint(vir);
		printstr(", phy=");
		printint(phy);
		printstr(")\n");*/
		v_map(vir, phy, PG_W | PG_U);
		vir += PGSIZE;
		phy += PGSIZE;
	}
	printstr("done\n");
	/*printstr("/svr/fat: set mem server (cid 2) to /dev/fd0\n");
	CAP_TCB(&initrd_thrs[5], 2) = &initrd_thrs[4];//fault server /dev/fd0*/
	thread_switch(&initrd_thrs[5]);//fault server /dev/ata
}
Example #4
/*
 * Yield the cpu to another process, and go to sleep, on the specified
 * wait channel WC, whose associated spinlock is LK. Calling wakeup on
 * the channel will make the thread runnable again. The spinlock must
 * be locked. The call to thread_switch unlocks it; we relock it
 * before returning.
 */
void
wchan_sleep(struct wchan *wc, struct spinlock *lk)
{
	/* may not sleep in an interrupt handler */
	KASSERT(!curthread->t_in_interrupt);

	/* must hold the spinlock */
	KASSERT(spinlock_do_i_hold(lk));

	/* must not hold other spinlocks */
	KASSERT(curcpu->c_spinlocks == 1);

	thread_switch(S_SLEEP, wc, lk);
	spinlock_acquire(lk);
}
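The wakeup mentioned in the comment is wchan_wakeone()/wchan_wakeall() in OS/161. A minimal sketch of the single-wakeup variant, assuming the wait channel keeps its sleepers in a threadlist protected by the same spinlock; details vary between OS/161 versions:

/*
 * Wake one thread sleeping on wait channel WC. The associated spinlock
 * LK must be held, mirroring wchan_sleep() above. Sketch only: field
 * and helper names follow common OS/161 conventions.
 */
void
wchan_wakeone(struct wchan *wc, struct spinlock *lk)
{
	struct thread *target;

	KASSERT(spinlock_do_i_hold(lk));

	target = threadlist_remhead(&wc->wc_threads);
	if (target == NULL) {
		/* Nobody was sleeping; the wakeup is simply lost. */
		return;
	}
	thread_make_runnable(target, false);
}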
Example #5
// yield is very similar to thread_fork, with the main difference being that 
// it is pulling the next thread to run off of the ready list instead of creating it. yield should:
void yield(){
	// If the current thread is not DONE, set its state to READY and enqueue it on the ready list.
	if(current_thread->state != DONE && current_thread->state != BLOCKED){
		current_thread->state = READY;
		thread_enqueue(&ready_list, current_thread);
	}
	// Dequeue the next thread from the ready list and set its state to RUNNING.
	struct thread* next_thread = thread_dequeue(&ready_list);
	next_thread->state = RUNNING;
	// Save a pointer to the current thread in a temporary variable, then set the current thread to the next thread.
	struct thread* temp = current_thread;
	current_thread = next_thread;
	// Call thread_switch with the old current thread as old and the new current thread as new.
	thread_switch(temp, current_thread);
}
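The thread_fork comparison in the comment is easier to see side by side. A minimal sketch under the same data structures, where thread_new() is a hypothetical constructor for the new thread's stack and context (not part of the snippet above):

/* Sketch of the thread_fork the comment above compares against: like
 * yield(), it re-queues the caller and switches, but the thread it
 * switches to is freshly created instead of dequeued from ready_list.
 * thread_new() is a hypothetical helper, not from the original code. */
void thread_fork(void (*func)(void)){
	struct thread* new_thread = thread_new(func);

	current_thread->state = READY;
	thread_enqueue(&ready_list, current_thread);

	new_thread->state = RUNNING;
	struct thread* temp = current_thread;
	current_thread = new_thread;
	thread_switch(temp, current_thread);
}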
Example #6
/* We add the thread to the sleep queue and sleep; when we wake again, we
 * reacquire the lock. */
void condition_wait(cond_t *cond, lock_t *condition_lock){

    interrupt_status_t intr_status;
    intr_status = _interrupt_disable();
    spinlock_acquire(&(cond->spinlock));

    sleepq_add(cond);

    spinlock_release(&(cond->spinlock));

    thread_switch();

    lock_acquire(condition_lock);

    _interrupt_set_state(intr_status);
}
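For the wait above to return, something must wake the sleep queue entry added on cond. A minimal sketch of the signalling side, assuming the Buenos/KUDOS sleepq_wake()/sleepq_wake_all() primitives handle their own synchronization:

/* Sketch of the signalling counterpart to condition_wait() above:
 * the caller holds condition_lock; we only wake sleepers queued on
 * the condition variable itself. */
void condition_signal(cond_t *cond, lock_t *condition_lock){
    (void) condition_lock;
    sleepq_wake(cond);
}

/* Waking every waiter instead of just one: */
void condition_broadcast(cond_t *cond, lock_t *condition_lock){
    (void) condition_lock;
    sleepq_wake_all(cond);
}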
Example #7
void lock_acquire( lock_t *lock ) {
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&lock->slock);

    while (lock->locked == LOCK_LOCKED) {
        sleepq_add(lock);
        spinlock_release(&lock->slock);
        thread_switch();
    }
    lock->locked = LOCK_LOCKED;
    
    spinlock_release(&lock->slock);
    _interrupt_set_state(intr_status);
    
}
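The matching release clears the flag and wakes one thread sleeping on the lock address used in sleepq_add() above; a minimal sketch, assuming a LOCK_FREE constant and the sleepq_wake() primitive from the same codebase:

/* Sketch of the release side for lock_acquire() above. */
void lock_release( lock_t *lock ) {
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&lock->slock);

    lock->locked = LOCK_FREE;   /* assumed counterpart of LOCK_LOCKED */
    sleepq_wake(lock);          /* wake one thread sleeping on the lock */

    spinlock_release(&lock->slock);
    _interrupt_set_state(intr_status);
}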
Example #8
void semaphore_P(semaphore_t *sem)
{
  interrupt_status_t intr_status;
  
  intr_status = _interrupt_disable();
  spinlock_acquire(&sem->slock);
  
  sem->value--;
  if (sem->value < 0) {
    sleepq_add(sem);
    spinlock_release(&sem->slock);
    thread_switch();
  } else {
    spinlock_release(&sem->slock);
  }
  _interrupt_set_state(intr_status);
}
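For reference, the V operation mirrors this: increment the counter and, if the value is still non-positive (some thread decremented it below zero and went to sleep), wake one sleeper. A minimal sketch under the same assumptions:

/* Sketch of the matching V operation for semaphore_P() above. */
void semaphore_V(semaphore_t *sem)
{
  interrupt_status_t intr_status;

  intr_status = _interrupt_disable();
  spinlock_acquire(&sem->slock);

  sem->value++;
  if (sem->value <= 0) {
    /* At least one thread slept in semaphore_P(); wake exactly one. */
    sleepq_wake(sem);
  }

  spinlock_release(&sem->slock);
  _interrupt_set_state(intr_status);
}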
Example #9
File: usr_sem.c Project: kazyka/OSM
int usr_sem_procure(usr_sem_t* sem) {
  interrupt_status_t intr_status;
  intr_status = _interrupt_disable();

  spinlock_acquire(&(sem->sem_slock));

  sem->value--;
  while (sem->value < 0) {
    sleepq_add(&(sem->value));
    spinlock_release(&(sem->sem_slock));
    thread_switch();
    spinlock_acquire(&(sem->sem_slock));
  }

  spinlock_release(&(sem->sem_slock));
  _interrupt_set_state(intr_status);
  return 0;
}
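The procure above sleeps on &sem->value, so its counterpart must wake that same address after incrementing. A minimal sketch; the actual kazyka/OSM implementation may differ in details:

/* Sketch of the release counterpart to usr_sem_procure() above. */
int usr_sem_vacate(usr_sem_t* sem) {
  interrupt_status_t intr_status;
  intr_status = _interrupt_disable();

  spinlock_acquire(&(sem->sem_slock));

  sem->value++;
  sleepq_wake(&(sem->value));   /* wake one thread sleeping in procure */

  spinlock_release(&(sem->sem_slock));
  _interrupt_set_state(intr_status);
  return 0;
}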
Example #10
QTSSvrControlThread::QTSSvrControlThread()
:   fMessagePort(0),
    fDone(false), fErrorOccurred(false), fDoneStartingUp(false), fThreadsAllocated(false)
{
    kern_return_t r;
    
    r = ::port_allocate(task_self(), &fMessagePort);
    if (r != SCNoError)
    {
        QTSSModuleUtils::LogError(qtssFatalVerbosity, sCantAllocateErr, 0);
        fErrorOccurred = true;
        fDoneStartingUp = true;
        return;
    }
    
    for (int x = 0; x < 5; x++)
    {
        r = ::bootstrap_register(bootstrap_port, "QuickTimeStreamingServer", fMessagePort);
        //sometimes when restarting the server right after the server has gone away,
        //this can fail... so let's retry a couple of times
        if (r != SCNoError)
            thread_switch(THREAD_NULL, SWITCH_OPTION_WAIT, 1000);
        else    
            break;          
    }

    if (r != SCNoError)
    {
        QTSSModuleUtils::LogError(qtssFatalVerbosity, sCantRegisterErr, 0);
        fErrorOccurred = true;
        fDoneStartingUp = true;
        return;
    }
    
    //I'm just assuming this always succeeds because the Mach documentation doesn't say
    //anything about it failing!
    fThreadID = ::cthread_fork((cthread_fn_t)_Entry, (any_t)this);
    fHistoryThreadID = ::cthread_fork((cthread_fn_t)_HistoryEntry, (any_t)this);
    fThreadsAllocated = true;
    
    while (!fDoneStartingUp)
        ::cthread_yield();
}
Example #11
/*
 * Helper function for creating schedulers. Force a switch into a specific
 * thread. Nothing special about it, except that the target thread must
 * remain locked across the switch.
 */
void
pthread_sched_switchto(pthread_thread_t *pthread)
{
	pthread_thread_t	*cur = CURPTHREAD();
	int			enabled, preemptable;

	save_preemption_enable(preemptable);
	enabled = save_disable_interrupts();
	
	/* Don't worry about the lock */
	thread_switch(pthread, &cur->schedlock, CURPTHREAD());

	/* Must set the current thread pointer! */
	SETCURPTHREAD(cur);

	restore_interrupt_enable(enabled);
	/* Need to really *restore* the flag since we are in a new context */
	PREEMPT_ENABLE = preemptable;
}
Example #12
int process_join(process_id_t pid) {
// kprintf("PROCESS JOIN ER STARTET\n");

 spinlock_t lock;
   if (process_table[pid].parent_id != process_get_current_process())
     return PROCESS_ILLEGAL_JOIN;

//  kprintf("PROCESS JOIN ER LEGAL\n");
  // disable interrupts.
  _interrupt_disable();
//  kprintf("interrupts disabled\n"); 
  //acquire the resource spinlock
  spinlock_reset(&lock);
  spinlock_acquire(&lock);
//  kprintf("LOCK er ACQUIRED\n");
  //add to sleeq..
  process_table[process_get_current_process()].state = WAITING;
  while(!(process_table[pid].state == ZOMBIE)) {
   sleepq_add(&process_table[pid]);

  //release the resource spinlock.
   spinlock_release(&lock);
//  kprintf("TRÅD BLIVER SAT I SENG\n");

  //thread_switch()
   thread_switch();

  //Acquire the resource spinlock.
   spinlock_acquire(&lock);
  }

  //Do your duty with the resource (free the process now that it is finished)
  process_table[pid].state = FREE;

  //release the resource spinlock
  spinlock_release(&lock);
  process_table[process_get_current_process()].state = RUNNING;
  //Restore the interrupt mask.
  _interrupt_enable();

//  kprintf("PROCESS_JOIN ER KOMMET IGENNEM\n");
  return process_table[process_get_current_process()].retval;
}
Example #13
void lock_acquire(lock_t *lock){

    interrupt_status_t intr_status;
    intr_status = _interrupt_disable();
    spinlock_acquire(&(lock->spinlock));

    /* If the lock is locked, we put the thread to sleep and check again when
     * the thread is awoken */
    while(lock->locked){
        sleepq_add(lock);
        spinlock_release(&(lock->spinlock));

        /* let the thread sleep */
        thread_switch();
        spinlock_acquire(&(lock->spinlock));
    }

    lock->locked = 1;

    spinlock_release(&(lock->spinlock));
    _interrupt_set_state(intr_status);
}
Example #14
/**
 * Fault Handler: General Protection Fault
 *
 * Kernel: Panic
 * User: Terminate Process
 */
void fault_gp(cpu_int_state_t *state) {
    // Is in kernel?
    if (state->cs == 0x8) {
        console_print("PANIC: General Protection Fault in kernel at ");
        console_print_hex(state->rip);
        console_print(" (error code: ");
        console_print_hex(state->error_code);
        console_print(").\n");
        while (1);
    }

    // TODO: Remove this debug warning
    DEBUG("General Protection Fault at ");
    DEBUG_HEX(state->rip);
    DEBUG(" (error code: ");
    DEBUG_HEX(state->error_code);
    DEBUG(").\n");

    // Terminate process
    process_terminate(process_current->pid);
    thread_switch(scheduler_next(), state);
}
Example #15
uint32_t process_join(process_id_t pid)
{
    process_id_t my_pid;
    uint32_t retval;
    interrupt_status_t intr_status;
  
    my_pid = process_get_current_process();
    if (pid < 0
        || pid >= MAX_PROCESSES
        || process_table[pid].parent != my_pid) {
        return -1;
    }

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    while (process_table[pid].state != PROCESS_ZOMBIE) {
        sleepq_add(&process_table[pid]);
        spinlock_release(&process_table_slock);
        thread_switch();
        spinlock_acquire(&process_table_slock);
    }
    retval = process_table[pid].retval;
    process_table[my_pid].children--;

    /* Let children see it is gone. */
    process_table[pid].retval = -1;
    /* Make sure we can't join it again. */
    process_table[pid].parent = -1;

    if (process_table[pid].children == 0) {
        process_table[pid].state = PROCESS_FREE;
    }

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return retval;
}
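process_join() above only returns once the child marks itself PROCESS_ZOMBIE and wakes the sleepers on its process table entry. A minimal sketch of that exit path, assuming the usual Buenos process_finish() shape; resource cleanup and thread_finish() details are project-specific:

/* Sketch of the exit side that pairs with process_join() above: record
 * the return value, become a zombie, and wake a parent sleeping on this
 * process table entry. thread_finish() does not return. */
void process_finish(uint32_t retval)
{
    interrupt_status_t intr_status;
    process_id_t my_pid = process_get_current_process();

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    process_table[my_pid].retval = retval;
    process_table[my_pid].state = PROCESS_ZOMBIE;
    sleepq_wake(&process_table[my_pid]);

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);

    thread_finish();
}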
Example #16
void QTSSvrControlThread::HistoryEntry()
{
    //compute how often to run this thread.
    QTSSModuleUtils::GetAttribute(sPrefs, "history_update_interval", qtssAttrDataTypeUInt32,
                                &sHistoryIntervalInSecs, &sDefaultHistoryIntervalInSecs, sizeof(sHistoryIntervalInSecs));
    UInt32 theSampleInterval = (sHistoryIntervalInSecs * 1000) / kNumSamplesPerEntry;
    UInt32 theEntryInterval = sHistoryIntervalInSecs * 1000;
    
    //use local time to figure out when we need to move onto a new entry. This
    //will eliminate the possibility that we drift off time.
    
    SInt64 theStartTime = QTSS_Milliseconds();
    Assert(theStartTime > 0);
    
    while (!fDone)
    {
        //sleep for the kHistoryUpdateInterval
        //kHistoryUpdateInterval is in minutes. Convert to msec.
        thread_switch(THREAD_NULL, SWITCH_OPTION_WAIT, theSampleInterval);
        
        //if server is doing a graceful shutdown, this thread is used to periodically
        //poll, checking if all connections are complete
        CheckShutdown();
        
        //every time we wake up, first thing we want to do is sample the
        //current state of the server for the history
        AddHistorySample();
    
        SInt64 theCurrentTime = QTSS_Milliseconds();
        Assert(theCurrentTime > 0);

        if ((theCurrentTime - theStartTime) > theEntryInterval)
        {
            UpdateHistoryArray();
            theStartTime += theEntryInterval;
        }
    }
}
Example #17
void yield()
{
	/* If the current thread is not DONE, set its state to READY and 
	enqueue it on the ready list. */
	if (current_thread->state != DONE) {
		current_thread->state = READY;
		thread_enqueue(&ready_list, current_thread);
//printf("YIELD: current thread not finished, back on queue\n");
	}

	// Dequeue the next thread from the ready list and set its state to RUNNING.
	thread *next_thread = thread_dequeue(&ready_list);

	/* Save a pointer to the current thread in a temporary variable, 
	then set the current thread to the next thread. */
	thread *temp = current_thread;
	current_thread = next_thread;
	
	/* Call thread_switch with the old current thread as old and 
	the new current thread as new. */
	thread_switch(temp, current_thread);
//printf("YIELD: Running dequeued thread\n");
}
Example #18
struct thread *syscall_fork(struct thread *image) {
	struct process *parent;
	struct process *child;

	parent = image->proc;
	child = process_clone(parent, image);

	if (!child) {
		image->eax = -1;
		return image;
	}

	/* (still in parent) Set return value to child's PID */
	image->eax = child->pid;

	/* Switch to child */
	image = thread_switch(image, child->thread[0]);

	/* (now in child) Set return value to 0 */
	image->eax = 0;

	return image;
}
Example #19
void schedule(void)
{
	struct kthread *best, *test;
	struct list_node *ctr;
	int bp;

	/* initialize these guys */
	best = list_node_owner(run_list.next, struct kthread);
	bp = best->priority;

	/* just find the best priority right now */
	/* note that the idle thread should always be on the run list */
	for(ctr = run_list.next->next; ctr != &run_list; ctr = ctr->next) {
		test = list_node_owner(ctr, struct kthread);

		if(test->priority > bp) {
			best = test;
			bp = best->priority;
		}
	}
	if(best != current) {
		thread_switch(current, best);
	}
}
Example #20
/*
 * Cause the current thread to exit.
 *
 * The parts of the thread structure we don't actually need to run
 * should be cleaned up right away. The rest has to wait until
 * thread_destroy is called from exorcise().
 *
 * Does not return.
 */
void
thread_exit(void)
{
	struct thread *cur;

	cur = curthread;

	/*
	 * Detach from our process. You might need to move this action
	 * around, depending on how your wait/exit works.
	 */
	proc_remthread(cur);

	/* Make sure we *are* detached (move this only if you're sure!) */
	KASSERT(cur->t_proc == NULL);

	/* Check the stack guard band. */
	thread_checkstack(cur);

	/* Interrupts off on this processor */
	splhigh();
	thread_switch(S_ZOMBIE, NULL, NULL);
	panic("braaaaaaaiiiiiiiiiiinssssss\n");
}
Example #21
uint32_t process_join(process_id_t pid) {
	interrupt_status_t intr_status;
	uint32_t retval;

	// Disable interrupts and acquire resource lock
	intr_status = _interrupt_disable();
	spinlock_acquire(&process_table_slock);

	// Sleep while the process isn't in its "dying" state.
	while(process_table[pid].state != PROCESS_DYING) {
		sleepq_add(&process_table[pid]);
		spinlock_release(&process_table_slock);
		thread_switch();
		spinlock_acquire(&process_table_slock);
	}

	retval = process_table[pid].retval;
	process_table[pid].state = PROCESS_SLOT_AVAILABLE;

	// Restore interrupts and free our lock
	spinlock_release(&process_table_slock);
	_interrupt_set_state(intr_status);
	return retval;
}
Example #22
/* uVisor expects all calls to its API to be executed in the highest priority
 * interrupt (SVC) since they are not re-entrant.
 * If this is not the case, we need to make sure that no other interrupt can
 * preempt these calls by wrapping them in an atomic section.
 */
static void thread_switch_atomic(void * c)
{
    atomic_call_wrapper(
        thread_switch(c)
    );
}
Example #23
File: main.c Project: cfrost/buenos
void init(void)
{
    TID_t startup_thread;
    int numcpus;

    /* Initialize polling TTY driver for kprintf() usage. */
    polltty_init();

    kwrite("BUENOS is a University Educational Nutshell Operating System\n");
    kwrite("==========================================================\n");
    kwrite("\n");

    kwrite("Copyright (C) 2003-2006  Juha Aatrokoski, Timo Lilja,\n");
    kwrite("  Leena Salmela, Teemu Takanen, Aleksi Virtanen\n");
    kwrite("See the file COPYING for licensing details.\n");
    kwrite("\n");

    kwrite("Initializing memory allocation system\n");
    kmalloc_init();

    kwrite("Reading boot arguments\n");
    bootargs_init();

    /* Seed the random number generator. */
    if (bootargs_get("randomseed") == NULL) {
	_set_rand_seed(0);
    } else {
	int seed = atoi(bootargs_get("randomseed"));
	kprintf("Seeding pseudorandom number generator with %i\n", seed);
	_set_rand_seed(seed);
    }

    numcpus = cpustatus_count();
    kprintf("Detected %i CPUs\n", numcpus);
    KERNEL_ASSERT(numcpus <= CONFIG_MAX_CPUS);

    kwrite("Initializing interrupt handling\n");
    interrupt_init(numcpus);

    kwrite("Initializing threading system\n");
    thread_table_init();

    kwrite("Initializing user process system\n");
    process_init();

    kwrite("Initializing sleep queue\n");
    sleepq_init();

    kwrite("Initializing semaphores\n");
    semaphore_init();

    kwrite("Initializing device drivers\n");
    device_init();

    kprintf("Initializing virtual filesystem\n");
    vfs_init();

    kwrite("Initializing scheduler\n");
    scheduler_init();

    kwrite("Initializing virtual memory\n");
    vm_init();

    kprintf("Creating initialization thread\n");
    startup_thread = thread_create(&init_startup_thread, 0);
    thread_run(startup_thread);

    kprintf("Starting threading system and SMP\n");

    /* Let other CPUs run */
    kernel_bootstrap_finished = 1;
    
    _interrupt_clear_bootstrap();
    _interrupt_enable();

    /* Enter context switch, scheduler will be run automatically,
       since thread_switch() behaviour is identical to timer tick
       (thread timeslice is over). */
    thread_switch();

    /* We should never get here */
    KERNEL_PANIC("Threading system startup failed.");
}
Example #24
void condition_wait (cond_t *cond, lock_t *lock ) {
    lock_release(lock);
    sleepq_add(cond);
    thread_switch();
    lock_acquire(lock);
}
Example #25
int schedule()
{
	tcb_t *scheduled = schedule_select();
	thread_switch(scheduled);
	return 1;
}
Example #26
void handle_kernel_bp_hits(mach_port_t target_thread_port, uint64_t looper_pc, uint64_t breakpoint, breakpoint_callback callback) {
  // get the target thread's thread_t
  uint64_t thread_port_addr = find_port_address(target_thread_port, MACH_MSG_TYPE_COPY_SEND);
  uint64_t thread_t_addr = rk64(thread_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT));
  
  while (1) {
    uint64_t looper_saved_state = 0;
    int found_it = 0;
    while (!found_it) {
      if (syscall_complete) {
        return;
      }
      // we've pinned ourselves to the same core, so if we're running, it isn't...
      // in some ways this code is very racy, but once we have actually detected that the
      // target thread has hit the breakpoint it should be safe until we restart it,
      // and up until then we don't do anything too dangerous...
      
      
      // get the kstack pointer
      uint64_t kstackptr = rk64(thread_t_addr + koffset(KSTRUCT_OFFSET_THREAD_KSTACKPTR));
      
      printf("kstackptr: %llx\n", kstackptr);
      
      // get the thread_kernel_state
      // the stack lives below kstackptr, and kstackptr itself points to a struct thread_kernel_state:
      // the first bit of that is just an arm_context_t:
      // this is the scheduled-off state
      arm_context_t saved_ksched_state = {0};
      kmemcpy((uint64_t)&saved_ksched_state, kstackptr, sizeof(arm_context_t));
      
      // get the saved stack pointer
      uint64_t sp = saved_ksched_state.ss.ss_64.sp;
      printf("sp: %llx\n", sp);
      
      if (sp == 0) {
        continue;
      }
      
      uint64_t stack[128] = {0};
      
      // walk up from there and look for the saved state dumped by the fiq:
      // note that it won't be right at the bottom of the stack
      // instead there are the frames for:
      //   ast_taken_kernel       <-- above this is the saved state which will get restored when the hw bp spinner gets rescheduled
      //     thread_block_reason
      //       thread_invoke
      //         machine_switch_context
      //           Switch_context <-- the frame actually at the bottom of the stack
      
      // should probably walk those stack frames properly, but this will do...
      
      // grab the stack
      kmemcpy((uint64_t)&stack[0], sp, sizeof(stack));
      //for (int i = 0; i < 128; i++) {
      //  printf("%016llx\n", stack[i]);
      //}
      
      for (int i = 0; i < 128; i++) {
        uint64_t flavor_and_count = stack[i];
        if (flavor_and_count != (ARM_SAVED_STATE64 | (((uint64_t)ARM_SAVED_STATE64_COUNT) << 32))) {
          continue;
        }
        
        arm_context_t* saved_state = (arm_context_t*)&stack[i];
        
        if (saved_state->ss.ss_64.pc != looper_pc) {
          continue;
        }
        
        found_it = 1;
        looper_saved_state = sp + (i*sizeof(uint64_t));
        printf("found the saved state probably at %llx\n", looper_saved_state); // should walk the stack properly..
        break;
      }
      
      if (!found_it) {
        printf("unable to find the saved scheduler tick state on the stack, waiting a bit then trying again...\n");
        sleep(1);
        return;
      }
      
    }
    
    
    
    // now keep walking up and find the saved state for the code which hit the BP:
    uint64_t bp_hitting_state = looper_saved_state + sizeof(arm_context_t);
    found_it = 0;
    for (int i = 0; i < 1000; i++) {
      uint64_t flavor_and_count = rk64(bp_hitting_state);
      if (flavor_and_count != (ARM_SAVED_STATE64 | (((uint64_t)ARM_SAVED_STATE64_COUNT) << 32))) {
        bp_hitting_state += 8;
        continue;
      }
      
      arm_context_t bp_context;
      kmemcpy((uint64_t)&bp_context, bp_hitting_state, sizeof(arm_context_t));
      
      for (int i = 0; i < 40; i++) {
        uint64_t* buf = (uint64_t*)&bp_context;
        printf("%016llx\n", buf[i]);
      }
      
      if (bp_context.ss.ss_64.pc != breakpoint) {
        printf("hummm, found an unexpected breakpoint: %llx\n", bp_context.ss.ss_64.pc);
      }
      
      found_it = 1;
      break;
    }
    
    if (!found_it) {
      printf("unable to find bp hitting state\n");
    }
    
    // fix up the bp hitting state so it will continue (with whatever modifications we want:)
    // get a copy of the state:
    arm_context_t bp_context;
    kmemcpy((uint64_t)&bp_context, bp_hitting_state, sizeof(arm_context_t));
    
    callback(&bp_context);
    
    // write that new state back:
    kmemcpy(bp_hitting_state, (uint64_t)&bp_context, sizeof(arm_context_t));
    
    // unblock the looper:
    wk64(looper_saved_state + offsetof(arm_context_t, ss.ss_64.pc), ksym(KSYMBOL_SLEH_SYNC_EPILOG));
    
    // when it runs again it should break out of the loop and continue the syscall
    // forces us off the core and hopefully it on:
    thread_switch(target_thread_port, 0, 0);
    swtch_pri(0);
    
  }
}
Example #27
File: ipc.c Project: zrho/Carbon
void syscall_ipc_respond(cpu_int_state_t *state) {
	// Check thread role
	if (THREAD_ROLE_IPC_RECEIVER != thread_current->role)
		SYSCALL_RETURN_ERROR(1);

	// Extract arguments
	uint16_t flags = (uint16_t) state->state.rbx;
	uint32_t length = (uint32_t) state->state.rcx;

	// Check length
	if (length > thread_current->ipc_buffer_sz[IPC_BUFFER_SEND])
		SYSCALL_RETURN_ERROR(2);

	// Extract info from role ctx
	ipc_role_ctx_t *role_ctx = (ipc_role_ctx_t *) thread_current->role_ctx;
	uint32_t sender_pid = role_ctx->sender_process;
	uint32_t sender_tid = role_ctx->sender_thread;

	// Sender process still exists?
	process_t *sender_process = process_get(sender_pid);

	if (0 == sender_process) {
		thread_stop(process_current, thread_current);
		thread_switch(scheduler_next(), state);
		return;
	}

	// Sender thread still exists?
	thread_t *sender_thread = thread_get(sender_process, sender_tid);

	if (0 == sender_thread) {
		thread_stop(process_current, thread_current);
		thread_switch(scheduler_next(), state);
		return;
	}

	// Response ignored?
	if (0 != (role_ctx->flags & IPC_FLAG_IGNORE_RESPONSE)) {
		thread_stop(process_current, thread_current);
		thread_switch(sender_thread, state);
		return;
	}

	// Move buffer to sender thread (if length > 0)
	if (length > 0)
		ipc_buffer_move(
				thread_current,
				IPC_BUFFER_SEND,
				sender_thread,
				IPC_BUFFER_RECV,
				sender_process);

	// Write header to registers
	ipc_message_header(
			IPC_BUFFER_RECV,
			length,
			flags,
			process_current->pid,
			sender_thread->tid,
			&sender_thread->state);

	// Thaw thread
	thread_thaw(sender_thread, 0);

	// Stop current thread and switch to sender
	thread_stop(process_current, thread_current);
	thread_switch(sender_thread, state);
}
Example #28
File: main.c Project: JanmanX/KUDOS
void init(void)
{
    TID_t startup_thread;
    int numcpus;

    /* Initialise Static Allocation */
    stalloc_init();

    /* Initialize polling TTY driver for kprintf() usage. */
    polltty_init();

    kwrite("Kudos is an educational operating system by the University of Copenhagen\n");
    kwrite("========================================================================\n");
    kwrite("Based on the Buenos operating system skeleton\n");
    kwrite("\n");

    kprintf("Copyright (C) 2003-2016  Juha Aatrokoski, Timo Lilja,\n");
    kprintf("  Leena Salmela, Teemu Takanen, Aleksi Virtanen, Philip Meulengracht,\n");
    kprintf("  Troels Henriksen, Annie Jane Pinder, Niels Gustav Westphal Serup,\n");
    kprintf("  Nicklas Warming Jacobsen, Oleksandr Shturmov.\n");
    kwrite("See the file COPYING for licensing details.\n");
    kwrite("\n");

    kwrite("Reading boot arguments\n");
    bootargs_init((void*)BOOT_ARGUMENT_AREA);

    /* Seed the random number generator. */
    if (bootargs_get("randomseed") == NULL) {
        _set_rand_seed(0);
    } else {
        int seed = atoi(bootargs_get("randomseed"));
        kprintf("Seeding pseudorandom number generator with %i\n", seed);
        _set_rand_seed(seed);
    }

    numcpus = cpustatus_count();
    kprintf("Detected %i CPUs\n", numcpus);
    KERNEL_ASSERT(numcpus <= CONFIG_MAX_CPUS);

    kwrite("Initializing interrupt handling\n");
    interrupt_init(numcpus);

    kwrite("Initializing threading system\n");
    thread_table_init();

    kwrite("Initializing sleep queue\n");
    sleepq_init();

    kwrite("Initializing semaphores\n");
    semaphore_init();

    kwrite("Initializing device drivers\n");
    device_init();

    kprintf("Initializing virtual filesystem\n");
    vfs_init();

    kwrite("Initializing scheduler\n");
    scheduler_init();

    kwrite("Initializing virtual memory\n");
    vm_init();

    kprintf("Creating initialization thread\n");
    startup_thread = thread_create(&init_startup_thread, 0);
    thread_run(startup_thread);

    kprintf("Starting threading system and SMP\n");

    /* Let other CPUs run */
    kernel_bootstrap_finished = 1;

    _interrupt_clear_bootstrap();
    _interrupt_enable();

    /* Enter context switch, scheduler will be run automatically,
       since thread_switch() behaviour is identical to timer tick
       (thread timeslice is over). */
    thread_switch();

    /* We should never get here */
    KERNEL_PANIC("Threading system startup failed.");
}
Example #29
/*
 * Yield the cpu to another process, but stay runnable.
 */
void
thread_yield(void)
{
	thread_switch(S_READY, NULL, NULL);
}
Example #30
struct thread *init(struct multiboot *mboot, uint32_t mboot_magic) {
	struct process *idle, *init;
	struct module *module;
	struct memory_map *mem_map;
	size_t mem_map_count, i, addr;
	uintptr_t boot_image_size;
	void *boot_image;
	struct elf32_ehdr *init_image;
	struct elf32_ehdr *dl_image;

	/* initialize debugging output */
	debug_init();
	debug_printf("Rhombus Operating System Kernel v0.8a\n");

	/* check multiboot header */
	if (mboot_magic != 0x2BADB002) {
		debug_panic("bootloader is not multiboot compliant");
	}

	/* touch pages for the kernel heap */
	for (i = KSPACE; i < KERNEL_HEAP_END; i += SEGSZ) {
		page_touch(i);
	}

	/* identity map kernel boot frames */
	for (i = KSPACE + KERNEL_BOOT; i < KSPACE + KERNEL_BOOT_END; i += PAGESZ) {
		page_set(i, page_fmt(i - KSPACE, PF_PRES | PF_RW));
	}

	/* parse the multiboot memory map to find the size of memory */
	mem_map       = (void*) (mboot->mmap_addr + KSPACE);
	mem_map_count = mboot->mmap_length / sizeof(struct memory_map);

	for (i = 0; i < mem_map_count; i++) {
		if (mem_map[i].type == 1 && mem_map[i].base_addr_low <= 0x100000) {
			for (addr = 0; addr < mem_map[i].length_low; addr += PAGESZ) {
				frame_add(mem_map[i].base_addr_low + addr);
			}
		}
	}

	/* bootstrap process 0 (idle) */
	idle = process_alloc();
	idle->space = cpu_get_cr3();
	idle->user  = 0;

	/* fork process 1 (init) and switch */
	init = process_clone(idle, NULL);
	process_switch(init);

	/* get multiboot module information */
	if (mboot->mods_count < 3) {
		if (mboot->mods_count < 2) {
			if (mboot->mods_count < 1) {
				debug_panic("no boot or init or dl modules found");
			}
			else {
				debug_panic("no boot or dl modules found");
			}
		}
		else {
			debug_panic("no dl module found");
		}
	}
	module     = (void*) (mboot->mods_addr + KSPACE);
	init_image = (void*) (module[0].mod_start + KSPACE);
	boot_image = (void*) (module[1].mod_start + KSPACE);
	dl_image   = (void*) (module[2].mod_start + KSPACE);
	boot_image_size = module[1].mod_end - module[1].mod_start;

	/* move boot image to BOOT_IMAGE in userspace */
	mem_alloc(BOOT_IMAGE, boot_image_size, PF_PRES | PF_USER | PF_RW);
	memcpy((void*) BOOT_IMAGE, boot_image, boot_image_size);

	/* bootstrap thread 0 in init */
	thread_bind(init->thread[0], init);
	init->thread[0]->useresp = init->thread[0]->stack + SEGSZ;
	init->thread[0]->esp     = (uintptr_t) &init->thread[0]->num;
	init->thread[0]->ss      = 0x23;
	init->thread[0]->ds      = 0x23;
	init->thread[0]->cs      = 0x1B;
	init->thread[0]->eflags  = cpu_get_eflags() | 0x3200; /* IF, IOPL = 3 */

	/* bootstrap idle thread */
	idle->thread[0] = &__idle_thread;
	__idle_thread.proc = idle;

	/* load dl */
	if (elf_check_file(dl_image)) {
		debug_panic("dl.so is not a valid ELF executable");
	}
	elf_load_file(dl_image);

	/* execute init */
	if (elf_check_file(init_image)) {
		debug_panic("init is not a valid ELF executable");
	}
	elf_load_file(init_image);
	init->thread[0]->eip = init_image->e_entry;

	/* register system calls */
	int_set_handler(SYSCALL_SEND, syscall_send);
	int_set_handler(SYSCALL_DONE, syscall_done);
	int_set_handler(SYSCALL_WHEN, syscall_when);
	int_set_handler(SYSCALL_RIRQ, syscall_rirq);
	int_set_handler(SYSCALL_ALSO, syscall_also);
	int_set_handler(SYSCALL_STAT, syscall_stat);
	int_set_handler(SYSCALL_PAGE, syscall_page);
	int_set_handler(SYSCALL_PHYS, syscall_phys);
	int_set_handler(SYSCALL_FORK, syscall_fork);
	int_set_handler(SYSCALL_EXIT, syscall_exit);
	int_set_handler(SYSCALL_STOP, syscall_stop);
	int_set_handler(SYSCALL_WAKE, syscall_wake);
	int_set_handler(SYSCALL_GPID, syscall_gpid);
	int_set_handler(SYSCALL_TIME, syscall_time);
	int_set_handler(SYSCALL_USER, syscall_user);
	int_set_handler(SYSCALL_AUTH, syscall_auth);
	int_set_handler(SYSCALL_PROC, syscall_proc);
	int_set_handler(SYSCALL_KILL, syscall_kill);
	int_set_handler(SYSCALL_VM86, syscall_vm86);
	int_set_handler(SYSCALL_NAME, syscall_name);
	int_set_handler(SYSCALL_REAP, syscall_reap);

	/* register fault handlers */
	int_set_handler(FAULT_DE, fault_float);
	int_set_handler(FAULT_DB, fault_generic);
	int_set_handler(FAULT_NI, fault_generic);
	int_set_handler(FAULT_BP, fault_generic);
	int_set_handler(FAULT_OF, fault_generic);
	int_set_handler(FAULT_BR, fault_generic);
	int_set_handler(FAULT_UD, fault_generic);
	int_set_handler(FAULT_NM, fault_nomath);
	int_set_handler(FAULT_DF, fault_double);
	int_set_handler(FAULT_CO, fault_float);
	int_set_handler(FAULT_TS, fault_generic);
	int_set_handler(FAULT_NP, fault_generic);
	int_set_handler(FAULT_SS, fault_generic);
	int_set_handler(FAULT_GP, fault_gpf);
	int_set_handler(FAULT_PF, fault_page);
	int_set_handler(FAULT_MF, fault_float);
	int_set_handler(FAULT_AC, fault_generic);
	int_set_handler(FAULT_MC, fault_generic);
	int_set_handler(FAULT_XM, fault_nomath);

	/* start timer (for preemption) */
	timer_set_freq(64);

	/* initialize FPU/MMX/SSE */
	cpu_init_fpu();

	/* drop to usermode, scheduling the next thread */
	debug_printf("dropping to usermode\n");
	return thread_switch(NULL, schedule_next());
}