Example #1
/*
 * Exit the current thread.
 * Calling this function initiates a context switch.
 */
void Exit(int exitCode)
{
    struct Kernel_Thread* current = g_currentThread;

    if (Interrupts_Enabled())
	Disable_Interrupts();

    /* Thread is dead */
    current->exitCode = exitCode;
    current->alive = false;

    /* Clean up any thread-local memory */
    Tlocal_Exit(g_currentThread);

    /* Notify the thread's owner, if any */
    Wake_Up(&current->joinQueue);

    /* Remove the thread's implicit reference to itself. */
    Detach_Thread(g_currentThread);

    /*
     * Schedule a new thread.
     * Since the old thread wasn't placed on any
     * thread queue, it won't get scheduled again.
     */
    Schedule();

    /* Shouldn't get here */
    KASSERT(false);
}
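
For context, a kernel thread normally terminates by calling Exit() from its own start function. A minimal caller-side sketch (the Worker() function and its argument are hypothetical, not taken from these examples):

/* Hypothetical thread body: do the work, then terminate via Exit().
 * Exit() never returns; the exit code is collected by the owner via Join(). */
static void Worker(ulong_t arg) {
    Print("worker running with arg %d\n", (int) arg);
    Exit((int) arg);
}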
Example #2
int Start_Timer(int ticks, timerCallback cb) {
    int returned_timer_id;

    KASSERT(!Interrupts_Enabled());

    if(timeEventCount == MAX_TIMER_EVENTS) {
        Print("timeEventCount == %d == MAX_TIMER_EVENTS; cannot start a new timer\n",
              MAX_TIMER_EVENTS);
        int i;
        for(i = 0; i < MAX_TIMER_EVENTS; i++) {
            Print("%d: cb 0x%p in %d/%d ticks\n", i,
                  pendingTimerEvents[i].callBack,
                  pendingTimerEvents[i].ticks,
                  pendingTimerEvents[i].origTicks);
        }
        return -1;
    } else {
        returned_timer_id = ++nextEventID;      /* avoid returning 0. */
        pendingTimerEvents[timeEventCount].id = returned_timer_id;
        pendingTimerEvents[timeEventCount].callBack = cb;
        pendingTimerEvents[timeEventCount].ticks = ticks;
        pendingTimerEvents[timeEventCount].origTicks = ticks;
        timeEventCount++;

        return returned_timer_id;
    }
}
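
Because Start_Timer() asserts that interrupts are off, callers are expected to bracket it with Disable_Interrupts()/Enable_Interrupts(). A minimal usage sketch; the timerCallback signature (a function taking the event id) is an assumption, not shown above:

/* Hypothetical callback, invoked from the timer tick handler when the event expires. */
static void My_Timer_Callback(int id) {
    Print("timer %d fired\n", id);
}

/* Arm a 100-tick event; interrupts must be disabled around the call. */
static int Arm_Timer(void) {
    int id;
    Disable_Interrupts();
    id = Start_Timer(100, My_Timer_Callback);
    Enable_Interrupts();
    return id;   /* -1 means the pending-event table was full */
}

Cancel_Timer() (Examples #15 and #20) follows the same discipline: interrupts off around the call, -1 if the id is not found.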
Example #3
/*
 * Wait for given thread to die.
 * Interrupts must be enabled.
 * Returns the thread exit code.
 */
int Join(struct Kernel_Thread* kthread)
{
    int exitCode;

    KASSERT(Interrupts_Enabled());

    /* It is only legal for the owner to join */
    KASSERT(kthread->owner == g_currentThread);

    Disable_Interrupts();

    /* Wait for it to die */
    while (kthread->alive) {
	Wait(&kthread->joinQueue);
    }

    /* Get thread exit code. */
    exitCode = kthread->exitCode;

    /* Release our reference to the thread */
    Detach_Thread(kthread);

    Enable_Interrupts();

    return exitCode;
}
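
The owner then collects the worker's exit code with Join(). A sketch pairing with the Worker() example after Exit() above; the Start_Kernel_Thread() call and PRIORITY_NORMAL constant are assumptions that do not appear in these examples:

/* Spawn the hypothetical worker and wait for it to die. */
static void Run_Worker_And_Wait(void) {
    struct Kernel_Thread *worker;
    int exitCode;

    /* Assumed creation call: start function, argument, priority, detached flag.
     * detached == false keeps the owner's reference, so Join() is legal. */
    worker = Start_Kernel_Thread(Worker, 42, PRIORITY_NORMAL, false);

    exitCode = Join(worker);   /* interrupts must be enabled here */
    Print("worker exited with code %d\n", exitCode);
}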
Example #4
/*
 * Wake up all threads waiting on the given condition.
 * The mutex guarding the condition should be held!
 */
void Cond_Broadcast(struct Condition* cond)
{
    KASSERT(Interrupts_Enabled());
    Disable_Interrupts();  /* prevent scheduling */
    Wake_Up(&cond->waitQueue);
    Enable_Interrupts();  /* resume scheduling */
}
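
A typical producer holds the guarding mutex, updates the shared state, and then broadcasts. A minimal sketch; Mutex_Lock() is assumed as the counterpart of the Mutex_Unlock() shown in Example #8 below, the shared state is hypothetical, and a Mutex_Init()/Cond_Init()-style setup is assumed to have run elsewhere:

/* Hypothetical shared state guarded by dataMutex and signalled via dataReady. */
static struct Mutex dataMutex;
static struct Condition dataReady;
static int itemCount = 0;

static void Produce_Item(void) {
    Mutex_Lock(&dataMutex);       /* assumed counterpart of Mutex_Unlock() */
    ++itemCount;                  /* change the state the waiters test */
    Cond_Broadcast(&dataReady);   /* mutex is held, as required above */
    Mutex_Unlock(&dataMutex);
}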
Example #5
/*
 * If the given thread has a User_Context,
 * switch to its memory space.
 *
 * Params:
 *   kthread - the thread that is about to execute
 *   state - saved processor registers describing the state when
 *      the thread was interrupted
 */
void Switch_To_User_Context(struct Kernel_Thread* kthread, struct Interrupt_State* state)
{
    /*
     * Hint: Before executing in user mode, you will need to call
     * the Set_Kernel_Stack_Pointer() and Switch_To_Address_Space()
     * functions.
     */
    //TODO("Switch to a new user address space, if necessary");
    if (Interrupts_Enabled())
	Disable_Interrupts();

    if (kthread->userContext != 0) {
	/* Only user-mode threads need the kernel stack pointer and
	 * the address space set up before returning to user mode. */
	Set_Kernel_Stack_Pointer((ulong_t) kthread->stackPage + PAGE_SIZE);
	Switch_To_Address_Space(kthread->userContext);
    }

    Enable_Interrupts();
}
Example #6
/*
 * Calibrate the given drive.
 */
static bool Calibrate(int drive)
{
    int numAttempts = 4;
    bool success = false;
    uchar_t st0, pcn;

    KASSERT(!Interrupts_Enabled());

    while (numAttempts-- > 0) {
	/* Issue the calibrate command */
	Floppy_Out(FDC_COMMAND_CALIBRATE);
	Floppy_Out((uchar_t) drive);
	Wait_For_Interrupt();

	/* Check interrupt status, to see if calibrate succeeded */
	Sense_Interrupt_Status(&st0, &pcn);
	Debug("Calibrate: st0=%02x, pcn=%02x\n", st0, pcn);
	if (st0 & FDC_ST0_SEEK_END) {
	    success = true;
	    break;
	}
    }

    Debug("Drive %d calibration %s\n", drive, success?"succeeded":"failed");
    return success;
}
Example #7
/*
 * Wait for the controller to issue an interrupt.
 * Must be called with interrupts disabled.
 */
static void Wait_For_Interrupt(void)
{
    KASSERT(!Interrupts_Enabled());

    /* Wait for interrupt */
    Wait(&s_floppyInterruptWaitQueue);
}
Example #8
/*
 * Unlock given mutex.
 */
void Mutex_Unlock(struct Mutex* mutex)
{
    KASSERT(Interrupts_Enabled());

    g_preemptionDisabled = true;
    Mutex_Unlock_Imp(mutex);
    g_preemptionDisabled = false;
}
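
The matching public Mutex_Lock() is not shown in these examples. A minimal critical-section sketch, assuming a Mutex_Lock() with the obvious signature and a mutex initialized elsewhere:

static struct Mutex countMutex;   /* hypothetical; assume it was initialized at startup */
static int sharedCount = 0;

static void Increment_Shared_Count(void) {
    Mutex_Lock(&countMutex);      /* assumed counterpart of Mutex_Unlock() above */
    ++sharedCount;                /* protected update */
    Mutex_Unlock(&countMutex);    /* interrupts must be enabled, as asserted above */
}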
Example #9
/*
 * Called when a reference to the thread is broken.
 */
static void Detach_Thread(struct Kernel_Thread* kthread)
{
    KASSERT(!Interrupts_Enabled());
    KASSERT(kthread->refCount > 0);

    --kthread->refCount;
    if (kthread->refCount == 0) {
	Reap_Thread(kthread);
    }
}
Example #10
void NE2000_Reset(struct Net_Device *device) {
    ulong_t baseAddr = device->baseAddr;

    KASSERT(!Interrupts_Enabled());

    Out_Byte(baseAddr + NE2K_RESET_PORT, In_Byte(baseAddr + NE2K_RESET_PORT));

    while (In_Byte(baseAddr + NE2K0R_ISR) & NE2K_ISR_RST) {
        Print("NIC has not reset yet\n");
    }
}
Example #11
/*
 * Wait on given wait queue.
 * Must be called with interrupts disabled!
 * Note that the function will return with interrupts
 * disabled.  This is desirable, because it allows us to
 * atomically test a condition that can be affected by an interrupt
 * and wait for it to be satisfied (if necessary).
 * See the Wait_For_Key() function in keyboard.c
 * for an example.
 */
void Wait(struct Thread_Queue* waitQueue)
{
    struct Kernel_Thread* current = g_currentThread;

    KASSERT(!Interrupts_Enabled());

    /* Add the thread to the wait queue. */
    Enqueue_Thread(waitQueue, current);

    /* Find another thread to run. */
    Schedule();
}
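
The comment above describes the intended pattern: disable interrupts, test a condition that an interrupt handler can change, and call Wait() so that the test and the sleep are atomic. A sketch in the style of the Wait_For_Key() example it mentions; the queue, the flag, and the device handler are hypothetical:

static struct Thread_Queue s_deviceWaitQueue;   /* hypothetical wait queue */
static volatile int s_deviceDataReady = 0;      /* set by the (hypothetical) interrupt handler */

/* Block until the interrupt handler reports data. */
static void Wait_For_Device(void) {
    Disable_Interrupts();
    while (!s_deviceDataReady)
        Wait(&s_deviceWaitQueue);   /* returns with interrupts still disabled */
    s_deviceDataReady = 0;
    Enable_Interrupts();
}

/* The handler side would set s_deviceDataReady and call Wake_Up(&s_deviceWaitQueue). */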
Example #12
int Get_Remaing_Timer_Ticks(int id) {
    int i;

    KASSERT(!Interrupts_Enabled());
    for(i = 0; i < timeEventCount; i++) {
        if(pendingTimerEvents[i].id == id) {
            return pendingTimerEvents[i].ticks;
        }
    }

    return -1;
}
Example #13
/*
 * Wake up the thread with the given pid, if it is waiting
 * on the given wait queue.
 * Interrupts must be disabled!
 */
void Wake_Up_Thread(struct Thread_Queue* waitQueue, int pid)
{
  struct Kernel_Thread* thread = Lookup_Thread(pid);

  KASSERT(!Interrupts_Enabled());

  if (thread != 0) {
    Remove_Thread(waitQueue, thread);
    Make_Runnable(thread);
  }
}
Example #14
/*
 * Wait for the controller to issue an interrupt.
 * Must be called with interrupts disabled.
 */
static void Wait_For_Interrupt(void)
{
    KASSERT(!Interrupts_Enabled());

    /* Spin wait */

    s_interruptOccurred = 0;
    Enable_Interrupts();
    while (!s_interruptOccurred) {
	/* FIXME: Could sleep here */
    }
    Disable_Interrupts();
}
Example #15
int Cancel_Timer(int id) {
    int i;
    KASSERT(!Interrupts_Enabled());
    for (i = 0; i < timeEventCount; i++) {
        if (pendingTimerEvents[i].id == id) {
            pendingTimerEvents[i] = pendingTimerEvents[timeEventCount - 1];
            timeEventCount--;
            return 0;
        }
    }

    Print("timer: unable to find timer id %d to cancel it\n", id);
    return -1;
}
Example #16
/*
 * Wake up a single thread waiting on given wait queue
 * (if there are any threads waiting).  Chooses the highest priority thread.
 * Interrupts must be disabled!
 */
void Wake_Up_One(struct Thread_Queue* waitQueue)
{
    struct Kernel_Thread* best;

    KASSERT(!Interrupts_Enabled());

    best = Find_Best(waitQueue);

    if (best != 0) {
	Remove_Thread(waitQueue, best);
	Make_Runnable(best);
	/*Print("Wake_Up_One: waking up %x from %x\n", best, g_currentThread); */
    }
}
Example #17
/*
 * If the given thread has a user context, detach it
 * and destroy it.  This is called when a thread is
 * being destroyed.
 */
void Detach_User_Context(struct Kernel_Thread* kthread)
{
    struct User_Context* old = kthread->userContext;

    kthread->userContext = 0;

    if (old != 0) {
	int refCount;
	if(Interrupts_Enabled())
		Disable_Interrupts();
        --old->refCount;
	refCount = old->refCount;
	Enable_Interrupts();

	/*Print("User context refcount == %d\n", refCount);*/
        if (refCount == 0)
            Destroy_User_Context(old);
    }
}
Example #18
/*
 * Wake up all threads waiting on given wait queue.
 * Must be called with interrupts disabled!
 * See Keyboard_Interrupt_Handler() function in keyboard.c
 * for an example.
 */
void Wake_Up(struct Thread_Queue* waitQueue)
{
    struct Kernel_Thread *kthread = waitQueue->head, *next;

    KASSERT(!Interrupts_Enabled());

    /*
     * Walk through the list of threads in the wait queue,
     * transferring each one to the run queue.
     */
    while (kthread != 0) {
	next = Get_Next_In_Thread_Queue(kthread);
	Make_Runnable(kthread);
	kthread = next;
    }

    /* The wait queue is now empty. */
    Clear_Thread_Queue(waitQueue);
}
Example #19
/*
 * Add given thread to the run queue, so that it
 * may be scheduled.  Must be called with interrupts disabled!
 */
void Make_Runnable(struct Kernel_Thread* kthread)
{
    KASSERT(!Interrupts_Enabled());

    {
	int currentQ = kthread->currentReadyQueue;
	KASSERT(currentQ >= 0 && currentQ < MAX_QUEUE_LEVEL);

	/* A thread that blocked is boosted one priority level (lower queue index). */
	if (kthread->blocked == true && currentQ > 0)
	    kthread->currentReadyQueue--;

	/* Keep the idle thread pinned to the lowest-priority queue. */
	if (kthread->priority == PRIORITY_IDLE)
	    kthread->currentReadyQueue = MAX_QUEUE_LEVEL - 1;

	kthread->blocked = false;
	Enqueue_Thread(&s_runQueue[kthread->currentReadyQueue], kthread);
    }
}
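
The comments above assume a multilevel run queue indexed by currentReadyQueue, with level 0 the highest priority and the idle thread pinned to the last level. The selection side is not shown; an illustrative sketch of what it might look like, built from helpers that do appear in these examples (Find_Best() from Example #16, Remove_Thread()):

/* Illustrative only: scan the ready queues from highest to lowest priority. */
static struct Kernel_Thread* Get_Next_Runnable_Sketch(void) {
    int level;
    struct Kernel_Thread* next = 0;

    KASSERT(!Interrupts_Enabled());

    for (level = 0; level < MAX_QUEUE_LEVEL; ++level) {
        next = Find_Best(&s_runQueue[level]);
        if (next != 0) {
            Remove_Thread(&s_runQueue[level], next);
            break;
        }
    }
    return next;   /* never 0, since the idle thread always sits in the last queue */
}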
Example #20
int Cancel_Timer(int id) {
    int i;
    KASSERT(!Interrupts_Enabled());
    for(i = 0; i < timeEventCount; i++) {
        if(pendingTimerEvents[i].id == id) {
            if(timerDebug)
                Print
                    ("timer: event %d at %d ticks cancelled subscript %d/%d\n",
                     pendingTimerEvents[i].id,
                     pendingTimerEvents[i].ticks, i, timeEventCount);
            pendingTimerEvents[i] =
                pendingTimerEvents[timeEventCount - 1];
            timeEventCount--;
            return 0;
        }
    }

    Print("timer: unable to find timer id %d to cancel it\n", id);
    return -1;
}
Example #21
/* actually reads the data out of the device */
void NE2000_Receive(struct Net_Device *device, void *buffer, ulong_t length,
                    ulong_t pageOffset) {
    ulong_t baseAddr = device->baseAddr;

    KASSERT(!Interrupts_Enabled());

    int i;
    int newLength = length >> 1;
    unsigned short *newBuffer = (unsigned short *)buffer;

    /* Set the Command Register */
    Out_Byte(baseAddr + NE2K_CR, 0x22);

    Out_Byte(baseAddr + NE2K0W_RCR, 0x0C);

    /* Load the packet size into the registers */
    Out_Byte(baseAddr + NE2K0W_RBCR0, length & 0xFF);
    Out_Byte(baseAddr + NE2K0W_RBCR1, length >> 8);

    /* Load the page start into the RSARX registers */
    Out_Byte(baseAddr + NE2K0W_RSAR0, pageOffset & 0xFF);
    Out_Byte(baseAddr + NE2K0W_RSAR1, pageOffset >> 8);

    /* Start the remote read */
    Out_Byte(baseAddr + NE2K_CR, NE2K_CR_DMA_RREAD | NE2K_CR_STA);

    /* Read the data in through the I/O port */
    for (i = 0; i < newLength; ++i) {
        newBuffer[i] = In_Word(baseAddr + NE2K_IO_PORT);
    }

    /* Receive the last byte of data if we have an odd length */
    if (length & 0x1) {
        ((uchar_t *) buffer)[length - 1] = In_Byte(baseAddr + NE2K_IO_PORT);
    }

    /* Ack the remote DMA interrupt */
    Out_Byte(baseAddr + NE2K0R_ISR, NE2K_ISR_RDC);

    device->rxBytes += length;
}
Example #22
/*
 * Schedule a thread that is waiting to run.
 * Must be called with interrupts off!
 * The current thread should already have been placed
 * on whatever queue is appropriate (i.e., either the
 * run queue if it is still runnable, or a wait queue
 * if it is waiting for an event to occur).
 */
void Schedule(void)
{
    struct Kernel_Thread* runnable;

    /* Make sure interrupts really are disabled */
    KASSERT(!Interrupts_Enabled());

    /* Preemption should not be disabled. */
    KASSERT(!g_preemptionDisabled);

    /* Get next thread to run from the run queue */
    runnable = Get_Next_Runnable();

    /*
     * Activate the new thread, saving the context of the current thread.
     * Eventually, this thread will get re-activated and Switch_To_Thread()
     * will "return", and then Schedule() will return to wherever
     * it was called from.
     */
    Switch_To_Thread(runnable);
}
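
A thread that merely wants to give up the CPU must therefore put itself back on the run queue before calling Schedule(). A minimal yield sketch built only from calls shown in these examples; it assumes the caller runs with interrupts enabled:

/* Voluntarily give up the CPU.  When this thread is scheduled again,
 * Schedule() returns here and the interrupt state is restored. */
static void Yield_Sketch(void) {
    Disable_Interrupts();
    Make_Runnable(g_currentThread);
    Schedule();
    Enable_Interrupts();
}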
Example #23
/*
 * Clean up any thread-local data upon thread exit.  Assumes
 * this is called with interrupts disabled.  We follow the POSIX style
 * of possibly invoking a destructor more than once, because a
 * destructor to some thread-local data might cause other thread-local
 * data to become alive once again.  If everything is NULL by the end
 * of an iteration, we are done.
 */
static void Tlocal_Exit(struct Kernel_Thread* curr) {
    int i, j, called = 0;

    KASSERT(!Interrupts_Enabled());

    for (j = 0; j<MIN_DESTRUCTOR_ITERATIONS; j++) {

        for (i = 0; i<MAX_TLOCAL_KEYS; i++) {

	    void *x = (void *)curr->tlocalData[i];
	    if (x != NULL && s_tlocalDestructors[i] != NULL) {

	        curr->tlocalData[i] = NULL;
		called = 1;

		Enable_Interrupts();
		s_tlocalDestructors[i](x);
		Disable_Interrupts();
	    }
	}
	if (!called) break;
    }
}
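
For orientation, the thread-local data freed here is presumably installed through a key/destructor registration API. The sketch below assumes Tlocal_Create(&key, destructor) and Tlocal_Put(key, value) helpers and a tlocal_key_t type, none of which appear in these examples; only the destructor contract (it runs with interrupts enabled and may run more than once) comes from the code above:

static tlocal_key_t s_bufKey;             /* assumed key type */

/* Destructor invoked by Tlocal_Exit() for the per-thread buffer. */
static void Buf_Destructor(void *buf) {
    Free(buf);
}

static void Attach_Thread_Buffer(void) {
    void *buf = Malloc(256);
    if (buf == 0)
        return;
    Tlocal_Create(&s_bufKey, Buf_Destructor);   /* assumed registration call */
    Tlocal_Put(s_bufKey, buf);                  /* assumed per-thread store */
}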
Example #24
/*
 * Wait on given condition (protected by given mutex).
 */
void Cond_Wait(struct Condition* cond, struct Mutex* mutex)
{
    KASSERT(Interrupts_Enabled());

    /* Ensure mutex is held. */
    KASSERT(IS_HELD(mutex));

    /* Turn off scheduling. */
    g_preemptionDisabled = true;

    /*
     * Release the mutex, but leave preemption disabled.
     * No other threads will be able to run before this thread
     * is able to wait.  Therefore, this thread will not
     * miss the eventual notification on the condition.
     */
    Mutex_Unlock_Imp(mutex);

    /*
     * Atomically reenable preemption and wait in the condition wait queue.
     * Other threads can run while this thread is waiting,
     * and eventually one of them will call Cond_Signal() or Cond_Broadcast()
     * to wake up this thread.
     * On wakeup, disable preemption again.
     */
    Disable_Interrupts();
    g_preemptionDisabled = false;
    Wait(&cond->waitQueue);
    g_preemptionDisabled = true;
    Enable_Interrupts();

    /* Reacquire the mutex. */
    Mutex_Lock_Imp(mutex);

    /* Turn scheduling back on. */
    g_preemptionDisabled = false;
}
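
The consumer side pairs with the Produce_Item() sketch after Example #4 and relies on the same hypothetical declarations; the predicate is re-tested in a loop because Cond_Broadcast() wakes every waiter:

/* Hypothetical consumer matching the Produce_Item() sketch above. */
static void Consume_Item(void) {
    Mutex_Lock(&dataMutex);           /* assumed counterpart of Mutex_Unlock() */
    while (itemCount == 0)            /* re-check after every wakeup */
        Cond_Wait(&dataReady, &dataMutex);
    --itemCount;
    Mutex_Unlock(&dataMutex);
}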
Example #25
int Eth_Transmit(struct Net_Device *device, struct Net_Buf *nBuf,
                 uchar_t * destAddr, ushort_t type) {
    struct Ethernet_Header header;
    int rc;

    KASSERT(Interrupts_Enabled());

    /* all you have to do in this function is fill in the header. */
    TODO_P(PROJECT_RAW_ETHERNET,
           "construct the ethernet header for the destination, this device's address, and the type.");

    rc = Net_Buf_Prepend(nBuf, &header, sizeof(header), NET_BUF_ALLOC_COPY);
    if (rc != 0)
        return rc;

    ulong_t size = MAX(NET_BUF_SIZE(nBuf), ETH_MIN_DATA);       /* buffer size must be at least ETH_MIN_DATA, 
                                                                   even if we don't use it. */

    KASSERT0(size >= ETH_MIN_DATA, "input to Eth_Transmit should be at least ETH_MIN_DATA long");       /* paranoia. */

    void *buffer = Malloc(size);
    if (buffer == 0)
        return ENOMEM;

    rc = Net_Buf_Extract_All(nBuf, buffer);
    if (rc != 0) {
        Free(buffer);
        return rc;
    }

    Disable_Interrupts();
    device->transmit(device, buffer, size);
    Enable_Interrupts();

    return 0;
}
Example #26
/*
 * If the given thread has a User_Context,
 * switch to its memory space.
 */
void Switch_To_User_Context(struct Kernel_Thread *kthread,
                            struct Interrupt_State *state
                            __attribute__ ((unused))) {
    int cpuID;
    extern int userDebug;
    struct User_Context *userContext = kthread->userContext;

    /*
     * FIXME: could avoid resetting ss0/esp0 if not returning
     * to user space.
     */

    cpuID = Get_CPU_ID();

    KASSERT(!Interrupts_Enabled());

    if (CPUs[cpuID].s_currentUserContext && userContext == 0) {
        /* Kernel-mode thread: must switch to the kernel address space,
           since another core could otherwise delete the old user context
           while this thread is still using it. */
        Set_PDBR((void *)Kernel_Page_Dir());
        CPUs[cpuID].s_currentUserContext = NULL;
        return;
    }
    /* Switch only if the user context is indeed different */
    if (userContext != CPUs[cpuID].s_currentUserContext) {

        if (userDebug)
            Print("A[%p]\n", kthread);
Example #27
/*
 * Add given thread to the run queue, so that it
 * may be scheduled.  Must be called with interrupts disabled!
 */
void Make_Runnable(struct Kernel_Thread* kthread)
{
    KASSERT(!Interrupts_Enabled());

    Enqueue_Thread(&s_runQueue, kthread);
}
Example #28
static int Sys_Spawn( struct Interrupt_State* state )
{

    int i;
    int ret;
    int argc;
    char *argv[MAX_ARGS];
    struct Kernel_Thread *thread;
    struct User_Program *programPtr;
    const void* userPtr = (const void*) state->ebx;
    unsigned int length = state->ecx;
    unsigned char* buf;

    // Make sure buf is a reasonable size.
    if ( length > 1024 )
	return -1;

    buf = Malloc_Atomic( length + 1 );
    if ( buf == 0 )
	return -1;

    if ( !Copy_From_User( buf, userPtr, length ) ) {
	Free_Atomic( buf );
	return -1;
    }
    buf[ length ] = '\0';

    // convert buf to argc and argv;
    i = 0;
    argc = 0;
    argv[0] = buf;
    while (i < 1024 && buf[i]) {
         if (buf[i] == ' ') {
             buf[i] = '\0';
             i++;

             // skip sequence of white space
             while (i < 1024 && buf[i] == ' ') i++;

             if (i < 1024) {
                 ++argc;
                 if (argc == MAX_ARGS) {
                     Free_Atomic( buf );
                     return -1;
                 }
                 argv[argc] = &buf[i];
             }
         } else {
             i++;
         }
    }
    argc++;

    programPtr = loadElfProgram(argv[0]);
    if (programPtr) {
        // setup argc and argv
        thread = Start_User_Program(programPtr, FALSE, argc, argv, buf, length);
        ret = thread->pid;

	if (programPtr->program) {
	    Free_Atomic( (char *) programPtr->executable );
	}
    } else {
        thread = 0;
	ret = -1;
    }

    KASSERT( Interrupts_Enabled() );

    return ret;
}
Example #29
/*
 * Hand given thread to the reaper for destruction.
 * Must be called with interrupts disabled!
 */
static void Reap_Thread(struct Kernel_Thread* kthread)
{
    KASSERT(!Interrupts_Enabled());
    Enqueue_Thread(&s_graveyardQueue, kthread);
    Wake_Up(&s_reaperWaitQueue);
}
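
The reaper side of this hand-off is not shown in these examples. A heavily hedged sketch of what it presumably looks like, using the queue and wait primitives seen above plus an assumed Destroy_Thread() helper that frees the dead thread's resources:

/* Illustrative reaper loop: sleep until Reap_Thread() wakes us, then
 * destroy every thread in the graveyard queue with interrupts enabled. */
static void Reaper_Sketch(ulong_t arg __attribute__ ((unused))) {
    Disable_Interrupts();
    while (true) {
        struct Kernel_Thread *kthread = s_graveyardQueue.head;
        if (kthread == 0) {
            Wait(&s_reaperWaitQueue);           /* woken by Reap_Thread() above */
            continue;
        }
        Clear_Thread_Queue(&s_graveyardQueue);
        Enable_Interrupts();
        while (kthread != 0) {
            struct Kernel_Thread *next = Get_Next_In_Thread_Queue(kthread);
            Destroy_Thread(kthread);            /* assumed: frees stack and thread struct */
            kthread = next;
        }
        Disable_Interrupts();
    }
}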
Example #30
/*
 * Write a block at the logical block number indicated.
 */
static int IDE_Write(int driveNum, int blockNum, char *buffer) {
    int i;
    int head;
    int sector;
    int cylinder;
    short *bufferW;
    int reEnable = 0;

    if (driveNum < 0 || driveNum > (numDrives - 1)) {
        return IDE_ERROR_BAD_DRIVE;
    }

    if (blockNum < 0 || blockNum >= IDE_getNumBlocks(driveNum)) {
        return IDE_ERROR_INVALID_BLOCK;
    }

    if (Interrupts_Enabled()) {
        Disable_Interrupts();
        reEnable = 1;
    }

    /* now compute the head, cylinder, and sector */
    sector = blockNum % drives[driveNum].num_SectorsPerTrack + 1;
    cylinder = blockNum / (drives[driveNum].num_Heads *
                           drives[driveNum].num_SectorsPerTrack);
    head = (blockNum / drives[driveNum].num_SectorsPerTrack) %
           drives[driveNum].num_Heads;

    if (ideDebug) {
        Print("request to write block %d\n", blockNum);
        Print("    head %d\n", head);
        Print("    cylinder %d\n", cylinder);
        Print("    sector %d\n", sector);
    }

    Out_Byte(IDE_SECTOR_COUNT_REGISTER, 1);
    Out_Byte(IDE_SECTOR_NUMBER_REGISTER, sector);
    Out_Byte(IDE_CYLINDER_LOW_REGISTER, LOW_BYTE(cylinder));
    Out_Byte(IDE_CYLINDER_HIGH_REGISTER, HIGH_BYTE(cylinder));
    Out_Byte(IDE_DRIVE_HEAD_REGISTER, IDE_DRIVE(driveNum) | head);

    Out_Byte(IDE_COMMAND_REGISTER, IDE_COMMAND_WRITE_SECTORS);


    /* wait for the drive */
    while (In_Byte(IDE_STATUS_REGISTER) & IDE_STATUS_DRIVE_BUSY);

    bufferW = (short *)buffer;
    for (i = 0; i < 256; i++) {
        Out_Word(IDE_DATA_REGISTER, bufferW[i]);
    }

    if (ideDebug)
        Print("About to wait for Write \n");

    /* wait for the drive */
    while (In_Byte(IDE_STATUS_REGISTER) & IDE_STATUS_DRIVE_BUSY);

    if (In_Byte(IDE_STATUS_REGISTER) & IDE_STATUS_DRIVE_ERROR) {
        Print("ERROR: write of block %d failed, status %d\n", blockNum,
              In_Byte(IDE_STATUS_REGISTER));
        if (reEnable)
            Enable_Interrupts();
        return IDE_ERROR_DRIVE_ERROR;
    }

    if (reEnable)
        Enable_Interrupts();

    return IDE_ERROR_NO_ERROR;
}
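
To make the head/cylinder/sector arithmetic concrete, here is a worked example for a hypothetical geometry of 16 heads and 63 sectors per track:

/* blockNum = 12345, num_Heads = 16, num_SectorsPerTrack = 63:
 *   sector   = 12345 % 63 + 1    = 60 + 1       = 61
 *   cylinder = 12345 / (16 * 63) = 12345 / 1008 = 12
 *   head     = (12345 / 63) % 16 = 195 % 16     = 3
 * so logical block 12345 is written to cylinder 12, head 3, sector 61. */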