/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
	/* Mask all interrupts: the run queue is also modified from interrupt
	 * context, so raising the IPL acts as the "lock" on it. */
	uint8_t prev_ipl = intr_getipl();
	intr_setipl(IPL_HIGH);

	kthread_t *prevthr = curthr;

	/* Re-enqueue the outgoing thread if it is still runnable; exited or
	 * sleeping threads must not go back on the run queue. */
	if (prevthr->kt_state == KT_RUN)
		ktqueue_enqueue(&kt_runq, prevthr);

	/* Empty run queue: every thread is waiting on an interrupt.  Lower
	 * the IPL (otherwise the wakeup interrupt itself is masked and can
	 * never fire), wait for one, then re-mask and re-check.  The old
	 * code had a leftover panic() here that fired instead of waiting. */
	while (sched_queue_empty(&kt_runq)) {
		intr_setipl(IPL_LOW);
		intr_wait();
		intr_setipl(IPL_HIGH);
	}

	/* Dequeue the next thread and make it current before switching. */
	kthread_t *t = ktqueue_dequeue(&kt_runq);
	curproc = t->kt_proc;
	curthr = t;

	context_switch(&prevthr->kt_ctx, &curthr->kt_ctx);

	/* Execution resumes here when this thread is next scheduled;
	 * restore the IPL it entered sched_switch with (IPL is per-thread). */
	intr_setipl(prev_ipl);
}
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
	/* Mask interrupts before touching the run queue, which is shared
	 * with interrupt context. */
	uint8_t old_ipl = apic_getipl();
	apic_setipl(IPL_HIGH);

	kthread_t *nextthr = ktqueue_dequeue(&kt_runq);

	/* Empty run queue: all threads are blocked on an interrupt.  Unmask
	 * so the wakeup interrupt can fire, halt until one does, then
	 * re-mask and try the queue again. */
	while (nextthr == NULL)
	{
		apic_setipl(IPL_LOW);
		intr_wait();
		apic_setipl(IPL_HIGH);
		nextthr = ktqueue_dequeue(&kt_runq);
	}

	/* Only runnable threads belong on the run queue. */
	KASSERT(nextthr->kt_state == KT_RUN);

	kthread_t *oldthr = curthr;
	curthr = nextthr;
	curproc = nextthr->kt_proc;

	context_switch(&oldthr->kt_ctx, &curthr->kt_ctx);

	/* Restore the IPL only after this thread has been switched back in.
	 * The old code restored it before updating curthr/curproc, letting
	 * interrupt handlers run while the current-thread globals were
	 * inconsistent. */
	apic_setipl(old_ipl);
}
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
       /*MASKING THE INTERRUPT LEVELS*/
        uint8_t curr_intr_level = apic_getipl();
        apic_setipl(IPL_HIGH);

        if(list_empty(&(kt_runq.tq_list)))
        {
                apic_setipl(IPL_LOW);
                intr_wait();
                apic_setipl(curr_intr_level);
                sched_switch();          
        }
        else
        {
                kthread_t *old_thr = curthr;

                dbg(DBG_THR,"PROCESS FORMERLY EXECUTING: %s\n", curthr->kt_proc->p_comm);

                if(kt_runq.tq_size > 0)
                    {
                            while(1)
                            {
                                curthr = ktqueue_dequeue(&kt_runq);
                                curproc = curthr->kt_proc;
                                if(curthr->kt_state == KT_EXITED)
                                    continue;
                                else
                                    break;
                                
                                if(kt_runq.tq_size == 0)
                                    sched_switch();
                            }

                    }
                else
                    sched_switch();

                     if(curthr->kt_cancelled == 1)
                    {
                        dbg(DBG_THR,"%s was cancelled\n", curproc->p_comm);
                        do_exit(0); 
                    }
                 
                    
                    apic_setipl(curr_intr_level);
                 dbg(DBG_THR,"PROCESS CURRENTLY EXECUTING: %s\n", curthr->kt_proc->p_comm);

                context_switch(&(old_thr->kt_ctx), &(curthr->kt_ctx));

        }
}
Beispiel #4
0
/*
 * Program one command into the MMCHS controller and wait for command
 * completion (CC).  Returns 0 on success, 1 if waiting for the completion
 * interrupt times out/fails.
 */
int
mmchs_send_cmd(uint32_t command, uint32_t arg)
{

	/* Read current interrupt status and fail if an interrupt is already
	 * asserted.
	 * NOTE(review): no such check is actually performed in this
	 * function — the caller (mmc_send_cmd) checks MMCHS_SD_STAT before
	 * invoking us.  Confirm this split is intentional. */

	/* Set arguments */
	write32(base_address + MMCHS_SD_ARG, arg);
	/* Set command */
	set32(base_address + MMCHS_SD_CMD, MMCHS_SD_CMD_MASK, command);

	/* Block until the command-complete interrupt fires. */
	if (intr_wait(MMCHS_SD_STAT_CC | MMCHS_SD_IE_TC_ENABLE_CLEAR)) {
		intr_assert(MMCHS_SD_STAT_CC);
		mmc_log_warn(&log, "Failure waiting for interrupt\n");
		return 1;
	}

	if ((command & MMCHS_SD_CMD_RSP_TYPE) ==
	    MMCHS_SD_CMD_RSP_TYPE_48B_BUSY) {
		/* 
		 * Command with busy response *CAN* also set the TC bit if they exit busy
		 */
		if ((read32(base_address + MMCHS_SD_STAT)
			& MMCHS_SD_IE_TC_ENABLE_ENABLE) == 0) {
			mmc_log_warn(&log, "TC should be raised\n");
		}
		/* Acknowledge transfer-complete so the next command starts clean. */
		write32(base_address + MMCHS_SD_STAT,
		    MMCHS_SD_IE_TC_ENABLE_CLEAR);

		/* Wait again for the busy phase to end. */
		if (intr_wait(MMCHS_SD_STAT_CC | MMCHS_SD_IE_TC_ENABLE_CLEAR)) {
			intr_assert(MMCHS_SD_STAT_CC);
			mmc_log_warn(&log, "Failure waiting for clear\n");
			return 1;
		}
	}
	/* Acknowledge command completion. */
	intr_assert(MMCHS_SD_STAT_CC);
	return 0;
}
Beispiel #5
0
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
        /* Raise the IPL so interrupt handlers cannot touch the run queue
         * while we read and modify it. */
        uint8_t saved_ipl = intr_getipl();
        intr_setipl(IPL_HIGH);

        /* Nothing runnable: only an interrupt can put a thread back on the
         * run queue, so unmask, sleep until one fires, then re-mask and
         * check again. */
        while (sched_queue_empty(&kt_runq)) {
                intr_setipl(IPL_LOW);
                intr_wait();
                intr_setipl(IPL_HIGH);
        }

        /* Make the dequeued thread current, then switch into it. */
        kthread_t *next = ktqueue_dequeue(&kt_runq);
        kthread_t *prev = curthr;
        curthr = next;
        curproc = next->kt_proc;

        context_switch(&prev->kt_ctx, &next->kt_ctx);

        /* We are running again (possibly much later): restore the IPL this
         * thread entered with. */
        intr_setipl(saved_ipl);
}
Beispiel #6
0
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
    /* dbg(DBG_PRINT, "(In sched switch)\n");*/
    kthread_t *oldthr;
    int oldIPL;
    oldIPL=intr_getipl();
    intr_setipl(IPL_HIGH);
    while(sched_queue_empty(&kt_runq)) {
        intr_disable();
        intr_setipl(0);
        intr_wait();
        intr_setipl(IPL_HIGH);
    }
    oldthr=curthr;
    curthr=ktqueue_dequeue(&kt_runq);
    curproc = curthr->kt_proc;
    context_switch(&(oldthr->kt_ctx),&(curthr->kt_ctx));
    intr_setipl(oldIPL);
    dbg(DBG_PRINT, "(GRADING1C 1)\n");
    /*NOT_YET_IMPLEMENTED("PROCS: sched_switch");*/
}
Beispiel #7
0
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analagous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
        kthread_t *next_thr;

        /* Somewhere in here: set interrupts to protect run queue
            intr_setipl(IPL_LOW) or IPL_HIGH, in include/main/interrupt.h
        */
        uint8_t oldIPL = intr_getipl(); /* Check what currently running IPL is */
        intr_setipl(IPL_HIGH); /* Block all hardware interrupts */

        /* Enqueue requesting thread on run queue if still runnable
            (dead threads become unschedulable)
        */
        if (curthr->kt_state == KT_RUN) ktqueue_enqueue(&kt_runq, curthr);

        /* Pick a runnable thread. Take someone off the run queue. */

        /* If no threads on run queue, re-enable interrupts and wait for one to occur */
        if (sched_queue_empty(&kt_runq)) {
          intr_wait();
          /* Once this returns, there should be a process in the run queue */
        }

        /* Remove a thread from the run queue */
        next_thr = ktqueue_dequeue(&kt_runq);

        /* Manage curproc, curthr */
        kthread_t *old_thr = curthr;
        proc_t *old_proc = curproc;

        curthr = next_thr;
        curproc = next_thr->kt_proc;

        /* Switch context from old context to new context */
        context_switch(&old_thr->kt_ctx, &curthr->kt_ctx);

        /* NOT_YET_IMPLEMENTED("PROCS: sched_switch"); */
}
Beispiel #8
0
/*
 * Issue a single MMC command described by c: translate it into an MMCHS
 * controller command word, program the controller via mmchs_send_cmd(),
 * copy the response registers back into c->resp, and perform the PIO data
 * phase for single-block reads/writes.  Returns 0 on success, 1 on failure.
 */
int
mmc_send_cmd(struct mmc_command *c)
{

	/* convert the command to a hsmmc command */
	int ret;
	uint32_t cmd, arg;
	uint32_t count;
	uint32_t value;
	cmd = MMCHS_SD_CMD_INDX_CMD(c->cmd);
	arg = c->args;

	/* Map the generic response type onto the controller's RSP_TYPE field. */
	switch (c->resp_type) {
	case RESP_LEN_48_CHK_BUSY:
		cmd |= MMCHS_SD_CMD_RSP_TYPE_48B_BUSY;
		break;
	case RESP_LEN_48:
		cmd |= MMCHS_SD_CMD_RSP_TYPE_48B;
		break;
	case RESP_LEN_136:
		cmd |= MMCHS_SD_CMD_RSP_TYPE_136B;
		break;
	case NO_RESPONSE:
		cmd |= MMCHS_SD_CMD_RSP_TYPE_NO_RESP;
		break;
	default:
		return 1;
	}

	/* read single block */
	if (c->cmd == MMC_READ_BLOCK_SINGLE) {
		cmd |= MMCHS_SD_CMD_DP_DATA;	/* Command with data transfer */
		cmd |= MMCHS_SD_CMD_MSBS_SINGLE;	/* single block */
		cmd |= MMCHS_SD_CMD_DDIR_READ;	/* read data from card */

	}

	/* write single block */
	if (c->cmd == MMC_WRITE_BLOCK_SINGLE) {
		cmd |= MMCHS_SD_CMD_DP_DATA;	/* Command with data transfer */
		cmd |= MMCHS_SD_CMD_MSBS_SINGLE;	/* single block */
		cmd |= MMCHS_SD_CMD_DDIR_WRITE;	/* write to the card */
	}

	/* check we are in a sane state: a stale interrupt bit would confuse
	 * the completion wait below, so warn and clear it first. */
	if ((read32(base_address + MMCHS_SD_STAT) & 0xffffu)) {
		mmc_log_warn(&log, "%s, interrupt already raised stat  %08x\n",
		    __FUNCTION__, read32(base_address + MMCHS_SD_STAT));
		write32(base_address + MMCHS_SD_STAT,
		    MMCHS_SD_IE_CC_ENABLE_CLEAR);
	}

	if (cmd & MMCHS_SD_CMD_DP_DATA) {
		if (cmd & MMCHS_SD_CMD_DDIR_READ) {
			/* if we are going to read enable the buffer ready
			 * interrupt */
			set32(base_address + MMCHS_SD_IE,
			    MMCHS_SD_IE_BRR_ENABLE,
			    MMCHS_SD_IE_BRR_ENABLE_ENABLE);
		} else {
			/* write: enable the buffer-write-ready interrupt */
			set32(base_address + MMCHS_SD_IE,
			    MMCHS_SD_IE_BWR_ENABLE,
			    MMCHS_SD_IE_BWR_ENABLE_ENABLE);
		}
	}

	/* Program a fixed 512-byte block length for the data phase. */
	set32(base_address + MMCHS_SD_BLK, MMCHS_SD_BLK_BLEN, 512);

	ret = mmchs_send_cmd(cmd, arg);

	/* copy response into cmd->resp */
	switch (c->resp_type) {
	case RESP_LEN_48_CHK_BUSY:
	case RESP_LEN_48:
		c->resp[0] = read32(base_address + MMCHS_SD_RSP10);
		break;
	case RESP_LEN_136:
		c->resp[0] = read32(base_address + MMCHS_SD_RSP10);
		c->resp[1] = read32(base_address + MMCHS_SD_RSP32);
		c->resp[2] = read32(base_address + MMCHS_SD_RSP54);
		c->resp[3] = read32(base_address + MMCHS_SD_RSP76);
		break;
	case NO_RESPONSE:
		break;
	default:
		return 1;
	}

	if (cmd & MMCHS_SD_CMD_DP_DATA) {
		count = 0;
		assert(c->data_len);
		if (cmd & MMCHS_SD_CMD_DDIR_READ) {
			/* Wait until the controller has a buffer of data ready. */
			if (intr_wait(MMCHS_SD_IE_BRR_ENABLE_ENABLE)) {
				intr_assert(MMCHS_SD_IE_BRR_ENABLE_ENABLE);
				mmc_log_warn(&log,
				    "Timeout waiting for interrupt\n");
				return 1;
			}

			if (!(read32(base_address +
				    MMCHS_SD_PSTATE) & MMCHS_SD_PSTATE_BRE_EN))
			{
				mmc_log_warn(&log,
				    "Problem BRE should be true\n");
				return 1;	/* We are not allowed to read
						 * data from the data buffer */
			}

			/* PIO read: pull 32-bit words and unpack them byte by
			 * byte into the caller's buffer.
			 * NOTE(review): assumes c->data_len is a multiple of
			 * 4 — a trailing partial word would overrun c->data
			 * by up to 3 bytes.  Confirm callers guarantee this. */
			for (count = 0; count < c->data_len; count += 4) {
				value = read32(base_address + MMCHS_SD_DATA);
				c->data[count] = *((char *) &value);
				c->data[count + 1] = *((char *) &value + 1);
				c->data[count + 2] = *((char *) &value + 2);
				c->data[count + 3] = *((char *) &value + 3);
			}

			/* Wait for TC */
			if (intr_wait(MMCHS_SD_IE_TC_ENABLE_ENABLE)) {
				intr_assert(MMCHS_SD_IE_TC_ENABLE_ENABLE);
				mmc_log_warn(&log,
				    "Timeout waiting for interrupt\n");
				return 1;
			}

			write32(base_address + MMCHS_SD_STAT,
			    MMCHS_SD_IE_TC_ENABLE_CLEAR);

			/* clear and disable the bbr interrupt */
			write32(base_address + MMCHS_SD_STAT,
			    MMCHS_SD_IE_BRR_ENABLE_CLEAR);
			set32(base_address + MMCHS_SD_IE,
			    MMCHS_SD_IE_BRR_ENABLE,
			    MMCHS_SD_IE_BRR_ENABLE_DISABLE);
		} else {
			/* Wait for the MMCHS_SD_IE_BWR_ENABLE interrupt */
			if (intr_wait(MMCHS_SD_IE_BWR_ENABLE)) {
				intr_assert(MMCHS_SD_IE_BWR_ENABLE);
				mmc_log_warn(&log, "WFI failed\n");
				return 1;
			}
			/* clear the interrupt directly */
			intr_assert(MMCHS_SD_IE_BWR_ENABLE);

			if (!(read32(base_address +
				    MMCHS_SD_PSTATE) & MMCHS_SD_PSTATE_BWE_EN))
			{
				mmc_log_warn(&log,
				    "Error expected Buffer to be write enabled\n");
				return 1;	/* not ready to write data */
			}

			/* PIO write of one full block.
			 * NOTE(review): this loop writes a fixed 512 bytes
			 * (matching the BLEN programmed above) regardless of
			 * c->data_len — confirm c->data always holds at
			 * least 512 bytes on the write path. */
			for (count = 0; count < 512; count += 4) {
				/* Busy-poll until the buffer is write-enabled
				 * again before pushing the next word. */
				while (!(read32(base_address +
					    MMCHS_SD_PSTATE) &
					MMCHS_SD_PSTATE_BWE_EN)) {
					mmc_log_trace(&log,
					    "Error expected Buffer to be write enabled(%d)\n",
					    count);
				}
				*((char *) &value) = c->data[count];
				*((char *) &value + 1) = c->data[count + 1];
				*((char *) &value + 2) = c->data[count + 2];
				*((char *) &value + 3) = c->data[count + 3];
				write32(base_address + MMCHS_SD_DATA, value);
			}

			/* Wait for TC */
			if (intr_wait(MMCHS_SD_IE_TC_ENABLE_CLEAR)) {
				intr_assert(MMCHS_SD_IE_TC_ENABLE_CLEAR);
				mmc_log_warn(&log,
				    "(Write) Timeout waiting for transfer complete\n");
				return 1;
			}
			intr_assert(MMCHS_SD_IE_TC_ENABLE_CLEAR);
			set32(base_address + MMCHS_SD_IE,
			    MMCHS_SD_IE_BWR_ENABLE,
			    MMCHS_SD_IE_BWR_ENABLE_DISABLE);

		}
	}
	/* On the no-data path this propagates mmchs_send_cmd()'s status. */
	return ret;
}