/** * The default context needs to exist per ring that uses contexts. It stores the * context state of the GPU for applications that don't utilize HW contexts, as * well as an idle case. */ static int create_default_context(struct drm_i915_private *dev_priv) { struct i915_hw_context *ctx; int ret; DRM_LOCK_ASSERT(dev_priv->dev); ret = create_hw_context(dev_priv->dev, NULL, &ctx); if (ret != 0) return (ret); /* We may need to do things with the shrinker which require us to * immediately switch back to the default context. This can cause a * problem as pinning the default context also requires GTT space which * may not be available. To avoid this we always pin the * default context. */ dev_priv->rings[RCS].default_context = ctx; ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); if (ret) goto err_destroy; ret = do_switch(ctx); if (ret) goto err_unpin; DRM_DEBUG_DRIVER("Default HW context loaded\n"); return 0; err_unpin: i915_gem_object_unpin(ctx->obj); err_destroy: do_destroy(ctx); return ret; }
/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: drm file associated with the context, may be NULL
 * @to_id: context id number
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *to;

	/* No-op on hardware without HW context support. */
	if (!HAS_HW_CONTEXTS(ring->dev))
		return 0;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	/* Contexts only exist on the render ring. */
	if (ring != &dev_priv->ring[RCS])
		return 0;

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		/* A named context requires the owning file to look it up. */
		if (file == NULL)
			return -EINVAL;

		to = i915_gem_context_get(file->driver_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	return do_switch(to);
}
/*
 * Remove thread t from this CPU-local scheduler's run queue, picking the
 * queue by the thread's scheduling policy.
 *
 * NOTE(review): the handled policy cases return immediately, so the
 * current-thread check below only runs when t->policy matched none of the
 * cases. If removing the currently running thread should also trigger a
 * switch for top/normal/background threads, those early returns skip it —
 * confirm this is intentional.
 */
void scheduler::local::remove(scheduler::thread * t)
{
	INTL();
	LOCK(_lock);
	switch (t->policy) {
	case scheduling_policy::top:
		_top.remove(t);
		return;
	case scheduling_policy::normal:
		_normal.remove(t);
		return;
	case scheduling_policy::background:
		_background.remove(t);
		return;
	default:
		;
	}
	/* Unrecognized policy: if t is the running thread, switch away. */
	if (t == current_thread()) {
		do_switch();
	}
}
void q3(void) { ptr_proc1_stack=proc1_stack; ptr_proc2_stack=proc2_stack; vector[0]=print; vector[1]=readline; vector[2]=getarg; vector[3]=yield12; vector[4]=yield21; vector[5]=uexit; FILE* fp=NULL; fp=fopen("q3prog1","r"); if(fp==NULL){ perror("fopen q3prog1:"); exit(-1); }; fread(proc1,4091,1,fp); fclose(fp); fp=fopen("q3prog2","r"); if(fp==NULL){ perror("fopen q3prog2:"); exit(-1); }; fread(proc2,4096,1,fp); fclose(fp); ptr_proc1_stack=setup_stack(ptr_proc1_stack,proc1); ptr_proc2_stack=setup_stack(ptr_proc2_stack,proc2); do_switch(&main_stack,ptr_proc1_stack); /* load q3prog1 into process 1 and q3prog2 into process 2 */ /* then switch to process 1 */ }
/* Handle a page fault: age the reference counters, then bring the faulting
 * page into a free physical block, or trigger replacement if none is free. */
void do_page_fault(Ptr_PageTableItem ptr_pageTabIt)
{
	unsigned int i;
	printf("产生缺页中断,开始进行调页...\n");
	/* Aging pass: halve every page's counter and boost the faulting page
	 * (an aging-style approximation of recency/frequency). */
	for (i = 0; i < PAGE_SUM; i++) {
		pageTable[i].count /= 2;
		if (pageTable + i == ptr_pageTabIt) {
			pageTable[i].count += 128;
		}
	}
	/* Look for a free physical block. */
	for (i = 0; i < BLOCK_SUM; i++) {
		if (!blockStatus[i]) {
			/* Read backing-store contents into real memory */
			do_page_in(ptr_pageTabIt, i);
			/* Update the page-table entry */
			ptr_pageTabIt->blockNum = i;
			ptr_pageTabIt->filled = TRUE;
			ptr_pageTabIt->edited = FALSE;
			blockStatus[i] = TRUE;
			return;
		}
	}
	/* No free physical block: perform page replacement */
	do_switch(ptr_pageTabIt);
}
/* Switch the shared controller to another slot if needed.
 *
 * The controller drives one bus at a time: snapshot the outgoing slot's
 * SWITCH/RCA registers so they can be restored on its next switch-in,
 * program the new slot's cached RCA and SWITCH values, then reprogram the
 * sample-timing register for the new slot. */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	/* Fast path: the bus already points at this slot. */
	if (slot->bus_id == host->last_slot)
		return;

	/* Save the outgoing slot's register state. */
	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	/* Restore the new slot's RCA, then issue the bus switch. */
	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	/* Per-slot sample timing for command/data lines. */
	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}
/** * The default context needs to exist per ring that uses contexts. It stores the * context state of the GPU for applications that don't utilize HW contexts, as * well as an idle case. */ static int create_default_context(struct drm_i915_private *dev_priv) { struct i915_hw_context *ctx; int ret; BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); ctx = create_hw_context(dev_priv->dev, NULL); if (IS_ERR(ctx)) return PTR_ERR(ctx); /* We may need to do things with the shrinker which require us to * immediately switch back to the default context. This can cause a * problem as pinning the default context also requires GTT space which * may not be available. To avoid this we always pin the * default context. */ dev_priv->ring[RCS].default_context = ctx; ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); if (ret) { do_destroy(ctx); return ret; } ret = do_switch(NULL, ctx, 0); if (ret) { i915_gem_object_unpin(ctx->obj); do_destroy(ctx); } else { DRM_DEBUG_DRIVER("Default HW context loaded\n"); } return ret; }
/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: drm file associated with the context, may be NULL
 * @to_id: context id number
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *to;

	/* No-op when HW contexts are disabled for this device. */
	if (dev_priv->hw_contexts_disabled)
		return 0;

	/* Contexts only exist on the render ring. */
	if (ring != &dev_priv->rings[RCS])
		return 0;

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		/* A named context requires the owning file to look it up. */
		if (file == NULL)
			return -EINVAL;

		to = i915_gem_context_get(file->driver_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	return do_switch(to);
}
/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: drm file associated with the context, may be NULL
 * @to_id: context id number
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring, struct drm_file *file, int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_file_private *file_priv = NULL;
	struct i915_hw_context *to;
	struct drm_i915_gem_object *from_obj = ring->last_context_obj;

	/* No-op when HW contexts are disabled for this device. */
	if (dev_priv->hw_contexts_disabled)
		return 0;

	/* Contexts only exist on the render ring. */
	if (ring != &dev_priv->ring[RCS])
		return 0;

	if (file)
		file_priv = file->driver_priv;

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		to = i915_gem_context_get(file_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	/* Already on the requested context: nothing to do. */
	if (from_obj == to->obj)
		return 0;

	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
}
/* GTK "activate" handler for the "login screen" menu item: switch with no
 * target user (NULL), exactly as the menu item requests. */
static void
on_login_screen_activate (GtkMenuItem *item, ShellStatusMenu *status)
{
        do_switch (status, (GdmUser *) NULL);
}
/*
 * Restore the CPU's interrupt priority level to pl, and run a pending
 * reschedule if we are dropping all the way to level 0.
 */
void splx(int pl)
{
	irq_disable();
	CIPL=pl; /* don't program the PIC masks yet */
	if(pl==0 && wantSched)
		do_switch(); /* We swap the running thread here. If do_switch() is
		              * also invoked elsewhere, control may resume there
		              * rather than here, or a brand-new thread may start —
		              * and we want it running with CIPL=0, so pushing the
		              * level to the hardware must happen inside the switch. */
	else
		i8259a_reset_mask(); /* CIPL is already set; just send it to the PIC */
	irq_enable();
}
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot) { struct cvm_mmc_host *host = slot->host; u64 emm_switch, wdog; emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host)); emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 | MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2); set_bus_id(&emm_switch, slot->bus_id); wdog = readq(slot->host->base + MIO_EMM_WDOG(host)); do_switch(slot->host, emm_switch); slot->cached_switch = emm_switch; msleep(20); writeq(wdog, slot->host->base + MIO_EMM_WDOG(host)); }
/*
 * Fiber.switch([value]) -- transfer control to this fiber, optionally
 * passing a value to its switch point. Returns the value sent back when
 * control eventually returns here, or NULL with an exception set.
 */
static PyObject *
Fiber_func_switch(Fiber *self, PyObject *args)
{
    Fiber *current;
    PyObject *value = Py_None;

    if (!PyArg_ParseTuple(args, "|O:switch", &value)) {
        return NULL;
    }

    if (!CHECK_STATE) {
        return NULL;
    }

    current = _global_state.current;
    /* A fiber cannot switch to itself. */
    if (self == current) {
        PyErr_SetString(PyExc_FiberError, "cannot switch from a Fiber to itself");
        return NULL;
    }

    /* EMPTY_STACKLET_HANDLE marks a fiber whose function has finished. */
    if (self->stacklet_h == EMPTY_STACKLET_HANDLE) {
        PyErr_SetString(PyExc_FiberError, "Fiber has ended");
        return NULL;
    }

    /* Fibers are bound to the OS thread that created them. */
    if (self->thread_h != current->thread_h) {
        PyErr_SetString(PyExc_FiberError, "cannot switch to a Fiber on a different thread");
        return NULL;
    }

    /* stacklet_h == NULL means the fiber was never started; it has no
     * switch point to receive a value at. */
    if (self->stacklet_h == NULL && value != Py_None) {
        PyErr_SetString(PyExc_ValueError, "cannot specify a value when the Fiber wasn't started");
        return NULL;
    }

    /* Take a reference for the target; presumably released by do_switch —
     * NOTE(review): confirm do_switch consumes this reference. */
    Py_INCREF(value);
    return do_switch(self, value);
}
void q3(void) { vector[0] = &print; vector[1] = &readline; vector[2] = &getarg; vector[3] = &yield12; vector[4] = &yield21; vector[5] = &uexit; /* load q3prog1 into process 1 and q3prog2 into process 2 */ if (!readfile("q3prog1", proc1)) { return; } if (!readfile("q3prog2", proc2)) { return; } process1 = setup_stack(proc1_stack, proc1); process2 = setup_stack(proc2_stack, proc2); do_switch(&homework_stack, process1); }
/*
 * Serve one memory-access request from ptr_memAccReq: translate the virtual
 * address, fault the page in if it is not resident, check the protection
 * bits, then perform the requested read/write/execute on real memory.
 */
void do_response()
{
	Ptr_PageTableItem ptr_pageTabIt;
	unsigned int pageNum, offAddr;
	unsigned int actAddr;
	static int counter=10;
	/* Every 10 requests, age the reference counters (div2 presumably
	 * halves them — see the matching pass in do_page_fault). */
	if(--counter<=0)
	{
		div2();
		counter=10;
	}
	/* A switch request carries no address; hand it off directly. */
	if(ptr_memAccReq->reqType==REQUEST_SWITCH)
	{
		do_switch();
		return;
	}
	/* Check whether the address is out of bounds.
	 * NOTE(review): if virAddr is unsigned, the "< 0" arm is always
	 * false — confirm the field's type. */
	if (ptr_memAccReq->virAddr < 0 || ptr_memAccReq->virAddr >= VIRTUAL_MEMORY_SIZE)
	{
		printf("error1\n");
		do_error(ERROR_OVER_BOUNDARY);
		return;
	}
	/* Compute the page number and the offset within the page */
	pageNum = ptr_memAccReq->virAddr / PAGE_SIZE;
	offAddr = ptr_memAccReq->virAddr % PAGE_SIZE;
	printf("页号为:%u\t页内偏移为:%u\n", pageNum, offAddr);
	/* Fetch the matching page-table entry (pageTable appears to be laid
	 * out as rows of 8 entries — hence the /8 and %8). */
	ptr_pageTabIt = (*pageTable)[pageNum/8]+pageNum%8;;
	/* Raise a page fault if the page is not resident */
	if (!ptr_pageTabIt->filled)
	{
		do_page_fault(ptr_pageTabIt);
	}
	actAddr = ptr_pageTabIt->blockNum * PAGE_SIZE + offAddr;
	printf("实地址为:%u\n", actAddr);
	/* Check the page's access rights and service the request */
	switch (ptr_memAccReq->reqType)
	{
		case REQUEST_READ:	/* read request */
		{
			ptr_pageTabIt->count++;
			aaaaaaccess(ptr_pageTabIt->blockNum);
			if (!(ptr_pageTabIt->proType & READABLE))	/* page not readable */
			{
				printf("error2\n");
				do_error(ERROR_READ_DENY);
				return;
			}
			/* Read the value out of real memory */
			printf("读操作成功:值为%02X\n", actMem[actAddr]);
			break;
		}
		case REQUEST_WRITE:	/* write request */
		{
			ptr_pageTabIt->count++;
			aaaaaaccess(ptr_pageTabIt->blockNum);
			if (!(ptr_pageTabIt->proType & WRITABLE))	/* page not writable */
			{
				printf("error3\n");
				do_error(ERROR_WRITE_DENY);
				return;
			}
			/* Write the requested value into real memory and mark dirty */
			actMem[actAddr] = ptr_memAccReq->value;
			ptr_pageTabIt->edited = TRUE;
			printf("写操作成功\n");
			break;
		}
		case REQUEST_EXECUTE:	/* execute request */
		{
			ptr_pageTabIt->count++;
			aaaaaaccess(ptr_pageTabIt->blockNum);
			if (!(ptr_pageTabIt->proType & EXECUTABLE))	/* page not executable */
			{
				printf("error4\n");
				do_error(ERROR_EXECUTE_DENY);
				return;
			}
			printf("执行成功\n");
			break;
		}
		default:	/* invalid request type */
		{
			printf("error5\n");
			do_error(ERROR_INVALID_REQUEST);
			return;
		}
	}
}
/* Syscall (vector index 3): yield from process 1 to process 2 — by the
 * pattern at the other call sites, do_switch saves the outgoing context
 * through its first (address) argument and resumes the second. */
void yield12(void) /* vector index = 3 */
{
	do_switch(&process1, process2);
}
/* Syscall (vector index 4): yield from process 2 back to process 1 —
 * mirror of yield12: save into process2, resume process1. */
void yield21(void) /* vector index = 4 */
{
	do_switch(&process2, process1);
}
/* Syscall (vector index 5): exit the user program and resume the kernel's
 * main context. NOTE(review): the outgoing context is saved into
 * ptr_proc1_stack unconditionally, which assumes uexit is only invoked
 * from process 1 — confirm process 2's exit path. */
void uexit(void) /* vector index = 5 */
{
	do_switch(&ptr_proc1_stack,main_stack);
}
/* Syscall (vector index 4): yield from process 2 back to process 1 —
 * save the outgoing context into ptr_proc2_stack, resume ptr_proc1_stack. */
void yield21(void) /* vector index = 4 */
{
	do_switch(&ptr_proc2_stack,ptr_proc1_stack);
}
/* Syscall (vector index 3): yield from process 1 to process 2 —
 * save the outgoing context into ptr_proc1_stack, resume ptr_proc2_stack. */
void yield12(void) /* vector index = 3 */
{
	do_switch(&ptr_proc1_stack,ptr_proc2_stack);
}
/* Syscall (vector index 5): exit the user program. The NULL first argument
 * presumably tells do_switch not to save the outgoing context (the process
 * is done); control resumes in homework_stack — confirm do_switch accepts
 * NULL here. */
void uexit(void) /* vector index = 5 */
{
	do_switch(NULL, homework_stack);
}
/*
 * Fiber.throw(typ[, val[, tb]]) -- raise an exception inside this fiber.
 *
 * Validates and normalizes the (type, value, traceback) triple, then
 * switches to the target fiber with the error state pre-set, so the
 * exception is raised at that fiber's current switch point.
 *
 * BUG FIX: the cross-thread check used to 'return NULL' directly, which
 * leaked the references taken on typ/val/tb above; it now jumps to the
 * common error path like every other failure case after the INCREFs.
 */
static PyObject *
Fiber_func_throw(Fiber *self, PyObject *args)
{
    Fiber *current;
    PyObject *typ, *val, *tb;

    val = tb = NULL;

    if (!PyArg_ParseTuple(args, "O|OO:throw", &typ, &val, &tb)) {
        return NULL;
    }

    /* First, check the traceback argument, replacing None, with NULL */
    if (tb == Py_None) {
        tb = NULL;
    } else if (tb != NULL && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError, "throw() third argument must be a traceback object");
        return NULL;
    }

    /* From here on we own a reference to each of typ/val/tb; every failure
     * path must release them via the error label. */
    Py_INCREF(typ);
    Py_XINCREF(val);
    Py_XINCREF(tb);

    if (PyExceptionClass_Check(typ)) {
        PyErr_NormalizeException(&typ, &val, &tb);
    } else if (PyExceptionInstance_Check(typ)) {
        /* Raising an instance. The value should be a dummy. */
        if (val && val != Py_None) {
            PyErr_SetString(PyExc_TypeError, "instance exceptions cannot have a separate value");
            goto error;
        } else {
            /* Normalize to raise <class>, <instance> */
            Py_XDECREF(val);
            val = typ;
            typ = PyExceptionInstance_Class(typ);
            Py_INCREF(typ);
        }
    } else {
        /* Not something you can raise. throw() fails. */
        PyErr_Format(PyExc_TypeError, "exceptions must be classes, or instances, not %s", Py_TYPE(typ)->tp_name);
        goto error;
    }

    if (!CHECK_STATE) {
        goto error;
    }

    current = _global_state.current;
    /* A fiber cannot throw into itself. */
    if (self == current) {
        PyErr_SetString(PyExc_FiberError, "cannot throw from a Fiber to itself");
        goto error;
    }

    /* EMPTY_STACKLET_HANDLE marks a fiber whose function has finished. */
    if (self->stacklet_h == EMPTY_STACKLET_HANDLE) {
        PyErr_SetString(PyExc_FiberError, "Fiber has ended");
        goto error;
    }

    /* Fibers are bound to the OS thread that created them. */
    if (self->thread_h != current->thread_h) {
        PyErr_SetString(PyExc_FiberError, "cannot switch to a Fiber on a different thread");
        goto error; /* was 'return NULL', leaking typ/val/tb */
    }

    /* set error and do a switch with NULL as the value */
    PyErr_Restore(typ, val, tb);
    return do_switch(self, NULL);

error:
    /* Didn't use our arguments, so restore their original refcounts */
    Py_DECREF(typ);
    Py_XDECREF(val);
    Py_XDECREF(tb);
    return NULL;
}