int main(int argc, char *argv[]) { struct sigaltstack ss; int status; pid_t kid; void *buf; if ((buf = malloc(SIGSTKSZ)) == NULL) err(1, "malloc failed"); bzero(&ss, sizeof(ss)); ss.ss_sp = buf; ss.ss_size = SIGSTKSZ; if (sigaltstack(&ss, NULL) != 0) err(1, "failed to set sigaltstack"); check_stack(buf, "parent"); if ((kid = fork()) == -1) err(1, "fork failed"); if (kid == 0) { check_stack(buf, "child"); _exit(0); } if (waitpid(kid, &status, 0) != kid) err(1, "waitpid failed"); if (!WIFEXITED(status)) errx(1, "child did not exit normally"); return (WEXITSTATUS(status)); }
/* Ensure every dependency of task t is submitted and prioritized, then
   report whether they have all finished.  If some dependency is still
   pending (Waiting/Queued/Running), t is appended to that dependency's
   reverse-dependency list and false is returned; true means every
   dependency already reached Success or Failed. */
bool mt_task_queue::check_deps(gtask const & t) {
    check_stack("mt_task_queue::check_deps");
    lean_always_assert(get_data(t));

    buffer<gtask> dependencies;
    try {
        get_data(t)->m_imp->get_dependencies(dependencies);
    } catch (...) {
        // best effort: a throwing get_dependencies leaves the buffer as-is
    }

    auto const priority = get_prio(t);

    // First pass: make sure each dependency is scheduled at (at least) our priority.
    for (auto & d : dependencies) {
        if (!d) continue;
        submit_core(d, priority);
        bump_prio(d, priority);
    }

    // Second pass: block on the first dependency that has not finished yet.
    for (auto & d : dependencies) {
        if (!d) continue;
        switch (get_state(d).load()) {
            case task_state::Waiting:
            case task_state::Queued:
            case task_state::Running:
                lean_always_assert(get_imp(d));
                get_sched_info(d).m_reverse_deps.push_back(t);
                return false;
            case task_state::Success:
            case task_state::Failed:
                break;
            default:
                lean_unreachable();
        }
    }
    return true;
}
// Slow path entered when execution crosses stack_limit_.  That can mean a
// genuine stack check is due, or that another actor deliberately moved the
// limit to stack_start_ as a sentinel just to get our attention.
bool VM::check_interrupts(CallFrame* call_frame, void* end) {
  // Sentinel case: the limit was reset purely to force us in here, so put
  // the real limit back.  Otherwise perform an actual stack-depth check.
  if(stack_limit_ == stack_start_) {
    reset_stack_limit();
  } else if(!check_stack(call_frame, end)) {
    return false;
  }

  // Service any pending thread-local interrupts.
  if(unlikely(check_local_interrupts)) {
    if(!process_async(call_frame)) return false;
  }

  // If the current thread is single-stepping under a debugger, assist it.
  if(thread_step()) {
    clear_thread_step();
    if(!Helpers::yield_debugger(this, call_frame, Qnil)) return false;
  }

  return true;
}
/* Read the flag bytes of one NAND page's spare (FSA) area into pAdd.
 * Returns NF_SUCCESS, or NF_FAIL on bad arguments or a device read error.
 * Note: the flag bytes copied so far are still written to pAdd even when
 * the device reports a failure (matches existing caller expectations). */
E_NANDERRORCODE Nand_ReadFlag(T_PNANDFLASH pNand, T_U32 nChip, T_U32 nBlock,
                              T_U32 nPage, T_U8 *pAdd, T_U32 nAddLen)
{
    T_NAND_DATA tData;
    T_NAND_ADDR tAddr;
    T_U8 aFlagBuf[NAND_MAX_ADD_LEN];
    T_U32 nDevRet;

    /* reject out-of-range coordinates and a missing output buffer */
    if ((nChip >= g_nChipCnt) || (nBlock >= g_nBlockPerChip)
        || (nPage >= g_nPagePerBlock) || (AK_NULL == pAdd))
    {
        AK_DEBUG_OUTPUT("Reading Error Arg: Chip %ld, Block %ld, Page %ld .\n",
                        nChip, nBlock, nPage);
        return NF_FAIL;
    }

    /* describe the transfer: 1 spare-area unit into the local flag buffer */
    config_device_data(&tData, AK_NULL, aFlagBuf, 1, 0, AREA_FSA);
    config_device_addr(&tAddr, nChip, nBlock, nPage,
                       bSmall ? 0 : tData.pEccCtrl->nMainSectCnt);

    check_stack();
    nDevRet = m_pDevice->FunSet.read(m_pDevice, &tAddr, &tData);
    copy_add(pAdd, aFlagBuf, nAddLen);

    if (NAND_FAIL_MASK & nDevRet)
    {
        AK_DEBUG_OUTPUT("Read Flag Fail: Chip %ld, Block %ld Page %ld\n",
                        nChip, nBlock, nPage);
        return NF_FAIL;
    }
    return NF_SUCCESS;
}
/* Run the container self-tests in order (list, stack, queue).
 * Exit code identifies the first failing suite: 1 = list, 2 = stack,
 * 3 = queue, 0 = all passed.  Each failure also logs the suite's own
 * error code to stderr. */
int main(int argc, char *argv[])
{
    int rc;

    if ((rc = check_list()) != 0) {
        fprintf(stderr, "LIST fails: %d\n", rc);
        return 1;
    }
    if ((rc = check_stack()) != 0) {
        fprintf(stderr, "STACK fails: %d\n", rc);
        return 2;
    }
    if ((rc = check_queue()) != 0) {
        fprintf(stderr, "QUEUE fails: %d\n", rc);
        return 3;
    }
    return 0;
}
// Scan every block's LIR and record register usage in _info.  Registers may
// also be live in the value stack at block boundaries, so both the entry
// state and the end-of-block state are inspected.  If any call was seen,
// all caller-saved CPU registers are locked as well.
BlockListScanInfo(BlockList* blocks)
 : _info(new RegisterManager())
 , _had_call(false) {
  for (int i = 0; i < blocks->length(); i++) {
    BlockBegin* block = blocks->at(i);
    if (block->lir() != NULL) {
      traverse(block, block->lir()->instructions_list());
    }
    // Registers may be used to hold values on the top of stack.
    check_stack(block->state());
    check_stack(block->end()->state());
  }
  if (_had_call) {
    // Calls clobber the caller-saved set; reserve those registers.
    for (int i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
      _info->lock(FrameMap::caller_save_cpu_reg_at(i)->as_rinfo());
    }
  }
}
// Make `frame` the active call frame after a stack-depth check.  The prior
// active frame is exposed to the caller via `previous_frame` and linked as
// frame->previous.  Returns false (frame not pushed) on stack overflow.
bool VM::push_call_frame(STATE, CallFrame* frame, CallFrame*& previous_frame) {
  if(!check_stack(state, frame)) return false;

  CallFrame* current = call_frame_;
  frame->previous = current;
  previous_frame = current;
  call_frame_ = frame;

  return true;
}
/*!
 * POP: pop the word on top of the stack into a data location.
 * \param pmach the machine being executed
 * \param instr the current instruction
 * \param addr  address of the current instruction
 */
bool pop(Machine *pmach, Instruction instr, unsigned addr) {
    check_immediate(instr, addr);                   // POP takes no immediate operand
    unsigned int dest = get_address(pmach, instr);
    check_data_addr(pmach, dest, addr);             // destination must be a valid data address
    pmach->_sp += 1;                                // stack grows downward: popping raises SP
    check_stack(pmach, addr);                       // SP must still lie inside the stack segment
    pmach->_data[dest] = pmach->_data[pmach->_sp];
    return true;
}
/*!
 * CALL: conditional subroutine call — push the return address and jump.
 * \param pmach the machine being executed
 * \param instr the current instruction
 * \param addr  address of the current instruction
 */
bool call(Machine *pmach, Instruction instr, unsigned addr) {
    check_immediate(instr, addr);   // CALL takes no immediate operand
    check_stack(pmach, addr);       // room must remain on the stack
    if (allowed_condition(pmach, instr, addr)) {
        pmach->_data[pmach->_sp] = pmach->_pc;  // push the return address
        pmach->_sp--;
        pmach->_pc = get_address(pmach, instr); // transfer control to the target
    }
    return true;
}
/*!
 * PUSH: push either an immediate value or the word at a data address.
 * \param pmach the machine being executed
 * \param instr the current instruction
 * \param addr  address of the current instruction
 */
bool push(Machine *pmach, Instruction instr, unsigned addr) {
    check_stack(pmach, addr);   // room must remain on the stack
    if (!instr.instr_generic._immediate) {
        // Addressed operand: validate and fetch the source word.
        unsigned int src = get_address(pmach, instr);
        check_data_addr(pmach, src, addr);
        pmach->_data[pmach->_sp] = pmach->_data[src];
    } else {
        // Immediate operand: push the literal value.
        pmach->_data[pmach->_sp] = instr.instr_immediate._value;
    }
    pmach->_sp--;   // stack grows downward
    return true;
}
// Execute this statement: establish its source location and parse options
// for the duration of the call (RAII helpers), honoring stack limits and
// pending thread cancellation first, then dispatch to execImpl().
int AbstractStatement::exec(AbstractQoreNode **return_value, ExceptionSink *xsink) {
   printd(1, "AbstractStatement::exec() this: %p file: %s line: %d\n", this, loc.file, loc.start_line);

   // make this statement's location the current program location (RAII)
   QoreProgramLocationHelper location_scope(loc);

#ifdef QORE_MANAGE_STACK
   // bail out on stack exhaustion; check_stack() raises on xsink
   if (check_stack(xsink))
      return 0;
#endif

   // honor any pending cancellation request for this thread
   pthread_testcancel();

   // apply this statement's parse options while it executes (RAII)
   QoreProgramBlockParseOptionHelper options_scope(pwo.parse_options);
   return execImpl(return_value, xsink);
}
/* Begin a stack-usage measurement: stamp a 0xdeadbeef pattern over the
 * currently-unused stack region (from the stack pointer up to the limit
 * reported by check_stack()).  The stack pointer is stored in *base and
 * the limit is returned; check_stack_stop() later scans for the first
 * overwritten word. */
unsigned long check_stack_start(unsigned long *base)
{
	unsigned long sp, limit, addr;

	limit = check_stack(&sp);
	*base = sp;

	/* fill the unused region, one 32-bit word at a time */
	addr = sp;
	while (addr < limit) {
		*(long *)addr = 0xdeadbeef;
		addr += 4;
	}
	return limit;
}
/* Finish a stack-usage measurement started by check_stack_start(): scan
 * upward from the stack pointer and return the address of the first word
 * whose 0xdeadbeef pattern survived... i.e. the first position where the
 * pattern no longer matches marks how deep the stack grew.  The stack
 * pointer is stored in *base. */
unsigned long check_stack_stop(unsigned long *base)
{
	unsigned long sp, limit, addr;

	limit = check_stack(&sp);
	*base = sp;

	/* advance while the pattern is intact */
	addr = sp;
	while (addr < limit && *(long *)addr == 0xdeadbeef)
		addr += 4;

	/* first mismatching position (== limit if nothing was overwritten) */
	return addr;
}
/* Lock (bLock true) or unlock (bLock false) the remapped pages covering
 * [data, data+nBufLen).  Returns AK_TRUE only when a lock was actually
 * taken; an unlock is only issued if bEverLocked says a lock exists.
 * Pages already reserved in the remap table are never locked. */
T_BOOL lock_valid_addr(T_U32 data, T_U32 nBufLen, T_BOOL bLock, T_BOOL bEverLocked)
{
    T_S32 vIndex;
    T_U8 resv = 0;

    if (!bLock)
    {
        /* unlock path: only undo a lock that was really taken */
        if (bEverLocked)
        {
            remap_lock_page(data, nBufLen, AK_FALSE);
        }
        return AK_FALSE;
    }

    check_stack();
    if (0 == data)
    {
        return AK_FALSE;
    }

    /* query the remap table with interrupts disabled; addresses are
       aligned down to the 4K virtual page boundary first */
    store_all_int();
    vIndex = remap_get_vaddrindex(data & ~(VPAGE_SIZE - 1));
    if (-1 != vIndex)
    {
        resv = remap_page_is_resv(vIndex);
    }
    restore_all_int();

    if (0 == resv)
    {
        remap_lock_page(data, nBufLen, AK_TRUE);
        return AK_TRUE;
    }
    return AK_FALSE;
}
/*
 * Core backtracking match loop.  Walks the compiled SRegInfo node tree
 * starting at `re`, consuming characters of *global_pattern from position
 * `toParse`.  `leftenter` distinguishes the first (descending) visit of a
 * node from the returning (ascending) visit; `action` carries a pending
 * rea_* continuation popped from the backtrack stack.  check_stack()
 * pops a saved state on match failure (backtracking) and insert_stack()
 * pushes one; both mutate re/prev/toParse/leftenter/action in place.
 * Returns true when the whole expression matches.
 * NOTE(review): behavior of check_stack/insert_stack is inferred from the
 * call pattern here — confirm against their definitions.
 */
bool CRegExp::lowParse(SRegInfo *re, SRegInfo *prev, int toParse)
{
  int i, sv, wlen;
  bool leftenter = true;
  bool br = false;
  const String &pattern = *global_pattern;
  int action = -1;

  // Entering with re == null means we resume after prev's subtree.
  if (!re){
    re = prev->parent;
    leftenter = false;
  };
  while (true){
    while(re || action != -1){
      // Only interpret the node itself when no continuation is pending.
      if (re && action == -1)
      switch(re->op){
        case ReEmpty:
          break;
        case ReBrackets:
        case ReNamedBrackets:
          if (leftenter){
            // descending: remember where the group started, go inside
            re->s = toParse;
            re = re->un.param;
            leftenter = true;
            continue;
          };
          if (re->param0 == -1) break;
          if (re->op == ReBrackets){
            // ascending: record the captured range for group param0
            if (re->param0 || !startChange) matches->s[re->param0] = re->s;
            if (re->param0 || !endChange) matches->e[re->param0] = toParse;
            if (matches->e[re->param0] < matches->s[re->param0])
              matches->s[re->param0] = matches->e[re->param0];
          }else{
#ifndef NAMED_MATCHES_IN_HASH
            matches->ns[re->param0] = re->s;
            matches->ne[re->param0] = toParse;
            if (matches->ne[re->param0] < matches->ns[re->param0])
              matches->ns[re->param0] = matches->ne[re->param0];
#else
            SMatch mt = { re->s, toParse };
            namedMatches->setItem(re->namedata, mt);
#endif
          };
          break;
        case ReSymb:
          // match a single literal character
          if (toParse >= end){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (ignoreCase){
            if (Character::toLowerCase(pattern[toParse]) != Character::toLowerCase(re->un.symbol) &&
                Character::toUpperCase(pattern[toParse]) != Character::toUpperCase(re->un.symbol)){
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              continue;
            }
          }else if (pattern[toParse] != re->un.symbol){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          toParse++;
          break;
        case ReMetaSymb:
          // \d, \w, anchors etc. — checkMetaSymbol may or may not consume
          if (!checkMetaSymbol(re->un.metaSymbol, toParse)){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          break;
        case ReWord:
          // match a literal multi-character word
          wlen = re->un.word->length();
          if (toParse+wlen > end) {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (ignoreCase){
            if (!DString(&pattern, toParse, wlen).equalsIgnoreCase(re->un.word)){
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              continue;
            }
            toParse += wlen;
          }else{
            br = false;
            for(i = 0; i < wlen; i++){
              if(pattern[toParse+i] != (*re->un.word)[i]){
                check_stack(false,&re,&prev,&toParse,&leftenter,&action);
                br = true;
                break;
              }
            };
            if (br) continue;
            toParse += wlen;
          }
          break;
        case ReEnum:
          // character class [...]
          if (toParse >= end){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (!re->un.charclass->inClass(pattern[toParse])) {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          toParse++;
          break;
        case ReNEnum:
          // negated character class [^...]
          if (toParse >= end){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (re->un.charclass->inClass(pattern[toParse])){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          toParse++;
          break;
#ifdef COLORERMODE
        case ReBkTrace:
          // back-reference into a previous trace (backStr/backTrace), case-sensitive
          sv = re->param0;
          if (!backStr || !backTrace || sv == -1){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          br = false;
          for (i = backTrace->s[sv]; i < backTrace->e[sv]; i++){
            if (toParse >= end || pattern[toParse] != (*backStr)[i]){
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              br = true;
              break;
            }
            toParse++;
          };
          if (br) continue;
          break;
        case ReBkTraceN:
          // same as ReBkTrace but case-insensitive
          sv = re->param0;
          if (!backStr || !backTrace || sv == -1){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          br = false;
          for (i = backTrace->s[sv]; i < backTrace->e[sv]; i++){
            if (toParse >= end || Character::toLowerCase(pattern[toParse]) != Character::toLowerCase((*backStr)[i])) {
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              br = true;
              break;
            }
            toParse++;
          };
          if (br) continue;
          break;
        case ReBkTraceName:
#ifndef NAMED_MATCHES_IN_HASH
          sv = re->param0;
          if (!backStr || !backTrace || sv == -1) {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          br = false;
          for (i = backTrace->ns[sv]; i < backTrace->ne[sv]; i++){
            if (toParse >= end || pattern[toParse] != (*backStr)[i]) {
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              br = true;
              break;
            }
            toParse++;
          };
          if (br) continue;
          break;
#else
          // !!!; not implemented for hash-based named matches: always fail
          {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
#endif // NAMED_MATCHES_IN_HASH
        case ReBkTraceNName:
#ifndef NAMED_MATCHES_IN_HASH
          sv = re->param0;
          if (!backStr || !backTrace || sv == -1) {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          br = false;
          for (i = backTrace->s[sv]; i < backTrace->e[sv]; i++){
            if (Character::toLowerCase(pattern[toParse]) != Character::toLowerCase((*backStr)[i]) || toParse >= end) {
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              br = true;
              break;
            }
            toParse++;
          };
          if (br) continue;
          break;
#else
          // !!; not implemented for hash-based named matches: always fail
          {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
#endif // NAMED_MATCHES_IN_HASH
#endif // COLORERMODE
        case ReBkBrackName:
          // back-reference to a named capture group
#ifndef NAMED_MATCHES_IN_HASH
          sv = re->param0;
          if (sv == -1 || cnMatch <= sv) {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (matches->ns[sv] == -1 || matches->ne[sv] == -1) {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          br = false;
          for (i = matches->ns[sv]; i < matches->ne[sv]; i++){
            if (toParse >= end || pattern[toParse] != pattern[i]) {
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              br = true;
              break;
            }
            toParse++;
          };
          if (br) continue;
          break;
#else
          {
            SMatch *mt = namedMatches->getItem(re->namedata);
            if (!mt) {
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              continue;
            }
            if (mt->s == -1 || mt->e == -1) {
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              continue;
            }
            br = false;
            for (i = mt->s; i < mt->e; i++){
              if (toParse >= end || pattern[toParse] != pattern[i]){
                check_stack(false,&re,&prev,&toParse,&leftenter,&action);
                br = true;
                break;
              }
              toParse++;
            };
            if (br) continue;
          };
          break;
#endif // NAMED_MATCHES_IN_HASH
        case ReBkBrack:
          // back-reference to a numbered capture group
          sv = re->param0;
          if (sv == -1 || cMatch <= sv){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (matches->s[sv] == -1 || matches->e[sv] == -1){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          br = false;
          for (i = matches->s[sv]; i < matches->e[sv]; i++){
            if (toParse >= end || pattern[toParse] != pattern[i]){
              check_stack(false,&re,&prev,&toParse,&leftenter,&action);
              br = true;
              break;
            }
            toParse++;
          };
          if (br) continue;
          break;
        case ReAhead:
          // positive lookahead: match subtree, then rewind (rea_Break keeps going)
          if (!leftenter){
            check_stack(true,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          {
            insert_stack(&re,&prev,&toParse,&leftenter,rea_Break,rea_False,&re->un.param,0,toParse);
            continue;
          }
        case ReNAhead:
          // negative lookahead: subtree success means overall failure
          if (!leftenter){
            check_stack(true,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          {
            insert_stack(&re,&prev,&toParse,&leftenter,rea_False,rea_Break,&re->un.param,0,toParse);
            continue;
          }
        case ReBehind:
          // positive lookbehind: try the subtree param0 chars back
          if (!leftenter){
            check_stack(true,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (toParse - re->param0 < 0) {
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          else{
            insert_stack(&re,&prev,&toParse,&leftenter,rea_Break,rea_False,&re->un.param,0,toParse - re->param0);
            continue;
          }
        case ReNBehind:
          // negative lookbehind
          if (!leftenter){
            check_stack(true,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }
          if (toParse - re->param0 >= 0){
            insert_stack(&re,&prev,&toParse,&leftenter,rea_False,rea_Break,&re->un.param,0,toParse - re->param0);
            continue;
          }
          break;
        case ReOr:
          // alternation; when ascending, skip the remaining alternatives
          if (!leftenter){
            while (re->next) re = re->next;
            break;
          };
          {
            insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_Break,&re->un.param,0,toParse );
            continue;
          }
        case ReRangeN:
          // greedy {n,}: param0 counts mandatory repetitions left
          // first enter into op
          if (leftenter){
            re->param0 = re->s;
            re->oldParse = -1;
          };
          // guard against empty-body infinite loops
          if (!re->param0 && re->oldParse == toParse) break;
          re->oldParse = toParse;
          // making branch
          if (!re->param0){
            insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_RangeN_step2,&re->un.param,0,toParse );
            continue;
          };
          // go into
          if (re->param0) re->param0--;
          re = re->un.param;
          leftenter = true;
          continue;
        case ReRangeNM:
          // greedy {n,m}: param0 = mandatory left, param1 = optional left
          if (leftenter){
            re->param0 = re->s;
            re->param1 = re->e - re->s;
            re->oldParse = -1;
          };
          if (!re->param0){
            if (re->param1) re->param1--;
            else{
              // upper bound reached: commit and move on
              insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_False,&re->next,&re,toParse );
              continue;
            }
            {
              insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_RangeNM_step2,&re->un.param,0,toParse );
              continue;
            }
          };
          if (re->param0) re->param0--;
          re = re->un.param;
          leftenter = true;
          continue;
        case ReNGRangeN:
          // non-greedy {n,}
          if (leftenter){
            re->param0 = re->s;
            re->oldParse = -1;
          };
          if (!re->param0 && re->oldParse == toParse) break;
          re->oldParse = toParse;
          if (!re->param0){
            // try continuing after the range first (non-greedy)
            insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_NGRangeN_step2,&re->next,&re,toParse );
            continue;
          }
          if (re->param0) re->param0--;
          re = re->un.param;
          leftenter = true;
          continue;
        case ReNGRangeNM:
          // non-greedy {n,m}
          if (leftenter){
            re->param0 = re->s;
            re->param1 = re->e - re->s;
            re->oldParse = -1;
          };
          if (!re->param0){
            if (re->param1) re->param1--;
            else {
              insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_False,&re->next,&re,toParse );
              continue;
            }
            {
              insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_NGRangeNM_step2,&re->next,&re,toParse );
              continue;
            }
          };
          if (re->param0) re->param0--;
          re = re->un.param;
          leftenter = true;
          continue;
        default:
          break;
      };
      // Handle a pending continuation popped from the backtrack stack.
      switch (action){
        case rea_False:
          if (count_elem){
            check_stack(false,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }else return false;
        case rea_True:
          if (count_elem){
            check_stack(true,&re,&prev,&toParse,&leftenter,&action);
            continue;
          }else return true;
        case rea_Break:
          action = -1;
          break;
        case rea_RangeN_step2:
          action = -1;
          insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_False,&re->next,&re,toParse);
          continue;
        case rea_RangeNM_step2:
          action = -1;
          insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_RangeNM_step3,&re->next,&re,toParse);
          continue;
        case rea_RangeNM_step3:
          action = -1;
          re->param1++;   // give back one optional repetition and backtrack
          check_stack(false,&re,&prev,&toParse,&leftenter,&action);
          continue;
        case rea_NGRangeN_step2:
          action = -1;
          if (re->param0) re->param0--;
          re = re->un.param;
          leftenter = true;
          continue;
        case rea_NGRangeNM_step2:
          action = -1;
          insert_stack(&re,&prev,&toParse,&leftenter,rea_True,rea_NGRangeNM_step3,&re->un.param,0,toParse);
          continue;
        case rea_NGRangeNM_step3:
          action = -1;
          re->param1++;
          check_stack(false,&re,&prev,&toParse,&leftenter,&action);
          continue;
      }
      // Advance to the sibling node, or ascend when the chain is exhausted.
      if (!re->next){
        re = re->parent;
        leftenter = false;
      }else{
        re = re->next;
        leftenter = true;
      };
    };
    // Whole tree consumed: report success (may still backtrack if counting).
    check_stack(true,&re,&prev,&toParse,&leftenter,&action);
  }
};
/* 1D: process from packet0 -> packet0.
 * Computes one new row of the 1D cellular automaton in place: the row at
 * `currow` is rewritten from the three rows above it (the CA time horizon
 * is 3 deep), 32 cells per machine word, wrapping at the row edges.
 * Fixes: removed the unused locals `saved` and `rtos`, and added a null
 * guard on the packet header before it is dereferenced (the 2D variant
 * already guards its headers). */
static void pdp_ca_process_ca_1D(t_pdp_ca *x)
{
    t_pdp *header = pdp_packet_header(x->x_packet0);
    uint32_t *data = (uint32_t *)pdp_packet_data (x->x_packet0);
    int width, height;
    int i;

    /* load TOS in middle of buffer to limit the effect of stack errors */
    uint32_t *tos = &x->x_data->stack[2*(PDP_CA_STACKSIZE/2)];
    uint32_t *env = &x->x_data->env[0];
    uint32_t *reg = &x->x_data->reg[0];
    void *ca_routine = x->x_ca_routine;

    int dwwidth;        /* double word width: number of uint32_ts per row */
    int currow;
    unsigned long long result = 0;
    unsigned short temp;
    unsigned short *usdata;
    int dwrow0, dwrow1, dwrow2, dwrow3;

    /* exit if there isn't a valid routine or packet */
    if(!ca_routine) return;
    if(!header) return;

    width = pdp_type_ca_info(header)->width;
    height = pdp_type_ca_info(header)->height;
    dwwidth = width >> 5;
    currow = pdp_type_ca_info(header)->currow;

    /* set destination row to 4th row from top (ca time horizon is 3 deep) */
    dwrow0 = (((currow + height - 3) % height) * width) >> 5;
    dwrow1 = (((currow + height - 2) % height) * width) >> 5;
    dwrow2 = (((currow + height - 1) % height) * width) >> 5;
    dwrow3 = (currow * width) >> 5;

    /* compute new row */
    for(i=0; i < (dwwidth-1) ; i+=1){
        env[0] = data[dwrow0 + i];
        env[1] = data[dwrow0 + i + 1];
        env[2] = data[dwrow1 + i];
        env[3] = data[dwrow1 + i + 1];
        env[4] = data[dwrow2 + i];
        env[5] = data[dwrow2 + i + 1];
        result = scaf_feeder(tos, reg, ca_routine, env);
        data[dwrow3 + i] = result & 0xffffffff;
    }

    /* i == dwwidth-1: compute last column in row (wraps to column 0) */
    env[0] = data[dwrow0 + i];
    env[1] = data[dwrow0];
    env[2] = data[dwrow1 + i];
    env[3] = data[dwrow1];
    env[4] = data[dwrow2 + i];
    env[5] = data[dwrow2];
    result = scaf_feeder(tos, reg, ca_routine, env);
    data[dwrow3 + i] = result & 0xffffffff;

    /* undo the shift: rotate the new row right by one 16-bit word */
    usdata = (unsigned short *)(&data[dwrow3]);
    temp = usdata[(dwwidth*2)-1];
    for (i = (dwwidth*2 - 1); i > 0; i--){
        usdata[i] = usdata[i-1];
    }
    usdata[0] = temp;

    check_stack(x, tos, env);

    /* save current row */
    pdp_type_ca_info(header)->currow = (currow + 1) % height;
}
/*
 * One iteration of the tetris game loop.
 * Reads remote-control commands from the serial port (framed as
 * MESSAGE_START, 0x01, 0x00, 0x01, TOKEN, CMD_*), polls the four
 * hardware buttons on PINJ, and then either handles the game-over
 * dialog or applies the selected move (rotate / down / left / right)
 * plus the timed automatic drop.
 * Returns false when the player chooses "Quit" from the game-over box,
 * true otherwise.
 * c encodes the decoded command: 1=left, 2=right, 3=rotate, 4=down.
 */
bool tetris::loop(){
	char c=0x00;
	int state=0;
	/* serial protocol state machine: advance one byte per state,
	   fall back to state 0 on any unexpected byte */
	while(Serial.available()>0){
		_delay_ms(5);
		if(state==0){
			//pOLED->string(0,"0",0,0);
			if(Serial.read()==MESSAGE_START){ state=1; } else { state=0; }
		} else if(state==1){
			//pOLED->string(0,"1",0,0);
			if(Serial.read()==0x01){ state=2; } else { state=0; }
		} else if(state==2){
			//pOLED->string(0,"2",0,0);
			if(Serial.read()==0x00){ state=3; } else { state=0; }
		} else if(state==3){
			//pOLED->string(0,"3",0,0);
			if(Serial.read()==0x01){ state=4; } else { state=0; }
		} else if(state==4){
			//pOLED->string(0,"4",0,0);
			if(Serial.read()==TOKEN){ state=5; } else { state=0; }
		} else if(state==5){
			//pOLED->string(0,"5",0,0);
			char d=Serial.read();
			if(d==CMD_GO_LEFT){
				//pOLED->string(0,"6",0,0);
				c=1; state=6;
			} else if(d==CMD_GO_RIGHT){
				//pOLED->string(0,"6",0,0);
				c=2; state=6;
			} else if(d==CMD_GO_UP){
				//pOLED->string(0,"6",0,0);
				c=3; state=6;
			} else if(d==CMD_GO_DOWN){
				//pOLED->string(0,"6",0,0);
				c=4; state=6;
			} else { state=0; c=0; }
		} else if(state==6){
			//pOLED->string(0,"7",0,0);
			state=0;
			Serial.flush();
			break;
		}
		// left // right // up // down
	};
	// check if you are still in the race
	if(you_loose){
		// Nope, you lose: show the box once
		// (disp_zeile_bak[0]==123 marks "game-over box already drawn")
		if(pSpeedo->disp_zeile_bak[0]!=123){
			// show an animation depending on line count
			int ani=1; // simpsons
			if(lines>50){
				ani=4; // JTM
			} else if(lines>30){
				ani=5;
			}
			// give the user a second to realize, then make sure
			// no key is still being pressed
			_delay_ms(1000);
			while(Serial.available()>0 ||
			      !(PINJ & (1<<menu_button_links)) ||
			      !(PINJ & (1<<menu_button_rechts)) ||
			      !(PINJ & (1<<menu_button_oben)) ||
			      !(PINJ & (1<<menu_button_unten)) ){
				Serial.flush();
				_delay_ms(50);
			}
			pOLED->animation(ani);
			initDrawField(); // draw the field again, to show the line and level counter
			// make sure to draw this box only once
			pSpeedo->disp_zeile_bak[0]=123;
			// draw box
			pOLED->highlight_bar(8,8,104,48);
			pOLED->string_P(pSpeedo->default_font,PSTR("You loose"),5,2,15,0,0);
			pOLED->string_P(pSpeedo->default_font,PSTR("L Quit"),5,4,15,0,0);
			char temp[2];
			sprintf(temp,"%c",126);  // arrow glyph for the "left" choice
			pOLED->string(pSpeedo->default_font,temp,5,4,15,0,0);
			pOLED->string_P(pSpeedo->default_font,PSTR("R Retry"),5,5,15,0,0);
			sprintf(temp,"%c",127);  // arrow glyph for the "right" choice
			pOLED->string(pSpeedo->default_font,temp,5,5,15,0,0);
			// wait at least one second to prevent an unnoticed button push
			_delay_ms(1000);
		// if you lose and the box has been drawn, wait for a key press
		} else {
			if(c==1 || !(PINJ & (1<<menu_button_links))){
				// Quit: leave the game loop
				_delay_ms(MIN_SIDE_PUSH_TIME);
				return false;
			} else if (c==2 || !(PINJ & (1<<menu_button_rechts))){
				// Retry: restart the game
				_delay_ms(MIN_SIDE_PUSH_TIME);
				pOLED->clear_screen();
				// awkward, but intentional: init() clears the line counter,
				// initDrawField() repaints it, and drawing the field overpaints
				// the "next" preview drawn by init() — so simply run init() twice
				init();
				initDrawField();
				init();
				updateField();
			};
		}
	// nope you haven't lost by now .. go on
	} else {
		////////////////// if there is a button pushed, //////////////////
		if(!(PINJ & (1<<menu_button_links))){
			c=1;
			_delay_ms(MIN_SIDE_PUSH_TIME);
		} else if(!(PINJ & (1<<menu_button_oben))){
			c=3;
			_delay_ms(MIN_TURN_PUSH_TIME);
		} else if(!(PINJ & (1<<menu_button_unten))){
			c=4;
			_delay_ms(MIN_DOWN_PUSH_TIME);
		} else if(!(PINJ & (1<<menu_button_rechts))){
			c=2;
			_delay_ms(MIN_SIDE_PUSH_TIME);
		}
		// now lets see if any action is required
		////////////////// rotate element //////////////////
		if(c==3){
			// rotate the 3x3 element 90 degrees into `copy`:
			bool copy[3][3];
			copy[0][0]=active_element[0][2]; // 00 10 20      02 01 00
			copy[1][0]=active_element[0][1]; // 01 11 21 ==>  12 11 10
			copy[2][0]=active_element[0][0]; // 02 12 22      22 21 20
			copy[0][1]=active_element[1][2];
			copy[1][1]=active_element[1][1];
			copy[2][1]=active_element[1][0];
			copy[0][2]=active_element[2][2];
			copy[1][2]=active_element[2][1];
			copy[2][2]=active_element[2][0];
			// check if a rotation would result in a collision
			bool rotate_possible=true;
			bool check_running=true;
			for(int y=0;y<3 && check_running;y++){
				for(int x=0;x<3 && check_running;x++){
					// check if in the rotated figure the px is in use
					if(copy[x][y]){
						// if so, check if it is already in use by the area
						if(area[active_y+y] & 1<<(COLS-active_x-x)){
							// if so, rotation is impossible
							rotate_possible=false;
							check_running=false;
						};
					};
				};
			};
			// if rotation is possible, move the content from copy -> active_element
			if(rotate_possible){
				for(int x=0;x<3;x++){
					for(int y=0;y<3;y++){
						active_element[x][y]=copy[x][y];
					}
				};
				// rotate the element
				updateField();
			}
		////////////////// move element down //////////////////
		} else if(c==4){
			if(pSpeedo->disp_zeile_bak[1]!=111){
				active_y++;
			};
			updateField();
			check_stack();
			last_update=millis();
		////////////////// move element left //////////////////
		} else if(c==1){
			// find out the leftmost positions
			short most_left[3];
			for(int y=0;y<3;y++){
				most_left[y]=-1;
				for(int x=0;x<3;x++){
					if(active_element[x][y]){
						most_left[y]=x;
						break;
					}
				}
			}
			// check collisions
			if(!((most_left[0]!=-1 && (area[active_y+0] & (1<<(COLS-active_x+1-most_left[0])))) ||
			     (most_left[1]!=-1 && (area[active_y+1] & (1<<(COLS-active_x+1-most_left[1])))) ||
			     (most_left[2]!=-1 && (area[active_y+2] & (1<<(COLS-active_x+1-most_left[2])))))){
				active_x--;
			};
			if(active_x<0){
				active_x=0;
				// at the left wall: if column 0 of the element is empty,
				// shift the element content one column left inside its box
				if(!active_element[0][0] && !active_element[0][1] && !active_element[0][2]){
					// column 0 takes the content of column 1
					active_element[0][0]=active_element[1][0];
					active_element[0][1]=active_element[1][1];
					active_element[0][2]=active_element[1][2];
					// column 1 takes the content of column 2
					active_element[1][0]=active_element[2][0];
					active_element[1][1]=active_element[2][1];
					active_element[1][2]=active_element[2][2];
					// clear column 2
					active_element[2][0]=false;
					active_element[2][1]=false;
					active_element[2][2]=false;
				}
			};
			updateField();
		////////////////// move element right //////////////////
		} else if(c==2){
			// find out the rightmost positions
			short most_right[3];
			for(int y=0;y<3;y++){
				most_right[y]=-1;
				for(int x=2;x>=0;x--){
					if(active_element[x][y]){
						most_right[y]=x;
						break;
					}
				}
			}
			// check collisions
			if(!((most_right[0]!=-1 && (area[active_y+0] & (1<<(COLS-active_x-1-most_right[0])))) ||
			     (most_right[1]!=-1 && (area[active_y+1] & (1<<(COLS-active_x-1-most_right[1])))) ||
			     (most_right[2]!=-1 && (area[active_y+2] & (1<<(COLS-active_x-1-most_right[2])))))){
				active_x++;
			};
			if(active_x>9){
				active_x=9;
				// if we wanted to go further right, try shifting the element
				// content one column right inside its 3x3 box instead
				if(!active_element[2][0] && !active_element[2][1] && !active_element[2][2]){
					// column 2 takes the content of column 1
					active_element[2][0]=active_element[1][0];
					active_element[2][1]=active_element[1][1];
					active_element[2][2]=active_element[1][2];
					// column 1 takes the content of column 0
					active_element[1][0]=active_element[0][0];
					active_element[1][1]=active_element[0][1];
					active_element[1][2]=active_element[0][2];
					// clear column 0
					active_element[0][0]=false;
					active_element[0][1]=false;
					active_element[0][2]=false;
				}
			}
			updateField();
		}
		////////////////// auto move down //////////////////
		// drop the element one row after the step time has elapsed
		if(last_update+time_between_steps<millis()){
			check_stack(); // check first, then move
			active_y++;
			updateField();
			last_update=millis();
			// collisions are then detected on the next pass after the automatic drop
		};
	};
	return true;
};
/* 2D: process from packet0 -> packet1.
 * Computes one generation of the 2D cellular automaton: every pair of rows
 * of packet0 is fed (with its 2x2-word neighborhood, wrapping at the right
 * edge and the bottom) through the compiled ca_routine and written to
 * packet1.  The packet's logical origin shifts one line up and 16 cells
 * left per generation; the new offset is stored in header1.
 * Fixes: the null checks on header0/header1 used to run AFTER the headers
 * were dereferenced for width/height/offset — they now guard the first
 * dereference.  Also removed the unused local `rtos`. */
static void pdp_ca_process_ca_2D(t_pdp_ca *x)
{
    t_pdp *header0 = pdp_packet_header(x->x_packet0);
    t_pdp *header1 = pdp_packet_header(x->x_packet1);
    uint32_t *data0 = (uint32_t *)pdp_packet_data (x->x_packet0);
    uint32_t *data1 = (uint32_t *)pdp_packet_data (x->x_packet1);
    int width, height;
    int i,j;

    /* load TOS in middle of buffer to limit the effect of stack errors */
    uint32_t *tos = &x->x_data->stack[2*(PDP_CA_STACKSIZE/2)];
    uint32_t *env = &x->x_data->env[0];
    uint32_t *reg = &x->x_data->reg[0];
    void *ca_routine = x->x_ca_routine;

    int offset, xoffset, yoffset;
    int dwwidth;        /* double word width: number of uint32_ts per row */
    unsigned long long result = 0;

    /* exit if there isn't a valid routine or packet (checked BEFORE any
       header dereference) */
    if(!ca_routine) return;
    if(!header0) return;
    if(!header1) return;

    width = pdp_type_ca_info(header0)->width;
    height = pdp_type_ca_info(header0)->height;
    offset = pdp_type_ca_info(header0)->offset;
    xoffset = offset % width;
    yoffset = offset / width;
    dwwidth = width >> 5;

    //post("pdp_ca: PRE offset: %d, xoffset: %d, yoffset: %d", offset, xoffset, yoffset);
    /* calculate new offset: lines shift up, rows shift left by 16 cells */
    xoffset = (xoffset + width - 16) % width;
    yoffset = (yoffset + height - 1) % height;
    offset = yoffset * width + xoffset;
    //post("pdp_ca: PST offset: %d, xoffset: %d, yoffset: %d", offset, xoffset, yoffset);
    pdp_type_ca_info(header1)->offset = offset;

    for(j=0; j<dwwidth*(height - 2); j+=(dwwidth<<1)){
        for(i=0; i < (dwwidth-1) ; i+=1){
            env[0] = data0[i + j];
            env[1] = data0[i + j + 1];
            env[2] = data0[i + j + dwwidth];
            env[3] = data0[i + j + dwwidth + 1];
            env[4] = data0[i + j + (dwwidth<<1)];
            env[5] = data0[i + j + (dwwidth<<1) + 1];
            env[6] = data0[i + j + (dwwidth<<1) + dwwidth];
            env[7] = data0[i + j + (dwwidth<<1) + dwwidth + 1];
            result = scaf_feeder(tos, reg, ca_routine, env);
            data1[i + j] = result & 0xffffffff;
            data1[i + j + dwwidth] = result >> 32;
        }
        /* i == dwwidth-1: last column wraps around to column 0 */
        env[0] = data0[i + j];
        env[1] = data0[j];
        env[2] = data0[i + j + dwwidth];
        env[3] = data0[j + dwwidth];
        env[4] = data0[i + j + (dwwidth<<1)];
        env[5] = data0[j + (dwwidth<<1)];
        env[6] = data0[i + j + (dwwidth<<1) + dwwidth];
        env[7] = data0[j + (dwwidth<<1) + dwwidth];
        result = scaf_feeder(tos, reg, ca_routine, env);
        data1[i + j] = result & 0xffffffff;
        data1[i + j + dwwidth] = result >> 32;
    }

    /* j == dwwidth*(height - 2): last row pair wraps around to the top */
    for(i=0; i < (dwwidth-1) ; i+=1){
        env[0] = data0[i + j];
        env[1] = data0[i + j + 1];
        env[2] = data0[i + j + dwwidth];
        env[3] = data0[i + j + dwwidth + 1];
        env[4] = data0[i];
        env[5] = data0[i + 1];
        env[6] = data0[i + dwwidth];
        env[7] = data0[i + dwwidth + 1];
        result = scaf_feeder(tos, reg, ca_routine, env);
        data1[i + j] = result & 0xffffffff;
        data1[i + j + dwwidth] = result >> 32;
    }
    /* i == dwwidth-1: bottom-right corner wraps in both directions */
    env[0] = data0[i + j];
    env[1] = data0[j];
    env[2] = data0[i + j + dwwidth];
    env[3] = data0[j + dwwidth];
    env[4] = data0[i];
    env[5] = data0[0];
    env[6] = data0[i + dwwidth];
    env[7] = data0[dwwidth];
    result = scaf_feeder(tos, reg, ca_routine, env);
    data1[i + j] = result & 0xffffffff;
    data1[i + j + dwwidth] = result >> 32;

    check_stack(x, tos, env);
    return;
}
/*
 * Page-fault entry point (OpenRISC-style, called from the exception
 * vectors).  `address` is the faulting virtual address, `vector` the
 * exception vector (0x300/0x400 data/instruction faults are treated
 * specially), and `write_acc` non-zero for a write access.  Resolves the
 * fault via the VMA tree and handle_mm_fault(), delivers SIGSEGV/SIGBUS
 * to user space, applies exception-table fixups for kernel faults, and
 * lazily syncs vmalloc mappings from init_mm.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
                              unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	siginfo_t info;
	int fault;

	check_stack(NULL, __FILE__, __FUNCTION__, __LINE__);

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */
	D(phx_mmu("dpf :: addr %x, vect %x, write %x, regs %x, user %x\n",
	          address, vector, write_acc, regs, user_mode(regs)));

	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* we can and should enable interrupts at this point */
	local_irq_enable();

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* are we trying to execute nonexecutable area */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write_acc);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}/*RGD modeled on Cris*/
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		printk("USERSPACE: SIGSEGV (current %p, pid %d)\n",
		       current, current->pid);
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		DPG(show_regs(regs));
		__asm__ __volatile__("l.nop 1");
		return;
	}
	// DPG(show_regs(regs));

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 * when it acesses user-memory. When it fails in one
	 * of those points, we find it in a table and do a jump
	 * to some fixup code that loads an appropriate error
	 * code)
	 */
	{
		const struct exception_table_entry *entry;

		__asm__ __volatile__("l.nop 42");
		// phx_mmu("search exception table");
		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			/* Adjust the instruction pointer in the stackframe */
			// phx_mmu("kernel: doing fixup at EPC=0x%lx to 0x%lx\n", regs->pc, fixup);
			regs->pc = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if ((unsigned long) (address) < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(" at virtual address 0x%08lx\n",address);
	die("Oops", regs, write_acc);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	__asm__ __volatile__("l.nop 42");
	__asm__ __volatile__("l.nop 1");
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 * switch_to...).
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		phx_warn("do_page_fault(): vmalloc_fault will not work, "
		         "since current_pgd assign a proper value somewhere\n"
		         "anyhow we don't need this at the moment\n");
		phx_mmu("vmalloc_fault");

		pgd = (pgd_t *)current_pgd + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */
		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}
void run(char *s) { int i, n; /*if (strncmp(s, "selftest", 8) == 0) { selftest(); return; }*/ if (setjmp(stop_return)) return; init(); while (1) { n = scan(s); p1 = pop(); check_stack(); if (n == 0) break; // if debug mode then print the source text if (equaln(get_binding(symbol(TRACE)), 1)) { for (i = 0; i < n; i++) if (s[i] != '\r') printchar(s[i]); if (s[n - 1] != '\n') // n is not zero, see above printchar('\n'); } s += n; push(p1); top_level_eval(); p2 = pop(); check_stack(); if (p2 == symbol(NIL)) continue; // print string w/o quotes if (isstr(p2)) { printstr(p2->u.str); printstr("\n"); continue; } if (equaln(get_binding(symbol(TTY)), 1) || test_flag) // tty mode? printline(p2); else { //#ifdef LINUX display(p2); /*#else push(p2); cmdisplay(); #endif*/ } } }
/*
 * f_create_process(array cmd, mapping|void options)
 *
 * Pike glue: fork() and execvp() a new process.
 *
 * cmd      - argv array; every element must be a string, cmd[0] is the
 *            program to execute.
 * options  - optional mapping understanding the keys "gid", "uid",
 *            "cwd", "stdin", "stdout", "stderr", "rlimit" and "env".
 *
 * Pushes the child pid on the Pike stack in the parent; the child sets
 * up limits/environment/fds and execs (exit(99) if exec fails).
 *
 * Fixes vs. previous revision:
 *  - the "env" option was looked up even when no option mapping was
 *    given, passing a NULL mapping to simple_mapping_string_lookup();
 *  - chdir() was called unconditionally, i.e. chdir(NULL) when no
 *    "cwd" option was present (undefined behaviour).
 */
static void f_create_process( INT32 args )
{
  struct perishables storage;
  struct array *cmd = 0;
  struct mapping *optional = 0;
  struct svalue *tmp;
  int e;
  int stds[3];
  int *fds;
  int wanted_gid = 0, wanted_uid = 0;
  int gid_request = 0, uid_request = 0;
  char *tmp_cwd = NULL;
  pid_t pid = -2;
  extern char **environ;

  fds = stds;
  storage.env = NULL;
  storage.argv = NULL;
  storage.disabled = 0;
  storage.fds = NULL;
  storage.limits = NULL;

  check_all_args("create_process", args, BIT_ARRAY, BIT_MAPPING | BIT_VOID, 0);

  switch(args)
  {
    default:
      optional = Pike_sp[1-args].u.mapping;
      mapping_fix_type_field(optional);
      if(m_ind_types(optional) & ~BIT_STRING)
        Pike_error("Bad index type in argument 2 to Caudium.create_process()\n");
      /* FALL_THROUGH */
    case 1:
      cmd = Pike_sp[-args].u.array;
      if(cmd->size < 1)
        Pike_error("Too few elements in argument array.\n");
      for(e = 0; e < cmd->size; e++)
        if(ITEM(cmd)[e].type != T_STRING)
          Pike_error("Argument is not a string.\n");
      array_fix_type_field(cmd);
      if(cmd->type_field & ~BIT_STRING)
        Pike_error("Bad argument 1 to Caudium.create_process().\n");
  }

  if (optional) {
    /* Credentials the child should switch to. */
    if ((tmp = simple_mapping_string_lookup(optional, "gid"))) {
      switch(tmp->type)
      {
        case T_INT:
          wanted_gid = tmp->u.integer;
          gid_request = 1;
          break;
        default:
          Pike_error("Invalid argument for gid.");
          break;
      }
    }

    if ((tmp = simple_mapping_string_lookup(optional, "uid"))) {
      switch(tmp->type)
      {
        case T_INT:
          wanted_uid = tmp->u.integer;
          uid_request = 1;
          break;
        default:
          Pike_error("Invalid argument for uid.");
          break;
      }
    }

    /* Working directory for the child (8-bit strings only). */
    if((tmp = simple_mapping_string_lookup( optional, "cwd" )) &&
       tmp->type == T_STRING && !tmp->u.string->size_shift)
      tmp_cwd = tmp->u.string->str;

    /* Replacement fds for stdin/stdout/stderr. */
    if((tmp = simple_mapping_string_lookup( optional, "stdin" )) &&
       tmp->type == T_OBJECT)
    {
      fds[0] = fd_from_object( tmp->u.object );
      if(fds[0] == -1)
        Pike_error("Invalid stdin file\n");
    }

    if((tmp = simple_mapping_string_lookup( optional, "stdout" )) &&
       tmp->type == T_OBJECT)
    {
      fds[1] = fd_from_object( tmp->u.object );
      if(fds[1] == -1)
        Pike_error("Invalid stdout file\n");
    }

    if((tmp = simple_mapping_string_lookup( optional, "stderr" )) &&
       tmp->type == T_OBJECT)
    {
      fds[2] = fd_from_object( tmp->u.object );
      if(fds[2] == -1)
        Pike_error("Invalid stderr file\n");
    }

    /* Resource limits to install in the child before exec. */
    if((tmp = simple_mapping_string_lookup(optional, "rlimit")))
    {
      struct svalue *tmp2;
      if(tmp->type != T_MAPPING)
        Pike_error("Wrong type of argument for the 'rusage' option. "
                   "Should be mapping.\n");
#define ADD_LIMIT(X,Y,Z) internal_add_limit(&storage,X,Y,Z);
#ifdef RLIMIT_NPROC
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "nproc")))
        ADD_LIMIT( "nproc", RLIMIT_NPROC, tmp2 );
#endif
#ifdef RLIMIT_MEMLOCK
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "memlock")))
        ADD_LIMIT( "memlock", RLIMIT_MEMLOCK, tmp2 );
#endif
#ifdef RLIMIT_RSS
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "rss")))
        ADD_LIMIT( "rss", RLIMIT_RSS, tmp2 );
#endif
#ifdef RLIMIT_CORE
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "core")))
        ADD_LIMIT( "core", RLIMIT_CORE, tmp2 );
#endif
#ifdef RLIMIT_CPU
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "cpu")))
        ADD_LIMIT( "cpu", RLIMIT_CPU, tmp2 );
#endif
#ifdef RLIMIT_DATA
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "data")))
        ADD_LIMIT( "data", RLIMIT_DATA, tmp2 );
#endif
#ifdef RLIMIT_FSIZE
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "fsize")))
        ADD_LIMIT( "fsize", RLIMIT_FSIZE, tmp2 );
#endif
#ifdef RLIMIT_NOFILE
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "nofile")))
        ADD_LIMIT( "nofile", RLIMIT_NOFILE, tmp2 );
#endif
#ifdef RLIMIT_STACK
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "stack")))
        ADD_LIMIT( "stack", RLIMIT_STACK, tmp2 );
#endif
#ifdef RLIMIT_VMEM
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "map_mem"))
         || (tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "vmem")))
        ADD_LIMIT( "map_mem", RLIMIT_VMEM, tmp2 );
#endif
#ifdef RLIMIT_AS
      if((tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "as"))
         || (tmp2 = simple_mapping_string_lookup(tmp->u.mapping, "mem")))
        ADD_LIMIT( "mem", RLIMIT_AS, tmp2 );
#endif
#undef ADD_LIMIT
    }
  }

  /* Build the child's environment from an "env" mapping of strings.
   * BUGFIX: this lookup is now guarded by `optional` — it used to be
   * executed even when no option mapping was passed. */
  if(optional && (tmp = simple_mapping_string_lookup(optional, "env")))
  {
    if(tmp->type == T_MAPPING)
    {
      struct mapping *m = tmp->u.mapping;
      struct array *i, *v;
      int ptr = 0;
      i = mapping_indices(m);
      v = mapping_values(m);
      storage.env = (char **)xalloc((1 + m_sizeof(m)) * sizeof(char *));
      for(e = 0; e < i->size; e++)
      {
        if(ITEM(i)[e].type == T_STRING && ITEM(v)[e].type == T_STRING)
        {
          check_stack(3);
          /* Concatenate "<key>=<value>"; the result is deliberately left
           * on the Pike stack so the string stays alive until exec. */
          ref_push_string(ITEM(i)[e].u.string);
          push_string(make_shared_string("="));
          ref_push_string(ITEM(v)[e].u.string);
          f_add(3);
          storage.env[ptr++] = Pike_sp[-1].u.string->str;
        }
      }
      storage.env[ptr++] = 0;
      free_array(i);
      free_array(v);
    }
  }

  /* argv: borrow the string data from the cmd array; NULL-terminated. */
  storage.argv = (char **)xalloc((1 + cmd->size) * sizeof(char *));
  for (e = 0; e < cmd->size; e++)
    storage.argv[e] = ITEM(cmd)[e].u.string->str;
  storage.argv[e] = 0;

  th_atfork_prepare();
  pid = fork();
  if (pid) {
    th_atfork_parent();
  } else {
    th_atfork_child();
  }

  if (pid == -1) {
    Pike_error("Caudium.create_process() failed.");
  } else if (pid) {
    /* Parent: return the child's pid. */
    pop_n_elems(args);
    push_int(pid);
    return;
  } else {
    /* Child: install limits, environment, cwd, credentials and fds,
     * then exec. Failures here are deliberately best-effort. */
    if(storage.limits) {
      struct plimit *l = storage.limits;
      while(l) {
        setrlimit( l->resource, &l->rlp );
        l = l->next;
      }
    }
    if(storage.env)
      environ = storage.env;
    /* BUGFIX: only chdir when a "cwd" option was actually supplied;
     * chdir(NULL) is undefined behaviour. */
    if(tmp_cwd)
      chdir(tmp_cwd);
    seteuid(0);
    setegid(0);
    setgroups(0, NULL);
    if (gid_request)
      setgid(wanted_gid);
    if (uid_request)
      setuid(wanted_uid);
    dup2(fds[0], 0);
    dup2(fds[1], 1);
    dup2(fds[2], 2);
    set_close_on_exec(0, 0);
    set_close_on_exec(1, 0);
    set_close_on_exec(2, 0);
    execvp(storage.argv[0], storage.argv);
    exit(99);
  }

  pop_n_elems(args);
  push_int(0);
}
/*!
 * Execute a RET instruction: pop the saved return address from the
 * machine's stack and load it into the program counter.
 *
 * \param pmach machine being executed
 * \param instr instruction being executed (unused here)
 * \param addr  address of the current instruction, passed along for
 *              stack-error reporting
 * \return true — execution continues at the restored address
 */
bool ret(Machine *pmach, Instruction instr, unsigned addr)
{
	/* Move the stack pointer up to the slot holding the saved PC. */
	pmach->_sp = pmach->_sp + 1;
	/* Validate _sp before dereferencing it (presumably traps/aborts
	 * on over- or underflow — confirm check_stack's contract). */
	check_stack(pmach, addr);
	pmach->_pc = pmach->_data[pmach->_sp];
	return true;
}
/*
 * Kernel entry point: set the CPU clock from the SKU id, bring up the
 * hardware and the task system, then run the scheduler loop until no
 * non-daemon task remains, and finally shut the hardware back down.
 * The ordering of the init calls below is load-bearing — do not reorder.
 */
int main(void)
{
	struct task *next;

	/* Set the CPU speed: read the SKU id to learn the part's rated
	   speed, then program the MPU PLL accordingly.
	   NOTE(review): the meaning of (1<<19)|12 in CLKSEL is not
	   visible here — presumably divider/enable fields; confirm
	   against the SoC TRM. */
	uint32_t skuid = read32(DEVICEID_BASE + DEVICEID_SKUID_OFFSET);
	uint32_t cpuspeed_id = skuid & DEVICEID_SKUID_CPUSPEED_MASK;
	uint32_t clksel_val = (1<<19) | 12;
	if(cpuspeed_id == DEVICEID_SKUID_CPUSPEED_720)
		clksel_val |= (720 << 8);	/* 720 MHz rated part */
	else if(cpuspeed_id == DEVICEID_SKUID_CPUSPEED_600)
		clksel_val |= (600 << 8);	/* 600 MHz rated part */
	else
		panic("Unsupported CPU!");
	write32(CM_MPU_BASE + PRM_CLKSEL1_PLL_MPU_OFFSET, clksel_val);

	/* Basic hardware initialization */
	init_cpumodes();	// set up CPU modes for interrupt handling
	intc_init();		// initialize interrupt controller
	gpio_init();		// initialize gpio interrupt system

	/* Start up hardware */
	timers_init();		// must come first, since it initializes the watchdog
	eth_init();
	uart_init();

	/* For some reason, turning on the caches causes the kernel to
	   hang after finishing the third invocation. Maybe we have to
	   clear the caches here, or enable the MMU. */
	printk("mmu init\n");
	prep_pagetable();
	init_mmu();
	printk("cache init\n");
	init_cache();

	/* Initialize other interrupts */
	init_interrupts();

	/* Initialize task queues */
	init_tasks();

	/* Initialize idle task (spawned as a daemon so it does not keep
	   the scheduler loop below alive on its own). */
	syscall_spawn(NULL, 7, idle_task, NULL, 0, SPAWN_DAEMON);

	pmu_enable();
	trace_init();
	printk("userspace init\n");
	/* Initialize first user program */
	syscall_spawn(NULL, 6, init_task, NULL, 0, 0);

	/* Scheduler loop: pick a task, run it, and check its stack after
	   it yields. Exits once only daemon tasks remain. */
	while (nondaemon_count > 0) {
		next = schedule();
		task_activate(next);
		check_stack(next);
	}

	/* Orderly teardown of the hardware brought up above. */
	pmu_disable();
	intc_reset();
	eth_deinit();
	deinit_mmu();
	return 0;
}
// RAII hook: runs check_stack() whenever a stack_guard goes out of
// scope, so the stack is re-validated at the end of the guarded region.
~stack_guard() { check_stack(); }