/*
 * NOTE(review): Chapel-compiler-generated C (ChapelDistribution.chpl:133) —
 * do not hand-edit; regenerate from the Chapel source instead.
 *
 * Drops one reference on a domain. Atomically decrements this8->_domCnt
 * (seq_cst fetch_sub) and halts if the count goes negative. When the count
 * reaches zero AND the virtual predicate at vmtable slot (+1) returns true:
 *   - fetches the owning distribution via dsiMyDist(),
 *   - acquires dist2->_domsLock by spinning on atomic test-and-set,
 *     yielding the task between attempts,
 *   - removes this domain from the distribution's _doms list,
 *   - releases the lock (atomic_flag_clear), then drops the distribution's
 *     refcount via destroyDist(); if that also hits zero, invokes the
 *     virtual destructor at vmtable slot (+0) and frees the object.
 * Returns the post-decrement domain reference count.
 */
/* ChapelDistribution.chpl:133 */ static int64_t destroyDom(BaseDom this8, int64_t _ln, c_string _fn) { memory_order local_memory_order_seq_cst; int64_t cnt; _ref_atomic_refcnt call_tmp = NULL; _ref_atomic_int64 call_tmp2 = NULL; memory_order default_argorder; _ref_atomic_int_least64_t call_tmp3 = NULL; int64_t call_tmp4; int64_t call_tmp5; chpl_bool call_tmp6; chpl_bool call_tmp7; chpl_bool T; chpl_bool call_tmp8; int32_t _virtual_method_tmp_; BaseDist dist2 = NULL; BaseDist call_tmp9 = NULL; chpl_bool T2; _ref_atomicflag call_tmp10 = NULL; memory_order default_argorder2; _ref_atomic_flag call_tmp11 = NULL; chpl_bool call_tmp12; _ref_atomicflag call_tmp13 = NULL; memory_order default_argorder3; _ref_atomic_flag call_tmp14 = NULL; chpl_bool call_tmp15; _ref_list_BaseDom call_tmp16 = NULL; _ref_atomicflag call_tmp17 = NULL; memory_order default_argorder4; _ref_atomic_flag call_tmp18 = NULL; int64_t call_tmp19; chpl_bool call_tmp20; int32_t _virtual_method_tmp_2; chpl_opaque call_tmp21; local_memory_order_seq_cst = memory_order_seq_cst; compilerAssert(); compilerAssert(); call_tmp = &((this8)->_domCnt); call_tmp2 = &((call_tmp)->_cnt); default_argorder = local_memory_order_seq_cst; call_tmp3 = &((call_tmp2)->_v); call_tmp4 = atomic_fetch_sub_explicit_int_least64_t(call_tmp3, INT64(1), default_argorder); call_tmp5 = (call_tmp4 - INT64(1)); call_tmp6 = (call_tmp5 < INT64(0)); if (call_tmp6) { halt("domain reference count is negative!", _ln, _fn); } cnt = call_tmp5; call_tmp7 = (call_tmp5 == INT64(0)); if (call_tmp7) { _virtual_method_tmp_ = ((object)(this8))->chpl__cid; call_tmp8 = ((chpl_bool(*)(BaseDom))chpl_vmtable[((INT64(8) * _virtual_method_tmp_) + INT64(1))])(this8); T = call_tmp8; } else { T = false; } if (T) { call_tmp9 = dsiMyDist(this8, _ln, _fn); dist2 = call_tmp9; call_tmp10 = &((dist2)->_domsLock); default_argorder2 = local_memory_order_seq_cst; call_tmp11 = &((call_tmp10)->_v); call_tmp12 = atomic_flag_test_and_set_explicit(call_tmp11, 
default_argorder2); T2 = call_tmp12; while (T2) { chpl_task_yield(); call_tmp13 = &((dist2)->_domsLock); default_argorder3 = local_memory_order_seq_cst; call_tmp14 = &((call_tmp13)->_v); call_tmp15 = atomic_flag_test_and_set_explicit(call_tmp14, default_argorder3); T2 = call_tmp15; } call_tmp16 = &((dist2)->_doms); remove3(call_tmp16, this8, _ln, _fn); call_tmp17 = &((dist2)->_domsLock); default_argorder4 = local_memory_order_seq_cst; call_tmp18 = &((call_tmp17)->_v); atomic_flag_clear_explicit(call_tmp18, default_argorder4); call_tmp19 = destroyDist(dist2, _ln, _fn); call_tmp20 = (call_tmp19 == INT64(0)); if (call_tmp20) { _virtual_method_tmp_2 = ((object)(dist2))->chpl__cid; ((void(*)(BaseDist, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_2) + INT64(0))])(dist2, _ln, _fn); call_tmp21 = ((void*)(dist2)); chpl_here_free(call_tmp21, _ln, _fn); } } return cnt; }
/*
 * Bus-fault trap handler: report the fault on the console, then stop
 * the system. halt() is expected not to return.
 */
void
_busfault(void)
{
	printf("busfault\n");
	halt();
}
/*
 * Unexpected SVC (supervisor call) trap handler: report it, then stop
 * the system. halt() is expected not to return.
 */
void
_svc(void)
{
	printf("svc\n");
	halt();
}
/*
 * Write handler for the console device (Plan 9 / Inferno devcons style).
 * Dispatches on the qid path: raw console output, keyboard-mode control,
 * injected keystrokes, clock setting, owner/user renaming, JIT level,
 * sysname, and sysctl commands.
 *
 * Returns the number of bytes consumed; raises via error() on failure.
 *
 * FIX: in the Qcons case, the data is copied into the local buf[]
 * precisely because putstrn0 must not page-fault on a user pointer,
 * but the original code then passed the user pointer `a` anyway.
 * It now passes `buf`, making the local copy actually effective.
 */
static long
conswrite(Chan *c, void *va, long n, vlong offset)
{
	vlong t;
	long l, bp;
	char *a = va;
	Cmdbuf *cb;
	Cmdtab *ct;
	char buf[256];
	int x;

	switch((ulong)c->qid.path){
	case Qcons:
		/*
		 * Can't page fault in putstrn, so copy the data locally.
		 */
		l = n;
		while(l > 0){
			bp = l;
			if(bp > sizeof buf)
				bp = sizeof buf;
			memmove(buf, a, bp);
			putstrn0(buf, bp, 1);	/* was `a`: must use the local copy */
			a += bp;
			l -= bp;
		}
		break;

	case Qconsctl:
		/* parse space-separated keyboard control words */
		if(n >= sizeof(buf))
			n = sizeof(buf)-1;
		strncpy(buf, a, n);
		buf[n] = 0;
		for(a = buf; a;){
			if(strncmp(a, "rawon", 5) == 0){
				qlock(&kbd);
				flushkbdline(kbdq);
				kbd.raw = 1;
				qunlock(&kbd);
			} else if(strncmp(a, "rawoff", 6) == 0){
				qlock(&kbd);
				kbd.raw = 0;
				kbd.x = 0;
				qunlock(&kbd);
			}
			if(a = strchr(a, ' '))
				a++;
		}
		break;

	case Qkeyboard:
		/* inject UTF-8 encoded runes into the keyboard queue */
		for(x=0; x<n; ) {
			Rune r;
			x += chartorune(&r, &a[x]);
			kbdputc(kbdq, r);
		}
		break;

	case Qtime:
		/* value is nanoseconds since epoch; derive boot time */
		if(n >= sizeof(buf))
			n = sizeof(buf)-1;
		strncpy(buf, a, n);
		buf[n] = 0;
		t = strtoll(buf, 0, 0)/1000000;
		boottime = t - TK2SEC(MACHP(0)->ticks);
		break;

	case Qhostowner:
		if(!iseve())
			error(Eperm);
		if(offset != 0 || n >= sizeof(buf))
			error(Ebadarg);
		memmove(buf, a, n);
		buf[n] = '\0';
		if(n > 0 && buf[n-1] == '\n')
			buf[--n] = 0;
		if(n <= 0)
			error(Ebadarg);
		renameuser(eve, buf);
		renameproguser(eve, buf);
		kstrdup(&eve, buf);
		kstrdup(&up->env->user, buf);
		break;

	case Quser:
		if(!iseve())
			error(Eperm);
		if(offset != 0)
			error(Ebadarg);
		if(n <= 0 || n >= sizeof(buf))
			error(Ebadarg);
		strncpy(buf, a, n);
		buf[n] = 0;
		if(buf[n-1] == '\n')
			buf[n-1] = 0;
		kstrdup(&up->env->user, buf);
		break;

	case Qjit:
		if(n >= sizeof(buf))
			n = sizeof(buf)-1;
		strncpy(buf, va, n);
		buf[n] = '\0';
		x = atoi(buf);
		if(x < 0 || x > 9)
			error(Ebadarg);
		cflag = x;
		return n;

	case Qnull:
		/* data sink: accept and discard */
		break;

	case Qsysname:
		if(offset != 0)
			error(Ebadarg);
		if(n <= 0 || n >= sizeof(buf))
			error(Ebadarg);
		strncpy(buf, a, n);
		buf[n] = 0;
		if(buf[n-1] == '\n')
			buf[n-1] = 0;
		kstrdup(&sysname, buf);
		break;

	case Qsysctl:
		if(!iseve())
			error(Eperm);
		cb = parsecmd(a, n);
		if(waserror()){
			free(cb);
			nexterror();
		}
		ct = lookupcmd(cb, sysctlcmd, nelem(sysctlcmd));
		switch(ct->index){
		case CMreboot:
			reboot();
			break;
		case CMhalt:
			halt();
			break;
		case CMpanic:
			panic("sysctl");
			/* no return */
		case CMconsole:
			consoleprint = strcmp(cb->f[1], "off") != 0;
			break;
		case CMbroken:
			keepbroken = 1;
			break;
		case CMnobroken:
			keepbroken = 0;
			break;
		}
		poperror();
		free(cb);
		break;

	default:
		print("conswrite: %llud\n", c->qid.path);
		error(Egreg);
	}
	return n;
}
/*
 * Hard-fault trap handler: report the fault on the console, then stop
 * the system. halt() is expected not to return.
 */
void
_hardfault(void)
{
	printf("hardfault\n");
	halt();
}
/*
 * NOTE(review): Linux/alpha shutdown worker. Updates this CPU's HWRPB
 * per-CPU flags word to tell the firmware what to do next (0x0002xxxx
 * "cold bootstrap", 0x0003xxxx "warm bootstrap", 0x0004xxxx "remain
 * halted"), parks secondary CPUs, waits for them to disappear from the
 * present map, optionally restores SRM console/VGA/HAE state, invokes the
 * machine-vector kill hook, and finally halts. Runs with local interrupts
 * disabled; statement order (flags write vs. cpu-map updates vs. halt)
 * matters for the firmware handshake — do not reorder casually.
 */
static void common_shutdown_1(void *generic_ptr) { struct halt_info *how = (struct halt_info *)generic_ptr; struct percpu_struct *cpup; unsigned long *pflags, flags; int cpuid = smp_processor_id(); /* No point in taking interrupts anymore. */ local_irq_disable(); cpup = (struct percpu_struct *) ((unsigned long)hwrpb + hwrpb->processor_offset + hwrpb->processor_size * cpuid); pflags = &cpup->flags; flags = *pflags; /* Clear reason to "default"; clear "bootstrap in progress". */ flags &= ~0x00ff0001UL; #ifdef CONFIG_SMP /* Secondaries halt here. */ if (cpuid != boot_cpuid) { flags |= 0x00040000UL; /* "remain halted" */ *pflags = flags; set_cpu_present(cpuid, false); set_cpu_possible(cpuid, false); halt(); } #endif if (how->mode == LINUX_REBOOT_CMD_RESTART) { if (!how->restart_cmd) { flags |= 0x00020000UL; /* "cold bootstrap" */ } else { /* For SRM, we could probably set environment variables to get this to work. We'd have to delay this until after srm_paging_stop unless we ever got srm_fixup working. At the moment, SRM will use the last boot device, but the file and flags will be the defaults, when doing a "warm" bootstrap. */ flags |= 0x00030000UL; /* "warm bootstrap" */ } } else { flags |= 0x00040000UL; /* "remain halted" */ } *pflags = flags; #ifdef CONFIG_SMP /* Wait for the secondaries to halt. */ set_cpu_present(boot_cpuid, false); set_cpu_possible(boot_cpuid, false); while (cpus_weight(cpu_present_map)) barrier(); #endif /* If booted from SRM, reset some of the original environment. */ if (alpha_using_srm) { #ifdef CONFIG_DUMMY_CONSOLE /* If we've gotten here after SysRq-b, leave interrupt context before taking over the console. */ if (in_interrupt()) irq_exit(); /* This has the effect of resetting the VGA video origin. */ take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1); #endif pci_restore_srm_config(); set_hae(srm_hae); } if (alpha_mv.kill_arch) alpha_mv.kill_arch(how->mode); if (! 
alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) { /* Unfortunately, since MILO doesn't currently understand the hwrpb bits above, we can't reliably halt the processor and keep it halted. So just loop. */ return; } if (alpha_using_srm) srm_paging_stop(); halt(); }
void ExitPrompter::handleExit() { // HACK IsFrontendOnly() triggers a popup if there is no BE connection. // We really don't need that right now. This hack prevents it. gContext->SetDisableEventPopup(true); // first of all find out, if this is a frontend only host... bool frontendOnly = gCoreContext->IsFrontendOnly(); // HACK Undo the hack, just in case we _don't_ quit: gContext->SetDisableEventPopup(false); // how do you want to quit today? bool allowExit = false; bool allowReboot = false; bool allowShutdown = false; switch (gCoreContext->GetNumSetting("OverrideExitMenu", 0)) { case 0: allowExit = true; if (frontendOnly) allowShutdown = true; break; case 1: allowExit = true; break; case 2: allowExit = true; allowShutdown = true; break; case 3: allowExit = true; allowReboot = true; allowShutdown = true; break; case 4: allowShutdown = true; break; case 5: allowReboot = true; break; case 6: allowReboot = true; allowShutdown = true; break; } MythScreenStack *ss = GetMythMainWindow()->GetStack("popup stack"); MythDialogBox *dlg = new MythDialogBox( tr("Do you really want to exit MythTV?"), ss, "exit prompt"); if (!dlg->Create()) { LOG(VB_GENERAL, LOG_ERR, "Can't create Exit Prompt dialog?"); delete dlg; quit(); } dlg->AddButton(tr("No")); if (allowExit) dlg->AddButton(QObject::tr("Yes, Exit now"), SLOT(quit())); if (allowReboot) dlg->AddButton(QObject::tr("Yes, Exit and Reboot"), SLOT(reboot())); if (allowShutdown) dlg->AddButton(QObject::tr("Yes, Exit and Shutdown"), SLOT(halt())); // This is a hack so that the button clicks target the correct slot: dlg->SetReturnEvent(this, QString()); ss->AddScreen(dlg); }
/* Chapel-generated stub: element preservation is only meaningful for associative arrays, so this unconditionally halts with a diagnostic. */
/* ChapelDistribution.chpl:361 */ static void _preserveArrayElement(BaseArr this8, int64_t oldslot, int64_t newslot, int64_t _ln, c_string _fn) { halt("_preserveArrayElement() not supported for non-associative arrays", _ln, _fn); return; }
/*
 * NOTE(review): walks the Multiboot memory map to pick the largest
 * available segment for the kernel heap, trims it around the kernel
 * image, carves the mseg[] descriptor table out of the start of the heap
 * itself, and returns the MS_END-terminated table. Falls back to
 * arch_memseg_from_script() when no Multiboot info/map is present.
 *
 * NOTE(review): heap_id is assigned from mmap_cnt *after* the increment,
 * i.e. it is a 1-based ordinal, yet it is later used to index the 0-based
 * mseg[] array (mseg[heap_id].type = MS_KHEAP in the mseg_cnt == mmap_cnt+1
 * branch) — looks like an off-by-one that marks the segment *after* the
 * heap segment; verify against the intended numbering before changing.
 * Also assumes the chosen segment is large enough to hold the mseg[]
 * table that is placed at its start — TODO confirm.
 */
/*! * Set up context (normal and interrupt=kernel) * * Create memory map: * - find place for heap */ mseg_t *arch_memory_init () { extern char kernel_code_addr, kernel_end_addr; multiboot_info_t *mbi; multiboot_memory_map_t *mmap, *iter; mseg_t *mseg; uint32 mmap_end, mmap_size; uint32 heap_id, heap_start, heap_end; int mmap_cnt, mseg_cnt, i; /* Is system booted by a Multiboot-compliant boot loader? */ if ( arch_mb_magic != MULTIBOOT_BOOTLOADER_MAGIC ) { /* no Multiboot information! get them "hard way" :) */ return arch_memseg_from_script (); } else { mbi = (void *) arch_mb_info; /* from multiboot info structure */ mmap = NULL; /* if memory map is available use it */ if ( mbi->flags & MULTIBOOT_INFO_MEM_MAP ) { /* find largest available segment */ iter = (void *) mbi->mmap_addr; mmap_end = (uint32)(mbi->mmap_addr + mbi->mmap_length); mmap_cnt = 0; mmap_size = 0; heap_id = 0; while ( iter && (uint32) iter < mmap_end ) { mmap_cnt++; if ( iter->type == MULTIBOOT_MEMORY_AVAILABLE && mmap_size < (uint32) ( iter->len & 0x0ffffffff ) ) { heap_id = mmap_cnt; mmap = iter; mmap_size = (uint32) ( iter->len & 0x0ffffffff ); } iter = (void *) iter + iter->size + sizeof (iter->size); } } if ( !mmap ) return arch_memseg_from_script (); heap_start = (uint32) ( mmap->addr & 0x0ffffffff ); heap_end = heap_start + (uint32) ( mmap->len & 0x0ffffffff ); /* if this segment contains/intersect with kernel code/data, * reduce it */ if ( heap_start >= (uint32) &kernel_code_addr && heap_start < (uint32) &kernel_end_addr ) heap_start = (uint32) &kernel_end_addr; if ( heap_end >= (uint32) &kernel_code_addr && heap_end < (uint32) &kernel_end_addr ) heap_end = (uint32) &kernel_code_addr; if ( heap_start >= heap_end ) { LOG ( ERROR, "No space for heap!\n" ); halt (); } /* * TODO: check if some other data is saved in segment * [heap_start, heap_end] */ /* "map" "mmap" segmets to "mseg" */ mseg_cnt = mmap_cnt + 1; if ( heap_start != (uint32) ( mmap->addr & 0x0ffffffff ) ) mseg_cnt++; mseg = (void 
*) heap_start; heap_start += mseg_cnt * sizeof (mseg_t); iter = (void *) mbi->mmap_addr; mmap_end = (uint32) (mbi->mmap_addr + mbi->mmap_length); for ( i = 0; i < mmap_cnt; i++ ) { mseg[i].type = iter->type; mseg[i].name = "mmap segment"; mseg[i].start = (void *) (uint32) ( iter->addr & 0x0ffffffff ); mseg[i].size = (uint32) ( iter->len & 0x0ffffffff ); iter = (void *) iter + iter->size + sizeof (iter->size); } i = mmap_cnt; if ( mseg_cnt == mmap_cnt + 1 ) { mseg[heap_id].type = MS_KHEAP; mseg[heap_id].name = "heap"; } else { mseg[i].type = MS_KHEAP; mseg[i].name = "heap"; mseg[i].start = (void *) heap_start; mseg[i].size = heap_end - heap_start; i++; } mseg[i].type = MS_END; return mseg; } }
/* Chapel-generated stub: clearing an entry by index is only meaningful for associative arrays, so this unconditionally halts with a diagnostic. */
/* ChapelDistribution.chpl:349 */ static void clearEntry(BaseArr this8, chpl_taskID_t idx, chpl_bool haveLock, int64_t _ln, c_string _fn) { halt("clearEntry() not supported for non-associative arrays", _ln, _fn); return; }
/* Chapel-generated stub: backup removal is only meaningful for associative arrays, so this unconditionally halts with a diagnostic. */
/* ChapelDistribution.chpl:357 */ static void _removeArrayBackup(BaseArr this8, int64_t _ln, c_string _fn) { halt("_removeArrayBackup() not supported for non-associative arrays", _ln, _fn); return; }
/* Chapel-generated stub: reallocation is unsupported for this array type, so this unconditionally halts with a diagnostic. */
/* ChapelDistribution.chpl:314 */ static void dsiReallocate(BaseArr this8, DefaultRectangularDom_1_int64_t_F d, int64_t _ln, c_string _fn) { halt("reallocating not supported for this array type", _ln, _fn); return; }
/*
 * NOTE(review): Chapel-compiler-generated C (ChapelDistribution.chpl:286) —
 * do not hand-edit; regenerate from the Chapel source instead.
 *
 * Drops one reference on an array. Atomically decrements this8->_arrCnt
 * (seq_cst fetch_sub) and halts if the count goes negative. When the count
 * reaches zero:
 *   - if the array has an _arrAlias, recursively destroyArr()s the alias,
 *     invoking its virtual destructor (vmtable slot +0) and freeing it if
 *     its count also reaches zero;
 *   - otherwise calls this array's virtual data-destroy hook (slot +5);
 *   - then fetches the owning domain via the virtual getter (slot +6),
 *     acquires dom->_arrsLock by spinning on atomic test-and-set with task
 *     yields, removes this array from the domain's _arrs list, releases
 *     the lock, and drops the domain refcount via destroyDom(); if that
 *     hits zero the domain is destructed (slot +0) and freed.
 * Returns the post-decrement array reference count.
 */
/* ChapelDistribution.chpl:286 */ static int64_t destroyArr(BaseArr this8, int64_t _ln, c_string _fn) { memory_order local_memory_order_seq_cst; int64_t cnt; _ref_atomic_refcnt call_tmp = NULL; _ref_atomic_int64 call_tmp2 = NULL; memory_order default_argorder; _ref_atomic_int_least64_t call_tmp3 = NULL; int64_t call_tmp4; int64_t call_tmp5; chpl_bool call_tmp6; chpl_bool call_tmp7; BaseArr ret = NULL; object call_tmp8 = NULL; chpl_bool call_tmp9; BaseArr ret2 = NULL; int64_t call_tmp10; chpl_bool call_tmp11; BaseArr ret3 = NULL; int32_t _virtual_method_tmp_; chpl_opaque call_tmp12; int32_t _virtual_method_tmp_2; chpl_bool call_tmp13; BaseDom dom = NULL; BaseDom call_tmp14 = NULL; int32_t _virtual_method_tmp_3; chpl_bool T; _ref_atomicflag call_tmp15 = NULL; memory_order default_argorder2; _ref_atomic_flag call_tmp16 = NULL; chpl_bool call_tmp17; _ref_atomicflag call_tmp18 = NULL; memory_order default_argorder3; _ref_atomic_flag call_tmp19 = NULL; chpl_bool call_tmp20; _ref_list_BaseArr call_tmp21 = NULL; _ref_atomicflag call_tmp22 = NULL; memory_order default_argorder4; _ref_atomic_flag call_tmp23 = NULL; int64_t call_tmp24; chpl_bool call_tmp25; int32_t _virtual_method_tmp_4; chpl_opaque call_tmp26; local_memory_order_seq_cst = memory_order_seq_cst; compilerAssert(); compilerAssert(); call_tmp = &((this8)->_arrCnt); call_tmp2 = &((call_tmp)->_cnt); default_argorder = local_memory_order_seq_cst; call_tmp3 = &((call_tmp2)->_v); call_tmp4 = atomic_fetch_sub_explicit_int_least64_t(call_tmp3, INT64(1), default_argorder); call_tmp5 = (call_tmp4 - INT64(1)); call_tmp6 = (call_tmp5 < INT64(0)); if (call_tmp6) { halt("array reference count is negative!", _ln, _fn); } cnt = call_tmp5; call_tmp7 = (call_tmp5 == INT64(0)); if (call_tmp7) { ret = (this8)->_arrAlias; call_tmp8 = ((object)(ret)); call_tmp9 = (call_tmp8 != nil); if (call_tmp9) { ret2 = (this8)->_arrAlias; call_tmp10 = destroyArr(ret2, _ln, _fn); call_tmp11 = (call_tmp10 == INT64(0)); if (call_tmp11) { ret3 = 
(this8)->_arrAlias; _virtual_method_tmp_ = ((object)(ret3))->chpl__cid; ((void(*)(BaseArr, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_) + INT64(0))])(ret3, _ln, _fn); call_tmp12 = ((void*)(ret3)); chpl_here_free(call_tmp12, _ln, _fn); } } else { _virtual_method_tmp_2 = ((object)(this8))->chpl__cid; ((void(*)(BaseArr, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_2) + INT64(5))])(this8, _ln, _fn); } } call_tmp13 = (call_tmp5 == INT64(0)); if (call_tmp13) { _virtual_method_tmp_3 = ((object)(this8))->chpl__cid; call_tmp14 = ((BaseDom(*)(BaseArr, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_3) + INT64(6))])(this8, _ln, _fn); dom = call_tmp14; call_tmp15 = &((dom)->_arrsLock); default_argorder2 = local_memory_order_seq_cst; call_tmp16 = &((call_tmp15)->_v); call_tmp17 = atomic_flag_test_and_set_explicit(call_tmp16, default_argorder2); T = call_tmp17; while (T) { chpl_task_yield(); call_tmp18 = &((dom)->_arrsLock); default_argorder3 = local_memory_order_seq_cst; call_tmp19 = &((call_tmp18)->_v); call_tmp20 = atomic_flag_test_and_set_explicit(call_tmp19, default_argorder3); T = call_tmp20; } call_tmp21 = &((dom)->_arrs); remove4(call_tmp21, this8, _ln, _fn); call_tmp22 = &((dom)->_arrsLock); default_argorder4 = local_memory_order_seq_cst; call_tmp23 = &((call_tmp22)->_v); atomic_flag_clear_explicit(call_tmp23, default_argorder4); call_tmp24 = destroyDom(dom, _ln, _fn); call_tmp25 = (call_tmp24 == INT64(0)); if (call_tmp25) { _virtual_method_tmp_4 = ((object)(dom))->chpl__cid; ((void(*)(BaseDom, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_4) + INT64(0))])(dom, _ln, _fn); call_tmp26 = ((void*)(dom)); chpl_here_free(call_tmp26, _ln, _fn); } } return cnt; }
/* Chapel-generated stub: base-class placeholder that must be overridden; halts with an internal error and returns nil for form's sake. */
/* ChapelDistribution.chpl:280 */ static BaseDom dsiGetBaseDom(BaseArr this8, int64_t _ln, c_string _fn) { BaseDom ret = NULL; halt("internal error: dsiGetBaseDom is not implemented", _ln, _fn); ret = nil; return ret; }
/*
 * NOTE(review): OpenWrt LZMA kernel loader entry (MIPS). Must be the first
 * function so the linker places it at the image entry point. Reads the
 * 13-byte LZMA header (5 property bytes + 64-bit uncompressed size, of
 * which only the low 32 bits are used — images > 4 GiB are presumably not
 * expected), decompresses the kernel to LOADADDR, flushes D- then I-cache,
 * and jumps to the kernel with the (possibly CONFIG_PASS_KARGS-overridden)
 * a0-a3 register arguments. halt()s on any LZMA property or data error.
 */
/* should be the first function */ void decompress_entry(unsigned long reg_a0, unsigned long reg_a1, unsigned long reg_a2, unsigned long reg_a3, unsigned long icache_size, unsigned long icache_lsize, unsigned long dcache_size, unsigned long dcache_lsize) { unsigned char props[LZMA_PROPERTIES_SIZE]; unsigned int i; /* temp value */ SizeT osize; /* uncompressed size */ int res; board_init(); printf("\n\nLZMA loader for " CONFIG_BOARD_NAME ", Copyright (C) 2007-2008 OpenWrt.org\n\n"); decompress_init(); /* lzma args */ for (i = 0; i < LZMA_PROPERTIES_SIZE; i++) props[i] = get_byte(); /* skip rest of the LZMA coder property */ /* read the lower half of uncompressed size in the header */ osize = ((SizeT)get_byte()) + ((SizeT)get_byte() << 8) + ((SizeT)get_byte() << 16) + ((SizeT)get_byte() << 24); /* skip rest of the header (upper half of uncompressed size) */ for (i = 0; i < 4; i++) get_byte(); res = LzmaDecodeProperties(&lzma_state.Properties, props, LZMA_PROPERTIES_SIZE); if (res != LZMA_RESULT_OK) { printf("Incorrect LZMA stream properties!\n"); halt(); } printf("decompressing kernel... "); lzma_state.Probs = (CProb *)workspace; res = decompress_data(&lzma_state, (unsigned char *)LOADADDR, osize); if (res != LZMA_RESULT_OK) { printf("failed, "); switch (res) { case LZMA_RESULT_DATA_ERROR: printf("data error!\n"); break; default: printf("unknown error %d!\n", res); } halt(); } else printf("done!\n"); blast_dcache(dcache_size, dcache_lsize); blast_icache(icache_size, icache_lsize); printf("launching kernel...\n\n"); #ifdef CONFIG_PASS_KARGS reg_a0 = 0; reg_a1 = 0; reg_a2 = (unsigned long)env_vars; reg_a3 = 0; #endif /* Jump to load address */ ((kernel_entry) LOADADDR)(reg_a0, reg_a1, reg_a2, reg_a3); }
/*
 * End-of-round hook: announce the round end on the console, then stop.
 * halt() is expected not to return.
 */
void round_end()
{
	printf("\nRound end");
	halt();
}
/*
 * NOTE(review): RobotC (NXT/TETRIX) autonomous routine — `task`,
 * SensorValue[], nMotorEncoder[], servoTarget[] etc. are RobotC builtins,
 * not standard C. Sequence: drive toward the IR beacon, deploy the
 * auto-scoring servo, drive along the wall, make two heading-based turns,
 * and climb the ramp. Every drive/turn loop carries a watchdog counter
 * that halts the drivetrain and parks for 30 s when exceeded. The
 * 4*360*x constants are presumably encoder ticks per distance (4:1 gearing
 * x 360 ticks/rev x wheel rotations) and the heading windows are degrees
 * from the gyro — TODO confirm against robot geometry before editing.
 */
task main() { initializeRobot(); wait1Msec(2); waitForStart(); // Wait for the beginning of autonomous phase. int count = 0; // STEP 1: Drive straight until irsensor resetEncoders(); while(SensorValue[irsensor] < 4 && nMotorEncoder[RightDrive] < 4*360*3.2){ nxtDisplayCenteredTextLine(3, "IR: %d", SensorValue[irsensor]); moveForward(SPEED); wait1Msec(5); count++; if( count > 1000) { halt(); wait1Msec(30000); } } moveForward(SPEED); //halt(); Disabled. We're going to try and deposit the block without stopping wait1Msec (500); // STEP 2: Deploy auto-scoring arm servoTarget[autoServo] = 200; wait1Msec(250); servoTarget[autoServo] = 255; wait1Msec(300); // STEP 3: long drive along wall with IR score count = 0; while(nMotorEncoder[RightDrive] < 4*360*5.4) { moveForward(SPEED); wait1Msec(5); count++; if( count > 1000) { halt(); wait1Msec(30000); } } halt(); tareHeading(); //STEP 4: Turn 90 degrees first count = 0; motor[LeftDrive] = -70; motor[RightDrive] = 70; while(true) { nxtDisplayCenteredTextLine(3, "Heading: %d", currHeading); //wait1Msec(10); if (currHeading >= 300.0 && currHeading < 315) break; wait1Msec(5); count++; if( count > 500) { halt(); wait1Msec(30000); } } halt(); resetEncoders(); //STEP 5: Drive 2 feet before ramp turn count = 0; while(nMotorEncoder[RightDrive] < 4*360*2.0) { moveForward(SPEED); wait1Msec(5); count++; if( count > 500) { halt(); wait1Msec(30000); } } halt(); tareHeading(); //STEP 6: Second 90 degree turn count = 0; motor[LeftDrive] = -90; motor[RightDrive] = 70; while(true) { nxtDisplayCenteredTextLine(3, "Heading: %d", currHeading); wait1Msec(10); if (currHeading >= 235.0 && currHeading < 255.0) break; wait1Msec(5); count++; if( count > 1000) { halt(); wait1Msec(30000); } } halt(); resetEncoders(); //STEP 7: Drive onto ramp count = 0; while(nMotorEncoder[RightDrive] < 4*360*3.8) { moveForward(70); wait1Msec(5); count++; if( count > 500) { halt(); wait1Msec(30000); } } halt(); tareHeading(); }
/*
 * Pintos system-call test: halt() should power off the machine
 * immediately, so the fail() line is reached only if halt() wrongly
 * returns.
 */
void
test_main (void)
{
  halt ();
  fail ("should have halted");
}
/* Exit stub for this environment: delegate to halt(); never returns. */
void
_exit(void)
{
	halt();
}
/* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
/*
 * NOTE(review): NetBSD/alpha boot block. main() initializes the PROM
 * callback vector, prints a banner, opens (UNIFIED) or adopts (SECONDARY)
 * the boot device, switches to the OSF PALcode, loads the kernel named by
 * the PROM boot-file variable (or prompts with the 'i' flag, or walks the
 * kernelnames[] fallback list), fills in bootinfo_v1, and jumps to the
 * loaded entry point; if the kernel returns or nothing loads, it prints a
 * failure message and halts.
 *
 * NOTE(review): the bcopy() calls into booted_kernel/boot_flags copy
 * sizeof(dest) bytes regardless of the source string's length, so they
 * read past short sources (e.g. a kernelnames[] literal) — presumably
 * harmless here but technically an over-read; verify before changing.
 */
 * * @(#)boot.c 8.1 (Berkeley) 6/10/93 */ #include <lib/libsa/stand.h> #include <lib/libkern/libkern.h> #include <sys/param.h> #include <sys/exec.h> #include <sys/exec_ecoff.h> #include <machine/autoconf.h> #include <machine/prom.h> #include <machine/rpb.h> #include <machine/pte.h> #include "common.h" #if !defined(UNIFIED_BOOTBLOCK) && !defined(SECONDARY_BOOTBLOCK) #error not UNIFIED_BOOTBLOCK and not SECONDARY_BOOTBLOCK #endif int loadfile __P((char *, u_int64_t *)); char boot_file[128]; char boot_flags[128]; struct bootinfo_v1 bootinfo_v1; extern char bootprog_rev[], bootprog_date[], bootprog_maker[]; paddr_t ffp_save, ptbr_save; extern vaddr_t ssym, esym; int debug; char *kernelnames[] = { "netbsd", "netbsd.gz", "netbsd.bak", "netbsd.bak.gz", "netbsd.old", "netbsd.old.gz", "onetbsd", "onetbsd.gz", NULL }; void #if defined(UNIFIED_BOOTBLOCK) main(void) #else /* defined(SECONDARY_BOOTBLOCK) */ main(long fd) #endif { char *name, **namep; u_int64_t entry; int win; /* Init prom callback vector. */ init_prom_calls(); /* print a banner */ printf("\n"); printf("NetBSD/alpha " NETBSD_VERS " " BOOT_TYPE_NAME " Bootstrap, Revision %s\n", bootprog_rev); printf("(%s, %s)\n", bootprog_maker, bootprog_date); printf("\n"); /* set up the booted device descriptor */ #if defined(UNIFIED_BOOTBLOCK) if (!booted_dev_open()) { printf("Boot device (%s) open failed.\n", booted_dev_name[0] ? booted_dev_name : "unknown"); goto fail; } #else /* defined(SECONDARY_BOOTBLOCK) */ booted_dev_setfd(fd); #endif /* switch to OSF pal code. 
*/ OSFpal(); printf("\n"); prom_getenv(PROM_E_BOOTED_FILE, boot_file, sizeof(boot_file)); prom_getenv(PROM_E_BOOTED_OSFLAGS, boot_flags, sizeof(boot_flags)); if (boot_file[0] != 0) (void)printf("Boot file: %s\n", boot_file); (void)printf("Boot flags: %s\n", boot_flags); if (strchr(boot_flags, 'i') || strchr(boot_flags, 'I')) { printf("Boot file: "); gets(boot_file); } if (boot_file[0] != '\0') win = (loadfile(name = boot_file, &entry) == 0); else for (namep = kernelnames, win = 0; *namep != NULL && !win; namep++) win = (loadfile(name = *namep, &entry) == 0); booted_dev_close(); printf("\n"); if (!win) { goto fail; } /* * Fill in the bootinfo for the kernel. */ bzero(&bootinfo_v1, sizeof(bootinfo_v1)); bootinfo_v1.ssym = ssym; bootinfo_v1.esym = esym; bcopy(name, bootinfo_v1.booted_kernel, sizeof(bootinfo_v1.booted_kernel)); bcopy(boot_flags, bootinfo_v1.boot_flags, sizeof(bootinfo_v1.boot_flags)); bootinfo_v1.hwrpb = (void *)HWRPB_ADDR; bootinfo_v1.hwrpbsize = ((struct rpb *)HWRPB_ADDR)->rpb_size; bootinfo_v1.cngetc = NULL; bootinfo_v1.cnputc = NULL; bootinfo_v1.cnpollc = NULL; (void)printf("Entering %s at 0x%lx...\n", name, entry); alpha_pal_imb(); (*(void (*)(u_int64_t, u_int64_t, u_int64_t, void *, u_int64_t, u_int64_t))entry)(ffp_save, ptbr_save, BOOTINFO_MAGIC, &bootinfo_v1, 1, 0); (void)printf("KERNEL RETURNED!\n"); fail: (void)printf("Boot failed! Halting...\n"); halt(); }
/*
 * NOTE(review): Solaris checkpoint/resume (CPR) entry point. The first
 * switch classifies fcn into a sleep type (CPR_TODISK on sparc,
 * CPR_TORAM/reject on x86); the second switch performs per-command
 * validation and handles the query/debug commands that return early;
 * the remainder takes the cpr serial lock, runs cpr_main(), and on the
 * sparc suspend path freezes the device tree, stops other CPUs, resets
 * leaf devices and powers down (or halts under AD_CPR_TESTHALT).
 *
 * NOTE(review): the first switch has no default, so for fcn values it
 * does not list cpr_sleeptype stays uninitialized. On sparc, the DEV_* /
 * AD_*_SUSPEND_TO_RAM cases of the second switch fall through to
 * i_cpr_is_supported(cpr_sleeptype) without the sparc first-switch ever
 * setting it — a potential read of an uninitialized variable; confirm
 * those fcn values are unreachable on sparc before changing.
 */
int cpr(int fcn, void *mdep) { #if defined(__sparc) static const char noswapstr[] = "reusable statefile requires " "that no swap area be configured.\n"; static const char blockstr[] = "reusable statefile must be " "a block device. See power.conf(4) and pmconfig(1M).\n"; static const char normalfmt[] = "cannot run normal " "checkpoint/resume when in reusable statefile mode. " "use uadmin A_FREEZE AD_REUSEFINI (uadmin %d %d) " "to exit reusable statefile mode.\n"; static const char modefmt[] = "%s in reusable mode.\n"; #endif register int rc = 0; int cpr_sleeptype; /* * First, reject commands that we don't (yet) support on this arch. * This is easier to understand broken out like this than grotting * through the second switch below. */ switch (fcn) { #if defined(__sparc) case AD_CHECK_SUSPEND_TO_RAM: case AD_SUSPEND_TO_RAM: return (ENOTSUP); case AD_CHECK_SUSPEND_TO_DISK: case AD_SUSPEND_TO_DISK: case AD_CPR_REUSEINIT: case AD_CPR_NOCOMPRESS: case AD_CPR_FORCE: case AD_CPR_REUSABLE: case AD_CPR_REUSEFINI: case AD_CPR_TESTZ: case AD_CPR_TESTNOZ: case AD_CPR_TESTHALT: case AD_CPR_SUSP_DEVICES: cpr_sleeptype = CPR_TODISK; break; #endif #if defined(__x86) case AD_CHECK_SUSPEND_TO_DISK: case AD_SUSPEND_TO_DISK: case AD_CPR_REUSEINIT: case AD_CPR_NOCOMPRESS: case AD_CPR_FORCE: case AD_CPR_REUSABLE: case AD_CPR_REUSEFINI: case AD_CPR_TESTZ: case AD_CPR_TESTNOZ: case AD_CPR_TESTHALT: case AD_CPR_PRINT: return (ENOTSUP); /* The DEV_* values need to be removed after sys-syspend is fixed */ case DEV_CHECK_SUSPEND_TO_RAM: case DEV_SUSPEND_TO_RAM: case AD_CPR_SUSP_DEVICES: case AD_CHECK_SUSPEND_TO_RAM: case AD_SUSPEND_TO_RAM: case AD_LOOPBACK_SUSPEND_TO_RAM_PASS: case AD_LOOPBACK_SUSPEND_TO_RAM_FAIL: case AD_FORCE_SUSPEND_TO_RAM: case AD_DEVICE_SUSPEND_TO_RAM: cpr_sleeptype = CPR_TORAM; break; #endif } #if defined(__sparc) /* * Need to know if we're in reusable mode, but we will likely have * rebooted since REUSEINIT, so we have to get the info from the * file system */ if 
(!cpr_reusable_mode) cpr_reusable_mode = cpr_get_reusable_mode(); cpr_forget_cprconfig(); #endif switch (fcn) { #if defined(__sparc) case AD_CPR_REUSEINIT: if (!i_cpr_reusable_supported()) return (ENOTSUP); if (!cpr_statefile_is_spec()) { cpr_err(CE_CONT, blockstr); return (EINVAL); } if ((rc = cpr_check_spec_statefile()) != 0) return (rc); if (swapinfo) { cpr_err(CE_CONT, noswapstr); return (EINVAL); } cpr_test_mode = 0; break; case AD_CPR_NOCOMPRESS: case AD_CPR_COMPRESS: case AD_CPR_FORCE: if (cpr_reusable_mode) { cpr_err(CE_CONT, normalfmt, A_FREEZE, AD_REUSEFINI); return (ENOTSUP); } cpr_test_mode = 0; break; case AD_CPR_REUSABLE: if (!i_cpr_reusable_supported()) return (ENOTSUP); if (!cpr_statefile_is_spec()) { cpr_err(CE_CONT, blockstr); return (EINVAL); } if ((rc = cpr_check_spec_statefile()) != 0) return (rc); if (swapinfo) { cpr_err(CE_CONT, noswapstr); return (EINVAL); } if ((rc = cpr_reusable_mount_check()) != 0) return (rc); cpr_test_mode = 0; break; case AD_CPR_REUSEFINI: if (!i_cpr_reusable_supported()) return (ENOTSUP); cpr_test_mode = 0; break; case AD_CPR_TESTZ: case AD_CPR_TESTNOZ: case AD_CPR_TESTHALT: if (cpr_reusable_mode) { cpr_err(CE_CONT, normalfmt, A_FREEZE, AD_REUSEFINI); return (ENOTSUP); } cpr_test_mode = 1; break; case AD_CPR_CHECK: if (!i_cpr_is_supported(cpr_sleeptype) || cpr_reusable_mode) return (ENOTSUP); return (0); case AD_CPR_PRINT: CPR_STAT_EVENT_END("POST CPR DELAY"); cpr_stat_event_print(); return (0); #endif case AD_CPR_DEBUG0: cpr_debug = 0; return (0); case AD_CPR_DEBUG1: case AD_CPR_DEBUG2: case AD_CPR_DEBUG3: case AD_CPR_DEBUG4: case AD_CPR_DEBUG5: case AD_CPR_DEBUG7: case AD_CPR_DEBUG8: cpr_debug |= CPR_DEBUG_BIT(fcn); return (0); case AD_CPR_DEBUG9: cpr_debug |= CPR_DEBUG6; return (0); /* The DEV_* values need to be removed after sys-syspend is fixed */ case DEV_CHECK_SUSPEND_TO_RAM: case DEV_SUSPEND_TO_RAM: case AD_CHECK_SUSPEND_TO_RAM: case AD_SUSPEND_TO_RAM: cpr_test_point = LOOP_BACK_NONE; break; case 
AD_LOOPBACK_SUSPEND_TO_RAM_PASS: cpr_test_point = LOOP_BACK_PASS; break; case AD_LOOPBACK_SUSPEND_TO_RAM_FAIL: cpr_test_point = LOOP_BACK_FAIL; break; case AD_FORCE_SUSPEND_TO_RAM: cpr_test_point = FORCE_SUSPEND_TO_RAM; break; case AD_DEVICE_SUSPEND_TO_RAM: if (mdep == NULL) { /* Didn't pass enough arguments */ return (EINVAL); } cpr_test_point = DEVICE_SUSPEND_TO_RAM; cpr_device = (major_t)atoi((char *)mdep); break; case AD_CPR_SUSP_DEVICES: cpr_test_point = FORCE_SUSPEND_TO_RAM; if (cpr_suspend_devices(ddi_root_node()) != DDI_SUCCESS) cmn_err(CE_WARN, "Some devices did not suspend " "and may be unusable"); (void) cpr_resume_devices(ddi_root_node(), 0); return (0); default: return (ENOTSUP); } if (!i_cpr_is_supported(cpr_sleeptype)) return (ENOTSUP); #if defined(__sparc) if ((cpr_sleeptype == CPR_TODISK && !cpr_is_ufs(rootvfs) && !cpr_is_zfs(rootvfs))) return (ENOTSUP); #endif if (fcn == AD_CHECK_SUSPEND_TO_RAM || fcn == DEV_CHECK_SUSPEND_TO_RAM) { ASSERT(i_cpr_is_supported(cpr_sleeptype)); return (0); } #if defined(__sparc) if (fcn == AD_CPR_REUSEINIT) { if (mutex_tryenter(&cpr_slock) == 0) return (EBUSY); if (cpr_reusable_mode) { cpr_err(CE_CONT, modefmt, "already"); mutex_exit(&cpr_slock); return (EBUSY); } rc = i_cpr_reuseinit(); mutex_exit(&cpr_slock); return (rc); } if (fcn == AD_CPR_REUSEFINI) { if (mutex_tryenter(&cpr_slock) == 0) return (EBUSY); if (!cpr_reusable_mode) { cpr_err(CE_CONT, modefmt, "not"); mutex_exit(&cpr_slock); return (EINVAL); } rc = i_cpr_reusefini(); mutex_exit(&cpr_slock); return (rc); } #endif /* * acquire cpr serial lock and init cpr state structure. */ if (rc = cpr_init(fcn)) return (rc); #if defined(__sparc) if (fcn == AD_CPR_REUSABLE) { if ((rc = i_cpr_check_cprinfo()) != 0) { mutex_exit(&cpr_slock); return (rc); } } #endif /* * Call the main cpr routine. If we are successful, we will be coming * down from the resume side, otherwise we are still in suspend. 
*/ cpr_err(CE_CONT, "System is being suspended"); if (rc = cpr_main(cpr_sleeptype)) { CPR->c_flags |= C_ERROR; PMD(PMD_SX, ("cpr: Suspend operation failed.\n")) cpr_err(CE_NOTE, "Suspend operation failed."); } else if (CPR->c_flags & C_SUSPENDING) { /* * In the suspend to RAM case, by the time we get * control back we're already resumed */ if (cpr_sleeptype == CPR_TORAM) { PMD(PMD_SX, ("cpr: cpr CPR_TORAM done\n")) cpr_done(); return (rc); } #if defined(__sparc) PMD(PMD_SX, ("cpr: Suspend operation succeeded.\n")) /* * Back from a successful checkpoint */ if (fcn == AD_CPR_TESTZ || fcn == AD_CPR_TESTNOZ) { mdboot(0, AD_BOOT, "", B_FALSE); /* NOTREACHED */ } /* make sure there are no more changes to the device tree */ PMD(PMD_SX, ("cpr: dev tree freeze\n")) devtree_freeze(); /* * stop other cpus and raise our priority. since there is only * one active cpu after this, and our priority will be too high * for us to be preempted, we're essentially single threaded * from here on out. */ PMD(PMD_SX, ("cpr: stop other cpus\n")) i_cpr_stop_other_cpus(); PMD(PMD_SX, ("cpr: spl6\n")) (void) spl6(); /* * try and reset leaf devices. reset_leaves() should only * be called when there are no other threads that could be * accessing devices */ PMD(PMD_SX, ("cpr: reset leaves\n")) reset_leaves(); /* * If i_cpr_power_down() succeeds, it'll not return * * Drives with write-cache enabled need to flush * their cache. */ if (fcn != AD_CPR_TESTHALT) { PMD(PMD_SX, ("cpr: power down\n")) (void) i_cpr_power_down(cpr_sleeptype); } ASSERT(cpr_sleeptype == CPR_TODISK); /* currently CPR_TODISK comes back via a boot path */ CPR_DEBUG(CPR_DEBUG1, "(Done. Please Switch Off)\n"); halt(NULL); /* NOTREACHED */ #endif } PMD(PMD_SX, ("cpr: cpr done\n")) cpr_done(); return (rc); }
/*
 * Worker-thread entry point for the core packet-processing loop.
 *
 * priv is a struct core_processing_thread * describing this worker's
 * private packet queue. The thread repeatedly dequeues packets from its
 * queue, processes them under the global processing read-lock, and runs
 * timers, until core_run is cleared. Always returns NULL.
 *
 * Locking protocol (order matters):
 *   - tpriv->pkt_queue_lock guards this thread's queue and is held while
 *     sleeping on pkt_queue_cond; it is released before taking any other lock.
 *   - core_pkt_queue_wait_lock guards the producer-side wait condition.
 *   - core_processing_lock (rwlock, taken for read) guards packet processing
 *     against global reconfiguration.
 */
void *core_processing_thread_func(void *priv) {

	struct core_processing_thread *tpriv = priv;

	/* Per-thread packet_info pool must exist before any packet is touched. */
	if (packet_info_pool_init()) {
		halt("Error while initializing the packet_info_pool", 1);
		return NULL;
	}

	registry_perf_inc(perf_thread_active, 1);

	pom_mutex_lock(&tpriv->pkt_queue_lock);

	while (core_run) {

		while (!tpriv->pkt_queue_head) {
			// We are not active while waiting for a packet
			registry_perf_dec(perf_thread_active, 1);

			debug_core("thread %u : waiting", tpriv->thread_id);

			/* Last thread to go idle: if a shutdown is draining, flip the
			 * core state from finishing to idle. */
			if (registry_perf_getval(perf_thread_active) == 0) {
				if (core_get_state() == core_state_finishing)
					core_set_state(core_state_idle);
			}

			/* Normal shutdown path: core_run cleared while the queue is
			 * empty exits here, skipping the halt() below. */
			if (!core_run) {
				pom_mutex_unlock(&tpriv->pkt_queue_lock);
				goto end;
			}

			/* Sleep until a producer queues a packet; the mutex is
			 * atomically released while waiting and re-held on wakeup. */
			int res = pthread_cond_wait(&tpriv->pkt_queue_cond, &tpriv->pkt_queue_lock);
			if (res) {
				pomlog(POMLOG_ERR "Error while waiting for restart condition : %s", pom_strerror(res));
				abort();
				return NULL; // unreachable after abort(); kept for form
			}
			registry_perf_inc(perf_thread_active, 1);
		}

		// Dequeue a packet
		struct core_packet_queue *tmp = tpriv->pkt_queue_head;
		tpriv->pkt_queue_head = tmp->next;
		if (!tpriv->pkt_queue_head)
			tpriv->pkt_queue_tail = NULL;

		// Add it to the unused list (queue entries are recycled, not freed)
		tmp->next = tpriv->pkt_queue_unused;
		tpriv->pkt_queue_unused = tmp;

		tpriv->pkt_count--;
		registry_perf_dec(perf_pkt_queue, 1);

		/* Global queued-packet counter is shared across threads; updated
		 * atomically since it is read without the queue lock elsewhere. */
		__sync_fetch_and_sub(&core_pkt_queue_count, 1);

		if (tpriv->pkt_count < CORE_THREAD_PKT_QUEUE_MIN) {
			pom_mutex_lock(&core_pkt_queue_wait_lock);
			// Tell the input processes that they can continue queuing packets
			int res = pthread_cond_broadcast(&core_pkt_queue_wait_cond);
			if (res) {
				pomlog(POMLOG_ERR "Error while signaling the main pkt_queue condition : %s", pom_strerror(res));
				abort();
			}
			pom_mutex_unlock(&core_pkt_queue_wait_lock);
		}

		// Keep track of our packet (tmp may be reused once the lock drops)
		struct packet *pkt = tmp->pkt;

		debug_core("thread %u : Processing packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));

		/* Drop the queue lock before processing so producers can refill. */
		pom_mutex_unlock(&tpriv->pkt_queue_lock);

		// Lock the processing lock
		pom_rwlock_rlock(&core_processing_lock);

		// Update the current clock
		if (core_clock[tpriv->thread_id] < pkt->ts) // Make sure we keep it monotonous
			core_clock[tpriv->thread_id] = pkt->ts;

		//pomlog(POMLOG_DEBUG "Thread %u processing ...", pthread_self());
		if (core_process_packet(pkt) == POM_ERR) {
			/* Fatal processing error: stop every worker, then fall through
			 * to the halt() below via break. */
			core_run = 0;
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		// Process timers
		if (timers_process() != POM_OK) {
			pom_rwlock_unlock(&core_processing_lock);
			break;
		}

		pom_rwlock_unlock(&core_processing_lock);

		if (packet_release(pkt) != POM_OK) {
			pomlog(POMLOG_ERR "Error while releasing the packet");
			break;
		}

		debug_core("thread %u : Processed packet %p (%u.%06u)", tpriv->thread_id, pkt, pom_ptime_sec(pkt->ts), pom_ptime_usec(pkt->ts));

		// Re-lock our queue for the next run
		pom_mutex_lock(&tpriv->pkt_queue_lock);
	}

	/* Reached when the loop exits via break (processing/timer/release error)
	 * or when core_run is cleared between packets while the queue is
	 * non-empty — NOTE(review): the latter also triggers this halt; confirm
	 * that shutdown always drains via the empty-queue goto above. */
	halt("Processing thread encountered an error", 1);
end:
	packet_info_pool_cleanup();

	return NULL;
}
/*
 * Non-maskable-interrupt exception handler.
 * Logs the fault name on the console, then stops the system via halt().
 */
void _nmi(void)
{
	printf("nmi\n");
	halt();
}
/*
 * Trigger a full system reset by writing the reset-trigger bit into the
 * clock/reset controller's device-reset register, then halt in case the
 * write does not take effect immediately. Never returns.
 */
static void __attribute__((noreturn)) reset(void)
{
	write32(clk_rst_rst_devices_l_ptr, SWR_TRIG_SYS_RST);
	halt();
}
/*
 * Memory-management-fault exception handler.
 * Logs the fault name on the console, then stops the system via halt().
 */
void _memmanage(void)
{
	printf("memmanage\n");
	halt();
}
/*
 * Bring the Adafruit CC3000 WiFi module onto the given access point.
 *
 * wlan_ssid     - access-point SSID
 * wlan_pass     - passphrase (semantics depend on wlan_security)
 * wlan_security - CC3000 security constant (e.g. WLAN_SEC_WPA2)
 *
 * Returns true once associated and (when DHCP is used) an address has been
 * obtained and the connection details displayed; false on any retryable
 * failure. Calls halt() (does not return) on firmware/profile errors.
 * Kicks the hardware watchdog around every long-running step.
 */
boolean CC3000connect(const char* wlan_ssid, const char* wlan_pass, uint8_t wlan_security) {
  Watchdog.reset();

  // Check for compatible firmware (1.13 or later required)
  if (checkFirmwareVersion() < 0x113) halt("Wrong firmware version!");

  // Delete any old connection data on the module
  Serial.println(F("\nDeleting old connection profiles"));
  if (!cc3000.deleteProfiles()) halt("Failed!");

#ifdef STATICIP
  Serial.println(F("Setting static IP"));
  // NOTE(review): hard-coded 10.0.1.x addressing with Google DNS (8.8.4.4);
  // adjust for the target network.
  uint32_t ipAddress = cc3000.IP2U32(10, 0, 1, 19);
  uint32_t netMask = cc3000.IP2U32(255, 255, 255, 0);
  uint32_t defaultGateway = cc3000.IP2U32(10, 0, 1, 1);
  uint32_t dns = cc3000.IP2U32(8, 8, 4, 4);

  if (!cc3000.setStaticIPAddress(ipAddress, netMask, defaultGateway, dns)) {
    Serial.println(F("Failed to set static IP!"));
    while(1);  // spin forever; the watchdog is still enabled and will reset us
  }
#endif

  // Attempt to connect to an access point
  Serial.print(F("\nAttempting to connect to "));
  Serial.print(wlan_ssid); Serial.print(F("..."));

  // Association can exceed the watchdog period, so disable it for this step
  Watchdog.disable();
  // try 3 times
  if (!cc3000.connectToAP(wlan_ssid, wlan_pass, wlan_security, 3)) {
    return false;
  }
  Watchdog.enable(8000);

  Serial.println(F("\nConnected!"));

  uint8_t retries;
#ifndef STATICIP
  /* Wait for DHCP to complete */
  Serial.println(F("Requesting DHCP"));
  retries = 10;
  while (!cc3000.checkDHCP())
  {
    Watchdog.reset();
    delay(1000);
    retries--;
    if (!retries) return false;  // give up after ~10 s without a lease
  }
#endif

  /* Display the IP address DNS, Gateway, etc. */
  retries = 10;
  while (! displayConnectionDetails()) {
    Watchdog.reset();
    delay(1000);
    retries--;
    if (!retries) return false;
  }

  Watchdog.reset();
  return true;
}
/*
 * Usage-fault exception handler.
 * Logs the fault name on the console, then stops the system via halt().
 */
void _usagefault(void)
{
	printf("usagefault\n");
	halt();
}
// Destructor: stop the backing QEMU process before the wrapper object
// goes away, so no emulator instance outlives its controller.
qemuVm_qprocess::~qemuVm_qprocess()
{
	halt();
}
/*
 * PendSV exception handler.
 * Logs the exception name on the console, then stops the system via halt().
 */
void _pendsv(void)
{
	printf("pendsv\n");
	halt();
}
/* ChapelDistribution.chpl:127 */
/*
 * Compiler-generated base-class stub: a concrete domain class must override
 * dsiMyDist. Reaching this implementation is an internal error, so it halts
 * with the caller's line/file info; the trailing return only satisfies the
 * C signature (halt does not return control here).
 */
static BaseDist dsiMyDist(BaseDom this8, int64_t _ln, c_string _fn)
{
	halt("internal error: dsiMyDist is not implemented", _ln, _fn);
	return nil;
}