// Await a single future element of the dataflow argument pack
// (scalar case: dispatched with is_future == mpl::true_,
// is_range == mpl::false_).
//
// If the future's shared state is not yet ready, a continuation bound
// to await_next_respawn is attached to it and this call returns; the
// frame is kept alive across the suspension via the intrusive_ptr
// `this_`.  Once ready, evaluation advances to the next tuple element,
// re-spawning on a new thread when the recursion depth reaches
// HPX_CONTINUATION_MAX_RECURSION_DEPTH to avoid stack overflow.
BOOST_FORCEINLINE void await_next(std::size_t depth, Iter iter,
    boost::mpl::true_, boost::mpl::false_)
{
    typedef typename boost::fusion::result_of::next<Iter>::type
        next_type;
    typedef typename util::decay_unwrap<
        typename boost::fusion::result_of::deref<Iter>::type
    >::type future_type;

    future_type & f_ = boost::fusion::deref(iter);

    typedef typename traits::future_traits<
        future_type
    >::type future_result_type;

    boost::intrusive_ptr<
        lcos::detail::future_data<future_result_type>
    > next_future_data = lcos::detail::get_shared_state(f_);

    if(!next_future_data->is_ready())
    {
        next_future_data->execute_deferred();

        // execute_deferred might have made the future ready
        if (!next_future_data->is_ready())
        {
            void (dataflow_frame::*f)(
                    std::size_t, Iter, boost::mpl::true_, boost::mpl::false_
                ) = &dataflow_frame::await_next_respawn;

            // keep this frame alive until the continuation has run
            boost::intrusive_ptr<dataflow_frame> this_(this);
            next_future_data->set_on_completed(
                hpx::util::bind(
                    f
                  , std::move(this_)
                  , ++depth
                  , std::move(iter)
                  , boost::mpl::true_()
                  , boost::mpl::false_()
                )
            );
            return;
        }
    }

    typedef boost::mpl::bool_<
        boost::is_same<next_type, end_type>::value
    > is_at_end;

    // re-spawn on a new thread to avoid stack overflows
    if (depth >= HPX_CONTINUATION_MAX_RECURSION_DEPTH)
    {
        respawn_await(boost::fusion::next(iter), is_at_end());
        return;
    }

    await(++depth, boost::fusion::next(iter), is_at_end());
}
/*
 * Firmware entry point for the pingrf radio node.
 *
 * Sequence: switch the system clock to the crystal oscillator
 * (SLEEP/CLKCON dance), initialize LEDs, printing, the service layer
 * and the radio, enable interrupts and the watchdog, then run the
 * main event loop.  The loop is a state machine (Idle / Reply /
 * Replying / command-specific handler states) driven by the global
 * `flag` bitmask; Fpanic locks the node into a RED blink loop.
 * Under DEBUG, state/flag/MARCSTATE transitions are traced.
 */
void main(void)
{
	uint32 nrftx = 0, nrxuf = 0;
	int i = 0;
	static __xdata Rcall c;		/* current call being handled */
	uint8 state = Idle;
	uint8 waitflag = 0, clearflag = 0;
	uint8 laststate, lastflag, lastMARCSTATE;

	laststate = state;
	lastflag = flag;
	lastMARCSTATE = MARCSTATE;
	memset(&curcall, 0, sizeof curcall);

	/* Power up the crystal oscillator and switch the clock to it,
	 * then power the unused oscillator back down. */
	SLEEP &= ~SLEEP_OSC_PD;
	await(SLEEP & SLEEP_XOSC_S);
	CLKCON = (CLKCON & ~(CLKCON_CLKSPD | CLKCON_OSC)) | CLKSPD_DIV_1;
	await(!(CLKCON & CLKCON_OSC));
	SLEEP |= SLEEP_OSC_PD;
	await(SLEEP & SLEEP_XOSC_S);

	P1DIR |= LEDBITS;
	printinit();
	srvinit();
	rfinit();
	GREEN = RED = 0;
	// Enables interrupts. (Go go go)
	EA = 1;
	wdinit();
	dprint("pingrf started.\n");
	srvrx();

	for(;;){
		wdreset();
		/* Panic: blink RED forever (watchdog is NOT fed here). */
		if(flag&Fpanic){
			GREEN = 0;
			RED = 0;
			for(;;){
				RED ^= 1;
				sleep(1000);
				/* TODO: reset */
			}
		}
		/* Drop flags the current handler asked to have cleared. */
		if((flag&clearflag) != 0)
			flag &= ~(flag&clearflag);
		switch(state){
		case Idle:
			/* Pick up the next queued call, if any. */
			if(peekcall()->type == Nop)
				break;
			nextcall(&c);
			waitflag = clearflag = 0;
			call(&c, &state, &waitflag, &clearflag);
			break;
		case Reply:
			reply(&c);
			state = Replying;
			break;
		case Replying:
			/* Ftxcall set by the radio when the reply went out. */
			if(flag&Ftxcall){
				flag &= ~Ftxcall;
				state = Idle;
			}
			// GREEN=1;
			break;
		default:
			/* Handler state: resume when any awaited flag fires. */
			if((flag&waitflag) != 0){
				waitflag = flag&waitflag;
				flag &= ~waitflag;
				call(&c, &state, &waitflag, &clearflag);
			}
			/*
			if((flag&waitflag)==waitflag){
				flag &= ~waitflag;
				call(&c, &state, &waitflag, &clearflag);
			}
			*/
			break;
		}
#ifdef DEBUG
		/* NOTE(review): '|' below is bitwise, not '||'; since both
		 * operands are 0/1 comparison results ('!=' binds tighter
		 * than '|') the value is the same, only short-circuiting
		 * is lost. */
		if((i++%100000 == 0) || (flag != lastflag || state != laststate | lastMARCSTATE != MARCSTATE)){
			//__critical {
			wdreset();
			switch(state){
			case Idle:
				dprint("idle");
				break;
			case Replying:
			case Reply:
				dprint("replying %d", c.type);
				break;
			default:
				dprint("handling(%d) %d", state, c.type);;
				break;
			}
			lastflag = flag;
			laststate = state;
			lastMARCSTATE = MARCSTATE;
			dprint(" flag=");
			printflag(flag);
			dprint(" waitflag=");
			printflag(waitflag);
			dprint(" radio=%s\n", strmarcstate(lastMARCSTATE));
			// dprint(" flag=%F waitflag=%F radio=%S\n", flag, waitflag, lastMARCSTATE);
			// // putchar('.');
			// putchar('\n');
			//}
		}
#endif
	}
}
// Await a range of futures held in the dataflow argument pack
// (range case).  Iterates the range [next, end); for the first
// not-ready future a continuation bound to await_range_respawn is
// attached and this call returns (the frame is kept alive via the
// intrusive_ptr `this_`).  When the whole range is ready, evaluation
// advances to the next tuple element, re-spawning on a new thread when
// the recursion depth reaches HPX_CONTINUATION_MAX_RECURSION_DEPTH to
// avoid stack overflow.
void await_range(std::size_t depth, TupleIter iter, Iter next, Iter end)
{
    void (dataflow_frame::*f)(
            std::size_t, TupleIter, Iter, Iter
        ) = &dataflow_frame::await_range_respawn;

    for (/**/; next != end; ++next)
    {
        typedef typename std::iterator_traits<
            Iter
        >::value_type future_type;
        typedef typename traits::future_traits<
            future_type
        >::type future_result_type;

        boost::intrusive_ptr<
            lcos::detail::future_data<future_result_type>
        > next_future_data = lcos::detail::get_shared_state(*next);

        if (!next_future_data->is_ready())
        {
            next_future_data->execute_deferred();

            // execute_deferred might have made the future ready
            if (!next_future_data->is_ready())
            {
                // keep this frame alive until the continuation has run
                boost::intrusive_ptr<dataflow_frame> this_(this);

                // FIX: use hpx::util::bind, matching await_next above;
                // the sibling code path used boost::bind, the only
                // place in this frame not using the HPX binder.
                next_future_data->set_on_completed(
                    hpx::util::bind(
                        f
                      , std::move(this_)
                      , ++depth
                      , std::move(iter)
                      , std::move(next)
                      , std::move(end)
                    )
                );
                return;
            }
        }
    }

    typedef typename boost::fusion::result_of::next<
        TupleIter
    >::type next_type;
    typedef boost::mpl::bool_<
        boost::is_same<next_type, end_type>::value
    > is_at_end;

    // re-spawn on a new thread to avoid stack overflows
    if (depth >= HPX_CONTINUATION_MAX_RECURSION_DEPTH)
    {
        respawn_await(boost::fusion::next(iter), is_at_end());
        return;
    }

    await(++depth, boost::fusion::next(iter), is_at_end());
}
// Fetch `uri` into `directory` by spawning a `curl` subprocess.
//
// curl writes the payload to <directory>/<basename(uri.path)> and the
// HTTP response code to stdout (-w "%{http_code}").  The returned
// future is satisfied once the subprocess has been reaped and both its
// stdout and stderr have been read; it fails if the process cannot be
// spawned/reaped, exits non-zero, or reports a response code other
// than 200 OK.
Future<Nothing> CurlFetcherPlugin::fetch(
    const URI& uri,
    const string& directory)
{
  // TODO(jieyu): Validate the given URI.

  if (!uri.has_path()) {
    return Failure("URI path is not specified");
  }

  Try<Nothing> mkdir = os::mkdir(directory);
  if (mkdir.isError()) {
    return Failure(
        "Failed to create directory '" +
        directory + "': " + mkdir.error());
  }

  // TODO(jieyu): Allow user to specify the name of the output file.
  const string output = path::join(directory, Path(uri.path()).basename());

  const vector<string> argv = {
    "curl",
    "-s",                  // Don't show progress meter or error messages.
    "-S",                  // Makes curl show an error message if it fails.
    "-L",                  // Follow HTTP 3xx redirects.
    "-w", "%{http_code}",  // Display HTTP response code on stdout.
    "-o", output,          // Write output to the file.
    strings::trim(stringify(uri))
  };

  Try<Subprocess> s = subprocess(
      "curl",
      argv,
      Subprocess::PATH("/dev/null"),  // No stdin.
      Subprocess::PIPE(),             // Capture stdout (response code).
      Subprocess::PIPE());            // Capture stderr (error text).

  if (s.isError()) {
    return Failure("Failed to exec the curl subprocess: " + s.error());
  }

  // Wait for exit status, stdout and stderr together, then inspect all
  // three in the continuation.
  return await(
      s.get().status(),
      io::read(s.get().out().get()),
      io::read(s.get().err().get()))
    .then([](const tuple<
        Future<Option<int>>,
        Future<string>,
        Future<string>>& t) -> Future<Nothing> {
      Future<Option<int>> status = std::get<0>(t);
      if (!status.isReady()) {
        return Failure(
            "Failed to get the exit status of the curl subprocess: " +
            (status.isFailed() ? status.failure() : "discarded"));
      }

      if (status->isNone()) {
        return Failure("Failed to reap the curl subprocess");
      }

      if (status->get() != 0) {
        // Non-zero exit: report curl's stderr if we managed to read it.
        Future<string> error = std::get<2>(t);
        if (!error.isReady()) {
          return Failure(
              "Failed to perform 'curl'. Reading stderr failed: " +
              (error.isFailed() ? error.failure() : "discarded"));
        }

        return Failure("Failed to perform 'curl': " + error.get());
      }

      Future<string> output = std::get<1>(t);
      if (!output.isReady()) {
        return Failure(
            "Failed to read stdout from 'curl': " +
            (output.isFailed() ? output.failure() : "discarded"));
      }

      // Parse the output and get the HTTP response code.
      Try<int> code = numify<int>(output.get());
      if (code.isError()) {
        return Failure("Unexpected output from 'curl': " + output.get());
      }

      if (code.get() != http::Status::OK) {
        return Failure(
            "Unexpected HTTP response code: " +
            http::Status::string(code.get()));
      }

      return Nothing();
    });
}
/*
 * Execute a parse tree node `argt`.
 *
 * Dispatches on the node type: TCOM runs builtins in-process (falling
 * through to TFORK for external commands), TFORK forks and either
 * waits, posts, or backgrounds the child depending on FAMP/FPOU/FPRS,
 * TFIL builds a pipeline, TLST/TAND/TORF sequence lists, TFOR/TWH/TUN
 * are loops, TIF/TSW are conditionals.  Several builtin cases fall
 * through deliberately (SYSEXIT->SYSNULL never returns via exitsh;
 * SYSEXEC->SYSLOGIN when no argument; SYSRDONLY->SYSXPORT share the
 * attribute loop).
 *
 * NOTE(review): this excerpt ends right after exitset(); the closing
 * of the function body (and its return) is not visible here.
 */
int execute(TREPTR argt, int execflg, int *pf1, int *pf2)
{
	/* `stakbot' is preserved by this routine */
	register TREPTR t;
	STKPTR sav = savstak();

	sigchk();
	if ((t = argt) && execbrk == 0) {
		register int treeflgs;
		int oldexit, type;
		register char **com;

		treeflgs = t->tretyp;
		type = treeflgs & COMMSK;
		oldexit = exitval;
		exitval = 0;
		switch (type) {
		case TCOM:
		{
			STRING a1;
			int argn, internal;
			ARGPTR schain = gchain;
			IOPTR io = t->treio;

			gchain = 0;
			argn = getarg((void *)t);/*FIXME*/
			com = scan(argn);
			a1 = com[1];	/* first argument of the command */
			gchain = schain;
			if ((internal = syslook(com[0], commands)) || argn == 0)
				setlist(((COMPTR) t)->comset, 0);
			if (argn && (flags & noexec) == 0) {
				/* print command if execpr */
				if (flags & execpr) {
					argn = 0;
					prs(execpmsg);
					while (com[argn] != ENDARGS) {
						prs(com[argn++]);
						blank();
					}
					newline();
				}
				/* builtin dispatch */
				switch (internal) {
				case SYSDOT:	/* . file */
					if (a1) {
						register int f;

						if ((f = pathopen(getpath(a1), a1)) < 0)
							failed(a1, notfound);
						else
							execexp(0, f);
					}
					break;
				case SYSTIMES:
					{
						struct tms t;

						times(&t);
						prt(t.tms_cutime);
						blank();
						prt(t.tms_cstime);
						newline();
					}
					break;
				case SYSEXIT:
					/* exitsh does not return; fallthrough
					 * label is unreachable */
					exitsh(a1 ? stoi(a1) : oldexit);
				case SYSNULL:	/* : */
					io = 0;
					break;
				case SYSCONT:
					execbrk = -loopcnt;
					break;
				case SYSBREAK:
					if ((execbrk = loopcnt) && a1)
						breakcnt = stoi(a1);
					break;
				case SYSTRAP:
					if (a1) {
						BOOL clear;

						/* numeric first arg => clear traps */
						if ((clear = digit(*a1)) == 0)
							++com;
						while (*++com) {
							int i;

							if ((i = stoi(*com)) >= MAXTRAP || i < MINTRAP)
								failed(*com, badtrap);
							else if (clear)
								clrsig(i);
							else {
								replace(&trapcom[i], a1);
								if (*a1)
									getsig(i);
								else
									ignsig(i);
							}
						}
					} else {
						/* print out current traps */
						int i;

						for (i = 0; i < MAXTRAP; i++) {
							if (trapcom[i]) {
								prn(i);
								prs(colon);
								prs(trapcom[i]);
								newline();
							}
						}
					}
					break;
				case SYSEXEC:
					com++;
					initio(io);
					ioset = 0;
					io = 0;
					if (a1 == 0)
						break;
					/* fallthrough: exec with argument */
				case SYSLOGIN:
					flags |= forked;
					oldsigs();
					execa((const char **)com);
					done();
				case SYSCD:
					if (flags & rshflg)
						failed(com[0], restricted);
					else if ((a1 == 0 && (a1 = (char *)homenod.namval) == 0) || chdir(a1) < 0) /* FIXME */
						failed(a1, baddir);
					break;
				case SYSSHFT:
					if (dolc < 1)
						error(badshift);
					else {
						dolv++;
						dolc--;
					}
					assnum(&dolladr, dolc);
					break;
				case SYSWAIT:
					await(-1);
					break;
				case SYSREAD:
					exitval = readvar(&com[1]);
					break;
					/* case SYSTST: exitval=testcmd(com); break; */
				case SYSSET:
					if (a1) {
						int argc;

						argc = options(argn, (const char **)com);
						if (argc > 1)
							setargs((const char **)com + argn - argc);
					} else if (((COMPTR) t)->comset == 0)
						/* Scan name chain and print */
						namscan(printnam);
					break;
				case SYSRDONLY:
					exitval = N_RDONLY;
					/* fallthrough: shares loop with export */
				case SYSXPORT:
					if (exitval == 0)
						exitval = N_EXPORT;;
					if (a1) {
						while (*++com)
							attrib(lookup(*com), exitval);
					} else {
						namscan(printflg);
					}
					exitval = 0;
					break;
				case SYSEVAL:
					if (a1)
						execexp(a1, (UFD)&com[2]); /* FIXME */
					break;
				case SYSUMASK:
					if (a1) {
						/* parse octal argument */
						int c, i;

						i = 0;
						while ((c = *a1++) >= '0' && c <= '7')
							i = (i << 3) + c - '0';
						umask(i);
					} else {
						/* print current mask in octal */
						int i, j;

						umask(i = umask(0));
						prc('0');
						for (j = 6; j >= 0; j -= 3)
							prc(((i >> j) & 07) + '0');
						newline();
					}
					break;
				default:
					internal = builtin(argn, com);
				}

				if (internal) {
					if (io)
						error(illegal);
					chktrap();
					break;
				}
			} else if (t->treio == 0)
				break;
		}
		/* TCOM with an external command falls through to TFORK */
		case TFORK:
			if (execflg && (treeflgs & (FAMP | FPOU)) == 0)
				parent = 0;
			else {
				while ((parent = fork()) == -1) {
					sigchk();
					alarm(10);
					pause();
				}
			}

			if (parent) {
				/* This is the parent branch of fork; */
				/* it may or may not wait for the child. */
				if (treeflgs & FPRS && flags & ttyflg) {
					prn(parent);
					newline();
				}
				if (treeflgs & FPCL)
					closepipe(pf1);
				if ((treeflgs & (FAMP | FPOU)) == 0)
					await(parent);
				else if ((treeflgs & FAMP) == 0)
					post(parent);
				else
					assnum(&pcsadr, parent);
				chktrap();
				break;
			} else {
				/* this is the forked branch (child) of execute */
				flags |= forked;
				iotemp = 0;
				postclr();
				settmp();

				/* Turn off INTR and QUIT if `FINT' */
				/* Reset ramaining signals to parent */
				/* except for those `lost' by trap */
				oldsigs();
				if (treeflgs & FINT) {
					signal(INTR, SIG_IGN);
					signal(QUIT, SIG_IGN);
				}

				/* pipe in or out */
				if (treeflgs & FPIN) {
					sh_rename(pf1[INPIPE], 0);
					close(pf1[OTPIPE]);
				}
				if (treeflgs & FPOU) {
					sh_rename(pf2[OTPIPE], 1);
					close(pf2[INPIPE]);
				}

				/* default std input for & */
				if (treeflgs & FINT && ioset == 0)
					sh_rename(chkopen(devnull), 0);

				/* io redirection */
				initio(t->treio);
				if (type != TCOM)
					execute(((FORKPTR) t)->forktre, 1, NULL, NULL);
				else if (com[0] != ENDARGS) {
					setlist(((COMPTR) t)->comset, N_EXPORT);
					execa((const char **)com);
				}
				done();
			}

		case TPAR:	/* ( ... ) */
			sh_rename(dup(2), output);
			execute(((PARPTR) t)->partre, execflg, NULL, NULL);
			done();

		case TFIL:	/* pipeline */
			{
				int pv[2];

				chkpipe(pv);
				if (execute(((LSTPTR) t)->lstlef, 0, pf1, pv) == 0)
					execute(((LSTPTR) t)->lstrit, execflg, pv, pf2);
				else
					closepipe(pv);
				break;
			}

		case TLST:	/* cmd ; cmd */
			execute(((LSTPTR) t)->lstlef, 0, NULL, NULL);
			execute(((LSTPTR) t)->lstrit, execflg, NULL, NULL);
			break;

		case TAND:	/* cmd && cmd */
			if (execute(((LSTPTR) t)->lstlef, 0, NULL, NULL) == 0)
				execute(((LSTPTR) t)->lstrit, execflg, NULL, NULL);
			break;

		case TORF:	/* cmd || cmd */
			if (execute(((LSTPTR) t)->lstlef, 0, NULL, NULL) != 0)
				execute(((LSTPTR) t)->lstrit, execflg, NULL, NULL);
			break;

		case TFOR:
			{
				NAMPTR n = lookup(((FORPTR) t)->fornam);
				char **args;
				DOLPTR argsav = 0;

				if (((FORPTR) t)->forlst == 0) {
					/* no word list: iterate "$@" */
					args = (char **)dolv + 1;
					argsav = useargs();
				} else {
					ARGPTR schain = gchain;

					gchain = 0;
					trim((args = scan(getarg(((FORPTR) t)->forlst)))[0]);
					gchain = schain;
				}
				loopcnt++;
				while (*args != ENDARGS && execbrk == 0) {
					assign(n, *args++);
					execute(((FORPTR) t)->fortre, 0, NULL, NULL);
					if (execbrk < 0) {	/* continue */
						execbrk = 0;
					}
				}
				if (breakcnt)
					breakcnt--;
				execbrk = breakcnt;
				loopcnt--;
				argfor = freeargs(argsav);
				break;
			}

		case TWH:	/* while */
		case TUN:	/* until */
			{
				int i = 0;

				loopcnt++;
				while (execbrk == 0 && (execute(((WHPTR) t)->whtre, 0, NULL, NULL) == 0) == (type == TWH)) {
					i = execute(((WHPTR) t)->dotre, 0, NULL, NULL);
					if (execbrk < 0)	/* continue */
						execbrk = 0;
				}
				if (breakcnt)
					breakcnt--;
				execbrk = breakcnt;
				loopcnt--;
				exitval = i;
				break;
			}

		case TIF:
			if (execute(((IFPTR) t)->iftre, 0, NULL, NULL) == 0)
				execute(((IFPTR) t)->thtre, execflg, NULL, NULL);
			else
				execute(((IFPTR) t)->eltre, execflg, NULL, NULL);
			break;

		case TSW:	/* case */
			{
				register char *r = mactrim(((SWPTR) t)->swarg);

				t = (TREPTR) ((SWPTR) t)->swlst;
				while (t) {
					ARGPTR rex = ((REGPTR) t)->regptr;

					while (rex) {
						register char *s;

						if (gmatch(r, s = macro(rex->argval)) || (trim(s), eq(r, s))) {
							execute(((REGPTR)t)->regcom, 0, NULL, NULL);
							t = 0;
							break;
						} else
							rex = ((ARGPTR)rex)->argnxt;
					}
					if (t)
						t = (TREPTR) ((REGPTR) t)->regnxt;
				}
			}
			break;
		}
		exitset();
	}
/*ARGSUSED*/
/*
 * First open of a SCSI disk target: bring the device online.
 *
 * Probes the device with READ CAPACITY (retrying on SCSI_RET_RETRY),
 * determines block size and geometry (via MODE SENSE pages or a second
 * READ CAPACITY with PMI), then reads and validates a BSD disklabel
 * (or falls back to a vendor label / defaults).
 *
 * BUGFIX: the disk_size computation read
 *     ... | (cap->lba3<<8) | cap->lba4 + 1;
 * but '+' binds tighter than '|', so 1 was added to the low byte
 * BEFORE or-ing (wrong whenever lba4 == 0xff).  READ CAPACITY returns
 * the LBA of the LAST block, so the +1 must apply to the whole 32-bit
 * value — exactly as the PMI path below already does
 * ("secs_per_cyl = ... | cap->lba4; secs_per_cyl += 1;").
 */
scsi_ret_t scdisk_open(
	target_info_t	*tgt,
	io_req_t	req)
{
	register int		i;
	register scsi_ret_t	ret = SCSI_RET_SUCCESS;
	unsigned int		disk_size, secs_per_cyl, sector_size;
	unsigned int		nsectors, ntracks, ncylinders;
	scsi_rcap_data_t	*cap;
	struct disklabel	*label = 0;
	io_req_t		ior;
	void			(*readfun)(
					target_info_t	*tgt,
					unsigned int	secno,
					io_req_t	ior) = scdisk_read;
	char			*data, *rdata = 0;
	boolean_t		look_for_label;

	if (tgt->flags & TGT_ONLINE)
		return SCSI_RET_SUCCESS;

	tgt->lun = rzlun(req->io_unit);

	/*
	 * Dummy ior for proper sync purposes
	 */
	io_req_alloc(ior);
	bzero((char *)ior, sizeof(*ior));
	simple_lock_init(&(ior)->io_req_lock, ETAP_IO_REQ);

#if 0
	/*
	 * Set the LBN to tgt->block_size with a MODE SELECT.
	 * xxx do a MODE SENSE instead ?
	 */
	/*
	 * Ugh. Can't use tgt->block_size here -- not set up
	 * yet. Also can't use DEV_BSIZE. So...since MK6
	 * dispenses with this command entirely, let's do
	 * so as well.
	 */
	for (i = 0; i < 5; i++) {
		ior->io_op = IO_INTERNAL;
		ior->io_error = 0;
		ret = scdisk_mode_select(tgt, tgt->block_size, ior,0,0,FALSE);
		if (ret == SCSI_RET_SUCCESS)
			break;
		if (ret == SCSI_RET_RETRY) {
			timeout((timeout_fcn_t)wakeup, tgt, 2*hz);
			await(tgt);
		}
		if (ret == SCSI_RET_DEVICE_DOWN)
			goto done;
	}
	if (ret != SCSI_RET_SUCCESS) {
		scsi_error( tgt, SCSI_ERR_MSEL, ret, 0);
		ret = D_INVALID_SIZE;
		goto done;
	}
#endif

#ifdef hp_pa
	if (tgt->flags & TGT_REMOVABLE_MEDIA) {
		scsi_mode_sense_page5_t *page5;
		unsigned char mode_save[0xff];
		scsi_mode_select_param_t *parm;
		int length;

		if((ret = scsi_mode_sense(tgt, 0x05, 0xff, 0)) != SCSI_RET_SUCCESS)
			goto done;
		length = *tgt->cmd_ptr + 1;
		bcopy(tgt->cmd_ptr, mode_save, length);
		page5 = (scsi_mode_sense_page5_t *)(mode_save + 4 + mode_save[3]);
#if 1
		/* force sector size to 512 */
		parm = (scsi_mode_select_param_t *)mode_save;
		parm->reserved1 = 0;
		parm->medium_type = 2;
		parm->device_spec &= ~0x90;
		parm->descs[0].density_code = 2;
		parm->descs[0].nblocks1 = 0;
		parm->descs[0].nblocks2 = 0;
		parm->descs[0].nblocks3 = 0;
		parm->descs[0].reclen1 = 0x2;
		parm->descs[0].reclen2 = 0;
		parm->descs[0].reclen3 = 0;
		page5->ps = 0;
		page5->page_code &= ~0x80;
		page5->sectors_per_track = page5->sectors_per_track *
			(page5->bytes_per_sector_msb << 8 |
			 page5->bytes_per_sector_lsb) / 512;
		page5->bytes_per_sector_msb = 2;
		page5->bytes_per_sector_lsb = 0;
		length -= parm->desc_len;
		parm->desc_len = 0;
		bcopy((const char*)page5, mode_save+4, sizeof(*page5));
		if((ret = scdisk_mode_select(tgt, 0, 0, mode_save, length, 0)) != SCSI_RET_SUCCESS)
			goto done;
		if((ret = scsi_mode_sense(tgt, 0x05, 0xff, 0)) != SCSI_RET_SUCCESS)
			goto done;
		length = *tgt->cmd_ptr + 1;
		bcopy(tgt->cmd_ptr, mode_save, length);
#endif
		/* geometry straight from mode page 5 */
		ntracks = page5->number_of_heads;
		nsectors = page5->sectors_per_track;
		sector_size = page5->bytes_per_sector_msb << 8 |
			page5->bytes_per_sector_lsb;
		ncylinders = page5->number_of_cylinders_msb << 8 |
			page5->number_of_cylinders_lsb;
		secs_per_cyl = nsectors * ntracks;
		look_for_label = FALSE;
		geom_done = TRUE;
	}
#endif

	/*
	 * Do a READ CAPACITY to get max size. Check LBN too.
	 */
	for (i = 0; i < 5; i++) {
		ior->io_op = IO_INTERNAL;
		ior->io_error = 0;
		ret = scsi_read_capacity(tgt, 0, ior);
		if (ret == SCSI_RET_SUCCESS)
			break;
		if (ret == SCSI_RET_RETRY) {
			/* device not ready yet: sleep ~2s and retry */
			timeout((timeout_fcn_t)wakeup, tgt, 2*hz);
			await(tgt);
		}
		if (ret == SCSI_RET_DEVICE_DOWN)
			goto done;
	}
	if (ret != SCSI_RET_SUCCESS) {
		scsi_error( tgt, SCSI_ERR_MSEL, ret, 0);
		ret = D_INVALID_SIZE;
		goto done;
	}

	cap = (scsi_rcap_data_t*) tgt->cmd_ptr;
	/* READ CAPACITY returns the last LBA; capacity = last LBA + 1.
	 * (Parenthesized: '+' binds tighter than '|'.) */
	disk_size = ((cap->lba1<<24) | (cap->lba2<<16) |
		     (cap->lba3<< 8) |  cap->lba4) + 1;

	if (scsi_debug)
		printf("rz%d holds %d blocks\n", tgt->unit_no, disk_size);
	tgt->block_size = (cap->blen1<<24) | (cap->blen2<<16) |
			  (cap->blen3<<8 ) |  cap->blen4;
	if (scsi_debug) {
		printf("rz%d block size is %d bytes/block\n",
		       tgt->unit_no, tgt->block_size);
	}
	if (tgt->block_size > RZDISK_MAX_SECTOR || tgt->block_size <= 0) {
		ret = D_INVALID_SIZE;
		goto done;
	}

	/* Scratch buffer for label reads; kept within one page. */
	rdata = (char *) kalloc(2*tgt->block_size);
	if (round_page(rdata) == round_page(rdata + tgt->block_size))
		data = rdata;
	else
		data = (char *)round_page(rdata);
#ifdef POWERMAC
	/* XXX TODO NMGS remove! must be cache aligned for now */
	if ((unsigned long)data & 0x1f)
		data = (char*)((unsigned long)(data + 0x1f) & ~0x1f);
	if (round_page(data) != round_page(data + tgt->block_size))
		data = (char *)round_page(data);
#endif /* POWERMAC */

	if (disk_size > SCSI_CMD_READ_MAX_LBA)
		tgt->flags |= TGT_BIG;
	/*
	 * Mandatory long-form commands ?
	 */
	if (BGET(scsi_use_long_form,(unsigned char)tgt->masterno,tgt->target_id))
		tgt->flags |= TGT_BIG;
	if (tgt->flags & TGT_BIG)
		readfun = scsi_long_read;

	ior->io_op = IO_INTERNAL;
	ior->io_error = 0;

#ifdef hp_pa
	if(geom_done)
		goto setup_label;
#endif
	/*
	 * Find out about the phys disk geometry
	 */
#if PARAGON860
	/*
	 * The NCR RAID controller does not support a read capacity command
	 * with the Partial Medium Indicator (PMI) bit set. Therefore we
	 * have to calculate the number of sectors per cylinder from data
	 * in the mode select pages. This method should work for standalone
	 * disks as well.
	 */
	/*
	 * Read page 3 to find the number of sectors/track and bytes/sector
	 */
	ret = scsi_mode_sense(tgt, 0x03, 0xff, ior);
	/* scsi_error(...) */
	{
		scsi_mode_sense_page3_t *page3;

		page3 = (scsi_mode_sense_page3_t *)
			(((scsi_mode_sense_data_t *)tgt->cmd_ptr) + 1);
		nsectors = (page3->sectors_per_track_msb << 8) |
			page3->sectors_per_track_lsb;
		sector_size = (page3->bytes_per_sector_msb << 8) |
			page3->bytes_per_sector_lsb;
	}
	ior->io_op = IO_INTERNAL;
	ior->io_error = 0;
	/*
	 * Read page 4 to find the number of cylinders and tracks/cylinder
	 */
	ret = scsi_mode_sense(tgt, 0x04, 0xff, ior);
	/* scsi_error(...) */
	{
		scsi_mode_sense_page4_t *page4;

		page4 = (scsi_mode_sense_page4_t *)
			(((scsi_mode_sense_data_t *)tgt->cmd_ptr) + 1);
		ncylinders = (page4->number_of_cylinders_msb << 16) |
			(page4->number_of_cylinders << 8) |
			page4->number_of_cylinders_lsb;
		ntracks = page4->number_of_heads;
	}
	/*
	 * Calculate the sectors per cylinder (sec/track * tracks/cyl)
	 */
	secs_per_cyl = nsectors * ntracks;
	if (scsi_debug) {
		printf("rz%d: %d bytes/sec %d sec/track\n",
		       tgt->unit_no, sector_size, nsectors);
		printf(" %d tracks/cyl %d cyl/unit\n",
		       ntracks, ncylinders);
	}
#else /* PARAGON860 */
	/* Read page one to get read / write error recovery info */
	ret = scsi_mode_sense(tgt, 0x01, 0xff, ior);
	if(ret == SCSI_RET_SUCCESS) {
		scsi_mode_sense_page1_t *page1;
		unsigned char mode_save[0xff];
		int length;

		length = *tgt->cmd_ptr + 1;
		bcopy(tgt->cmd_ptr, mode_save, length);
		page1 = (scsi_mode_sense_page1_t *)(mode_save + 4 + mode_save[3]);
		*mode_save = 0; /* mode data length */
		page1->ps = 0;
		page1->flags = PAGE1_AWRE | PAGE1_ARRE | PAGE1_TB | PAGE1_PER;
		/*
		 * Enable automatic reallocation of bad blocks,
		 * Report any recovered errors.
		 */
		ior->io_op = IO_INTERNAL;
		ior->io_error = 0;
		ret = scdisk_mode_select(tgt, 0, ior, mode_save, length, 0);
		if(ret != SCSI_RET_SUCCESS) {
			if (scsi_debug)
				printf("rz%d: Can't change error recovery parameters\n",
				       tgt->unit_no);
		}
	}
	ior->io_op = IO_INTERNAL;
	ior->io_error = 0;
#ifdef POWERMAC
	tgt->flags |= TGT_OPTIONAL_CMD;
#endif
	/* READ CAPACITY with PMI: last block of the first cylinder */
	ret = scsi_read_capacity( tgt, 1, ior);
#ifdef POWERMAC
	tgt->flags &= ~TGT_OPTIONAL_CMD;
#endif
	/* scsi_error(...) */
	if (ret) {
		secs_per_cyl = 16;	/* pure fiction */
		sector_size = tgt->block_size;
	} else {
		cap = (scsi_rcap_data_t*) tgt->cmd_ptr;
		secs_per_cyl = (cap->lba1<<24) | (cap->lba2<<16) |
			       (cap->lba3<< 8) |  cap->lba4;
		secs_per_cyl += 1;
		sector_size = (cap->blen1<<24) | (cap->blen2<<16) |
			      (cap->blen3<<8 ) |  cap->blen4;
	}
	if (scsi_debug)
		printf("rz%d: %d sect/cyl %d bytes/sec\n",
		       tgt->unit_no, secs_per_cyl, sector_size);
#endif /* PARAGON860 */

#if NSCSI2 > 0
	/*
	 * ... and a bunch of other things for scsi2
	 */
#endif /* NSCSI2 > 0 */

	/*
	 * Get partition table off pack
	 */
	if (tgt->dev_ops == &scsi_devsw[SCSI_CDROM]) {
		/* no label on a CD-ROM */
		look_for_label = FALSE;
	} else {
		look_for_label = TRUE;
	}

setup_label:
	if (look_for_label) {
		/* first look for a BSD label */
		ior->io_data = data;
		ior->io_count = tgt->block_size;
		ior->io_op = IO_READ;
		ior->io_error = 0;
		tgt->ior = ior;
		(*readfun)( tgt, LABELOFFSET/tgt->block_size, ior);
		iowait(ior);
		if (!ior->io_error) {
			/* Search for BSD label, might be a bit further along */
			register int j;

			for (i = LABELOFFSET % tgt->block_size;
			     i < (tgt->block_size-sizeof(struct disklabel));
			     i += sizeof(int)) {
				label = (struct disklabel *) &data[i];
				if (label->d_magic == DISKMAGIC &&
				    label->d_magic2 == DISKMAGIC) {
					break;
				} else
					label = (struct disklabel *) 0;
			}
		}
	} else {
		label = (struct disklabel *) 0;
	}

	if (label) {
		if (scsi_debug)
			printf("{Using BSD label}");
		tgt->dev_info.disk.l = *label;
	} else {
		/* then look for a vendor's label, but first fill in
		   defaults and what we found */
		label = &tgt->dev_info.disk.l;
		*label = scsi_default_label;
		label->d_secsize = sector_size;
		label->d_nsectors = nsectors;
		label->d_ntracks = ntracks;
		label->d_ncylinders = ncylinders;
		label->d_secpercyl = secs_per_cyl;
		label->d_secperunit = disk_size;
		ior->io_data = data;
		if (!look_for_label || !rz_vendor_label(tgt, label, ior)) {
			if (look_for_label) {
				printf("%s rz%d, %s\n",
				       "WARNING: No valid disk label on",
				       tgt->unit_no, "using defaults");
			}
			/* Validate partitions a and c for initialization */
			tgt->dev_info.disk.l.d_partitions[0].p_offset = 0;
			tgt->dev_info.disk.l.d_partitions[0].p_size = disk_size;
			tgt->dev_info.disk.l.d_partitions[2].p_offset = 0;
			tgt->dev_info.disk.l.d_partitions[2].p_size = disk_size;
			tgt->dev_info.disk.l.d_partitions[MAXPARTITIONS].p_offset = 0;
			tgt->dev_info.disk.l.d_partitions[MAXPARTITIONS].p_size = -1;
		}
		label->d_checksum = 0;
		label->d_checksum = dkcksum(label);
	}
	ret = SCSI_RET_SUCCESS;

done:
	if (rdata)
		kfree((vm_offset_t) rdata, 2 * tgt->block_size);
	io_req_free(ior);
	return ret;
}
// Benchmark worker: repeatedly enters/exits a two-thread software
// mutual-exclusion protocol (per-thread intent flags in intents[] plus
// a `last` turn variable, Dekker-style) and counts entries per RUN.
// FLICKER deliberately toggles the shared variables to stress the
// protocol; FAST varies the thread's starting identity each entry.
static void *Worker( void *arg ) {
	TYPE id = (size_t)arg;
	uint64_t entry;				// entries into the CS this run
	int other = inv( id );			// int is better than TYPE
#ifdef FAST
	unsigned int cnt = 0, oid = id;		// oid: original id, restored per run
#endif // FAST

	for ( int r = 0; r < RUNS; r += 1 ) {
		entry = 0;
		while ( stop == 0 ) {
			// entry protocol
			for ( ;; ) {
#ifdef FLICKER
				for ( int i = 0; i < 100; i += 1 ) intents[id] = i % 2; // flicker
#endif // FLICKER
				intents[id] = WantIn;	// declare intent
				// Necessary to prevent the read of intents[other] from floating above the assignment
				// intents[id] = WantIn, when the hardware determines the two subscripts are different.
				Fence();		// force store before more loads
				if ( FASTPATH( intents[other] == DontWantIn ) ) break;
				if ( last == id ) {	// not our turn: back off
#ifdef FLICKER
					for ( int i = 0; i < 100; i += 1 ) intents[id] = i % 2; // flicker
#endif // FLICKER
					intents[id] = DontWantIn;
					// Optional fence to prevent LD of "last" from being lifted above store of
					// intends[id]=DontWantIn. Because a thread only writes its own id into "last",
					// and because of eventual consistency (writes eventually become visible),
					// the fence is conservative.
					//Fence();	// force store before more loads
					await( last != id );	// low priority busy wait
				} // if
			} // for
			CriticalSection( id );
			// exit protocol: yield the turn, withdraw intent
#ifdef FLICKER
			for ( int i = id; i < 100; i += 1 ) last = i % 2; // flicker
#endif // FLICKER
			last = id;		// exit protocol
#ifdef FLICKER
			for ( int i = 0; i < 100; i += 1 ) intents[id] = i % 2; // flicker
#endif // FLICKER
			intents[id] = DontWantIn;
#ifdef FAST
			id = startpoint( cnt );	// different starting point each experiment
			other = inv( id );
			cnt = cycleUp( cnt, NoStartPoints );
#endif // FAST
			entry += 1;
		} // while
#ifdef FAST
		id = oid;
		other = inv( id );
#endif // FAST
		entries[r][id] = entry;
		// rendezvous with the coordinator between runs
		__sync_fetch_and_add( &Arrived, 1 );
		while ( stop != 0 ) Pause();
		__sync_fetch_and_add( &Arrived, -1 );
	} // for
	return NULL;
} // Worker
// Block until data is available via await(), storing it into `buf`
// (capacity `n`); the first parameter is unused and therefore unnamed.
// Returns whatever await() returns.
int oscmdwait(void *, char *buf, int n)
{
    int const rc = await(buf, n);
    return rc;
}
/* Pulse the LED pin: drive it high, wait 1,000,000 ticks, drive it low. */
static inline void blink(void)
{
	int mask = 1 << PIN;	/* bit mask for the LED pin */

	GPIO_SET(mask);		/* on */
	await(1000000);		/* hold */
	GPIO_CLR(mask);		/* off */
}
/// Same as await() pubnub_res get() { return await(); }
/// Just wait for the transaction to end, don't get the /// outcome void wait() /*const*/ { await(); }
/*
 * Run the command line `str` as a child process and wait for it.
 *
 * Tokenizes `str` in place (words are NUL-terminated where they end);
 * '<file' and '>file' / '>>file' set stdin/stdout redirections rather
 * than becoming arguments.  argv[0] is `shellname`, argv[1] is the
 * command word.  `path` is built as "/usr/bin/" + command, so
 * path+9 is the bare command, path+4 is "/bin/command", and path is
 * "/usr/bin/command" — tried in that order by texec in the child.
 * Returns -1 when the line holds no command, otherwise the child's
 * status via await().
 *
 * NOTE(review): argv[] (100 entries) and path[] (100 bytes) are not
 * bounds-checked against long input — presumably fine for the callers
 * in this program; verify if exposed to arbitrary input.
 */
static int sys(char *str)
{
	char *s, *t;
	char *argv[100], path[100];
	char *inname, *outname;
	int append = 0;
	int wait_pid;
	int argc;

	if(debugflag)
		fprintf(diagfile, "%s\n", str);
	inname = NULL;
	outname = NULL;
	argv[0] = shellname;
	argc = 1;

	t = str;
	while( isspace((int)*t) )
		++t;
	/* split into words, peeling off redirections */
	while(*t) {
		if(*t == '<')
			inname = t+1;
		else if(*t == '>') {
			if(t[1] == '>') {
				append = YES;
				outname = t+2;
			} else {
				append = NO;
				outname = t+1;
			}
		} else
			argv[argc++] = t;
		while( !isspace((int)*t) && *t!='\0' )
			++t;
		if(*t) {
			*t++ = '\0';	/* terminate the word in place */
			while( isspace((int)*t) )
				++t;
		}
	}

	if(argc == 1)	/* no command */
		return(-1);
	argv[argc] = 0;

	/* path = "/usr/bin/" + command (9 chars of prefix) */
	s = path;
	t = "/usr/bin/";
	while(*t)
		*s++ = *t++;
	for(t = argv[1] ; (*s++ = *t++) ; )
		;

	if((wait_pid = fork()) == 0) {
		/* child: apply redirections, restore default SIGINT,
		 * then try PATH-less, /bin, /usr/bin in turn */
		if(inname)
			freopen(inname, "r", stdin);
		if(outname)
			freopen(outname, (append ? "a" : "w"), stdout);
		enbint(SIG_DFL);

		texec(path+9, argv);	/* command */
		texec(path+4, argv);	/* /bin/command */
		texec(path  , argv);	/* /usr/bin/command */

		fatal1("Cannot load %s",path+9);
	}

	return( await(wait_pid) );
}
/*
 * Pubnub callback-interface demo: subscribe on context `pbp` while
 * publishing from a second context `pbp_2` (a context cannot publish
 * while its own subscribe transaction is in flight), then collect and
 * print the received messages.  await()/start_await()/end_await() are
 * helpers that wait on the sample_callback completion signal.
 */
int main()
{
	char const *msg;
	enum pubnub_res res;
	struct UserData user_data;
	struct UserData user_data_2;
	char const *chan = "hello_world";
	pubnub_t *pbp = pubnub_alloc();
	pubnub_t *pbp_2 = pubnub_alloc();

	if (NULL == pbp) {
		printf("Failed to allocate Pubnub context!\n");
		return -1;
	}
	if (NULL == pbp_2) {
		printf("Failed to allocate Pubnub context!\n");
		return -1;
	}

	InitUserData(&user_data, pbp);
	InitUserData(&user_data_2, pbp_2);

	pubnub_init(pbp, "demo", "demo");
	pubnub_register_callback(pbp, sample_callback, &user_data);
	pubnub_init(pbp_2, "demo", "demo");
	pubnub_register_callback(pbp_2, sample_callback, &user_data_2);

	puts("-----------------------");
	puts("Subscribing...");
	puts("-----------------------");

	/* First subscribe, to get the time token */
	res = pubnub_subscribe(pbp, chan, NULL);
	if (res != PNR_STARTED) {
		printf("pubnub_subscribe() returned unexpected: %d\n", res);
		pubnub_free(pbp);
		pubnub_free(pbp_2);
		return -1;
	}

	puts("Await subscribe");
	res = await(&user_data);
	if (res == PNR_STARTED) {
		printf("await() returned unexpected: PNR_STARTED(%d)\n", res);
		pubnub_free(pbp);
		pubnub_free(pbp_2);
		return -1;
	}
	if (PNR_OK == res) {
		puts("Subscribed!");
	}
	else {
		printf("Subscribing failed with code: %d\n", res);
	}

	/* The "real" subscribe, with the just acquired time token */
	res = pubnub_subscribe(pbp, chan, NULL);
	if (res != PNR_STARTED) {
		printf("pubnub_subscribe() returned unexpected: %d\n", res);
		pubnub_free(pbp);
		pubnub_free(pbp_2);
		return -1;
	}

	/* Don't do "full" await here, because we didn't publish
	   anything yet! */
	start_await(&user_data);

	puts("-----------------------");
	puts("Publishing...");
	puts("-----------------------");
	/* Since the subscribe is ongoing in the `pbp` context, we
	   can't publish on it, so we use a different context to
	   publish */
	res = pubnub_publish(pbp_2, chan, "\"Hello world from subscribe-publish callback sample!\"");
	if (res != PNR_STARTED) {
		printf("pubnub_publish() returned unexpected: %d\n", res);
		pubnub_free(pbp);
		pubnub_free(pbp_2);
		return -1;
	}

	puts("Await publish");
	res = await(&user_data_2);
	if (res == PNR_STARTED) {
		printf("await() returned unexpected: PNR_STARTED(%d)\n", res);
		pubnub_free(pbp);
		pubnub_free(pbp_2);
		return -1;
	}
	if (PNR_OK == res) {
		printf("Published! Response from Pubnub: %s\n",
		       pubnub_last_publish_result(pbp_2));
	}
	else if (PNR_PUBLISH_FAILED == res) {
		printf("Published failed on Pubnub, description: %s\n",
		       pubnub_last_publish_result(pbp_2));
	}
	else {
		printf("Publishing failed with code: %d\n", res);
	}

	/* Don't need `pbp_2` no more */
	if (pubnub_free(pbp_2) != 0) {
		printf("Failed to free the Pubnub context `pbp_2`\n");
	}

	/* Now we await the subscribe on `pbp` */
	puts("Await subscribe");
	res = end_await(&user_data);
	if (res == PNR_STARTED) {
		printf("await() returned unexpected: PNR_STARTED(%d)\n", res);
		pubnub_free(pbp);
		return -1;
	}
	if (PNR_OK == res) {
		puts("Subscribed! Got messages:");
		/* drain and print every message fetched */
		for (;;) {
			msg = pubnub_get(pbp);
			if (NULL == msg) {
				break;
			}
			puts(msg);
		}
	}
	else {
		printf("Subscribing failed with code: %d\n", res);
	}

	/* We're done */
	if (pubnub_free(pbp) != 0) {
		printf("Failed to free the Pubnub context `pbp`\n");
	}

	puts("Pubnub subscribe-publish callback demo over.");

	return 0;
}
/*
 * Start/completion routine for disks
 *
 * Called with done == TRUE on command completion: handles retries
 * (bounded by DISK_RETRIES), bus resets, REQUEST SENSE decoding,
 * splitting of chained IOs, transfers larger than the HBA's
 * max_dma_data (tracked via io_residual), optional read/write
 * checksumming, and dequeuing of the next request.
 *
 * NOTE(review): the `start:` label targeted by the gotos — i.e. the
 * command-issue half of this routine — is not visible in this excerpt;
 * the body continues past the last line shown.  Also note `secno` is
 * declared both under #ifdef CHECKSUM and unconditionally below it —
 * a duplicate declaration if CHECKSUM is defined; verify.
 */
void scdisk_start(
	target_info_t	*tgt,
	boolean_t	done)
{
	register io_req_t	ior = tgt->ior;
#ifdef CHECKSUM
	register unsigned	secno;
#endif
	register io_req_t	rdone = NULL;
	register unsigned	part, secno;
	scsi_ret_t		ret;

	if (ior == 0)
		return;

	/* bad-block replacement in progress owns the target */
	if (tgt->flags & TGT_BBR_ACTIVE) {
		scdisk_bbr_start(tgt, done);
		return;
	}

	if (done) {
		unsigned int xferred;
		unsigned int max_dma_data;

		max_dma_data = scsi_softc[(unsigned char)tgt->masterno]->max_dma_data;

		/* see if we must retry */
		if ((tgt->done == SCSI_RET_RETRY) &&
		    ((ior->io_op & IO_INTERNAL) == 0)) {
			if(tgt->dev_info.disk.b.retry_count++ == DISK_RETRIES) {
				tgt->done = SCSI_RET_DEVICE_DOWN;
			} else {
				timeout((timeout_fcn_t)wakeup, tgt, hz);
				await(tgt);
				goto start;
			}
		}

		/* got a bus reset ? pifff.. */
		if ((tgt->done == (SCSI_RET_ABORTED|SCSI_RET_RETRY)) &&
		    ((ior->io_op & IO_INTERNAL) == 0)) {
			/* rewind any partial transfer, then reissue */
			if (xferred = ior->io_residual) {
				/*
				 * No special thing to do for
				 * chained IOs, should work as well
				 */
				ior->io_data -= xferred;
				ior->io_count += xferred;
				ior->io_recnum -= xferred / tgt->block_size;
				ior->io_residual = 0;
			}
			goto start;
		} else if ((tgt->cur_cmd == SCSI_CMD_REQUEST_SENSE) &&
			   (!rzpassthru(ior->io_unit))) {
			/*
			 * Quickly check for errors: if anything goes wrong
			 * we do a request sense, see if that is what we did.
			 */
			scsi_sense_data_t *sns;
			unsigned int blockno;
			char *outcome;

			ior->io_op = ior->io_temporary;	/* restore original op */
			sns = (scsi_sense_data_t *)tgt->cmd_ptr;
			/* locate the failing block: from the sense data if
			   valid, else recompute from the request */
			if (sns->addr_valid)
				blockno = sns->u.xtended.info0 << 24 |
					sns->u.xtended.info1 << 16 |
					sns->u.xtended.info2 << 8 |
					sns->u.xtended.info3;
			else {
				part = rzpartition(ior->io_unit);
				blockno = tgt->dev_info.disk.l.d_partitions[part].p_offset *
					(tgt->dev_info.disk.l.d_secsize / tgt->block_size);
				blockno += ior->io_recnum;
			}
			if ((blockno + btodb(ior->io_count + tgt->block_size - 1) >=
			     tgt->dev_info.disk.l.d_secperunit)) {
				ior->io_error = D_INVALID_RECNUM;
				ior->io_op |= IO_ERROR;
				outcome = "Unrecoverable";
			} else if (scsi_check_sense_data(tgt, sns)) {
				ior->io_error = 0;
				if ((tgt->done == SCSI_RET_RETRY) &&
				    ((ior->io_op & IO_INTERNAL) == 0)) {
					timeout((timeout_fcn_t)wakeup, tgt, hz);
					await(tgt);
					goto start;
				}
				outcome = "Recovered";
#if CHAINED_IOS
			} else if (ior->io_op & IO_CHAINED) {
				/*
				 * Since multiple IOs are chained, split them
				 * and restart prior to error handling
				 */
				simple_lock(&tgt->target_lock);
				split_io_reqs(ior);
				simple_unlock(&tgt->target_lock);
				ior->io_residual = 0;
				goto start;
#endif /* CHAINED_IOS */
			} else {
				outcome = "Unrecoverable";
				ior->io_error = D_IO_ERROR;
				ior->io_op |= IO_ERROR;
			}
			if ((tgt->flags & TGT_OPTIONAL_CMD) == 0) {
				printf("%s Error, rz%d: %s%s%d\n",
				       outcome,
				       tgt->target_id + (tgt->masterno * 8),
				       (ior->io_op & IO_READ) ? "Read" :
					((ior->io_op & IO_INTERNAL) ? "(command)" : "Write"),
				       " disk error, phys block no. ",
				       blockno);
				scsi_print_sense_data(sns);
				printf("\n");
#ifndef POWERMAC
				/*
				 * On fatal read/write errors try replacing the
				 * bad block. The bbr routine will return TRUE
				 * iff it took control over the target for all
				 * subsequent operations. In this event, the
				 * queue of requests is effectively frozen.
				 */
				if (ior->io_error &&
				    ((sns->error_class == SCSI_SNS_XTENDED_SENSE_DATA) &&
				     ((sns->u.xtended.sense_key == SCSI_SNS_HW_ERR) ||
				      (sns->u.xtended.sense_key == SCSI_SNS_MEDIUM_ERR))) &&
				    scdisk_bad_block_repl(tgt, blockno))
					return;
#endif
			}
		} else if ((tgt->done != SCSI_RET_SUCCESS) &&
#ifdef POWERMAC
			   (!rzpassthru(ior->io_unit) &&
			    (ior->io_op & IO_INTERNAL) == 0)) {
#else
			   !rzpassthru(ior->io_unit)) {
#endif
#if CHAINED_IOS
			if (ior->io_op & IO_CHAINED) {
				/*
				 * Since multiple IOs are chained, split them
				 * and restart prior to error handling
				 */
				simple_lock(&tgt->target_lock);
				split_io_reqs(ior);
				simple_unlock(&tgt->target_lock);
				ior->io_residual = 0;
				goto start;
			}
#endif /* CHAINED_IOS */
			/*
			 * See if we had errors
			 */
			if (tgt->done == SCSI_RET_NEED_SENSE) {
				/* issue REQUEST SENSE; resumes above when done */
				ior->io_temporary = ior->io_op;
				ior->io_op = IO_INTERNAL;
				scsi_request_sense(tgt, ior, 0);
				return;
			} else if (tgt->done == SCSI_RET_DEVICE_DOWN) {
				part = rzpartition(ior->io_unit);
				secno = ior->io_recnum;
				secno += tgt->dev_info.disk.l.d_partitions[part].p_offset *
					(tgt->dev_info.disk.l.d_secsize / tgt->block_size);
				secno += btodb(ior->io_count + tgt->block_size - 1);
				if (secno >= tgt->dev_info.disk.l.d_secperunit)
					ior->io_error = D_INVALID_RECNUM;
				else
					ior->io_error = D_DEVICE_DOWN;
				ior->io_op |= IO_ERROR;
			} else {
				printf("%s%x\n",
				       "?rz_disk Disk error, ret=x", tgt->done);
				ior->io_error = D_IO_ERROR;
				ior->io_op |= IO_ERROR;
			}
		} else if (ior->io_count > (xferred = max_dma_data)) {
			/*
			 * No errors.
			 * See if we requested more than the max
			 * (We use io_residual in a flip-side way here)
			 */
#if CHAINED_IOS
			if (ior->io_op & IO_CHAINED) {
				/* Should not happen since we checked max_data & max_segs before */
				panic("chained io to large: 1");
			}
#endif /* CHAINED_IOS */
			/* advance past what was transferred, reissue the rest */
			ior->io_residual += xferred;
			ior->io_count -= xferred;
			ior->io_data += xferred;
			ior->io_recnum += xferred / tgt->block_size;
			goto start;
		} else if (xferred = ior->io_residual) {
#if CHAINED_IOS
			if (ior->io_op & IO_CHAINED) {
				/* Should not happen since we checked max_data & max_segs before */
				panic("chained io to large: 2");
			}
#endif /* CHAINED_IOS */
			/* final piece done: restore the original request */
			ior->io_data -= xferred;
			ior->io_count += xferred;
			ior->io_recnum -= xferred / tgt->block_size;
			ior->io_residual = 0;
		}
		/* that's it */
#ifdef CHECKSUM
		if ((ior->io_op & IO_READ) &&
		    (ior->io_count < max_checksum_size)) {
			part = rzpartition(ior->io_unit);
			secno = ior->io_recnum +
				tgt->dev_info.disk.l.d_partitions[part].p_offset *
				(tgt->dev_info.disk.l.d_secsize / tgt->block_size);
			scdisk_bcheck(secno, ior->io_data, ior->io_count);
		}
#endif /* CHECKSUM */

		/* If this is a pass-through device, save the target result */
		if (rzpassthru(ior->io_unit))
			ior->io_error = tgt->done;

		/* dequeue next one */
		{
			io_req_t next;

			simple_lock(&tgt->target_lock);
			next = ior->io_next;
			tgt->ior = next;
			simple_unlock(&tgt->target_lock);
			if (next == 0) {
#if CHAINED_IOS
				if (ior->io_op & IO_CHAINED)
					chained_iodone(ior);
				else
#endif /* CHAINED_IOS */
					iodone(ior);
				return;
			}
			rdone = ior;
			ior = next;
		}
#ifdef CHECKSUM
		if (((ior->io_op & IO_READ) == 0) &&
		    (ior->io_count < max_checksum_size)) {
			part = rzpartition(ior->io_unit);
			secno = ior->io_recnum +
				tgt->dev_info.disk.l.d_partitions[part].p_offset *
				(tgt->dev_info.disk.l.d_secsize / tgt->block_size);
			scdisk_checksum(secno, ior->io_data, ior->io_count);
		}
#endif /* CHECKSUM */
	}
void
pxgstrf_mark_busy_descends(int pnum, int jcol, int *etree,
			   pxgstrf_shared_t *pxgstrf_shared,
			   int *bcol, int *lbusy)
{
/*
 * -- SuperLU MT routine (version 1.0) --
 * Univ. of California Berkeley, Xerox Palo Alto Research Center,
 * and Lawrence Berkeley National Lab.
 * August 15, 1997
 *
 * Purpose
 * =======
 *
 * Mark busy panels in local "lbusy" array, used for linear pipelining.
 *
 * When jcol begins, its busy descendant panels (if any) are bcol and
 * all the e-tree ancestors of bcol between bcol and jcol. This routine
 * marks those columns in the array lbusy, which is local to this
 * processor, to preserve a snapshot regardless of what the other
 * processors are doing meanwhile.
 *
 * Arguments
 * =========
 *
 * pnum  (input) int
 *       Processor number; used only in the DEBUG-level trace output.
 *
 * jcol  (input) int
 *       Current panel, with leading column jcol.
 *
 * etree (input) int*
 *       Elimination tree parent pointers.
 *
 * bcol  (input/output) int*
 *       Farthest busy descendant of jcol.
 *       On entry, it is the first column of the farthest busy panel.
 *       On exit, it may be adjusted to the first column of the
 *       farthest busy supernode.
 *
 * lbusy (input/output) int*
 *       Initially all -1, lbusy(r) = jcol means that r was busy
 *       at the beginning of computing jcol.
 *
 */
    GlobalLU_t *Glu = pxgstrf_shared->Glu;
    register int w, kcol, fsupc, bcol_reg;
    int *xsup;	/* NOTE: referenced through the SUPER_FSUPC macro below;
		 * do not remove as "unused". */

    /* Work on a local copy so concurrent updates of *bcol by other
     * processors cannot shift our marking window mid-flight. */
    bcol_reg = *bcol;

    if ( bcol_reg < jcol ) {
	/* -----------------------------------------------------------
	   Instead of waiting for the completion of "bcol", we can
	   pessimistically assume supno[bcol] == supno[bcol-1], hence
	   always mark as busy the supernode containing "bcol-1".
	   ----------------------------------------------------------- */
	if (pxgstrf_shared->pan_status[bcol_reg].type == RELAXED_SNODE) {
#if 0
	    if ( pxgstrf_shared->pan_status[bcol_reg].size < 0 )
		fsupc = bcol_reg + pxgstrf_shared->pan_status[bcol_reg].size;
	    else fsupc = bcol_reg;
#endif
	    /* A relaxed supernode starts at bcol_reg itself; mark the
	     * whole relaxed panel [fsupc, fsupc + size) as busy. */
	    fsupc = bcol_reg;
	    w = pxgstrf_shared->pan_status[fsupc].size;
	    bcol_reg += w;
	    for (kcol = fsupc; kcol < bcol_reg; ++kcol)
		lbusy[kcol] = jcol;
	} else {
	    /* Find leading column "fsupc" in the supernode that
	       contains column "bcol-1" */
#if 0
	    if ( pxgstrf_shared->spin_locks[bcol_reg] ) /* WORSE PERFORMANCE!! */
		await( &pxgstrf_shared->spin_locks[bcol_reg] );
#endif
	    /* Pessimistic snapshot: read supno[bcol_reg-1] without
	     * waiting for bcol_reg to finish (see comment above). */
	    xsup = Glu->xsup;
	    fsupc = SUPER_FSUPC ( Glu->supno[bcol_reg-1] );
	    for (kcol = fsupc; kcol < bcol_reg; ++kcol)
		lbusy[kcol] = jcol;
	}

#if ( DEBUGlevel>=1 )
	if (jcol >= LOCOL && jcol <= HICOL)
	    printf("(%d) mark_busy_descends[1] jcol %d, bcol_reg %d, fsupc %d\n",
		   pnum, jcol, bcol_reg, fsupc);
#endif

	/* Mark as busy all columns on the path between bcol_reg and jcol */
	for (kcol = bcol_reg; kcol < jcol; kcol = etree[kcol]) {
	    lbusy[kcol] = jcol;
	}

	/* INVARIANT: *bcol must be the first column of the farthest
	   busy supernode */
	*bcol = fsupc;

    } /* if bcol_reg < jcol */
}
/*
 * Block the calling thread on the combining-tree barrier.
 * Threads are grouped radix-at-a-time: ids [k*radix, (k+1)*radix)
 * all enter the tree at leaf k, then await() climbs from there.
 */
void tree_barrier_await(tree_barrier_t *b, int id, bool *threadSense)
{
    node_t *entry = b->leaf[id / b->radix];

    await(b, entry, threadSense);
}
/*
 * Daemonizer/supervisor entry point: sanitize file descriptors, parse
 * options, fork into the background, optionally wait on -w paths via
 * inotify, then either exec the command directly or supervise it in a
 * restart loop while forwarding signals.
 */
int main(int argc, char **argv)
{
    char *options, *path;
    int fd, inotify, option, pwd, waitargs;
    pid_t pid;
    time_t started;
    struct sigaction action;

    progname = argv[0];

    /* Redirect stdin from /dev/null. */
    if ((fd = open("/dev/null", O_RDWR)) < 0)
        error(EXIT_FAILURE, errno, "open /dev/null");
    if (fd != STDIN_FILENO)
        if ((dup2(fd, STDIN_FILENO)) < 0)
            error(EXIT_FAILURE, errno, "dup2");

    /* Redirect stdout and/or stderr to /dev/null if closed. */
    /* dup() returns the lowest free fd, so this fills any hole at
     * fds 1-2 with the /dev/null descriptor. */
    while (fd <= STDERR_FILENO)
        if ((fd = dup(fd)) < 0)
            error(EXIT_FAILURE, errno, "dup");
    close(fd); /* The loop overshoots by one descriptor. */

    /* Close all file descriptors apart from stdin, stdout and stderr. */
    fd = getdtablesize() - 1;
    while (fd > STDERR_FILENO)
        close(fd--);

    /* First getopt() pass: record options; -w args are handled later,
     * after daemonization, via a second pass over argv. */
    options = "+:cfl:p:ru:w:", waitargs = 0;
    while ((option = getopt(argc, argv, options)) > 0)
        switch (option) {
        case 'c':
            command.chdir = 1;
            break;
        case 'f':
            /* On by default; ignored for compatibility with BSD daemon(1). */
            break;
        case 'l':
            logger_setup(optarg);
            break;
        case 'p':
            pidfile_open(optarg);
            break;
        case 'r':
            command.restart = 1;
            break;
        case 'u':
            user_setup(optarg);
            break;
        case 'w':
            waitargs++; /* Count -w args; paths are re-read in pass two. */
            break;
        default:
            usage(argv[0]);
        }
    if (argc <= optind)
        usage(argv[0]); /* No command to run was given. */

    /* Daemonize: the parent exits, the child continues in a new session. */
    switch (fork()) {
    case -1:
        error(EXIT_FAILURE, errno, "fork");
    case 0:
        setsid(); /* This should work after forking; ignore errors anyway. */
        break;
    default:
        _exit(EXIT_SUCCESS); /* Don't delete pidfile in atexit() handler. */
    }

    logger_start();
    pidfile_write();

    /* We can handle all -w command line arguments now we're daemonized. */
    if (waitargs > 0) {
        if ((inotify = inotify_init1(IN_CLOEXEC)) < 0)
            error(EXIT_FAILURE, errno, "inotify_init1");
        /* Open the working directory so we can restore it after each
           await(). */
        if ((pwd = open(".", O_RDONLY | O_DIRECTORY)) < 0)
            error(EXIT_FAILURE, errno, "open pwd");
        optind = 0; /* Need to reset optind to reprocess arguments. */
        while ((option = getopt(argc, argv, options)) > 0)
            if (option == 'w') {
                /* Copy optarg: await() presumably may modify the path
                 * (e.g. walking components) — TODO confirm. */
                if (!(path = strdup(optarg)))
                    error(EXIT_FAILURE, errno, "strdup");
                await(path, inotify, 0);
                free(path);
                /* NOTE(review): fchdir() return value is ignored here;
                 * a failure would leave us in the wrong directory. */
                fchdir(pwd);
            }
        close(inotify);
        close(pwd);
    }

    if (command.chdir && chdir("/") < 0)
        error(EXIT_FAILURE, errno, "chdir");

    command.argv = argv + optind;
    if (!command.restart && !pidfile.path) {
        /* We don't need to supervise in this case, so just exec. */
        if (command.gid > 0 && setgid(command.gid) < 0)
            error(EXIT_FAILURE, errno, "setgid");
        if (command.uid > 0 && setuid(command.uid) < 0)
            error(EXIT_FAILURE, errno, "setuid");
        execvp(command.argv[0], command.argv);
        error(EXIT_FAILURE, errno, "exec");
    }

    /* Handle and pass on HUP, INT, TERM, USR1, USR2 signals. */
    sigfillset(&action.sa_mask);
    action.sa_flags = SA_RESTART;
    action.sa_handler = handler;
    sigaction(SIGHUP, &action, NULL);
    sigaction(SIGINT, &action, NULL);
    sigaction(SIGTERM, &action, NULL);
    sigaction(SIGUSR1, &action, NULL);
    sigaction(SIGUSR2, &action, NULL);

    /* Supervision loop: run the child, wait for it, and restart it if
     * -r was given — unless it dies too quickly. */
    do {
        command.killed = 0; /* Have we signalled the child? */
        switch (command.pid = fork()) {
        case -1:
            error(EXIT_FAILURE, errno, "fork");
        case 0:
            /* Drop group before user so setgid still has privilege. */
            if (command.gid > 0 && setgid(command.gid) < 0)
                error(EXIT_FAILURE, errno, "setgid");
            if (command.uid > 0 && setuid(command.uid) < 0)
                error(EXIT_FAILURE, errno, "setuid");
            setsid(); /* This should work after forking; ignore errors anyway. */
            execvp(command.argv[0], command.argv);
            error(EXIT_FAILURE, errno, "exec");
        }
        started = time(NULL);
        /* Reap until our child exits; retry wait() on EINTR. */
        while (pid = wait(NULL), pid != (pid_t) command.pid)
            if (pid < 0 && errno != EINTR)
                error(EXIT_FAILURE, errno, "wait");
        /* Try to avoid restarting a crashing command in a tight loop. */
        if (command.restart && !command.killed && time(NULL) < started + 5)
            error(EXIT_FAILURE, 0, "Child died within 5 seconds: not restarting");
    } while (command.restart);

    return EXIT_SUCCESS;
}
// Runs `path` with `argv` as a subprocess with stdin redirected to
// /dev/null and stdout/stderr captured via pipes.
//
// Returns a future with the subprocess's stdout on success; fails the
// future if the subprocess could not be launched, could not be reaped,
// exited non-zero, or its stdout could not be read.
static Future<string> launch(
    const string& path,
    const vector<string>& argv)
{
  Try<Subprocess> s = subprocess(
      path,
      argv,
      Subprocess::PATH("/dev/null"),
      Subprocess::PIPE(),
      Subprocess::PIPE());

  // Human-readable rendering of the invocation, used in error messages.
  string command = strings::join(", ", path, strings::join(", ", argv));

  if (s.isError()) {
    return Failure(
        "Failed to execute the subprocess '" + command + "': " + s.error());
  }

  // Wait for all three futures (exit status, stdout, stderr) to
  // transition out of pending before inspecting any of them.
  return await(
      s.get().status(),
      process::io::read(s.get().out().get()),
      process::io::read(s.get().err().get()))
    .then([command](const tuple<
        Future<Option<int>>,
        Future<string>,
        Future<string>>& t) -> Future<string> {
      // NOTE: `await` guarantees these futures are no longer pending,
      // but each may still be failed or discarded, so every one must
      // be checked with `isReady()` before calling `get()`.
      const Future<Option<int>>& status = std::get<0>(t);
      if (!status.isReady()) {
        return Failure(
            "Failed to get the exit status of the subprocess: " +
            (status.isFailed() ? status.failure() : "discarded"));
      }

      if (status->isNone()) {
        return Failure("Failed to reap the subprocess");
      }

      if (status->get() != 0) {
        const Future<string>& error = std::get<2>(t);
        if (!error.isReady()) {
          // BUG FIX: previously this branch called `error.get()` even
          // though the future is not ready — `get()` on a failed or
          // discarded future is invalid. Report why stderr could not
          // be read instead.
          return Failure(
              "Unexpected result from the subprocess: " +
              WSTRINGIFY(status->get()) + "; failed to read stderr: " +
              (error.isFailed() ? error.failure() : "discarded"));
        }

        return Failure(
            "Subprocess '" + command + "' failed: " + error.get());
      }

      const Future<string>& output = std::get<1>(t);
      if (!output.isReady()) {
        return Failure(
            "Failed to read stdout from '" + command + "': " +
            (output.isFailed() ? output.failure() : "discarded"));
      }

      return output;
    });
}