int io_start_tls(struct io *io, void *ssl) { int mode; mode = io->flags & IO_RW; if (mode == 0 || mode == IO_RW) errx(1, "io_start_tls(): full-duplex or unset"); if (io->ssl) errx(1, "io_start_tls(): SSL already started"); io->ssl = ssl; if (SSL_set_fd(io->ssl, io->sock) == 0) { ssl_error("io_start_ssl:SSL_set_fd"); return (-1); } if (mode == IO_WRITE) { io->state = IO_STATE_CONNECT_SSL; SSL_set_connect_state(io->ssl); io_reset(io, EV_WRITE, io_dispatch_connect_ssl); } else { io->state = IO_STATE_ACCEPT_SSL; SSL_set_accept_state(io->ssl); io_reset(io, EV_READ, io_dispatch_accept_ssl); } return (0); }
/* * Setup the necessary events as required by the current io state, * honouring duplex mode and i/o pauses. */ void io_reload(struct io *io) { short events; /* io will be reloaded at release time */ if (io->flags & IO_HELD) return; #ifdef IO_SSL if (io->ssl) { io_reload_ssl(io); return; } #endif io_debug("io_reload(%p)\n", io); events = 0; if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) events = EV_READ; if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io)) events |= EV_WRITE; io_reset(io, events, io_dispatch); }
int io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa) { int sock, errno_save; if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1) goto fail; io_set_nonblocking(sock); io_set_nolinger(sock); if (bsa && bind(sock, bsa, bsa->sa_len) == -1) goto fail; if (connect(sock, sa, sa->sa_len) == -1) if (errno != EINPROGRESS) goto fail; io->sock = sock; io_reset(io, EV_WRITE, io_dispatch_connect); return (sock); fail: if (sock != -1) { errno_save = errno; close(sock); errno = errno_save; io->error = strerror(errno); } return (-1); }
// ----------------------------------------------------------------------- // software (MCL) and hardware (CP 'CLEAR') reset void cpu_reset(int hw) { if (hw) { for (int i=0 ; i<R_MAX ; i++) { regs[i] = 0; } } else { regs[0] = 0; regs[R_SR] = 0; } int_update_mask(0); int_clear_all(); cpu_mod_off(); mem_reset(); // TODO: move this before CPU clear routine // I/O reset should return when we're sure that I/O won't change CPU state (backlogged interrupts, memory writes, ...) // this needs MX reset interrupt to become async io_reset(); // TODO: state = STOP, WAIT=0, jakieś inne rejestry? // call even if logging is disabled - user may enable it later // and we still want to know if we're running a known OS log_check_os(); log_reset_process(); log_intlevel_reset(); log_syscall_reset(); }
void io_reload_ssl(struct io *io) { short ev = 0; void (*dispatch)(int, short, void*) = NULL; switch (io->state) { case IO_STATE_CONNECT_SSL: ev = EV_WRITE; dispatch = io_dispatch_connect_ssl; break; case IO_STATE_ACCEPT_SSL: ev = EV_READ; dispatch = io_dispatch_accept_ssl; break; case IO_STATE_UP: ev = 0; if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) { ev = EV_READ; dispatch = io_dispatch_read_ssl; } else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io)) { ev = EV_WRITE; dispatch = io_dispatch_write_ssl; } if (!ev) return; /* paused */ break; default: errx(1, "io_reload_ssl(): bad state"); } io_reset(io, ev, dispatch); }
/*
 * libevent callback: flush buffered output to the SSL connection.
 * Handles SSL renegotiation (WANT_READ while writing), disconnects and
 * errors, and fires IO_LOWAT when the output queue crosses below the
 * low-water mark.
 */
void io_dispatch_write_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;
	size_t		 w2, w;

	io_frame_enter("io_dispatch_write_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

	/* remember bytes queued before the write for the lowat check below */
	w = io_queued(io);
	switch ((n = iobuf_write_ssl(io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		/* SSL renegotiation: wait for readability to finish the write */
		io_reset(io, EV_READ, io_dispatch_write_ssl);
		break;
	case IOBUF_WANT_WRITE:
		io_reset(io, EV_WRITE, io_dispatch_write_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* save errno first: strerror() may clobber it */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_write_ssl:SSL_write");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_write_ssl(...) -> w=%d\n", n);
		/* report when the queue drops from above to at/below lowat */
		w2 = io_queued(io);
		if (w > io->lowat && w2 <= io->lowat)
			io_callback(io, IO_LOWAT);
		break;
	}

leave:
	io_frame_leave(io);
}
/*
 * libevent callback: read available data from the SSL connection into
 * the input buffer and notify the user via IO_DATAIN.  Loops while SSL
 * still holds decrypted plaintext, as long as the io is still current
 * and in reading mode after the callback returns.
 */
void io_dispatch_read_ssl(int fd, short event, void *humppa)
{
	struct io	*io = humppa;
	int		 n, saved_errno;

	io_frame_enter("io_dispatch_read_ssl", io, event);

	if (event == EV_TIMEOUT) {
		io_callback(io, IO_TIMEOUT);
		goto leave;
	}

again:
	iobuf_normalize(&io->iobuf);
	switch ((n = iobuf_read_ssl(&io->iobuf, (SSL*)io->ssl))) {
	case IOBUF_WANT_READ:
		io_reset(io, EV_READ, io_dispatch_read_ssl);
		break;
	case IOBUF_WANT_WRITE:
		/* SSL renegotiation: wait for writability to finish the read */
		io_reset(io, EV_WRITE, io_dispatch_read_ssl);
		break;
	case IOBUF_CLOSED:
		io_callback(io, IO_DISCONNECTED);
		break;
	case IOBUF_ERROR:
		/* save errno first: strerror() may clobber it */
		saved_errno = errno;
		io->error = strerror(errno);
		errno = saved_errno;
		io_callback(io, IO_ERROR);
		break;
	case IOBUF_SSLERROR:
		io->error = io_ssl_error();
		ssl_error("io_dispatch_read_ssl:SSL_read");
		io_callback(io, IO_ERROR);
		break;
	default:
		io_debug("io_dispatch_read_ssl(...) -> r=%d\n", n);
		io_callback(io, IO_DATAIN);
		/* drain plaintext SSL already decrypted, unless the callback
		   released or switched the current io */
		if (current == io && IO_READING(io) && SSL_pending(io->ssl))
			goto again;
	}

leave:
	io_frame_leave(io);
}
/*
 * Perform a subsystem reset: drop pending external interrupt
 * conditions, then reset the I/O subsystem.  CPU state is untouched.
 */
void subsystem_reset (void)
{
	/* Clear pending external interrupts */
	OFF_IC_SERVSIG;
	OFF_IC_INTKEY;

	/* Reset the I/O subsystem */
	io_reset ();
}
/*
 * Shut down the nRF24L01 radio and release its SPI/GPIO resources.
 * Always returns 0.
 */
int8_t nrf24l01_deinit(int8_t spi_fd)
{
	int config;

	disable();

	/* Power down the radio: clear the PWR_UP bit in CONFIG */
	config = nrf24reg_read(spi_fd, NRF24_CONFIG);
	outr(spi_fd, NRF24_CONFIG, config & ~NRF24_CFG_PWR_UP);

	/* Deinit SPI and GPIO */
	io_reset(spi_fd);

	return 0;
}
/*
 * Re-position the scanner at BASIC line "linenum" by restarting the
 * tokenizer from the beginning of the program stream and searching
 * forward.  If the line does not exist, restore the original stream
 * position and emit a warning.
 */
static void jump_linenum (int linenum)
{
	FILE *original;
	int finished;

	/* Not only does this verify whether the scanner finished, if it has
	   finished, it additionally closes the stream. */
	finished = tokenizer_finished();

	/* We save this copy in case the scanner wasn't finished. */
	original = io_handle();

	/* Start a new scanner from the beginning of the file. */
	tokenizer_init(io_file());
	reset(T_ERROR);
	io_reset();

	/* Search for linenum. */
	find_linenum(linenum);

	/* If the search ended at EOF, linenum could not be found! */
	if (tokenizer_finished()) {
		/* NOTE(review): dprintf() appears to take (fmt, severity, ...)
		   with the format string first — confirm argument order against
		   its declaration */
		dprintf( "*warning: could not jump to `%d'\n", E_WARNING, linenum);

		/* Set back to original stream */
		io_set(io_file(), original);

		/* Prepare scanner to continue. */
		if (!finished) {
			reset(T_NUMBER);
			io_reset();
			io_next();
		}
	}
}
void io_dispatch_connect_ssl(int fd, short event, void *humppa) { struct io *io = humppa; int e, ret; io_frame_enter("io_dispatch_connect_ssl", io, event); if (event == EV_TIMEOUT) { io_callback(io, IO_TIMEOUT); goto leave; } if ((ret = SSL_connect(io->ssl)) > 0) { io->state = IO_STATE_UP; io_callback(io, IO_TLSREADY); goto leave; } switch ((e = SSL_get_error(io->ssl, ret))) { case SSL_ERROR_WANT_READ: io_reset(io, EV_READ, io_dispatch_connect_ssl); break; case SSL_ERROR_WANT_WRITE: io_reset(io, EV_WRITE, io_dispatch_connect_ssl); break; default: io->error = io_ssl_error(); ssl_error("io_dispatch_connect_ssl:SSL_connect"); io_callback(io, IO_TLSERROR); break; } leave: io_frame_leave(io); }
/*
 * Reset the whole emulated Supervision machine: memory map, I/O, GPU,
 * timer, controls and interrupts, then the 6502 core itself.
 */
void supervision_reset(void)
{
	//fprintf(log_get(), "supervision: reset\n");
	memorymap_reset();
	io_reset();
	gpu_reset();
	timer_reset();
	controls_reset();
	/*sound_reset();*/
	interrupts_reset();
	/* reset the CPU last, once all subsystems are in a known state */
	Reset6502(&m6502_registers);
}
static int modbus_write_file(struct modbus_instance *instance, uint16_t filenum, uint16_t address, uint16_t count, const uint8_t *data) { uint16_t start_address; bool found = false; if (filenum == 0x0001) { start_address = IOSLOT_START_ADDRESS; found = true; } else if (filenum == 0x0002) { start_address = PROGRAM_START_ADDRESS; found = true; } if (found) { uint16_t i; uint16_t j; config_lock(); for (i = 0, j = 0; i < count; ++i, ++address, j += 2) { uint8_t w[2]; w[0] = data[j + 1]; w[1] = data[j]; config_write(start_address + (address << 1), w, 2); } config_unlock(); if (filenum == 0x0001) { io_reset(); } else if (filenum == 0x0002) { program_reset(); } MODBUS_RETURN(instance, MODBUS_SUCCESS); } MODBUS_RETURN(instance, MODBUS_BAD_PARAMS); }
/*
 * Perform a subsystem reset: drop pending external interrupt
 * conditions, then reset the I/O subsystem.
 */
void subsystem_reset (void)
{
	/* Perform subsystem reset
	 *
	 * GA22-7000-10 IBM System/370 Principles of Operation, Chapter 4.
	 * Control, Subsystem Reset, p. 4-34
	 * SA22-7085-00 IBM System/370 Extended Architecture Principles of
	 * Operation, Chapter 4. Control, Subsystem Reset,
	 * p. 4-28
	 * SA22-7832-09 z/Architecture Principles of Operation, Chapter 4.
	 * Control, Subsystem Reset, p. 4-57
	 */

	/* Clear pending external interrupts */
	OFF_IC_SERVSIG;
	OFF_IC_INTKEY;

	/* Reset the I/O subsystem; the interrupt lock is dropped around the
	   call — presumably so io_reset() can acquire it itself (confirm
	   against io_reset()'s locking contract) */
	RELEASE_INTLOCK(NULL);
	io_reset ();
	OBTAIN_INTLOCK(NULL);
}
/*
 * Machine reset handler for the exelv driver: reset the TMS3556 video
 * controller, then the I/O subsystem.
 */
static void machine_reset_exelv(void)
{
	tms3556_reset();
	io_reset();
}
/*-------------------------------------------------------------------*/
/* Perform system-reset-normal (clear==0) or system-reset-clear      */
/* (clear!=0) on the configuration containing CPU "cpu".             */
/* Returns 0 on success, -1 if any individual CPU reset failed.      */
/*-------------------------------------------------------------------*/
int ARCH_DEP(system_reset) (int cpu, int clear)
{
    int rc = 0;
    REGS *regs;

    /* Configure the cpu if it is not online */
    if (!IS_CPU_ONLINE(cpu))
    {
        if (configure_cpu(cpu) != 0)
        {
            /* ZZ FIXME: we should probably present a machine-check
               if we encounter any errors during the reset (rc != 0) */
            return -1;
        }
        ASSERT(IS_CPU_ONLINE(cpu));
    }
    regs = sysblk.regs[cpu];

    HDC1(debug_cpu_state, regs);

    /* Perform system-reset-normal or system-reset-clear function */
    if (!clear)
    {
        /* Reset external interrupts */
        OFF_IC_SERVSIG;
        OFF_IC_INTKEY;

        /* Reset all CPUs in the configuration */
        /* NOTE: this loop deliberately reuses the "cpu" parameter as
           its index; the parameter's original value is not needed
           afterwards */
        for (cpu = 0; cpu < MAX_CPU; cpu++)
            if (IS_CPU_ONLINE(cpu))
                if (ARCH_DEP(cpu_reset) (sysblk.regs[cpu]))
                    rc = -1;

        /* Perform I/O subsystem reset */
        io_reset ();
    }
    else
    {
        /* Reset external interrupts */
        OFF_IC_SERVSIG;
        OFF_IC_INTKEY;

        /* Reset all CPUs in the configuration */
        for (cpu = 0; cpu < MAX_CPU; cpu++)
        {
            if (IS_CPU_ONLINE(cpu))
            {
                regs=sysblk.regs[cpu];

                if (ARCH_DEP(initial_cpu_reset) (regs))
                {
                    rc = -1;
                }

                /* Clear all the registers (AR, GPR, FPR, VR)
                   as part of the CPU CLEAR RESET operation */
                memset (regs->ar,0,sizeof(regs->ar));
                memset (regs->gr,0,sizeof(regs->gr));
                memset (regs->fpr,0,sizeof(regs->fpr));
#if defined(_FEATURE_VECTOR_FACILITY)
                memset (regs->vf->vr,0,sizeof(regs->vf->vr));
#endif /*defined(_FEATURE_VECTOR_FACILITY)*/
            }
        }

        /* Perform I/O subsystem reset */
        io_reset ();

        /* Clear storage */
        sysblk.main_clear = sysblk.xpnd_clear = 0;
        storage_clear();
        xstorage_clear();
    }

#if defined(FEATURE_CONFIGURATION_TOPOLOGY_FACILITY)
    /* Clear topology-change-report-pending condition */
    sysblk.topchnge = 0;
#endif /*defined(FEATURE_CONFIGURATION_TOPOLOGY_FACILITY)*/

    /* ZZ FIXME: we should probably present a machine-check
       if we encounter any errors during the reset (rc != 0) */

    return rc;
} /* end function system_reset */
void SIM_start(void){ sky_pref_t *pref; /* get the current preference for simulator */ pref = get_skyeye_pref(); skyeye_config_t* config = get_current_config(); if(pref->conf_filename) skyeye_read_config(pref->conf_filename); if(config->arch == NULL){ skyeye_log(Error_log, __FUNCTION__, "Should provide valid arch option in your config file.\n"); return; } generic_arch_t *arch_instance = get_arch_instance(config->arch->arch_name); if(config->mach == NULL){ skyeye_log(Error_log, __FUNCTION__, "Should provide valid mach option in your config file.\n"); return; } arch_instance->init(); /* reset all the memory */ mem_reset(); config->mach->mach_init(arch_instance, config->mach); /* reset current arch_instanc */ arch_instance->reset(); /* reset all the values of mach */ io_reset(arch_instance); if(pref->exec_file){ exception_t ret; /* * If no relocation is indicated, we will load elf file by * virtual address */ if((((~pref->exec_load_mask) & pref->exec_load_base) == 0x0) && (arch_instance->mmu_write != NULL)) ret = load_elf(pref->exec_file, Virt_addr); else ret = load_elf(pref->exec_file, Phys_addr); } /* set pc from config */ generic_address_t pc = (config->start_address & pref->exec_load_mask)|pref->exec_load_base; skyeye_log(Info_log, __FUNCTION__, "Set PC to the address 0x%x\n", pc); arch_instance->set_pc(pc); /* Call bootmach callback */ exec_callback(Bootmach_callback, arch_instance); pthread_t id; create_thread(skyeye_loop, arch_instance, &id); /* * At this time, if we set conf file, then we parse it * Or do it later. */ /* if(pref->conf_filename) skyeye_read_config(pref->conf_filename); */ #if 0 else{ /* try to run in batch mode */ if(skyeye_read_config(pref->conf_filename) == No_exp){
/*-------------------------------------------------------------------*/
/* Perform system-reset-normal (clear==0) or system-reset-clear      */
/* (clear!=0) on the configuration containing CPU "cpu".             */
/* Returns 0 on success, or the first non-zero CPU-reset return      */
/* code encountered.                                                 */
/*-------------------------------------------------------------------*/
int ARCH_DEP(system_reset) (int cpu, int clear)
{
    int rc1 = 0, rc;
    int n;
    REGS *regs;

    /* Configure the cpu if it is not online (configure implies init reset) */
    if (!IS_CPU_ONLINE(cpu))
        if ( (rc = configure_cpu(cpu)) )
            return rc;

    HDC1(debug_cpu_state, sysblk.regs[cpu]);

    /* Reset external interrupts */
    OFF_IC_SERVSIG;
    OFF_IC_INTKEY;

    /* Perform system-reset-normal or system-reset-clear function */
    if (clear)
    {
        /* Reset all CPUs in the configuration */
        for (n = 0; n < sysblk.maxcpu; n++)
            if (IS_CPU_ONLINE(n))
            {
                regs=sysblk.regs[n];

                if ((rc = ARCH_DEP(initial_cpu_reset) (regs)) )
                    rc1 = rc;
                else
                {
                    /* Clear all the registers (AR, GPR, FPR, VR)
                       as part of the CPU CLEAR RESET operation */
                    memset (regs->ar, 0, sizeof(regs->ar));
                    memset (regs->gr, 0, sizeof(regs->gr));
                    memset (regs->fpr, 0, sizeof(regs->fpr));
#if defined(_FEATURE_VECTOR_FACILITY)
                    memset (regs->vf->vr, 0, sizeof(regs->vf->vr));
#endif /*defined(_FEATURE_VECTOR_FACILITY)*/
                }
            }

        sysblk.program_parameter = 0;

        /* Clear storage */
        sysblk.main_clear = sysblk.xpnd_clear = 0;
        storage_clear();
        xstorage_clear();
    }
    else
    {
        /* Reset all CPUs in the configuration */
        for (n = 0; n < sysblk.maxcpu; n++)
            if (IS_CPU_ONLINE(n))
            {
                regs=sysblk.regs[n];

                if(n == cpu)
                {
                    /* Perform initial reset on the IPL CPU */
                    if ( (rc = ARCH_DEP(initial_cpu_reset) (regs)) )
                        rc1 = rc;
                }
                else
                {
                    /* Perform reset on the other CPUs */
                    if ( (rc = ARCH_DEP(cpu_reset) (regs)) )
                        rc1 = rc;
                }
            }
    }

    /* Perform I/O subsystem reset */
    io_reset ();

#if defined(FEATURE_CONFIGURATION_TOPOLOGY_FACILITY)
    /* Clear topology-change-report-pending condition */
    sysblk.topchnge = 0;
#endif /*defined(FEATURE_CONFIGURATION_TOPOLOGY_FACILITY)*/

    /* set default system state to reset */
    sysblk.sys_reset = TRUE;

    return rc1;
} /* end function system_reset */
/*
 * Timer/signal trampoline: discards its argument and resets the I/O
 * subsystem.
 */
static void io_reset_timer(int dummy)
{
	(void) dummy;   /* unused; required by the handler signature */

	io_reset();
}