/*
 * Bring up the keyboard subsystem: the server side first, then the
 * client side.  Either failure is fatal and terminates dosemu with
 * exit code 19.
 */
void keyb_init(void)
{
	if (keyb_server_init() == 0) {
		error("can't init keyboard server\n");
		leavedos(19);
	}
	if (keyb_client_init() == 0) {
		error("can't open keyboard client\n");
		leavedos(19);
	}
}
/*
 * Query the controlling terminal for its size and return it in *co
 * (columns) and *li (lines).  If the size cannot be determined, the
 * configured CO/LI defaults are used; a terminal larger than the
 * supported maximum aborts dosemu (exit 0x63).
 * The parameter `i' is unused here.
 */
void gettermcap(int i, int *co, int *li)
{
  struct winsize ws;		/* result buffer for TIOCGWINSZ */

  *li = *co = 0;
  if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) >= 0) {
    *li = ws.ws_row;
    *co = ws.ws_col;
  }

  if (*co > MAX_COLUMNS || *li > MAX_LINES) {
    error("Screen size is too large: %dx%d, max is %dx%d\n",
	  *co, *li, MAX_COLUMNS, MAX_LINES);
    leavedos(0x63);
  }

  if (*li == 0 || *co == 0) {
    /* ioctl failed or reported a zero dimension: fall back to defaults */
    error("unknown window sizes li=%d  co=%d, setting to 80x25\n", *li, *co);
    *li = LI;
    *co = CO;
  } else
    v_printf("VID: Setting windows size to li=%d, co=%d\n", *li, *co);
}
/*
 * Validate a coopth thread id.  A tid outside [0, coopth_num) is a
 * programming error and aborts dosemu with exit code 2.
 */
static void check_tid(int tid)
{
	int in_range = (tid >= 0 && tid < coopth_num);
	if (!in_range) {
		dosemu_error("Wrong tid\n");
		leavedos(2);
	}
}
/*
 * Abort dosemu if the current coopth thread is not attached; the
 * calling operation is only valid for attached threads.
 */
static void ensure_attached(void)
{
	struct coopth_thrdata_t *thdata = co_get_data(co_current());
	if (thdata->attached)
		return;
	dosemu_error("Not allowed for detached thread\n");
	leavedos(2);
}
/*
 * Abort dosemu if the current coopth thread is not attached,
 * reporting the offending thread's tid and name.
 */
static void ensure_attached(void)
{
	struct coopth_thrdata_t *thdata = co_get_data(co_current(co_handle));
	if (thdata->attached)
		return;
	dosemu_error("Not allowed for detached thread %i, %s\n",
		     *thdata->tid, coopthreads[*thdata->tid].name);
	leavedos(2);
}
/*
 * Record that privileges are being raised in the saved-status slot.
 * The slot must currently hold PRIV_MAGIC (i.e. be unused); anything
 * else means the push/pop pairing got out of sync, which is fatal.
 */
static __inline__ void push_priv(saved_priv_status *privs)
{
	int slot_free = (privs != NULL && *privs == PRIV_MAGIC);
	if (!slot_free) {
		error("Aiiiee... not in-sync saved priv status on push_priv\n");
		leavedos(99);
	}
	*privs = PRIVS_ARE_ON;
#ifdef PRIV_TESTING
	c_printf("PRIV: pushing %d privs_ptr=%p\n", *privs, privs);
#endif
}
/*
 * HLT handler for coopth threads: resumes the thread whose descriptor
 * sits at index `offs' in the coopthreads array (`arg' is the array
 * base, so the HLT offset doubles as the array index).
 * A detached thread reaching this HLT is a usage error and is fatal.
 */
static void coopth_hlt(Bit16u offs, void *arg)
{
  struct coopth_t *thr = (struct coopth_t *)arg + offs;
  struct coopth_per_thread_t *pth = current_thr(thr);
  if (!pth->data.attached) {
    /* someone used coopth_unsafe_detach()? */
    error("HLT on detached thread\n");
    leavedos(2);
    return;
  }
  thread_run(thr, pth);
}
/*
 * Take the text-render lock.  Must be called in text mode and only
 * once (nested locking is an error, exit 95).  The actual render lock
 * and destination image are acquired lazily, only the first time.
 */
static void render_text_lock(void)
{
	if (!render_text || text_locked) {
		dosemu_error("render not in text mode!\n");
		leavedos(95);
		return;
	}
	text_locked++;
	if (text_really_locked)
		return;
	dst_image = render_lock();
	text_really_locked = 1;
}
/*
 * Allocate `size' bytes for the current builtin from the low-memory
 * heap `mp'.  OOM is fatal (exit 86).  Requests above 1K only produce
 * a warning — the allocation still succeeds and is returned.
 * Ownership: presumably freed via the matching lowmem free routine
 * elsewhere in this module — confirm against callers.
 */
char * lowmem_alloc(int size)
{
  char *ptr = smalloc(&mp, size);
  if (!ptr) {
    error("builtin %s OOM\n", builtin_name);
    leavedos(86);
  }
  if (size > 1024) {
    /* well, the lowmem heap is limited, let's be polite! */
    error("builtin %s requests too much of a heap: 0x%x\n",
	  builtin_name, size);
  }
  return ptr;
}
/*
 * Return the currently active per-thread structure of coopth thread
 * `thr'.  `cur_thr' is a 1-based count of nested invocations; the
 * topmost one is the active one.  Scheduling to a thread with no
 * active invocations is fatal (exit 2).
 */
static struct coopth_per_thread_t *current_thr(struct coopth_t *thr)
{
  struct coopth_per_thread_t *pth;
  assert(thr - coopthreads < MAX_COOPTHREADS);
  if (!thr->cur_thr) {
    error("coopth: schedule to inactive thread\n");
    leavedos(2);
    return NULL;
  }
  pth = get_pth(thr, thr->cur_thr - 1);
  /* it must be running */
  assert(pth->st.state > COOPTHS_NONE);
  return pth;
}
/*
 * Pop a saved privilege status: returns the saved value and resets
 * the slot to PRIV_MAGIC so that a stale or double pop is detected.
 * A NULL or already-popped slot means the push/pop pairing got out of
 * sync, which is fatal (exit 99).
 */
static __inline__ int pop_priv(saved_priv_status *privs)
{
	int ret;
	if (!privs || *privs == PRIV_MAGIC) {
		error("Aiiiee... not in-sync saved priv status on pop_priv\n");
		leavedos(99);
	}
#ifdef PRIV_TESTING
	/* typo fixed: "poping" -> "popping" */
	c_printf("PRIV: popping %d privs_ptr=%p\n", *privs, privs);
#endif
	ret = (int)*privs;
	*privs = PRIV_MAGIC;	/* mark the slot consumed */
	return ret;
}
/*
 * Map (HMA != 0) or unmap (HMA == 0) the High Memory Area by aliasing
 * HMAAREA onto either itself or the low-memory wrap-around at offset 0.
 * Failure to establish the alias is fatal (exit 47).
 */
void HMA_MAP(int HMA)
{
  int ret;
  /* destroy simx86 memory protections first */
  e_invalidate_full(HMAAREA, HMASIZE);
  /* Note: MAPPING_HMA is magic, dont be confused by src==dst==HMAAREA here */
  off_t src = HMA ? HMAAREA : 0;
  x_printf("Entering HMA_MAP with HMA=%d\n", HMA);
  ret = alias_mapping(MAPPING_HMA, HMAAREA, HMASIZE,
		      PROT_READ | PROT_WRITE | PROT_EXEC, LOWMEM(src));
  if (ret == -1) {
    x_printf("HMA: Mapping HMA to HMAAREA %#x unsuccessful: %s\n",
	     HMAAREA, strerror(errno));
    leavedos(47);
  }
  x_printf("HMA: mapped\n");
}
/* * DANG_BEGIN_FUNCTION add_to_io_select * * arguments: * fd - File handle to add to select statment * want_sigio - want SIGIO (1) if it's available, or not (0). * * description: * Add file handle to one of 2 select FDS_SET's depending on * whether the kernel can handle SIGIO. * * DANG_END_FUNCTION */ void add_to_io_select_new(int new_fd, void (*func)(void *), void *arg, const char *name) { int flags; if ((new_fd+1) > numselectfd) numselectfd = new_fd+1; if (numselectfd > MAX_FD) { error("Too many IO fds used.\n"); leavedos(76); } flags = fcntl(new_fd, F_GETFL); fcntl(new_fd, F_SETOWN, getpid()); fcntl(new_fd, F_SETFL, flags | O_ASYNC); FD_SET(new_fd, &fds_sigio); g_printf("GEN: fd=%d gets SIGIO for %s\n", new_fd, name); io_callback_func[new_fd].func = func; io_callback_func[new_fd].arg = arg; io_callback_func[new_fd].name = name; }
void iodev_add_device(char *dev_name) { int dev_own; if (current_device == -1) { error("add_device() is called not during the init stage!\n"); leavedos(10); } dev_own = find_device_owner(dev_name); if (dev_own != -1) { error("Device conflict: Attempt to use %s for %s and %s\n", dev_name, io_devices[dev_own].name, io_devices[current_device].name); config.exitearly = 1; } if (owned_devices[current_device].devs_owned >= MAX_DEVICES_OWNED) { error("No free slot for device %s\n", dev_name); config.exitearly = 1; } c_printf("registering %s for %s\n",dev_name,io_devices[current_device].name); owned_devices[current_device].dev_names[owned_devices[current_device].devs_owned++] = dev_name; }
/*
 * DANG_BEGIN_FUNCTION add_to_io_select
 *
 * arguments:
 * fd - File handle to add to select statment
 * want_sigio - want SIGIO (1) if it's available, or not (0).
 *
 * description:
 * Add file handle to one of 2 select FDS_SET's depending on
 * whether the kernel can handle SIGIO.
 *
 * DANG_END_FUNCTION
 */
void add_to_io_select(int new_fd, u_char want_sigio, void (*func)(void))
{
  if ((new_fd + 1) > numselectfd)
    numselectfd = new_fd + 1;
  if (numselectfd > MAX_FD) {
    error("Too many IO fds used.\n");
    leavedos(76);
  }
  if (want_sigio) {
    int flags;
    /* bugfix: F_GETFL can fail and return -1, which must not be OR'd
     * into F_SETFL */
    flags = fcntl(new_fd, F_GETFL);
    if (flags == -1) {
      error("fcntl F_GETFL failed for fd=%d: %s\n", new_fd, strerror(errno));
      flags = 0;
    }
    fcntl(new_fd, F_SETOWN, getpid());
    fcntl(new_fd, F_SETFL, flags | O_ASYNC);
    FD_SET(new_fd, &fds_sigio);
    g_printf("GEN: fd=%d gets SIGIO\n", new_fd);
  } else {
    FD_SET(new_fd, &fds_no_sigio);
    g_printf("GEN: fd=%d does not get SIGIO\n", new_fd);
    not_use_sigio++;
  }
  io_callback_func[new_fd] = func;
}
/*
 * Open the temp-file based mapping pool.
 * cap == 0 means probing mode: failures return 0 instead of aborting.
 * The pool size is estimated from HMA + VGAEMU + EMS + low memory,
 * padded against heap fragmentation, then backed by the temp file and
 * mmapped shared so aliases can be created from it later.
 * Returns 1 on success, 0 on (probing) failure.
 */
static int open_mapping_f(int cap)
{
    int mapsize, estsize, padsize;

    if (cap)
	Q_printf("MAPPING: open, cap=%s\n", decode_mapping_cap(cap));

    padsize = 4 * 1024;

    /* first estimate the needed size of the mapfile */
    mapsize = HMASIZE >> 10;	/* HMA */
    /* VGAEMU */
    mapsize += config.vgaemu_memsize ? config.vgaemu_memsize : 1024;
    mapsize += config.ems_size;	/* EMS */
    mapsize += LOWMEM_SIZE >> 10;	/* Low Mem */
    estsize = mapsize;
    /* keep heap fragmentation in mind */
    mapsize += (mapsize / 4 < padsize ? padsize : mapsize / 4);
    mpool_numpages = mapsize / 4;
    mapsize = mpool_numpages * PAGE_SIZE;	/* make sure we are page aligned */

    /* first ftruncate resets the file; the second one sizes it */
    ftruncate(tmpfile_fd, 0);
    if (ftruncate(tmpfile_fd, mapsize) == -1) {
	if (!cap)
	    error("MAPPING: cannot size temp file pool, %s\n",
		  strerror(errno));
	discardtempfile();
	if (!cap) return 0;
	leavedos(2);
    }
    /* /dev/shm may be mounted noexec, and then mounting PROT_EXEC fails.
       However mprotect may work around this (maybe not in future kernels) */
    mpool = mmap(0, mapsize, PROT_READ | PROT_WRITE,
		 MAP_SHARED, tmpfile_fd, 0);
    if (mpool == MAP_FAILED ||
	mprotect(mpool, mapsize, PROT_READ | PROT_WRITE | PROT_EXEC) == -1) {
	/* NOTE(review): this message says "cannot size" but actually
	 * reports an mmap/mprotect failure — kept as-is since it is a
	 * runtime string */
	char *err = strerror(errno);
	char s[] = "MAPPING: cannot size temp file pool, %s\n";
	discardtempfile();
	if (!cap) {
	    Q_printf(s, err);
	    return 0;
	}
	leavedos(2);
    }
    /* the memory pool itself can just be rw though */
    mprotect(mpool, mapsize, PROT_READ | PROT_WRITE);
    Q_printf("MAPPING: open, mpool (min %dK) is %d Kbytes at %p-%p\n",
	     estsize, mapsize / 1024, mpool, mpool + mapsize - 1);
    sminit(&pgmpool, mpool, mapsize);

    /*
     * Now handle individual cases.
     * Don't forget that each of the below code pieces should only
     * be executed once !
     */

#if 0
    if (cap & MAPPING_OTHER) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_EMS) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_DPMI) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_VIDEO) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_VGAEMU) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_HGC) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_HMA) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_SHARED) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_INIT_HWRAM) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_INIT_LOWRAM) {
	/* none for now */
    }
#endif

    return 1;
}
/*
 * Handle dosemu-special hotkeys (reboot, exit, freeze, VT switching,
 * mouse emulation, pan/redraw keys).  `make' is non-zero on key press
 * (as opposed to release).  Returns TRUE if the key was consumed,
 * FALSE if it should be passed on.
 */
Boolean handle_dosemu_keys(Boolean make, t_keysym key)
{
	Boolean result = TRUE;
	switch(key) {
#ifdef X86_EMULATOR
	case KEY_DOSEMU_X86EMU_DEBUG:
		k_printf("KBD: Ctrl-Alt-PgUp\n");
		if (config.cpuemu) {
			if (debug_level('e') < 2)
				set_debug_level('e', 4);
			fflush(dbg_fd);
		}
		return 1;
#endif
	/* C-A-D is disabled */
	case KEY_DOSEMU_REBOOT:
		if (make) {
			k_printf("KBD: Ctrl-Alt-Del: rebooting dosemu\n");
			dos_ctrl_alt_del();
		}
		break;
	case KEY_DOSEMU_EXIT:
		if (make) {
			k_printf("KBD: Ctrl-Alt-PgDn: bye bye!\n");
			leavedos(0);
		}
		break;
	case KEY_DOSEMU_FREEZE:
		if (make) {
			/* toggles the frozen state */
			if (!dosemu_frozen) {
				freeze_dosemu_manual();
			} else {
				unfreeze_dosemu();
			}
		}
		break;

	case KEY_DOSEMU_VT_1:
	case KEY_DOSEMU_VT_2:
	case KEY_DOSEMU_VT_3:
	case KEY_DOSEMU_VT_4:
	case KEY_DOSEMU_VT_5:
	case KEY_DOSEMU_VT_6:
	case KEY_DOSEMU_VT_7:
	case KEY_DOSEMU_VT_8:
	case KEY_DOSEMU_VT_9:
	case KEY_DOSEMU_VT_10:
	case KEY_DOSEMU_VT_11:
	case KEY_DOSEMU_VT_12:
		if (make) {
			int vc_num;
			/* VT keys map 1:1 to console numbers, 1-based */
			vc_num = (key - KEY_DOSEMU_VT_1) +1;
			result = switch_to_console(vc_num);
		}
		break;

	case KEY_MOUSE_UP:
	case KEY_MOUSE_DOWN:
	case KEY_MOUSE_LEFT:
	case KEY_MOUSE_RIGHT:
	case KEY_MOUSE_UP_AND_LEFT:
	case KEY_MOUSE_UP_AND_RIGHT:
	case KEY_MOUSE_DOWN_AND_LEFT:
	case KEY_MOUSE_DOWN_AND_RIGHT:
	case KEY_MOUSE_BUTTON_LEFT:
	case KEY_MOUSE_BUTTON_MIDDLE:
	case KEY_MOUSE_BUTTON_RIGHT:
		mouse_keyboard(make, key);	/* mouse emulation keys */
		break;

	case KEY_DOSEMU_HELP:
	case KEY_DOSEMU_REDRAW:
	case KEY_DOSEMU_SUSPEND:
	case KEY_DOSEMU_RESET:
	case KEY_DOSEMU_MONO:
	case KEY_DOSEMU_PAN_UP:
	case KEY_DOSEMU_PAN_DOWN:
	case KEY_DOSEMU_PAN_LEFT:
	case KEY_DOSEMU_PAN_RIGHT:
		/* delegated to the active keyboard backend, if it has
		 * a handler */
		if (Keyboard->handle_keys) {
			Keyboard->handle_keys(make, key);
		}
		else {
			result = FALSE;
		}
		break;

#if 0
	/* NOTE(review): dead code; if re-enabled, a `break' is missing
	 * before the default label */
	case KEY_MOUSE_GRAB:
		if (Keyboard == &Keyboard_X) {
			handle_X_keys(make, key);
		} else {
			result = FALSE;
		}
#endif

	default:
		result = FALSE;
		break;
	}
	return result;
}
/*
 * Drain and dispatch the SDL event queue: window focus/resize/expose
 * events, keyboard input (with dosemu hotkeys for grab/fullscreen),
 * mouse buttons/motion/wheel, text selection copy/paste, and quit.
 * Must run on the main dosemu thread (asserted).  Skipped entirely
 * while the renderer is updating.
 */
static void SDL_handle_events(void)
{
  SDL_Event event;

  assert(pthread_equal(pthread_self(), dosemu_pthread_self));
  if (render_is_updating())
    return;

  while (SDL_PollEvent(&event)) {
    switch (event.type) {
    case SDL_WINDOWEVENT:
      switch (event.window.event) {
      case SDL_WINDOWEVENT_FOCUS_GAINED:
	v_printf("SDL: focus in\n");
	render_gain_focus();
	if (config.X_background_pause && !dosemu_user_froze)
	  unfreeze_dosemu();
	break;
      case SDL_WINDOWEVENT_FOCUS_LOST:
	v_printf("SDL: focus out\n");
	render_lose_focus();
	if (config.X_background_pause && !dosemu_user_froze)
	  freeze_dosemu();
	break;
      case SDL_WINDOWEVENT_RESIZED:
	/* very strange things happen: if renderer size was explicitly
	 * set, SDL reports mouse coords relative to that. Otherwise
	 * it reports mouse coords relative to the window. */
	SDL_RenderGetLogicalSize(renderer, &m_x_res, &m_y_res);
	if (!m_x_res || !m_y_res) {
	  m_x_res = event.window.data1;
	  m_y_res = event.window.data2;
	}
	update_mouse_coords();
	SDL_redraw();
	break;
      case SDL_WINDOWEVENT_EXPOSED:
	SDL_redraw();
	break;
      case SDL_WINDOWEVENT_ENTER:
	/* ignore fake enter events */
	if (config.X_fullscreen)
	  break;
	mouse_drag_to_corner(m_x_res, m_y_res);
	break;
      }
      break;

    case SDL_KEYDOWN:
      {
	/* wait_kup suppresses key events until the next key-up after
	 * a fullscreen toggle */
	if (wait_kup)
	  break;
	SDL_Keysym keysym = event.key.keysym;
	if ((keysym.mod & KMOD_CTRL) && (keysym.mod & KMOD_ALT)) {
	  if (keysym.sym == SDLK_HOME || keysym.sym == SDLK_k) {
	    force_grab = 0;
	    toggle_grab(keysym.sym == SDLK_k);
	    break;
	  } else if (keysym.sym == SDLK_f) {
	    toggle_fullscreen_mode();
	    /* some versions of SDL re-send the keydown events after the
	     * full-screen switch. We need to filter them out to prevent
	     * the infinite switching loop.
	     */
	    wait_kup = 1;
	    break;
	  }
	}
	if (vga.mode_class == TEXT &&
	    (keysym.sym == SDLK_LSHIFT || keysym.sym == SDLK_RSHIFT)) {
	  copypaste = 1;
	  /* enable cursor for copy/paste */
	  if (!m_cursor_visible)
	    SDL_ShowCursor(SDL_ENABLE);
	}
      }
#if CONFIG_SDL_SELECTION
      clear_if_in_selection();
#endif
#ifdef X_SUPPORT
#if HAVE_XKB
      if (x11_display && config.X_keycode)
	SDL_process_key_xkb(x11_display, event.key);
      else
#endif
#endif
	SDL_process_key(event.key);
      break;

    case SDL_KEYUP: {
      SDL_Keysym keysym = event.key.keysym;
      wait_kup = 0;
      if (copypaste &&
	  (keysym.sym == SDLK_LSHIFT || keysym.sym == SDLK_RSHIFT)) {
	copypaste = 0;
	if (!m_cursor_visible)
	  SDL_ShowCursor(SDL_DISABLE);
      }
#ifdef X_SUPPORT
#if HAVE_XKB
      if (x11_display && config.X_keycode)
	SDL_process_key_xkb(x11_display, event.key);
      else
#endif
#endif
	SDL_process_key(event.key);
      break;
    }

    case SDL_MOUSEBUTTONDOWN: {
      int buttons = SDL_GetMouseState(NULL, NULL);
#if CONFIG_SDL_SELECTION
      if (window_has_focus() && !shift_pressed()) {
	clear_selection_data();
      } else if (vga.mode_class == TEXT && !grab_active) {
	/* in ungrabbed text mode, mouse buttons drive selection and
	 * paste instead of being forwarded to DOS */
	if (event.button.button == SDL_BUTTON_LEFT)
	  start_selection(x_to_col(event.button.x, m_x_res),
			  y_to_row(event.button.y, m_y_res));
	else if (event.button.button == SDL_BUTTON_RIGHT)
	  start_extend_selection(x_to_col(event.button.x, m_x_res),
				 y_to_row(event.button.y, m_y_res));
	else if (event.button.button == SDL_BUTTON_MIDDLE) {
	  char *paste = SDL_GetClipboardText();
	  if (paste)
	    paste_text(paste, strlen(paste), "utf8");
	}
	break;
      }
#endif /* CONFIG_SDL_SELECTION */
      mouse_move_buttons(buttons & SDL_BUTTON(1),
			 buttons & SDL_BUTTON(2),
			 buttons & SDL_BUTTON(3));
      break;
    }

    case SDL_MOUSEBUTTONUP: {
      int buttons = SDL_GetMouseState(NULL, NULL);
#if CONFIG_SDL_SELECTION
      if (vga.mode_class == TEXT && !grab_active) {
	t_unicode *sel = end_selection();
	if (sel) {
	  char *send_text = get_selection_string(sel, "utf8");
	  SDL_SetClipboardText(send_text);
	  free(send_text);
	}
      }
#endif /* CONFIG_SDL_SELECTION */
      mouse_move_buttons(buttons & SDL_BUTTON(1),
			 buttons & SDL_BUTTON(2),
			 buttons & SDL_BUTTON(3));
      break;
    }

    case SDL_MOUSEMOTION:
#if CONFIG_SDL_SELECTION
      extend_selection(x_to_col(event.motion.x, m_x_res),
		       y_to_row(event.motion.y, m_y_res));
#endif /* CONFIG_SDL_SELECTION */
      if (grab_active)
	mouse_move_relative(event.motion.xrel, event.motion.yrel,
			    m_x_res, m_y_res);
      else
	mouse_move_absolute(event.motion.x, event.motion.y,
			    m_x_res, m_y_res);
      break;

    case SDL_MOUSEWHEEL:
      mouse_move_wheel(-event.wheel.y);
      break;

    case SDL_QUIT:
      leavedos(0);
      break;

    default:
      v_printf("PAS ENCORE TRAITE %x\n", event.type);
      /* TODO */
      break;
    }
  }

#ifdef X_SUPPORT
  if (x11_display && !use_bitmap_font && vga.mode_class == TEXT &&
      X_handle_text_expose()) {
    /* need to check separately because SDL_VIDEOEXPOSE is eaten by SDL */
    redraw_text_screen();
  }
#endif
}
/*
 * Open the shm-based mapping driver.  On the very first call, probe
 * whether alias mappings via mremap(old_size=0) work on this kernel
 * (kernel 2.6.1 lacked support); if they don't, probing mode
 * (cap == 0) returns 0 while a real open aborts (exit 2).
 * Returns 1 on success.
 */
static int open_mapping_shm(int cap)
{
    static int first =1;

    if (cap)
	Q_printf("MAPPING: open, cap=%s\n", decode_mapping_cap(cap));

    if (first) {
	void *ptr1, *ptr2 = MAP_FAILED;
	first = 0;
	/* do a test alias mapping. kernel 2.6.1 doesn't support our mremap trick */
	ptr1 = mmap(0, PAGE_SIZE, PROT_NONE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (ptr1 != MAP_FAILED) {
	    /* mremap with old_size 0 duplicates the mapping instead
	     * of moving it */
	    ptr2 = mremap(ptr1, 0, PAGE_SIZE, MREMAP_MAYMOVE);
	    munmap(ptr1, PAGE_SIZE);
	    if (ptr2 != MAP_FAILED)
		munmap(ptr2, PAGE_SIZE);
	}
	if (ptr2 == MAP_FAILED) {
	    Q_printf("MAPPING: not using mapshm because alias mapping does not work\n");
	    if (!cap) return 0;
	    leavedos(2);
	}
    }

    /*
     * Now handle individual cases.
     * Don't forget that each of the below code pieces should only
     * be executed once !
     */

#if 0
    if (cap & MAPPING_OTHER) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_EMS) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_DPMI) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_VIDEO) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_VGAEMU) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_HGC) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_HMA) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_SHARED) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_INIT_HWRAM) {
	/* none for now */
    }
#endif
#if 0
    if (cap & MAPPING_INIT_LOWRAM) {
	/* none for now */
    }
#endif

    return 1;
}
/*
 * Start an invocation of coopth thread `tid' running `func(arg)'.
 * Supports nested invocations up to MAX_COOP_RECUR_DEPTH; stacks are
 * allocated lazily and reused across invocations (max_thr tracks the
 * high-water mark).  Returns 0 on success, non-zero on failure (after
 * leavedos() has been requested).
 */
int coopth_start(int tid, coopth_func_t func, void *arg)
{
    struct coopth_t *thr;
    struct coopth_per_thread_t *pth;
    int tn;

    check_tid(tid);
    thr = &coopthreads[tid];
    assert(thr->tid == tid);
    if (thr->cur_thr >= MAX_COOP_RECUR_DEPTH) {
	int i;
	error("Coopthreads recursion depth exceeded, %s off=%x\n",
	      thr->name, thr->off);
	for (i = 0; i < thr->cur_thr; i++) {
	    error("\tthread %i state %i dbg %#x\n",
		  i, thr->pth[i].st.state, thr->pth[i].dbg);
	}
	leavedos(2);
	return -1;
    }
    tn = thr->cur_thr++;
    pth = &thr->pth[tn];
    /* allocate a stack only when recursing deeper than ever before;
     * shallower slots reuse their previously mmapped stack */
    if (thr->cur_thr > thr->max_thr) {
	size_t stk_size = COOP_STK_SIZE();
	thr->max_thr = thr->cur_thr;
#ifndef MAP_STACK
#define MAP_STACK 0
#endif
	pth->stack = mmap(NULL, stk_size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	if (pth->stack == MAP_FAILED) {
	    error("Unable to allocate stack\n");
	    leavedos(21);
	    return 1;
	}
	pth->stk_size = stk_size;
    }
    /* reset all per-invocation state */
    pth->data.tid = &thr->tid;
    pth->data.attached = 0;
    pth->data.posth_num = 0;
    pth->data.sleep.func = NULL;
    pth->data.clnup.func = NULL;
    pth->data.udata_num = 0;
    pth->data.cancelled = 0;
    pth->data.left = 0;
    pth->args.thr.func = func;
    pth->args.thr.arg = arg;
    pth->args.thrdata = &pth->data;
    pth->dbg = LWORD(eax);	// for debug
    pth->thread = co_create(coopth_thread, &pth->args, pth->stack,
			    pth->stk_size);
    if (!pth->thread) {
	error("Thread create failure\n");
	leavedos(2);
	return -1;
    }
    pth->st = ST(RUNNING);
    if (tn == 0) {
	/* first invocation of this tid: publish it as active */
	assert(threads_active < MAX_ACT_THRS);
	active_tids[threads_active++] = tid;
    } else if (thr->pth[tn - 1].st.state == COOPTHS_SLEEPING) {
	static int logged;
	/* will have problems with wake-up by tid. It is possible
	 * to do a wakeup-specific lookup, but this is nasty, and
	 * the recursion itself is nasty too. Lets just print an
	 * error to force the caller to create a separate thread.
	 * vc.c does this to not sleep in the sighandling thread.
	 */
	if (!logged) {
	    dosemu_error("thread %s recursed (%i) over sleep\n",
			 thr->name, thr->cur_thr);
	    logged = 1;
	}
    }
    threads_total++;
    if (!thr->detached)
	coopth_callf(thr, pth);
    return 0;
}
/*
 * Run one scheduling step of coopth thread invocation `pth' of `thr',
 * dispatching on its current state.  Maintains the joinable_running /
 * left_running / thread_running recursion counters around the actual
 * execution, and fires the oneshot and per-thread sleep handlers when
 * the thread goes to sleep.
 */
static void __thread_run(struct coopth_t *thr, struct coopth_per_thread_t *pth)
{
    switch (pth->st.state) {
    case COOPTHS_NONE:
	error("Coopthreads error switch to inactive thread, exiting\n");
	leavedos(2);
	break;
    case COOPTHS_RUNNING: {
	int jr, lr;
	enum CoopthRet tret;

	/* We have 2 kinds of recursion:
	 *
	 * 1. (call it recursive thread invocation)
	 *	main_thread -> coopth_start(thread1_func) -> return
	 *	thread1_func() -> coopth_start(thread2_func) -> return
	 *	(thread 1 returned, became zombie)
	 *	thread2_func() -> return
	 *	thread2 joined
	 *	thread1 joined
	 *	main_thread...
	 *
	 * 2. (call it nested thread invocation)
	 *	main_thread -> coopth_start(thread1_func) -> return
	 *	thread1_func() -> do_int_call_back() ->
	 *		run_int_from_hlt() ->
	 *		coopth_start(thread2_func) -> return
	 *	thread2_func() -> return
	 *	thread2 joined
	 *	-> return from do_int_call_back() ->
	 *	return from thread1_func()
	 *	thread1 joined
	 *	main_thread...
	 *
	 * Both cases are supported here, but the nested invocation
	 * is not supposed to be used as being too complex.
	 * Since do_int_call_back() was converted
	 * to coopth API, the nesting is avoided.
	 * If not true, we print an error.
	 */
	if (joinable_running) {
	    static int warned;
	    if (!warned) {
		warned = 1;
		dosemu_error("Nested thread invocation detected, please fix! "
			     "(at=%i)\n", pth->data.attached);
	    }
	}
	/* save and bump the recursion counters around the run */
	jr = joinable_running;
	if (pth->data.attached)
	    joinable_running++;
	lr = left_running;
	if (pth->data.left) {
	    assert(!pth->data.attached);
	    left_running++;
	}
	thread_running++;
	tret = do_run_thread(thr, pth);
	thread_running--;
	left_running = lr;
	joinable_running = jr;
	if (tret == COOPTH_WAIT && pth->data.attached)
	    dosemu_sleep();
	if (tret == COOPTH_SLEEP || tret == COOPTH_WAIT ||
	    tret == COOPTH_YIELD) {
	    if (pth->data.sleep.func) {
		/* oneshot sleep handler */
		pth->data.sleep.func(pth->data.sleep.arg);
		pth->data.sleep.func = NULL;
	    }
	    if (thr->sleeph.pre)
		thr->sleeph.pre(thr->tid);
	}
	/* normally we don't exit with RUNNING state any longer.
	 * this was happening in prev implementations though, so
	 * remove that assert if it ever hurts. */
	assert(pth->st.state != COOPTHS_RUNNING);
	break;
    }
    case COOPTHS_SLEEPING:
	if (pth->data.attached)
	    dosemu_sleep();
	break;
    case COOPTHS_SWITCH:
	pth->st.switch_fn(thr, pth);
	break;
    }
}
/*
 * DANG_BEGIN_FUNCTION low_mem_init
 *
 * description:
 *  Initializes the lower 1Meg via mmap & sets up the HMA region
 *
 *  Allocates the low memory + HMA backing, then tries to alias it at
 *  address 0 (needs vm.mmap_min_addr=0, possibly root on i386).  If
 *  that fails and the CPU emulator is available, falls back to a
 *  non-zero base with JIT CPU emulation enabled.
 *
 * DANG_END_FUNCTION
 */
void low_mem_init(void)
{
  void *lowmem, *result;
#ifdef __i386__
  PRIV_SAVE_AREA
#endif

  open_mapping(MAPPING_INIT_LOWRAM);
  g_printf ("DOS+HMA memory area being mapped in\n");
  lowmem = alloc_mapping(MAPPING_INIT_LOWRAM, LOWMEM_SIZE + HMASIZE, -1);
  if (lowmem == MAP_FAILED) {
    perror("LOWRAM alloc");
    leavedos(98);
  }

#ifdef __i386__
  /* we may need root to mmap address 0 */
  enter_priv_on();
  result = alias_mapping(MAPPING_INIT_LOWRAM, 0, LOWMEM_SIZE + HMASIZE,
			 PROT_READ | PROT_WRITE | PROT_EXEC, lowmem);
  leave_priv_setting();
  if (result == MAP_FAILED && (errno == EPERM || errno == EACCES)) {
#ifndef X86_EMULATOR
    perror ("LOWRAM mmap");
    fprintf(stderr, "Cannot map low DOS memory (the first 640k).\n"
	    "You can most likely avoid this problem by running\n"
	    "sysctl -w vm.mmap_min_addr=0\n"
	    "as root, or by changing the vm.mmap_min_addr setting in\n"
	    "/etc/sysctl.conf or a file in /etc/sysctl.d/ to 0.\n"
	    "If this doesn't help, disable selinux in /etc/selinux/config\n"
	    );
    exit(EXIT_FAILURE);
#else
    if (config.cpuemu < 3) {
      /* switch on vm86-only JIT CPU emulation to with non-zero base */
      config.cpuemu = 3;
      init_emu_cpu();
      c_printf("CONF: JIT CPUEMU set to 3 for %d86\n", (int)vm86s.cpu_type);
      error("Using CPU emulation because vm.mmap_min_addr > 0.\n"
	    "You can most likely avoid this problem by running\n"
	    "sysctl -w vm.mmap_min_addr=0\n"
	    "as root, or by changing the vm.mmap_min_addr setting in\n"
	    "/etc/sysctl.conf or a file in /etc/sysctl.d/ to 0.\n"
	    "If this doesn't help, disable selinux in /etc/selinux/config\n"
	    );
    }
    /* retry at a kernel-chosen (non-zero) base */
    result = alias_mapping(MAPPING_INIT_LOWRAM, -1, LOWMEM_SIZE + HMASIZE,
			   PROT_READ | PROT_WRITE | PROT_EXEC, lowmem);
#endif
  }
#else
  result = alias_mapping(MAPPING_INIT_LOWRAM, -1, LOWMEM_SIZE + HMASIZE,
			 PROT_READ | PROT_WRITE | PROT_EXEC, lowmem);
  if (config.cpuemu < 3) {
    /* switch on vm86-only JIT CPU emulation to with non-zero base */
    config.cpuemu = 3;
    init_emu_cpu();
    c_printf("CONF: JIT CPUEMU set to 3 for %d86\n", (int)vm86s.cpu_type);
  }
#endif
  if (result == MAP_FAILED) {
    perror ("LOWRAM mmap");
    exit(EXIT_FAILURE);
  }
#ifdef X86_EMULATOR
  if (result) {
    warn("WARN: using non-zero memory base address %p.\n"
	 "WARN: You can use the better-tested zero based setup using\n"
	 "WARN: sysctl -w vm.mmap_min_addr=0\n"
	 "WARN: as root, or by changing the vm.mmap_min_addr setting in\n"
	 "WARN: /etc/sysctl.conf or a file in /etc/sysctl.d/ to 0.\n",
	 result);
  }
#endif
  /* keep conventional memory protected as long as possible to protect
     NULL pointer dereferences */
  mprotect_mapping(MAPPING_LOWMEM, result, config.mem_size * 1024, PROT_NONE);
}
/*
 * Switch the SDL display to a new mode: (x_res, y_res) is the render
 * target / surface size, (w_x_res, w_y_res) the window size.  Frees
 * the previous surface and texture, creates new ones (fatal on
 * failure, exit 99), adjusts window size/aspect, shows the window on
 * first use, and resets the dirty-rectangle bookkeeping and mouse
 * coordinate mapping.  Must run on the main dosemu thread (asserted).
 */
static void SDL_change_mode(int x_res, int y_res, int w_x_res, int w_y_res)
{
  Uint32 flags;

  assert(pthread_equal(pthread_self(), dosemu_pthread_self));
  v_printf("SDL: using mode %dx%d %dx%d %d\n", x_res, y_res, w_x_res,
	   w_y_res, SDL_csd.bits);
  /* surface and texture_buf are created/destroyed together; `surface'
   * being set implies `texture_buf' is too */
  if (surface) {
    SDL_FreeSurface(surface);
    SDL_DestroyTexture(texture_buf);
  }
  if (x_res > 0 && y_res > 0) {
    texture_buf = SDL_CreateTexture(renderer,
	pixel_format,
	SDL_TEXTUREACCESS_STREAMING,
	x_res, y_res);
    if (!texture_buf) {
      error("SDL target texture failed: %s\n", SDL_GetError());
      leavedos(99);
    }
    surface = SDL_CreateRGBSurface(0, x_res, y_res, SDL_csd.bits,
	SDL_csd.r_mask, SDL_csd.g_mask, SDL_csd.b_mask, 0);
    if (!surface) {
      error("SDL surface failed: %s\n", SDL_GetError());
      leavedos(99);
    }
  } else {
    /* zero-sized mode: run without a surface/texture */
    surface = NULL;
    texture_buf = NULL;
  }

  if (config.X_fixed_aspect)
    SDL_RenderSetLogicalSize(renderer, w_x_res, w_y_res);
  flags = SDL_GetWindowFlags(window);
  if (!(flags & SDL_WINDOW_MAXIMIZED))
    SDL_SetWindowSize(window, w_x_res, w_y_res);
  set_resizable(use_bitmap_font || vga.mode_class == GRAPH,
	w_x_res, w_y_res);
  if (!initialized) {
    initialized = 1;
    SDL_ShowWindow(window);
    SDL_RaiseWindow(window);
    m_cursor_visible = 1;
    if (config.X_fullscreen)
      render_gain_focus();
  }
  SDL_RenderClear(renderer);
  SDL_RenderPresent(renderer);
  if (texture_buf) {
    SDL_SetRenderTarget(renderer, texture_buf);
    SDL_RenderClear(renderer);
  }
  m_x_res = w_x_res;
  m_y_res = w_y_res;
  win_width = x_res;
  win_height = y_res;
  /* forget about those rectangles */
  sdl_rects_num = 0;
  update_mouse_coords();
}