/*
 * Detect an 80386 CPU by probing EFLAGS bit 18 (the AC flag): a 386
 * cannot set it, a 486 or later can.  Returns 1 on a 386, 0 otherwise.
 * Side effect: on a 486+, the AC bit is cleared again before returning.
 */
bool is386()
{
    int flags = get_eflags();

    /* Try to turn the AC bit on and read the flags back. */
    set_eflags(flags | (1 << 18));
    flags = get_eflags();

    if (!(flags & (1 << 18)))
        return 1;               /* bit refused to stick: this is a 386 */

    /* 486 or later: undo our change before returning. */
    set_eflags(flags & ~(1 << 18));
    return 0;
}
/*
 * Initialize a TSS for a new task.
 *
 * task_id       index of the task (selects its per-task LDT slot in the GDT)
 * TSS           the task-state segment to fill in
 * entry         initial EIP for the task
 * stack         initial user ESP
 * sys           nonzero for a system (ring-0) task, zero for a user task
 * interrupt_sp  kernel stack pointer (ESP0) used on privilege transitions
 */
void init_TSS(unsigned int task_id, TSS_t *TSS, entry_t entry,
              unsigned int stack, uint8_t sys, uint32_t interrupt_sp)
{
    uint16_t code_sel;
    uint16_t data_sel;

    /* I/O map base past the segment limit: no I/O permission bitmap. */
    TSS->iomap = 0x40000000;

    /* Start the task with interrupts enabled (IF, bit 9). */
    TSS->eflags = get_eflags() | 1 << 9;

    /* Each task owns an LDT descriptor right after its TSS slot in the GDT. */
    TSS->ldtr = (GDT_USEABLE_NUM + task_id * 2 + 1) * 8;

    TSS->eip = (uint32_t)entry;
    TSS->eax = TSS->ecx = TSS->edx = TSS->ebx = 0;
    TSS->esp = stack;
    TSS->ebp = TSS->esi = TSS->edi = 0;

    /* Kernel stack used when an interrupt raises the privilege level. */
    TSS->ss0 = kernel_data_selector;
    TSS->esp0 = interrupt_sp;

    /*
     * Segment selectors are LDT-relative (TI bit, 0b100, set).  User tasks
     * additionally get RPL 3 (0b011); system tasks keep RPL 0.
     */
    if (!sys) {
        code_sel = (0 * 8) | 0b111;
        data_sel = (1 * 8) | 0b111;
    } else {
        code_sel = (0 * 8) | 0b100;
        data_sel = (1 * 8) | 0b100;
    }
    TSS->cs = code_sel;
    TSS->ss = TSS->ds = TSS->es = TSS->fs = TSS->gs = data_sel;
}
/*
 * ttywrite() - queue nchar bytes from buf for interrupt-driven
 * transmission on tty device dev.  Always returns nchar; blocks
 * (busy-waits) until every byte has been enqueued.
 *
 * Interrupts are masked (cli) around each enqueue so the transmit-ISR
 * cannot race the queue manipulation; the transmitter interrupt (THRI)
 * is re-enabled afterwards so the UART starts draining the queue.
 */
int ttywrite(int dev, char *buf, int nchar)
{
  int baseport, saved_eflags;
  struct tty *tty;
  int i=0;

  baseport = devtab[dev].dvbaseport;        /* hardware addr from devtab */
  tty = (struct tty *)devtab[dev].dvdata;   /* software data for line */

  saved_eflags=get_eflags();                /* cpu flags saved */
  cli();                                    /* disable interrupts */
  outpt( baseport+UART_IER, UART_IER_RDI);  /* enable receiver interrupt */

  /* Fill the transmit queue as far as it will go in one pass. */
  while ((i <nchar) && (enqueue((tty->tqu), buf[i]) != FULLQUE )) /* loop for enqueue till the queue is full */
    i++;

  sti();                                    /* allow cpu interrupts */
  outpt( baseport+UART_IER, UART_IER_RDI | UART_IER_THRI); /*enable receiver and transmitter interrupts */
  set_eflags(saved_eflags);                 /*restoring flag status*/
  /* NOTE(review): the sti() just above is redundant — set_eflags(saved_eflags)
     already restores the caller's IF state (and sti() briefly enables
     interrupts even if the caller had them disabled).  Verify callers
     before changing. */

  /* Byte-at-a-time for the remainder: each iteration masks interrupts,
     disables THRI, retries the enqueue, then re-enables THRI so the
     ISR can drain the queue and make room. */
  while (i<nchar)                           /* loop for enqueue of remaining nchar */
  {
    cli();
    outpt( baseport+UART_IER, UART_IER_RDI);              /* enable receiver interrupt*/
    if(enqueue((tty->tqu), buf[i]) != FULLQUE)            /* check if queue full */
      i++;
    outpt( baseport+UART_IER, UART_IER_RDI | UART_IER_THRI); /* enable Rx and Tx interrupts */
    set_eflags(saved_eflags);                             /* restore flags*/
  }
  return nchar;
}
/*
 * Enter a (nestable) critical section on a uniprocessor.  The outermost
 * entry records the caller's EFLAGS and masks interrupts; nested entries
 * only deepen the count.  Balanced by the matching critical-exit routine.
 */
void base_critical_enter(void)
{
    if (entry_count++ == 0) {
        saved_eflags = get_eflags();
        cli();
    }
}
/*
 * Disable interrupts, returning the previous interrupt-enable state.
 * Combo of:
 *	save = osenv_intr_enabled();
 *	osenv_intr_disable();
 * The returned value is the raw EFL_IF bit (nonzero iff interrupts
 * were enabled on entry).
 */
inline int
osenv_intr_save_disable(void)
{
    int was_enabled = get_eflags() & EFL_IF;

    if (was_enabled)
        cli();

    return was_enabled;
}
/*
 * Init the VM code.
 *
 * Sets up a separate task (via a task gate) whose only job is to handle
 * double faults: a stack overflow cannot push a trap frame on the
 * faulting stack, so the CPU must switch to a fresh one.
 */
void oskit_uvm_redzone_init(void)
{
    oskit_addr_t addr;

    /*
     * We use a task gate to catch page faults, since a stack overflow
     * will try and dump more stuff on the stack. This is the easiest
     * way to deal with it.
     */
    /* 2^12-aligned stack for the fault-handler task. */
    if ((addr = (oskit_addr_t) lmm_alloc_aligned(&malloc_lmm,
                        STACKSIZE, 0, 12, 0)) == 0)
        /* NOTE(review): literal-concatenation with __FUNCTION__ only
           compiles on old GCC (pre-3.x); modern compilers reject it. */
        panic(__FUNCTION__": Could not allocate stack\n");

    /* Kernel stack for the handler task; leave a little headroom. */
    task_tss.ss0 = KERNEL_DS;
    task_tss.esp0 = addr + STACKSIZE - sizeof(double);
    task_tss.esp = task_tss.esp0;
    task_tss.ss = KERNEL_DS;
    task_tss.ds = KERNEL_DS;
    task_tss.es = KERNEL_DS;
    task_tss.fs = KERNEL_DS;
    task_tss.gs = KERNEL_DS;
    task_tss.cs = KERNEL_CS;
    /* Bitmap offset == TSS size: no I/O permission bitmap. */
    task_tss.io_bit_map_offset = sizeof(task_tss);
    task_tss.eip = (int) double_fault_handler;

    /* Make sure the task is started with interrupts disabled */
    osenv_intr_disable();
    task_tss.eflags = (int) get_eflags();
    osenv_intr_enable();

    /* Both TSSs has to know about the page tables */
    task_tss.cr3 = get_cr3();
    base_tss.cr3 = get_cr3();

    /* Initialize the base TSS descriptor. */
    fill_descriptor(&base_gdt[KERNEL_TRAP_TSS / 8],
            kvtolin(&task_tss), sizeof(task_tss) - 1,
            ACC_PL_K|ACC_TSS|ACC_P, 0);

    /*
     * NOTE: The task switch will include an extra word on the stack,
     * pushed by the CPU. The handler will need to be in assembly code
     * if we care about that value. As it is, the handler routine
     * stack is going to be slightly messed up, but since the handler
     * calls panic, it is not a problem right now.
     */
    fill_gate(&base_idt[T_DOUBLE_FAULT], 0, KERNEL_TRAP_TSS,
          ACC_TASK_GATE|ACC_P|ACC_PL_K, 0);

    /* Activate the new descriptor tables. */
    base_idt_load();
    base_gdt_load();
}
/*!
    \brief down — P operation: try to decrement the semaphore.

    Runs with interrupts disabled; the caller's EFLAGS are restored
    before returning.

    \param sem target semaphore
    \return 0 when the semaphore was decremented, -1 when it was 0
    \author Higepon
    \date   create:2003/01/31 update:2003/03/21
*/
int Semaphore::down(semaphore* sem)
{
    uint32_t savedFlags = get_eflags();
    disableInterrupt();

    int result;
    if (*sem) {
        --(*sem);
        result = 0;
    } else {
        result = -1;
    }

    set_eflags(savedFlags);
    return result;
}
/*
 * Extract the ARM EABI version from an ELF object's e_flags.
 * Returns -1 for non-ARM machines or non-SysV/none OSABI objects.
 * NOTE(review): this definition appears truncated in this view (closing
 * brace not visible); do not assume more follows the final return.
 */
static int arm_eabi_poker(elfobj *elf)
{
    unsigned int emachine, eflags;

    /* Only plain (ELFOSABI_NONE) objects carry EABI info here. */
    if (ELFOSABI_NONE != elf->data[EI_OSABI])
        return -1;

    emachine = get_emtype(elf);
    eflags = get_eflags(elf);

    if (emachine == EM_ARM)
        /* EF_ARM_EABI_VERSION masks the version field (top byte of
           e_flags); >> 24 presumably reduces it to a small integer —
           verify against the elf.h macro definition. */
        return EF_ARM_EABI_VERSION(eflags) >> 24;
    else
        return -1;
/*
 * ttyread() - read exactly nchar bytes from tty device dev into buf.
 * Polls the receive queue, masking interrupts around each dequeue so
 * the receive ISR cannot race us; busy-waits until all bytes arrive.
 * Always returns nchar.
 */
int ttyread(int dev, char *buf, int nchar)
{
    int baseport = devtab[dev].dvbaseport;          /* hardware addr from devtab */
    struct tty *tty = (struct tty *)devtab[dev].dvdata; /* software data for line */
    int count = 0;
    int flags;

    /* Spin until every requested character has been dequeued. */
    while (count < nchar) {
        flags = get_eflags();                       /* save caller's IF state */
        cli();                                      /* keep the ISR out of the queue */
        if (queuecount(tty->rqu))
            buf[count++] = dequeue((tty->rqu));
        set_eflags(flags);                          /* restore IF state */
    }

    return nchar;
}
/*
 * Enter a global critical section on SMP: masks interrupts, then
 * acquires a global spin lock keyed by CPU id.  Re-entry by the owning
 * CPU only deepens the nest count.
 */
void base_critical_enter(void)
{
    unsigned old_eflags = get_eflags();
    unsigned cpu_id = (num_processors > 1) ? smp_find_cur_cpu() : 0;

    /* First make sure we get no interference from interrupt activity. */
    cli();

    /* If we already own the lock, just increment the count and return. */
    if (critical_cpu_id == cpu_id)
    {
        critical_nest_count++;
        return;
    }

    /* Lock the global spin lock, waiting if another processor has it. */
    /* cmpxchg: if critical_cpu_id == -1 (EAX; presumably the "unowned"
       sentinel — confirm against the matching exit routine) install our
       cpu_id, else loop and retry. */
    asm volatile("1: movl $-1,%%eax; lock; cmpxchgl %0,(%1); jne 1b"
        : : "r" (cpu_id), "r" (&critical_cpu_id) : "eax");

    /* Lock acquired: we are the first level of nesting; remember the
       caller's EFLAGS for the matching outermost exit. */
    critical_nest_count = 0;
    critical_saved_eflags = old_eflags;
}
/*
 * Bring up the basic per-CPU environment: processor tables (traps,
 * IRQs, GDT, TSS), then turn on alignment checking for all memory
 * accesses (EFLAGS.AC together with CR0.AM).
 */
void base_cpu_init(void)
{
    unsigned int flags;
    unsigned int cr0val;

    /* Initialize the processor tables. */
    base_trap_init();
    base_irq_init();
    base_gdt_init();
    base_tss_init();

    /* Alignment checking needs both the AC flag and the AM bit set. */
    flags = get_eflags() | EFL_AC;
    set_eflags(flags);

    cr0val = get_cr0() | CR0_AM;
    set_cr0(cr0val);
}
/*
 * Build the initial CPU context for a new task.
 *
 * Lays out a fake trap frame (cpu_regs) at the top of the task's kernel
 * stack and a kern_regs frame below it so the first context switch
 * "returns" into syscall_ret and from there into the task's entry point.
 *
 * tsk  task whose tsk_context is initialized; tsk->pl selects kernel vs
 *      user segment selectors, stack and IOPL.
 * Returns 0.
 *
 * Fix: the function is declared int but previously fell off the end
 * without a return statement (undefined behavior in C++); it now
 * returns 0.  The unused local `adr` was removed.
 */
int ArchCpu::_task_init(kernel::sys::sched::task::Task *tsk)
{
    uint32_t flags;
    struct context *_newctx;
    struct kern_regs *_newkregs;

    /* NOTE(review): `flags` is saved but never restored — the function
       ends with an unconditional interrupt_enable().  Confirm this is
       intended before relying on caller IF state. */
    flags = get_eflags();
    interrupt_disable();

    ::hwplatform.console << kernel::Console::HEX << "[ ArchCpu ] _task_init(): entry(0x"
        << (int) tsk->entry << "), PL(0x" << (int) tsk->pl << "), CTX.U_STACK(0x"
        << (int) tsk->tsk_context.u_stack_top << ")\n";

    _newctx = (struct context*) (&tsk->tsk_context);
    _newkregs = (struct kern_regs*) (&_newctx->kregs);
    /* Trap frame sits at the very top of the kernel stack. */
    _newctx->uregs = (struct cpu_regs*) ((_newctx->k_stack_top - sizeof(struct cpu_regs)));

    switch (tsk->pl) {
    /*
     * TODO IMPORTANT!!! Think hardly on where kernel task stack must be placed?
     */
    case kernel::sys::sched::task::Task::KERNEL:
        _newctx->uregs->cs = (uint32_t) KERNEL_CS;
        _newctx->uregs->ds = _newctx->uregs->es = (uint32_t) KERNEL_DS;
        _newctx->uregs->eflags = (uint32_t)(EFL_IF | EFL_IOPL_KERN);
        /*
         * FIXME Critical!!!
         * If task is running in kernel mode,
         * _newctx->uregs->esp points to return address
         * _newctx->uregs->ss points to first argument.
         * Warning!! Seems to be potential secuirty hole
         */
#warning "Warning!! Seems to be a potential secuirty hole"
        _newctx->uregs->esp = (uint32_t) ArchCpu::_task_end;
        _newctx->uregs->ss = (uint32_t) NULL;
        break;
    case kernel::sys::sched::task::Task::USER:
    default:
        /* User selectors carry RPL 3. */
        _newctx->uregs->cs = (uint32_t)(USER_CS | 3);
        _newctx->uregs->ds = _newctx->uregs->es = (uint32_t)(USER_DS | 3);
        _newctx->uregs->ss = (uint32_t)(USER_DS | 3);
        _newctx->uregs->esp = (uint32_t) _newctx->u_stack_top;
        _newctx->uregs->eflags = (uint32_t)(EFL_IF | EFL_IOPL_USER);
        break;
    }

    _newctx->uregs->eip = (uint32_t)(tsk->entry);
    _newctx->uregs->eax = 0;
    _newctx->esp0 = (uint32_t)(_newctx->k_stack_top);

    // initial return for first stack switching of this task
    _newkregs->eip = (uint32_t) & syscall_ret;
    _newkregs->esp = (uint32_t)(_newctx->uregs) - sizeof(uint32_t);

    interrupt_enable();
    return 0;
}
/*
 * Function creates architecture dependent CPU context. It's called by create_task() and
 * create_kernel_thread() routines
 *
 * ac      context to fill in
 * start   initial EIP for the new task/thread
 * kstack  base of the kernel stack page (esp0 is placed near its top)
 * stack   base of the user stack area (USER_TASK only)
 * type    KERNEL_TASK or USER_TASK
 * pmap    address space for USER_TASK; NULL falls back to the kernel
 *         page directory
 *
 * The CONT_* macros address slots of the startup frame relative to
 * esp0; a later context switch pops this frame to start the task.
 */
void archcont_create(archcont_t *ac, uint_t start, uint_t kstack, uint_t stack, uint_t type, pmap_t *pmap)
{
    switch (type) {
    case KERNEL_TASK:
        ac->cr3 = KERNEL_PAGE_DIR;
        /* Frame sits just under the top of the kernel stack page;
           the extra 12+4 bytes presumably reserve words popped on the
           first switch — confirm against the switch code. */
        ac->esp0 = kstack + PAGE_SIZE - ARCHCONT_SIZE - 12 - 4;

        /* Prepare startup context for kernel thread */
        CONT_EDI(ac->esp0) = 0;
        CONT_ESI(ac->esp0) = 0;
        CONT_EBP(ac->esp0) = 0;
        CONT_EDX(ac->esp0) = 0;
        CONT_ECX(ac->esp0) = 0;
        CONT_EBX(ac->esp0) = 0;
        CONT_EAX(ac->esp0) = 0;
        CONT_GS(ac->esp0) = KERNEL_DS;
        CONT_FS(ac->esp0) = KERNEL_DS;
        CONT_ES(ac->esp0) = KERNEL_DS;
        CONT_DS(ac->esp0) = KERNEL_DS;
        CONT_EIP(ac->esp0) = start;
        CONT_CS(ac->esp0) = KERNEL_CS;
        CONT_EFLAGS(ac->esp0) = get_eflags();
        break;

    case USER_TASK:
        if (pmap == NULL)
            ac->cr3 = KERNEL_PAGE_DIR;
        else
            ac->cr3 = (uint_t)KERNEL_TO_PHYS(pmap->pdir);

        /* User frames need 8 more bytes: ESP and SS are popped too on
           the inter-privilege iret. */
        ac->esp0 = kstack + PAGE_SIZE - ARCHCONT_SIZE - 12 - 4 - 8;

        /*
         * Prepare startup context for user task. Startup context for user task is
         * more complex than kernel thread context
         */
        CONT_EDI(ac->esp0) = 0;
        CONT_ESI(ac->esp0) = 0;
        CONT_EBP(ac->esp0) = 0;
        CONT_EDX(ac->esp0) = 0;
        CONT_ECX(ac->esp0) = 0;
        CONT_EBX(ac->esp0) = 0;
        CONT_EAX(ac->esp0) = 0;
        CONT_GS(ac->esp0) = USER_DS;
        CONT_FS(ac->esp0) = USER_DS;
        CONT_ES(ac->esp0) = USER_DS;
        CONT_DS(ac->esp0) = USER_DS;
        CONT_EIP(ac->esp0) = start;
        CONT_CS(ac->esp0) = USER_CS;
        CONT_EFLAGS(ac->esp0) = get_eflags();
        CONT_ESP(ac->esp0) = stack + STACK_SIZE * PAGE_SIZE - 4;
        CONT_SS(ac->esp0) = USER_DS;
        break;
    }
    return;
}
/*
 * checkClient() - send a detailed diagnostic report about client acptr
 * to operator sptr (/CHECK command), as a series of RPL_DATASTR lines:
 * identity, status, privileges, channels, idle time, and (for local
 * clients) connection statistics.
 */
void checkClient(struct Client *sptr, struct Client *acptr)
{
  struct Channel *chptr;
  struct Membership *lp;
  char outbuf[BUFSIZE];
  char *privs;
  time_t nowr;

  /* Header */
  send_reply(sptr, RPL_DATASTR, " ");
  send_reply(sptr, RPL_CHKHEAD, "user", acptr->cli_name);
  send_reply(sptr, RPL_DATASTR, " ");

  ircd_snprintf(0, outbuf, sizeof(outbuf), " Nick:: %s (%s%s)", acptr->cli_name, NumNick(acptr));
  send_reply(sptr, RPL_DATASTR, outbuf);

  /* Sign-on time is only tracked for local clients. */
  if (MyUser(acptr))
  {
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Signed on:: %s", myctime(acptr->cli_firsttime));
    send_reply(sptr, RPL_DATASTR, outbuf);
  }

  ircd_snprintf(0, outbuf, sizeof(outbuf), " Timestamp:: %s (%d)", myctime(acptr->cli_lastnick), acptr->cli_lastnick);
  send_reply(sptr, RPL_DATASTR, outbuf);

  ircd_snprintf(0, outbuf, sizeof(outbuf), " User/Hostmask:: %s@%s (%s)", acptr->cli_user->username, acptr->cli_user->host,
  ircd_ntoa((const char*) &(cli_ip(acptr))));
  send_reply(sptr, RPL_DATASTR, outbuf);

  /* Show the real (unmasked) user@host when the visible one is hidden. */
  if (((feature_int(FEAT_HOST_HIDING_STYLE) == 1) ? HasHiddenHost(acptr) : IsHiddenHost(acptr)) || IsSetHost(acptr))
  {
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Real User/Host:: %s@%s", acptr->cli_user->realusername, acptr->cli_user->realhost);
    send_reply(sptr, RPL_DATASTR, outbuf);
  }

  ircd_snprintf(0, outbuf, sizeof(outbuf), " Real Name:: %s%c", cli_info(acptr), COLOR_OFF);
  send_reply(sptr, RPL_DATASTR, outbuf);

  if (IsService(cli_user(acptr)->server))
  {
    /* NOTE(review): `if (acptr)` is always true here (acptr is
       dereferenced above), so the three branches below are dead code.
       This likely meant a different predicate (e.g. a service check on
       acptr itself) — confirm intent before fixing. */
    if (acptr)
      send_reply(sptr, RPL_DATASTR, " Status:: Network Service");
    else if (IsAdmin(acptr))
      send_reply(sptr, RPL_DATASTR, " Status:: IRC Administrator (service)");
    else if (IsAnOper(acptr))
      send_reply(sptr, RPL_DATASTR, " Status:: IRC Operator (service)");
    else
      send_reply(sptr, RPL_DATASTR, " Status:: Client (service)");
  }
  else if (IsAdmin(acptr))
  {
    send_reply(sptr, RPL_DATASTR, " Status:: IRC Administrator");
  }
  else if (IsAnOper(acptr))
  {
    send_reply(sptr, RPL_DATASTR, " Status:: IRC Operator");
  }
  else
  {
    send_reply(sptr, RPL_DATASTR, " Status:: Client");
  }

  if (MyUser(acptr))
  {
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Class:: %s", get_client_class(acptr));
    send_reply(sptr, RPL_DATASTR, outbuf);
  }

  privs = client_print_privs(acptr);
  if (strlen(privs) > 1)
    client_check_privs(acptr, sptr);

  ircd_snprintf(0, outbuf, sizeof(outbuf), " Connected to:: %s", cli_name(acptr->cli_user->server));
  send_reply(sptr, RPL_DATASTR, outbuf);

  if (cli_version(acptr))
  {
    if (strlen(cli_version(acptr)) > 0)
    {
      ircd_snprintf(0, outbuf, sizeof(outbuf), " CTCP Version:: %s", cli_version(acptr));
      send_reply(sptr, RPL_DATASTR, outbuf);
    }
  }

  if (cli_user(acptr) && !EmptyString(cli_user(acptr)->swhois))
  {
    ircd_snprintf(0, outbuf, sizeof(outbuf), " SWHOIS:: %s", cli_user(acptr)->swhois);
    send_reply(sptr, RPL_DATASTR, outbuf);
  }

  if (cli_webirc(acptr))
  {
    if (strlen(cli_webirc(acptr)) > 0)
    {
      ircd_snprintf(0, outbuf, sizeof(outbuf), " WebIRC:: %s", cli_webirc(acptr));
      send_reply(sptr, RPL_DATASTR, outbuf);
    }
  }

  if (cli_sslclifp(acptr) && (strlen(cli_sslclifp(acptr)) > 0))
  {
    ircd_snprintf(0, outbuf, sizeof(outbuf), "SSL Fingerprint:: %s", cli_sslclifp(acptr));
    send_reply(sptr, RPL_DATASTR, outbuf);
  }

  /* NOTE(review): despite the name, this get_eflags() is an ircd helper
     taking (sptr, acptr) — presumably it reports the client's flags to
     the oper; unrelated to the CPU EFLAGS helpers elsewhere. */
  if (MyUser(acptr))
    get_eflags(sptr, acptr);

  /* +s (SERV_NOTICE) is not relayed to us from remote servers,
   * so we cannot tell if a remote client has that mode set.
   * And hacking it onto the end of the output of umode_str is EVIL BAD AND WRONG
   * (and breaks if the user is +r) so we won't do that either.
   */
  if (strlen(umode_str(acptr)) < 1)
    strcpy(outbuf, " Umode(s):: <none>");
  else
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Umode(s):: +%s", umode_str(acptr));
  send_reply(sptr, RPL_DATASTR, outbuf);

  if (acptr->cli_user->joined == 0)
    send_reply(sptr, RPL_DATASTR, " Channel(s):: <none>");
  else if (acptr->cli_user->joined > 50)
  {
    /* NB. As a sanity check, we DO NOT show the individual channels the
     * client is on if it is on > 50 channels.  This is to prevent the ircd
     * barfing ala Uworld when someone does /quote check Q :)..
     * (I shouldn't imagine an Oper would want to see every single channel
     * 'x' client is on anyway if they are on *that* many).
     */
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Channel(s):: - (total: %u)", acptr->cli_user->joined);
    send_reply(sptr, RPL_DATASTR, outbuf);
  }
  else
  {
    /* Accumulate channel names (with status prefixes) into chntext,
       flushing a line whenever the next name would overflow. */
    char chntext[BUFSIZE];
    int len = strlen(" Channel(s):: ");
    int mlen = strlen(me.cli_name) + len + strlen(sptr->cli_name);
    *chntext = '\0';

    strcpy(chntext, " Channel(s):: ");
    for (lp = acptr->cli_user->channel; lp; lp = lp->next_channel)
    {
      chptr = lp->channel;
      if (len + strlen(chptr->chname) + mlen > BUFSIZE - 5)
      {
        send_reply(sptr, RPL_DATASTR, chntext);
        *chntext = '\0';
        strcpy(chntext, " Channel(s):: ");
        len = strlen(chntext);
      }
      /* Status prefixes: - deaf, @ op, % halfop, * hidden-from, ! zombie. */
      if (IsDeaf(acptr))
        *(chntext + len++) = '-';
      if (is_chan_op(acptr, chptr))
        *(chntext + len++) = '@';
      if (is_half_op(acptr, chptr))
        *(chntext + len++) = '%';
      if (IsOper(sptr) && !ShowChannel(sptr,chptr))
        *(chntext + len++) = '*';
      if (IsZombie(lp))
        *(chntext + len++) = '!';
      if (len)
        *(chntext + len) = '\0';
      strcpy(chntext + len, chptr->chname);
      len += strlen(chptr->chname);
      strcat(chntext + len, " ");
      len++;
    }
    if (chntext[0] != '\0')
      send_reply(sptr, RPL_DATASTR, chntext);
  }

  /* If client processing command ISN'T target (or a registered
   * Network Service), show idle time since the last time we
   * parsed something.
   */
  /* NOTE(review): `!(IsService(acptr) == -1)` is a strange predicate
     (most Is* macros yield 0/nonzero, never -1), and the first format
     specifier (%d) receives a time_t-derived value — both worth
     confirming against this ircd's macro/typedef definitions. */
  if (MyUser(acptr) && !(IsService(acptr) == -1) && !(strCasediff(acptr->cli_name, sptr->cli_name) == 0))
  {
    nowr = CurrentTime - acptr->cli_user->last;
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Idle for:: %d days, %02ld:%02ld:%02ld",
        nowr / 86400, (nowr / 3600) % 24, (nowr / 60) % 60, nowr % 60);
    send_reply(sptr, RPL_DATASTR, outbuf);
  }

  /* Away message (if applicable) */
  if (acptr->cli_user->away)
  {
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Away message:: %s", acptr->cli_user->away);
    send_reply(sptr, RPL_DATASTR, outbuf);
  }

  /* If local user..
   */
  if (MyUser(acptr))
  {
    send_reply(sptr, RPL_DATASTR, " ");
    ircd_snprintf(0, outbuf, sizeof(outbuf), " Ports:: %d -> %d (client -> server)",
        cli_port(acptr), cli_listener(acptr)->port);
    send_reply(sptr, RPL_DATASTR, outbuf);
    if (feature_bool(FEAT_CHECK_EXTENDED))
    {
      /* Note: sendq = receiveq for a client (it makes sense really) */
      /* "Data sent" is counted from the server's receive side: what the
         client sent to us, and vice versa. */
      ircd_snprintf(0, outbuf, sizeof(outbuf), " Data sent:: %u.%0.3u Kb (%u protocol messages)",
          cli_receiveK(acptr), cli_receiveB(acptr), cli_receiveM(acptr));
      send_reply(sptr, RPL_DATASTR, outbuf);
      ircd_snprintf(0, outbuf, sizeof(outbuf), " Data received:: %u.%0.3u Kb (%u protocol messages)",
          cli_sendK(acptr), cli_sendB(acptr), cli_sendM(acptr));
      send_reply(sptr, RPL_DATASTR, outbuf);
      ircd_snprintf(0, outbuf, sizeof(outbuf), " receiveQ size:: %d bytes (max. %d bytes)",
          DBufLength(&(cli_recvQ(acptr))), feature_int(FEAT_CLIENT_FLOOD));
      send_reply(sptr, RPL_DATASTR, outbuf);
      ircd_snprintf(0, outbuf, sizeof(outbuf), " sendQ size:: %d bytes (max. %d bytes)",
          DBufLength(&(cli_sendQ(acptr))), get_sendq(acptr));
      send_reply(sptr, RPL_DATASTR, outbuf);
    }
  }

  /* Send 'END OF CHECK' message */
  send_reply(sptr, RPL_ENDOFCHECK, " ");
}
/*
 * Return the current interrupt enable flag.
 * Nonzero (the raw EFL_IF bit) when interrupts are enabled, 0 otherwise.
 */
inline int
osenv_intr_enabled(void)
{
    int flags = get_eflags();

    return flags & EFL_IF;
}