/*
 * Destroy a mailbox: free its kernel-owned buffer, invalidate it, and
 * wake every task blocked on it so they can observe the teardown.
 * mbox: pointer to mail box; NULL or an already-invalid mailbox is ignored.
 */
void mbox_destroy(mbox_t *mbox)
{
	struct dtask *t;
	uint32_t f;

	if(mbox && (mbox->flag & IPC_FLAG_VALID))
	{
		SYS_FSAVE(f); /* mask interrupts: teardown must be atomic */

		/* free the message buffer only when the kernel allocated it */
		if(mbox->flag & IPC_FLAG_FREEMEM)
			free(mbox->buf.data);

#ifdef INCLUDE_JOURNAL
		journal_ipc_destroy((ipc_t *)mbox);
#endif

#ifdef INCLUDE_PMCOUNTER
		PMC_PEG_COUNTER(PMC_sys32_counter[PMC_U32_nMbox], -1);
#endif
		/* clearing the flags drops IPC_FLAG_VALID, so late callers
		 * see a dead mailbox */
		mbox->flag = 0;

		/* wake up all tasks in mbox's block task queue */
		while((t = blockq_select(&mbox->taskq)))
		{
			task_wakeup_noschedule(TASK_T(t));
		}
		task_schedule();

		/* without IPC_FLAG_FREEMEM, mbox->buf.data is left to be freed by user */
		SYS_FRESTORE(f);
	}
}
/*
 * FreeCommBuffData - detach the tag bound to `commbuff` from its hash
 * bucket and push it onto the released list, then kick the release task
 * to perform the deferred cleanup.
 * The bucket walk runs with interrupts disabled because the tag lists
 * are presumably shared with interrupt context — TODO confirm.
 */
void FreeCommBuffData(COMMBUFF *commbuff)
{
	INTERRUPT_STATE state;
	COMMBUFFTAG **tags;
	COMMBUFFTAG *freetag;
	int32 index;

	index = GET_COMMBUFF_TAG_INDEX(commbuff);
	tags = &commbuff_tags[index];

	disable_interrupts(state);
	while (*tags) {
		if ((*tags)->commbuff == commbuff) {
			/* unlink the matching tag via the pointer-to-pointer
			 * cursor (no separate `prev` needed) */
			freetag = *tags;
			*tags = freetag->next_tag;

			/* hand it to the release worker's list */
			freetag->next_tag = released_tagged_commbuffs;
			released_tagged_commbuffs = freetag;

			enable_interrupts(state);

			task_schedule(&tagged_commbuff_release_task.releasetask);
			return;
		}

		tags = &(*tags)->next_tag;
	}
	enable_interrupts(state); /* no tag registered for this commbuff */
}
/*
 * sys_pause - system call: suspend the calling thread and switch away.
 * The syscall result register (CPU_ARG0) is set to 0 *before* the
 * context switch so the thread reads success when it is later resumed.
 * cpu: in/out pointer to the saved CPU state; on return it points to
 *      the state of the next task chosen by task_schedule().
 */
void sys_pause(struct cpu_state **cpu)
{
	thread_suspend(current_thread);
	(*cpu)->CPU_ARG0 = 0;
	*cpu = (struct cpu_state *)task_schedule(*cpu);
}
void task_sleep(unsigned long delay) { timer_t *timer; dbg("start sleep ...\n"); #ifdef DEBUG scheduler->dump(); #endif timer = (timer_t *)kmalloc(sizeof(*timer)); if (NULL == timer) { error("%s: alloc timer failled\n"); } init_timer_value(timer); enter_critical_section(); oneshot_timer_add(timer, delay, (timer_function)task_sleep_function, (void *)current_task); current_task->state = SLEEPING; scheduler->dequeue_task(current_task, 0); task_schedule(); exit_critical_section(); }
/*
 * TransmitQueryInterface assembles a control query interface packet and initializes the transmit procedure
 *
 * Params:
 *    acktype specifies host or client as well as failure or success
 *    host_interface is the interface which belongs to the host of the query
 *    result is the resultant interface id which is relevent only when acktype is host_end(SUCCESS)
 *    name is the optional NUL-terminated interface name copied into the packet
 *    mux is the MUX object the data is to be transmitted through
 */
int32 TransmitQueryInterface (int8 acktype, int8 host_interface, int8 result, sint8* name, MUX* mux)
{
	COMMBUFF* commbuff;
	QUERYINTERFACE_PACKET* packetdata;
	int32 namesize;

	DEBUG("TransmitQueryInterface(%lu, %lu, %lu, %p, %p)\n", (int32)acktype, (int32)host_interface, (int32)result, name, mux);

	/* the copied size includes the terminating NUL when a name is given */
	if(name)
		namesize = strlen((char*)name)+1;
	else
		namesize = 0;

	/* reject names that do not fit the fixed-size packet field */
	if(namesize > PACKET_MAXNAME_LENGTH)
		return DEBUGERROR(ERROR_INVALIDPARAMETER);

	commbuff = int_alloc_commbuff(sizeof(QUERYINTERFACE_PACKET));

	packetdata = (QUERYINTERFACE_PACKET*)commbuff_data(commbuff);
	packetdata->type = PACKETTYPE_QUERYINTERFACE;

	commbuff_copyin_byte(commbuff, offsetof(QUERYINTERFACE_PACKET, acktype), acktype);
	commbuff_copyin_byte(commbuff, offsetof(QUERYINTERFACE_PACKET, host_interface), host_interface);
	commbuff_copyin_byte(commbuff, offsetof(QUERYINTERFACE_PACKET, result), result);
	commbuff_copyin(commbuff, offsetof(QUERYINTERFACE_PACKET, name), name, namesize);

	/* queue the packet and kick the MUX transmit task */
	queue_commbuff(commbuff, &mux->send_queue);
	mux->total_queued_amount += sizeof(QUERYINTERFACE_PACKET);

	task_schedule(&mux->send_task);

	return DEBUGERROR(ERROR_NONE);
}
/*
 * TransmitCredit assembles a control credit packet and initializes the transmit procedure
 *
 * Params:
 *    acktype specifies host or client as well as failure or success
 *    channel is the channel the credit should be applied to
 *    bytecredit is the amount the byte credit should be increased
 *    sendcredit is the amount the send credit should be increased
 *    mux is the MUX object the data is to be transmitted through
 */
int32 TransmitCredit (int8 acktype, int8 channel, int32 bytecredit, int32 sendcredit, MUX* mux)
{
	COMMBUFF* commbuff;
	CREDIT_PACKET* packetdata;

	DEBUG( "TransmitCredit(%lu, %lu, %lu, 0x%lu, %p)\n", (int32)acktype, (int32)channel, (int32)bytecredit, (int32)sendcredit, mux );

	commbuff = int_alloc_commbuff(sizeof(CREDIT_PACKET));

	/* presumably converts the credits to wire byte order before they
	 * are copied into the packet — TODO confirm convert_dword() */
	convert_dword(bytecredit);
	convert_dword(sendcredit);

	packetdata = (CREDIT_PACKET*)commbuff_data(commbuff);
	packetdata->type = PACKETTYPE_CREDIT;

	commbuff_copyin_byte(commbuff, offsetof(CREDIT_PACKET, acktype), acktype);
	commbuff_copyin_byte(commbuff, offsetof(CREDIT_PACKET, channel), channel);
	commbuff_copyin_dword(commbuff, offsetof(CREDIT_PACKET, bytecredit), bytecredit);
	commbuff_copyin_dword(commbuff, offsetof(CREDIT_PACKET, sendcredit), sendcredit);

	/* queue the packet and kick the MUX transmit task */
	queue_commbuff(commbuff, &mux->send_queue);
	mux->total_queued_amount += sizeof(CREDIT_PACKET);

	task_schedule(&mux->send_task);

	return DEBUGERROR(ERROR_NONE);
}
/*
 * TransmitDisableChannel assembles a control disable channel packet and initializes the transmit procedure
 *
 * Params:
 *    acktype specifies host or client as well as failure or success
 *    channel is the channel to be disabled
 *    host_interface is the interface which belongs to the host and is requesting the disable
 *    client_interface is the interface which belongs to the client and is responding to the disable
 *    mux is the MUX object the data is to be transmitted through
 */
int32 TransmitDisableChannel (int8 acktype, int8 channel, int8 host_interface, int8 client_interface, MUX* mux)
{
	COMMBUFF* commbuff;
	DISABLECHANNEL_PACKET* packetdata;

	DEBUG("TransmitDisableChannel(%lu, %lu, %lu, %lu, %p)\n", (int32)acktype, (int32)channel, (int32)host_interface, (int32)client_interface, mux);

	commbuff = int_alloc_commbuff(sizeof(DISABLECHANNEL_PACKET));

	packetdata = (DISABLECHANNEL_PACKET*)commbuff_data(commbuff);
	packetdata->type = PACKETTYPE_DISABLECHANNEL;

	commbuff_copyin_byte(commbuff, offsetof(DISABLECHANNEL_PACKET, acktype), acktype);
	commbuff_copyin_byte(commbuff, offsetof(DISABLECHANNEL_PACKET, channel), channel);
	commbuff_copyin_byte(commbuff, offsetof(DISABLECHANNEL_PACKET, host_interface), host_interface);
	commbuff_copyin_byte(commbuff, offsetof(DISABLECHANNEL_PACKET, client_interface), client_interface);

	/* queue the packet and kick the MUX transmit task */
	queue_commbuff(commbuff, &mux->send_queue);
	mux->total_queued_amount += sizeof(DISABLECHANNEL_PACKET);

	task_schedule(&mux->send_task);

	return DEBUGERROR(ERROR_NONE);
}
/*
 * schedule_timeout - block the current task for `timeout` ticks.
 * MAX_SCHEDULE_TIMEOUT removes the task from the run queue with no
 * timer, i.e. blocks indefinitely. Negative timeouts are rejected and
 * the task stays RUNNING.
 * Returns the remaining number of ticks (0 when the full time elapsed).
 */
signed long schedule_timeout(signed long timeout)
{
	timer_t *timer; /* NOTE(review): unused in this function */
	unsigned long expire;

	switch (timeout) {
	case MAX_SCHEDULE_TIMEOUT:
		current_task->state = SLEEPING;
		scheduler->dequeue_task(current_task, 0);
		task_schedule();
		goto out;
	default:
		if (timeout < 0) {
			printk("schedule_timeout: wrong timeout "
			       "value %lx\n", timeout);
			current_task->state = RUNNING;
			goto out;
		}
	}

	expire = timeout + current_time();
	/* NOTE(review): task_sleep() is handed the absolute expiry time
	 * rather than the relative `timeout` — confirm task_sleep()
	 * expects an absolute deadline; otherwise this oversleeps. */
	task_sleep(expire);

	timeout = expire - current_time();

out:
	return timeout < 0 ? 0 : timeout;
}
/*
 * event_send - deliver event bits `_sent` to task `_handle`.
 * The bits are OR-ed into the receiver's pending set. If the receiver
 * is currently waiting on events (event_option_ non-zero) and its wait
 * condition is now satisfied (ALL of, or ANY of, event_expected_), the
 * receiver is made READY and the scheduler is invoked.
 * Returns 0 on success, ERROR_EVENT_RECV_INVRECEIVER for a bad handle.
 */
error_t event_send (task_handle_t _handle, event_set_t _sent)
{
	interrupt_level_t level;
	bool wakeup_needed = false;

	level = global_interrupt_disable ();
	if (is_invalid_task (_handle)) {
		global_interrupt_enable (level);
		return ERROR_T (ERROR_EVENT_RECV_INVRECEIVER);
	}

	_handle->event_received_ |= _sent;

	/* option 0 means the receiver is not waiting on events right now */
	if (0 == (int)_handle->event_option_) {
		global_interrupt_enable (level);
		return 0;
	}

	if (EVENT_WAIT_ALL == (_handle->event_option_ & EVENT_WAIT_ALL)) {
		/* wake only when every expected bit has arrived */
		if ((_handle->event_expected_ & _handle->event_received_) == _handle->event_expected_) {
			wakeup_needed = true;
		}
	}
	else {
		/* EVENT_WAIT_ANY: one matching bit suffices */
		if ((_handle->event_expected_ & _handle->event_received_) != 0) {
			wakeup_needed = true;
		}
	}

	if (!wakeup_needed) {
		global_interrupt_enable (level);
		return 0;
	}

	(void) task_state_change (_handle, TASK_STATE_READY);
	global_interrupt_enable (level);
	task_schedule (null);
	return 0;
}
/*
 * TransmitChannelSignal transmits a four int8 value to the channel
 *
 * Params:
 *    acktype specifies host or client as well as failure or success
 *    channel is the channel to be signaled
 *    signal is the value to be delivered
 *    mux is the MUX object the data is to be transmitted through
 */
int32 TransmitChannelSignal(int8 acktype, int8 channel, int32 signal, MUX *mux)
{
	COMMBUFF *commbuff;
	CHANNELSIGNAL_PACKET *packetdata;

	DEBUG("TransmitChannelSignal(%lu, %lu, %lu, %p)\n", (int32) acktype, (int32) channel, signal, mux);

	commbuff = int_alloc_commbuff(sizeof(CHANNELSIGNAL_PACKET));

	packetdata = (CHANNELSIGNAL_PACKET *) commbuff_data(commbuff);
	packetdata->type = PACKETTYPE_CHANNELSIGNAL;

	commbuff_copyin_byte(commbuff, offsetof(CHANNELSIGNAL_PACKET, acktype), acktype);
	commbuff_copyin_byte(commbuff, offsetof(CHANNELSIGNAL_PACKET, channel), channel);
	commbuff_copyin_dword(commbuff, offsetof(CHANNELSIGNAL_PACKET, signal), signal);

	/* queue the packet and kick the MUX transmit task */
	queue_commbuff(commbuff, &mux->send_queue);
	mux->total_queued_amount += sizeof(CHANNELSIGNAL_PACKET);

	task_schedule(&mux->send_task);

	return DEBUGERROR(ERROR_NONE);
}
/*-----------------------------------------------------------------------*/
/*
 * Wake up task `t` and immediately run the scheduler so the newly
 * runnable task can preempt the current one if appropriate.
 * Interrupts are masked across the wakeup + reschedule pair.
 */
void task_wakeup(task_t t)
{
	uint32_t flags;

	SYS_FSAVE(flags);
	task_wakeup_noschedule(t);
	task_schedule();
	SYS_FRESTORE(flags);
}
/*-----------------------------------------------------------------------*/
/*
 * Resume a previously suspended task and reschedule at once.
 * Task ids outside the valid range are silently ignored.
 */
void task_resume(task_t t)
{
	if (t < MAX_TASK_NUMBER) {
		task_resume_noschedule(t);
		task_schedule();
	}
}
/*-----------------------------------------------------------------------*/
/*
 * Block the current task on the given block queue and switch away.
 * Interrupts are masked around the queue insertion and the resulting
 * reschedule.
 */
void task_block(blockq_t * tqueue)
{
	uint32_t flags;

	SYS_FSAVE(flags);
	task_block_noschedule(tqueue);
	task_schedule();
	SYS_FRESTORE(flags);
}
/*
 * main - firmware entry point: bring up the hardware (delay, power,
 * display, timers, keys, ADC/PWM/PID, RTC), run system init, then spin
 * forever driving the cooperative scheduler.
 */
int main(void)
{
	// Hardware Init
	delay_init();   // delay function init (comment translated from Chinese)
	pwr_init();
#ifdef YANMING3
	charger_init();
	if (check_standby_flag() == SUCCESS && check_charging() != CHARGING) {
		Key_GPIO_Config();
		exti_key_init();
#ifdef DEBUG_POWER_OFF_WAKE_UP
		Screen_Init();
		OLED_Clear();
		OLED_Display_On();
		draw_about_mesage();
#endif
		check_standby_wakeup_button_press();
		// If we boot up from standby by pressing 5 times, the system will reboot again without
		// this code block.
	}
#endif

	// OLED Init
	Screen_Init();
	OLED_Clear();
	// low_switch_power_init();
	TIMx_Int_DeInit();
	EXTIX_DeInit();

	// Key
	Key_GPIO_Config();

	// PID related code
	ADC1_Configuration();
	PWM_Configuration();
	VoltagePID_Init();
	TIMx_Int_Init();
	rtc_init();

	// Our Init
	system_init();

#ifndef YANMING3
	//iwdg_init();
#endif

	/* cooperative scheduler loop — never exits */
	while(1) {
#ifndef YANMING3
		//iwdg_feed();
#endif
		task_schedule();
	}
}
/*
 * Transition `task` toward sleep and invoke the scheduler.
 * A task already SLEEPING is a fatal error; a task that was woken
 * before it managed to sleep (WAKEN_BEFORE_SLEEP) goes back to
 * RUNNING instead of sleeping; a RUNNING task becomes SLEEPING.
 */
void task_sleep(struct Task* task)
{
	switch (task->state) {
	case SLEEPING:
		panic("SLEEPING SLEEPING");
		break;
	case WAKEN_BEFORE_SLEEP:
		/* a wakeup raced ahead of us: cancel the sleep */
		task->state = RUNNING;
		break;
	case RUNNING:
		task->state = SLEEPING;
		break;
	default:
		break;
	}

	task_schedule();
}
/*
 * Mark `task` runnable, put it back on the scheduler's run queue and
 * trigger a reschedule. Always returns 0.
 */
static int try_to_wake_up(task_t *task)
{
	task->state = READY;

	enter_critical_section();
	scheduler->enqueue_task(task, 0);
	task_schedule();
	exit_critical_section();

	return 0;
}
/*
 * kmain - kernel entry point: initialize arch, platform, tasking,
 * workqueues and the FAT filesystem, create the shell task, then drop
 * into the idle loop (halt until interrupt, then reschedule).
 */
void kmain(void)
{
	task_t *task_shell;
	int ret;

	/*************** Init Arch ****************/
	arch_early_init();
	show_logo();

	/*************** Init Platform ****************/
	platform_init();
	timer_init();
	buses_init();

	/*************** Init Task ****************/
	task_init();
	task_create_init();

	/*************** Init Workqueue ****************/
	init_workqueues();

	/*************** Init File System ****************/
	register_filesystem(&fat_fs);

	/*************** Creating Shell TASK ****************/
	task_shell = task_alloc("shell", 0x2000, 5);
	if (NULL == task_shell) {
		return;
	}

	ret = task_create(task_shell, init_shell, 0);
	if (ret) {
		printk("Create init shell task failed\n");
	}

	sema_init(&sem, 1);

	arch_enable_ints();

	/* idle loop: sleep the CPU until an interrupt, then reschedule */
	while(1) {
		enter_critical_section();
		arch_idle();
		task_schedule();
		exit_critical_section();
	}

	/* NOTE(review): unreachable — the idle loop above never exits */
	task_free(task_shell);
}
/*
 * task_exit - terminate the current task with `retcode`.
 * Marks the task EXITED, stores the return code, removes it from the
 * run queue and switches to another task.
 * NOTE(review): the critical section is entered but never exited —
 * assumes task_schedule() does not return for an EXITED task; confirm.
 */
void task_exit(int retcode)
{
	enter_critical_section();

	current_task->state = EXITED;
	current_task->ret = retcode;
#ifdef DEBUG
	scheduler->dump();
#endif
	scheduler->dequeue_task(current_task, 0);
	task_schedule();
}
/*-----------------------------------------------------------------------*/
/*
 * task_yield - voluntarily give up the CPU.
 * A READY current task is moved to the tail of the system ready queue
 * (dequeue + enqueue) and the scheduler is invoked so an equal-priority
 * peer can run. Interrupts are masked across the queue manipulation.
 */
void task_yield()
{
	uint32_t f;
	struct dtask *task;

	SYS_FSAVE(f);
	task = current;
	if(task->state == TASK_STATE_READY)
	{
		readyq_dequeue(&sysready_queue, task);
		readyq_enqueue(&sysready_queue, task);
		task_schedule();
	}
	SYS_FRESTORE(f);
}
/*
 * Suspend the current task so that other tasks can run.
 * Interrupts are disabled across the requeue + reschedule so the
 * active list is never observed in a half-updated state.
 */
void task_yield ()
{
	arch_state_t x;

	arch_intr_disable (&x);

	/* Enqueue always puts element at the tail of the list. */
	list_append (&task_active, &task_current->item);

	/* Scheduler selects the first task.
	 * If there are several tasks with equal priority,
	 * this will allow them to run round-robin. */
	task_schedule ();

	arch_intr_restore (x);
}
/*
 * kernel - bring up the cooperative kernel: create the idle task, arm
 * the watchdog, run user-specific init, then hand control to the
 * scheduler. task_schedule() is not expected to return; the trailing
 * loop is a safety net.
 */
STATIC void kernel()
{
	//Create an idle-task
	task_insert(&idletask, idle, Low);

	//enable watchdog (2 second timeout)
	wdt_enable(WDTO_2S);

	//init user specific stuff
	init_user_environment();

	//start scheduler
	task_schedule();

	while(1) {
		/* this should never been reached */
	}
}
/*
 * mutex_group_lockwaiting - wait until either mutex `m` is acquired or
 * any mutex in group `g` is signalled.
 * Returns 1/true when the mutex was taken (including a recursive
 * re-take); returns false when a group member was signalled, storing
 * the signalled lock and message through lock_ptr/msg_ptr.
 * `m` may be NULL, in which case only the group is waited on.
 * Runs with interrupts disabled; they are restored on every exit path.
 */
bool_t mutex_group_lockwaiting (mutex_t *m, mutex_group_t *g, mutex_t **lock_ptr, void **msg_ptr)
{
	if (mutex_recurcived_lock(m))
		return 1;

	arch_state_t x;
	struct mg_signal_ctx signaler = {g, lock_ptr, msg_ptr};

	arch_intr_disable (&x);
	assert_task_good_stack(task_current);
	assert (task_current->wait == 0);
	gr_assert_good(g);
	assert (g->num > 0);

	/* lazily initialize the mutex on first use */
	if (m != NULL)
		if (! m->item.next)
			mutex_init (m);

	for (;;) {
		if (m != NULL)
			if (mutex_trylock_in(m)) {
				arch_intr_restore (x);
				return true;
			}

		if (mutex_group_signaled(&signaler)) {
			arch_intr_restore (x);
			return false;
		}

		if (m != NULL)
			mutex_slaved_yield(m);
		else {
			/* Suspend the task. */
			list_unlink (&task_current->item);
			task_schedule ();
		}
	}
}
/*
 * Spin-acquire the lock word pointed to by `mutex`.
 * The test-and-set runs with interrupts disabled (pushf/cli ... popf);
 * if the lock is held, the task yields via task_schedule() and retries.
 */
void lock(int* mutex)
{
	int got_it = 0;

	while (!got_it) {
		asm("pushf");
		asm("cli");
		if (*mutex == 0) {
			*mutex = 1;
			got_it = 1;
		}
		asm("popf");

		/* lock still held by someone else: let other tasks run */
		if (!got_it)
			task_schedule();
	}
}
/*
 * TransmitDisableMUX builds a control disable-mux packet and starts the
 * transmit procedure.
 *
 * Params:
 *    acktype specifies host or client as well as failure or success
 *    mux is the MUX object the data is to be transmitted through
 */
int32 TransmitDisableMUX (int8 acktype, MUX* mux)
{
	COMMBUFF* buff;
	DISABLEMUX_PACKET* packet;

	DEBUG("TransmitDisableMUX(%lu, %p)\n", (int32)acktype, mux);

	/* build the packet in a freshly allocated commbuff */
	buff = int_alloc_commbuff(sizeof(DISABLEMUX_PACKET));
	packet = (DISABLEMUX_PACKET*)commbuff_data(buff);
	packet->type = PACKETTYPE_DISABLEMUX;
	commbuff_copyin_byte(buff, offsetof(DISABLEMUX_PACKET, acktype), acktype);

	/* queue it and kick the MUX transmit task */
	queue_commbuff(buff, &mux->send_queue);
	mux->total_queued_amount += sizeof(DISABLEMUX_PACKET);
	task_schedule(&mux->send_task);

	return DEBUGERROR(ERROR_NONE);
}
// TODO: To standard this scenario void start_dooloo(void) { interrupt_disable(); os_hw_init(); kprintf("\n\nProbability core Version %s\n", OS_VERSION); kprintf("Author: Puhui Xiong ([email protected]) \n\n"); /* trace irq stack */ _irq_stack_start[0] = _irq_stack_start[1] = '#'; memory_inita(); task_init(); kprintf(" Task subsystem inited.\n"); ipc_init(); kprintf(" IPC subsystem inited.\n"); memory_initb(); kprintf(" Memory subsystem inited.\n"); idletask_init(); root_task_init(); kprintf(" Create task tidle and troot.\n"); task_set_schedule_hook(default_sched_hook); /* set current to an invalid tcb, then begin to schedule */ current = &systask[0]; kprintf(" Ready to first scheduling, go!!\n\n"); /* before schedule, we have to enable interrupt for pendsv() used */ interrupt_enable(); task_schedule(); kprintf("\nSystem halted.\n"); while(1); }
/*
 * Exit the current task and switch to a new task.
 * Removes the task from whichever queue holds it (ready or block),
 * cancels any pending delay, marks it DEAD, frees its kernel-allocated
 * stack, then reschedules (never returns).
 * Warning: Don't exit the current task if you are in a critical region.
 */
void task_exit()
{
	uint32_t f;
	struct dtask *task;

	SYS_FSAVE(f);
	task = current;

	/* remove task from it's queue */
	if(task->state == TASK_STATE_READY)
		readyq_dequeue(&sysready_queue, task);
	else if(task->state == TASK_STATE_BLOCK)
		blockq_dequeue(task->taskq, task);

	/* remove task from delay queue */
	if(task->flags & TASK_FLAGS_DELAYING)
		task_undelay(TASK_T(task));

	task->state = TASK_STATE_DEAD;
	SYS_FRESTORE(f);

#ifdef INCLUDE_JOURNAL
	journal_task_exit(task);
#endif
#ifdef INCLUDE_PMCOUNTER
	PMC_PEG_COUNTER(PMC_sys32_counter[PMC_U32_nTask],-1);
#endif

	/* free task's stack
	 * NOTE(review): the stack being freed is still in use until the
	 * switch below — relies on nothing reallocating it before
	 * task_schedule() completes; confirm. */
	if(!(task->flags & TASK_FLAGS_STATICSTACK))
		free((void *)task->stack_base);

	/* switch to new task */
	task_schedule();
}
/*
 * Wait for the signal on any lock in the group.
 * The calling task is blocked until the mutex_signal().
 * Returns the lock and the signalled message through
 * lock_ptr/msg_ptr. Runs with interrupts disabled; they are restored
 * before returning.
 */
void mutex_group_wait (mutex_group_t *g, mutex_t **lock_ptr, void **msg_ptr)
{
	arch_state_t x;
	struct mg_signal_ctx signaler = {g, lock_ptr, msg_ptr};

	arch_intr_disable (&x);
	assert_task_good_stack(task_current);
	assert (task_current->wait == 0);
	gr_assert_good(g);
	assert (g->num > 0);

	for (;;) {
		/* a pending signal satisfies the wait immediately */
		if (mutex_group_signaled(&signaler)) {
			arch_intr_restore (x);
			return;
		}

		/* Suspend the task. */
		list_unlink (&task_current->item);
		g->waiter = task_current;
		task_schedule ();
	}
}
/*
 * event_receive - wait for event bits.
 * _expected: the bit set waited for (0 returns immediately with 0).
 * _received: out-pointer for the bits actually delivered.
 * _timeout:  wait limit handed to the scheduler via p_task->timeout_.
 * _option:   exactly one of EVENT_WAIT_ALL/EVENT_WAIT_ANY plus exactly
 *            one of EVENT_RETURN_ALL/EVENT_RETURN_EXPECTED.
 * Returns 0 on success, or an ERROR_EVENT_RECV_* code (invalid context,
 * pointer, option, or timeout). Not callable from interrupt context.
 */
error_t event_receive (event_set_t _expected, event_set_handle_t _received, msecond_t _timeout, event_option_t _option)
{
	interrupt_level_t level;
	/* the two option groups: exactly one bit of each must be set */
	event_option_t wait_option = EVENT_WAIT_ALL | EVENT_WAIT_ANY;
	event_option_t return_option = EVENT_RETURN_ALL | EVENT_RETURN_EXPECTED;
	task_handle_t p_task;

	if (is_in_interrupt ()) {
		return ERROR_T (ERROR_EVENT_RECV_INVCONTEXT);
	}
	if (0 == _expected) {
		return 0;
	}
	if (null == _received) {
		return ERROR_T (ERROR_EVENT_RECV_INVPTR);
	}
	/* reject both-bits-set and neither-bit-set for each option group */
	if ((wait_option == (wait_option & _option)) || (0 == (int)(wait_option & _option)) ||
	    (return_option == (return_option & _option)) || (0 == (int)(return_option & _option))) {
		return ERROR_T (ERROR_EVENT_RECV_INVOPT);
	}

	level = global_interrupt_disable ();
	p_task = task_self ();
	if (is_invalid_task (p_task)) {
		global_interrupt_enable (level);
		return ERROR_T (ERROR_EVENT_RECV_INVCONTEXT);
	}

again:
	// if expected event(s) is/are available, return it
	if (EVENT_WAIT_ALL == (_option & EVENT_WAIT_ALL)) {
		if ((_expected & p_task->event_received_) == _expected) {
			if (EVENT_RETURN_ALL == (_option & EVENT_RETURN_ALL)) {
				*_received = p_task->event_received_;
			}
			else {
				/* return only the expected bits and consume them */
				*_received = _expected;
				p_task->event_received_ &= ~(*_received);
			}
			global_interrupt_enable (level);
			return 0;
		}
	}
	else {
		if ((_expected & p_task->event_received_) != 0) {
			if (EVENT_RETURN_ALL == (_option & EVENT_RETURN_ALL)) {
				*_received = p_task->event_received_;
			}
			else {
				/* return only the matching bits and consume them */
				*_received = _expected & p_task->event_received_;
				p_task->event_received_ &= ~(*_received);
			}
			global_interrupt_enable (level);
			return 0;
		}
	}

	// run here it means we need to block the task
	p_task->timeout_ = _timeout;
	(void) task_state_change (p_task, TASK_STATE_WAITING);
	p_task->ecode_ = 0;
	p_task->event_expected_ = _expected;
	p_task->event_option_ = _option;
	global_interrupt_enable (level);
	task_schedule (null);

	level = global_interrupt_disable ();
	p_task->event_option_ = (event_option_t) 0;
	if (0 == p_task->ecode_) {
		// event(s) has/have received and need to return the event(s) expected
		goto again;
	}
	global_interrupt_enable (level);
	//lint -e{650}
	if (ERROR_TASK_WAIT_TIMEOUT == MODULE_ERROR (p_task->ecode_)) {
		p_task->ecode_ = ERROR_T (ERROR_EVENT_RECV_TIMEOUT);
	}
	return p_task->ecode_;
}
/* This function is called on a read event from a listening socket, corresponding
 * to an accept. It tries to accept as many connections as possible, and for each
 * calls the listener's accept handler (generally the frontend's accept handler).
 * Rate/FD/memory limits put the listener in a wait queue and schedule a
 * retry via task_schedule(); the return value is always 0.
 */
int stream_sock_accept(int fd)
{
	struct listener *l = fdtab[fd].owner;
	struct proxy *p = l->frontend;
	int max_accept = global.tune.maxaccept;
	int cfd;
	int ret;

	if (unlikely(l->nbconn >= l->maxconn)) {
		listener_full(l);
		return 0;
	}

	/* global connections-per-second limit */
	if (global.cps_lim && !(l->options & LI_O_UNLIMITED)) {
		int max = freq_ctr_remain(&global.conn_per_sec, global.cps_lim, 0);
		if (unlikely(!max)) {
			/* frontend accept rate limit was reached */
			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, next_event_delay(&global.conn_per_sec, global.cps_lim, 0)));
			return 0;
		}
		if (max_accept > max)
			max_accept = max;
	}

	/* per-frontend sessions-per-second limit */
	if (p && p->fe_sps_lim) {
		int max = freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0);
		if (unlikely(!max)) {
			/* frontend accept rate limit was reached */
			limit_listener(l, &p->listener_queue);
			task_schedule(p->task, tick_add(now_ms, next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0)));
			return 0;
		}
		if (max_accept > max)
			max_accept = max;
	}

	/* Note: if we fail to allocate a connection because of configured
	 * limits, we'll schedule a new attempt worst 1 second later in the
	 * worst case. If we fail due to system limits or temporary resource
	 * shortage, we try again 100ms later in the worst case.
	 */
	while (max_accept--) {
		struct sockaddr_storage addr;
		socklen_t laddr = sizeof(addr);

		if (unlikely(actconn >= global.maxconn) && !(l->options & LI_O_UNLIMITED)) {
			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, 1000)); /* try again in 1 second */
			return 0;
		}

		if (unlikely(p && p->feconn >= p->maxconn)) {
			limit_listener(l, &p->listener_queue);
			return 0;
		}

		cfd = accept(fd, (struct sockaddr *)&addr, &laddr);
		if (unlikely(cfd == -1)) {
			switch (errno) {
			case EAGAIN:
			case EINTR:
			case ECONNABORTED:
				return 0; /* nothing more to accept */
			case ENFILE:
				if (p)
					send_log(p, LOG_EMERG, "Proxy %s reached system FD limit at %d. Please check system tunables.\n", p->id, maxfd);
				limit_listener(l, &global_listener_queue);
				task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
				return 0;
			case EMFILE:
				if (p)
					send_log(p, LOG_EMERG, "Proxy %s reached process FD limit at %d. Please check 'ulimit-n' and restart.\n", p->id, maxfd);
				limit_listener(l, &global_listener_queue);
				task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
				return 0;
			case ENOBUFS:
			case ENOMEM:
				if (p)
					send_log(p, LOG_EMERG, "Proxy %s reached system memory limit at %d sockets. Please check system tunables.\n", p->id, maxfd);
				limit_listener(l, &global_listener_queue);
				task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
				return 0;
			default:
				return 0;
			}
		}

		if (unlikely(cfd >= global.maxsock)) {
			send_log(p, LOG_EMERG, "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n", p->id);
			close(cfd);
			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, 1000)); /* try again in 1 second */
			return 0;
		}

		/* increase the per-process number of cumulated connections */
		if (!(l->options & LI_O_UNLIMITED)) {
			update_freq_ctr(&global.conn_per_sec, 1);
			if (global.conn_per_sec.curr_ctr > global.cps_max)
				global.cps_max = global.conn_per_sec.curr_ctr;
			actconn++;
		}

		jobs++;
		totalconn++;
		l->nbconn++;

		if (l->counters) {
			if (l->nbconn > l->counters->conn_max)
				l->counters->conn_max = l->nbconn;
		}

		ret = l->accept(l, cfd, &addr);
		if (unlikely(ret <= 0)) {
			/* The connection was closed by session_accept(). Either
			 * we just have to ignore it (ret == 0) or it's a critical
			 * error due to a resource shortage, and we must stop the
			 * listener (ret < 0).
			 */
			if (!(l->options & LI_O_UNLIMITED))
				actconn--;
			jobs--;
			l->nbconn--;
			if (ret == 0) /* successful termination */
				continue;

			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, 100)); /* try again in 100 ms */
			return 0;
		}

		if (l->nbconn >= l->maxconn) {
			listener_full(l);
			return 0;
		}
	} /* end of while (p->feconn < p->maxconn) */
	return 0;
}
/*
 * unlock task schedule: clear the schedule-lock flag and invoke the
 * scheduler immediately so any switch deferred while locked happens now.
 */
void task_unlock()
{
	sys_schedule_flags &= ~SCHEDULE_FLAGS_LOCK;
	task_schedule();
}