static JSBool
note(JSContext *cx, uintN argc, jsval *vp)
{
    struct note_data *note = malloc(sizeof(struct note_data));
    int time, duration;

    if (!note)
        return JS_FALSE;

    if (argc < 5) {
        JS_ConvertArguments(cx, argc, JS_ARGV(cx, vp), "uuuu",
                            &time, &note->pitch, &note->velocity, &duration);
        note->channel = 1;
    } else {
        JS_ConvertArguments(cx, argc, JS_ARGV(cx, vp), "uuuuu",
                            &time, &note->pitch, &note->velocity, &duration,
                            &note->channel);
    }

    struct task *note_on = task_create(time);
    struct task *note_off = task_create(time + duration);

    note_on->c_function = flm_note_on;
    note_on->fn_data = note;
    note_on->task_type = C_FUNCTION;

    note_off->c_function = flm_note_off;
    note_off->fn_data = note;
    note_off->task_type = C_FUNCTION;

    schedule_task(flim->scheduler, note_on);
    schedule_task(flim->scheduler, note_off);
    return JS_TRUE;
}
Task_t *schedule_periodic_task(uint32_t period, void (*function)(), void *arg)
{
    /* Branchless clamp: pad very short periods so the first run is at
     * least MIN_TASK_TIME_IN_FUTURE ticks in the future. */
    period += MIN_TASK_TIME_IN_FUTURE * (period < MIN_TASK_TIME_IN_FUTURE);

    volatile Task_t *new_task = schedule_task(period, function, arg);
    new_task->period = period;
    return (Task_t *)new_task;
}
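/*
 * A minimal usage sketch for schedule_periodic_task() above. blink_led,
 * toggle_led() and the 500-tick period are hypothetical; only Task_t,
 * schedule_periodic_task() and MIN_TASK_TIME_IN_FUTURE come from the
 * scheduler code.
 */
static void blink_led(void *arg)
{
    toggle_led((int)(uintptr_t)arg);  /* assumed board-support routine */
}

void start_blinker(void)
{
    /* Fires every 500 ticks; the scheduler re-arms it via new_task->period.
     * Periods shorter than MIN_TASK_TIME_IN_FUTURE get padded above. */
    schedule_periodic_task(500, (void (*)())blink_led, (void *)0);
}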
/**
 * Work.
 *
 */
void
worker_start(worker_type* worker)
{
    ods_log_assert(worker);
    while (worker->need_to_exit == 0) {
        ods_log_debug("[worker[%i]]: report for duty", worker->thread_num);

        /* If no task is available, this call blocks and waits for an
         * event; it may then return NULL. */
        worker->task = schedule_pop_task(worker->engine->taskq);
        if (worker->task) {
            ods_log_debug("[worker[%i]] start working", worker->thread_num);
            worker_perform_task(worker);
            ods_log_debug("[worker[%i]] finished working", worker->thread_num);
            if (worker->task) {
                if (schedule_task(worker->engine->taskq, worker->task) !=
                    ODS_STATUS_OK) {
                    ods_log_error("[worker[%i]] unable to schedule task",
                        worker->thread_num);
                }
                worker->task = NULL;
            }
        }
    }
}
static void ide_detach(dev_link_t *link)
{
    dev_link_t **linkp;
    ide_info_t *info = link->priv;
    int ret;

    DEBUG(0, "ide_detach(0x%p)\n", link);

    /* Locate device structure */
    for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
        if (*linkp == link)
            break;
    if (*linkp == NULL)
        return;

    if (link->state & DEV_CONFIG) {
        schedule_task(&info->rel_task);
        flush_scheduled_tasks();
    }

    if (link->handle) {
        ret = CardServices(DeregisterClient, link->handle);
        if (ret != CS_SUCCESS)
            cs_error(link->handle, DeregisterClient, ret);
    }

    /* Unlink, free device structure */
    *linkp = link->next;
    kfree(info);
} /* ide_detach */
int ide_event(event_t event, int priority, event_callback_args_t *args)
{
    dev_link_t *link = args->client_data;
    ide_info_t *info = link->priv;

    DEBUG(1, "ide_event(0x%06x)\n", event);

    switch (event) {
    case CS_EVENT_CARD_REMOVAL:
        link->state &= ~DEV_PRESENT;
        if (link->state & DEV_CONFIG)
            schedule_task(&info->rel_task);
        break;
    case CS_EVENT_CARD_INSERTION:
        link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
        ide_config(link);
        break;
    case CS_EVENT_PM_SUSPEND:
        link->state |= DEV_SUSPEND;
        /* Fall through... */
    case CS_EVENT_RESET_PHYSICAL:
        if (link->state & DEV_CONFIG)
            CardServices(ReleaseConfiguration, link->handle);
        break;
    case CS_EVENT_PM_RESUME:
        link->state &= ~DEV_SUSPEND;
        /* Fall through... */
    case CS_EVENT_CARD_RESET:
        if (DEV_OK(link))
            CardServices(RequestConfiguration, link->handle, &link->conf);
        break;
    }
    return 0;
} /* ide_event */
static void au1000_pcmcia_poll_event(u32 dummy)
{
    poll_timer.function = au1000_pcmcia_poll_event;
    poll_timer.expires = jiffies + AU1000_PCMCIA_POLL_PERIOD;
    add_timer(&poll_timer);
    schedule_task(&au1000_pcmcia_task);
}
static void clps6700_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct clps6700_skt *skt = dev_id;
    u_int val, events;

    val = __raw_readl(skt->regbase + PCISR);
    if (!val)
        return;

    __raw_writel(val, skt->regbase + PCICR);

    events = 0;
    if (val & (PCM_CD1 | PCM_CD2))
        events |= SS_DETECT;
    if (val & PCM_BVD1)
        events |= SS_BATWARN;
    if (val & PCM_BVD2)
        events |= SS_BATDEAD;
    if (val & PCM_RDYL)
        events |= SS_READY;

    spin_lock(&skt->ev_lock);
    skt->ev_pending |= events;
    spin_unlock(&skt->ev_lock);

    schedule_task(&clps6700_task);
}
int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
                          union iwreq_data *wrqu, char *b)
{
    int ret = 0;

    down(&ieee->wx_sem);

    if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
        ret = -1;
        goto out;
    }

    if (ieee->state == IEEE80211_LINKED) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
        queue_work(ieee->wq, &ieee->wx_sync_scan_wq);
#else
        schedule_task(&ieee->wx_sync_scan_wq);
#endif
        /* Intentionally leave the semaphore down here; the sync-scan
         * work is expected to up it when the scan completes. */
        return 0;
    }
out:
    up(&ieee->wx_sem);
    return ret;
}
static int notify_push(unsigned int cmd, __u32 controller, __u16 applid, __u32 ncci)
{
    struct capi_notifier *np;

    MOD_INC_USE_COUNT;

    np = (struct capi_notifier *)kmalloc(sizeof(struct capi_notifier), GFP_ATOMIC);
    if (!np) {
        MOD_DEC_USE_COUNT;
        return -1;
    }
    memset(np, 0, sizeof(struct capi_notifier));
    np->cmd = cmd;
    np->controller = controller;
    np->applid = applid;
    np->ncci = ncci;

    notify_enqueue(np);

    /*
     * The notifier will result in adding/deleting of devices.
     * Devices can only be removed in user process context, not
     * in a bottom half.
     */
    MOD_INC_USE_COUNT;
    if (schedule_task(&tq_state_notify) == 0)
        MOD_DEC_USE_COUNT;
    return 0;
}
/****************************************************************************
Description : Callback helper for a periodic timer
Arguments   : arg - pointer to the corresponding timer ID
Return      : NONE
****************************************************************************/
IFX_LOCAL IFX_void_t TAPI_timer_call_back(IFX_int32_t arg)
{
    Timer_ID Timer = (Timer_ID) arg;

    /* Do the operation in process context, not in interrupt context */
    schedule_task(&(Timer->timerTask));
}
APU_DECLARE(apr_status_t) apr_thread_pool_schedule(apr_thread_pool_t *me,
                                                   apr_thread_start_t func,
                                                   void *param,
                                                   apr_interval_time_t time,
                                                   void *owner)
{
    return schedule_task(me, func, param, owner, time);
}
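/*
 * A minimal caller-side sketch for the wrapper above, using the standard
 * apr-util thread-pool API; hello() and schedule_hello() are hypothetical
 * names, and error handling is kept to the minimum.
 */
#include <apr_general.h>
#include <apr_time.h>
#include <apr_thread_pool.h>

static void *APR_THREAD_FUNC hello(apr_thread_t *t, void *param)
{
    (void)t; (void)param;
    /* Runs on a pool thread roughly one second after scheduling. */
    return NULL;
}

apr_status_t schedule_hello(apr_pool_t *pool)
{
    apr_thread_pool_t *tp;
    apr_status_t rv = apr_thread_pool_create(&tp, 1, 4, pool);
    if (rv != APR_SUCCESS)
        return rv;
    /* The delay is an apr_interval_time_t in microseconds, relative to
     * now; owner may be NULL when tasks are never cancelled by owner. */
    return apr_thread_pool_schedule(tp, hello, NULL, apr_time_from_sec(1), NULL);
}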
/* pxa_pcmcia_poll_event()
 * ^^^^^^^^^^^^^^^^^^^^^^^
 * Poll for events in addition to IRQs, since IRQs alone are unreliable.
 */
static void pxa_pcmcia_poll_event(unsigned long dummy)
{
    DEBUG(3, "%s(): polling for events\n", __FUNCTION__);
    poll_timer.function = pxa_pcmcia_poll_event;
    poll_timer.expires = jiffies + PXA_PCMCIA_POLL_PERIOD;
    add_timer(&poll_timer);
    schedule_task(&pxa_pcmcia_task);
}
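/*
 * A minimal sketch of how a self-rearming poll timer like the one above is
 * typically started and stopped on a 2.4 kernel. The init/exit function
 * names are hypothetical; only poll_timer, PXA_PCMCIA_POLL_PERIOD and
 * pxa_pcmcia_poll_event come from the driver.
 */
static int __init pxa_pcmcia_poll_init(void)
{
    init_timer(&poll_timer);
    poll_timer.function = pxa_pcmcia_poll_event;
    poll_timer.expires = jiffies + PXA_PCMCIA_POLL_PERIOD;
    add_timer(&poll_timer);       /* first tick; the handler re-arms itself */
    return 0;
}

static void __exit pxa_pcmcia_poll_exit(void)
{
    del_timer_sync(&poll_timer);  /* stop the re-arming chain */
}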
/* Simple wrapper calling the power-down function. */
static void acpi_sysrq_power_off(int key, struct pt_regs *pt_regs,
                                 struct kbd_struct *kbd, struct tty_struct *tty)
{
    static struct tq_struct tq = { .routine = acpi_po_tramp };

    if (po_cb_active++)
        return;
    schedule_task(&tq);
}
TimerWorker::task_id_t
TimerWorker::schedule_repeated(timespec run_time, long interval,
                               task_func_t task_func, void* task_arg)
{
    Task task;
    task.next_run_time = run_time;
    task.interval = interval;
    task.task_func_routine = task_func;
    task.arg = task_arg;
    return schedule_task(task);
}
void start_test()
{
    int i;

    for (i = 0; i < GPIO_NB; ++i)
        configure_gpio_output(&gpio_to_toggle[i],
                              (enum gpio_bank) index_to_mapping[i][0],
                              index_to_mapping[i][1],
                              GPIO_PUSH_PULL);

    schedule_task(now, (task_handler) toggle_gpio, 0, 0, 0, 0);
}
void toggle_gpio(int index)
{
    struct gpio_out *current = &gpio_to_toggle[index].out;
    struct gpio_out *next = &gpio_to_toggle[(index + 1) % GPIO_NB].out;

    current->clear(current);
    next->set(next);

    schedule_task(1 * ms, (task_handler) toggle_gpio, (index + 1) % GPIO_NB,
                  0, 0, 0);
}
TimerWorker::task_id_t TimerWorker::schedule(timespec run_time,
                                             task_func_t task_func, void* arg)
{
    Task task;
    task.next_run_time = run_time;
    task.interval = 0;
    task.task_func_routine = task_func;
    task.arg = arg;
    return schedule_task(task);
}
/*
 * Call jiq_print from a task queue
 */
void jiq_print_tq(void *ptr)
{
    if (jiq_print(ptr)) {
        struct clientdata *data = (struct clientdata *)ptr;

        if (data->queue == SCHEDULER_QUEUE)
            schedule_task(&jiq_task);
        else if (data->queue)
            queue_task(&jiq_task, data->queue);
        if (data->queue == &tq_immediate)
            mark_bh(IMMEDIATE_BH); /* this one needs to be marked */
    }
}
static void gen_rtc_timer(unsigned long data)
{
    lostint = get_rtc_ss() - oldsecs;
    if (lostint < 0)
        lostint = 60 - lostint;
    if (time_after(jiffies, tt_exp))
        printk(KERN_INFO "genrtc: timer task delayed by %ld jiffies\n",
               jiffies - tt_exp);
    ttask_active = 0;
    stask_active = 1;
    if (schedule_task(&genrtc_task) == 0)
        stask_active = 0;
}
void ether00_mem_update(void *dev_id)
{
    struct net_device *dev = dev_id;
    struct net_priv *priv = dev->priv;
    struct rx_blist_ent *blist_ent_ptr;
    unsigned long flags;
    int enable_rx = 0;

    priv->tq_memupdate.sync = 0;
    priv->memupdate_scheduled = 0;

    /* Fill in any missing buffers from the received queue */
    blist_ent_ptr = priv->rx_blist_vp;
    while (blist_ent_ptr < (priv->rx_blist_vp + RX_NUM_BUFF)) {
        spin_lock_irqsave(&priv->dma_lock, flags);
        /* An fd.FDSystem of 0 indicates we failed to allocate the
         * buffer in the ISR */
        if (!blist_ent_ptr->fd.FDSystem) {
            struct sk_buff *skb;

            skb = dev_alloc_skb(PKT_BUF_SZ);
            blist_ent_ptr->fd.FDSystem = (unsigned int)skb;
            if (skb) {
                setup_blist_entry(skb, blist_ent_ptr);
                enable_rx = 1;
            } else {
                /*
                 * Reschedule the clean up, since we didn't patch up
                 * all the buffers
                 */
                if (!priv->memupdate_scheduled) {
                    schedule_task(&priv->tq_memupdate);
                    priv->memupdate_scheduled = 1;
                }
                spin_unlock_irqrestore(&priv->dma_lock, flags);
                break;
            }
        }
        spin_unlock_irqrestore(&priv->dma_lock, flags);
        blist_ent_ptr++;
    }

    if (enable_rx) {
        if (!priv->rx_disabled) {
            priv->rx_disabled = 0;
            writel(ETHER_RX_CTL_RXEN_MSK, ETHER_RX_CTL(dev->base_addr));
        }
    }
}
/**
 * monitor_schedule_bh -
 */
void monitor_schedule_bh(void)
{
    //printk(KERN_DEBUG "monitor_schedule_bh: schedule bh to %s\n", msg);
    if (monitor.monitor_bh.sync)
        return;

    MOD_INC_USE_COUNT;
    if (!schedule_task(&monitor.monitor_bh)) {
        printk(KERN_DEBUG "monitor_schedule_bh: failed\n");
        MOD_DEC_USE_COUNT;
    }
}
/**
 * hotplug_schedule_bh -
 */
void hotplug_schedule_bh(void)
{
    printk(KERN_DEBUG "hotplug_schedule_bh: schedule bh\n");
    if (monitor.hotplug_bh.sync)
        return;

    MOD_INC_USE_COUNT;
    if (!schedule_task(&monitor.hotplug_bh)) {
        printk(KERN_DEBUG "hotplug_schedule_bh: failed\n");
        MOD_DEC_USE_COUNT;
    }
}
/*
 * Change of state on a DCD line.
 */
void xmbrs_modem_change(struct xmb_serial *info, int dcd)
{
    if (info->count == 0)
        return;

    if (info->flags & ASYNC_CHECK_CD) {
        if (dcd) {
            wake_up_interruptible(&info->open_wait);
        } else if (!((info->flags & ASYNC_CALLOUT_ACTIVE) &&
                     (info->flags & ASYNC_CALLOUT_NOHUP))) {
            schedule_task(&info->tqueue_hangup);
        }
    }
}
static void
enf_schedule_task(int sockfd, engine_type* engine, task_type *task, const char *what)
{
    /* schedule task */
    if (!task) {
        ods_log_crit("[%s] failed to create %s task", module_str, what);
    } else {
        ods_status status = schedule_task(engine->taskq, task);
        if (status != ODS_STATUS_OK) {
            ods_log_crit("[%s] failed to schedule %s task", module_str, what);
            client_printf(sockfd, "Unable to schedule %s task.\n", what);
        } else {
            client_printf(sockfd, "Scheduled %s task.\n", what);
        }
    }
}
int jiq_read_sched(char *buf, char **start, off_t offset, int len,
                   int *eof, void *data)
{
    jiq_data.len = 0;              /* nothing printed, yet */
    jiq_data.buf = buf;            /* print in this place */
    jiq_data.jiffies = jiffies;    /* initial time */

    /* jiq_print will queue_task() again in jiq_data.queue */
    jiq_data.queue = SCHEDULER_QUEUE;

    schedule_task(&jiq_task);             /* ready to run */
    interruptible_sleep_on(&jiq_wait);    /* sleep till completion */

    *eof = 1;
    return jiq_data.len;
}
acpi_status
acpi_os_queue_for_execution(
    u32                     priority,
    OSD_EXECUTION_CALLBACK  function,
    void                    *context)
{
    acpi_status status = AE_OK;
    struct acpi_os_dpc *dpc = NULL;
    struct tq_struct *task;

    ACPI_FUNCTION_TRACE ("os_queue_for_execution");

    ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
        "Scheduling function [%p(%p)] for deferred execution.\n",
        function, context));

    if (!function)
        return_ACPI_STATUS (AE_BAD_PARAMETER);

    /*
     * Allocate/initialize the DPC structure. Note that this memory will
     * be freed by the callee. The kernel handles the tq_struct list in a
     * way that allows us to also free its memory inside the callee.
     * Because we may want to schedule several tasks with different
     * parameters, we can't use the approach some kernel code uses of
     * having a static tq_struct. We save time and code by allocating
     * the DPC and the tq_struct from the same memory.
     */
    dpc = kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct tq_struct),
                  GFP_ATOMIC);
    if (!dpc)
        return_ACPI_STATUS (AE_NO_MEMORY);

    dpc->function = function;
    dpc->context = context;

    task = (void *)(dpc + 1);
    INIT_TQUEUE(task, acpi_os_execute_deferred, (void *)dpc);

    if (!schedule_task(task)) {
        ACPI_DEBUG_PRINT ((ACPI_DB_ERROR,
            "Call to schedule_task() failed.\n"));
        kfree(dpc);
        status = AE_ERROR;
    }

    return_ACPI_STATUS (status);
}
static int sc_data_fixup(const struct ip_ct_sc_expect *exp_sc_info,
                         struct ip_conntrack *ct,
                         unsigned int datalen,
                         struct sk_buff **pskb,
                         enum ip_conntrack_info ctinfo)
{
    u_int32_t newip;
    struct iphdr *iph = (*pskb)->nh.iph;
    struct tcphdr *tcph = (void *)iph + iph->ihl*4;
    u_int16_t port, new_port;
    struct ip_conntrack_tuple tuple;
    struct sc_ip_port_data *list_t = sc_list;
    int ret = 0;
    /* Don't care about source port */
    const struct ip_conntrack_tuple mask =
        { { 0xFFFFFFFF, { 0xFFFFFFFF } },
          { 0x0, { 0xFFFF }, 0xFFFF } };

    memset(&tuple, 0, sizeof(tuple));

    MUST_BE_LOCKED(&ip_sc_lock);

#if 0
    DEBUGP("SC_NAT: seq %u + %u in %u + %u\n",
           exp_sc_info->seq, exp_sc_info->len,
           ntohl(tcph->seq), datalen);
#endif

    /* Change address inside packet to match the way we're mapping
     * this connection. */
    newip = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;

    DEBUGP("sc_data_fixup: %u.%u.%u.%u->%u.%u.%u.%u\n",
           NIPQUAD(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip),
           NIPQUAD(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip));

    /* Expect something from server->client */
    tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
    tuple.dst.protonum = IPPROTO_UDP;

    jiq_task.routine = open_sc_socket;
    memset(task_data, 0, 64);
    sprintf(task_data, "IP:%u.%u.%u.%uPORT:%dEND",
            NIPQUAD(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip), port);
    jiq_task.data = (void *)task_data;
    schedule_task(&jiq_task);

    if (!mangle_packet(pskb, newip, exp_sc_info->seq - ntohl(tcph->seq),
                       exp_sc_info->len, ct, ctinfo))
        return 0;

    return 1;
}
static JSBool
at(JSContext *cx, uintN argc, jsval *vp)
{
    if (argc < 2) {
        flm_log("'at' requires at least 2 arguments: <time> <function>");
        return JS_FALSE;
    }

    jsval *arguments = JS_ARGV(cx, vp);
    uint32_t time;
    JS_ValueToECMAUint32(cx, arguments[0], &time);

    struct task *t = task_create(time);
    t->js_argc = 0;

    if (JS_TypeOfValue(cx, arguments[1]) == JSTYPE_FUNCTION) {
        t->js_function = arguments[1];
    } else {
        flm_log("2nd argument of 'at' should be a function");
        return JS_FALSE;
    }

    t->js_argc = (argc > 2 ? argc - 2 : 1);
    jsval *fn_args = malloc(t->js_argc * sizeof(jsval));
    if (argc > 2) {
        int i;
        for (i = 2; i < argc; i++) {
            fn_args[i - 2] = arguments[i];
        }
    } else {
        /* No extra arguments: pass the scheduled time, boxed as a jsval. */
        fn_args[0] = UINT_TO_JSVAL(time);
    }
    t->js_args = fn_args;
    t->task_type = JS_FUNCTION;

    schedule_task(flim->scheduler, t);
    return JS_TRUE;
}
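/*
 * A minimal registration sketch for the two natives above, assuming the
 * SpiderMonkey 1.8.5-era JSAPI that the uintN/jsval signatures imply.
 * flim_functions and flim_register are hypothetical names; context and
 * global-object setup are elided.
 */
static JSFunctionSpec flim_functions[] = {
    JS_FS("at",   at,   2, 0),   /* at(time, fn, args...) */
    JS_FS("note", note, 4, 0),   /* note(time, pitch, velocity, duration[, channel]) */
    JS_FS_END
};

static JSBool flim_register(JSContext *cx, JSObject *global)
{
    /* Expose the C natives to scripts as global functions. */
    return JS_DefineFunctions(cx, global, flim_functions);
}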
/**
 * Reschedule task for zone.
 *
 */
ods_status
zone_reschedule_task(zone_type* zone, schedule_type* taskq, task_id what)
{
    task_type* task = NULL;
    ods_status status = ODS_STATUS_OK;

    ods_log_assert(taskq);
    ods_log_assert(zone);
    ods_log_assert(zone->name);
    ods_log_assert(zone->task);
    ods_log_debug("[%s] reschedule task for zone %s", zone_str, zone->name);

    lock_basic_lock(&taskq->schedule_lock);
    task = unschedule_task(taskq, (task_type*) zone->task);
    if (task != NULL) {
        if (task->what != what) {
            task->halted = task->what;
            task->halted_when = task->when;
            task->interrupt = what;
        }
        /** Only reschedule if what to do is lower than what was scheduled. */
        if (task->what > what) {
            task->what = what;
        }
        task->when = time_now();
        status = schedule_task(taskq, task, 0);
    } else {
        /* task not queued, being worked on? */
        ods_log_verbose("[%s] unable to reschedule task for zone %s now: "
            "task is not queued (task will be rescheduled when it is put "
            "back on the queue)", zone_str, zone->name);
        task = (task_type*) zone->task;
        task->interrupt = what;
        /* task->halted(_when) set by worker */
    }
    lock_basic_unlock(&taskq->schedule_lock);
    zone->task = task;
    return status;
}
/*
 * Routine to poll the RTC seconds field for change as often as possible;
 * after the first RTC_UIE, use a timer to reduce polling.
 */
static void genrtc_troutine(void *data)
{
    unsigned int tmp = get_rtc_ss();

    if (stop_rtc_timers) {
        stask_active = 0;
        return;
    }

    if (oldsecs != tmp) {
        oldsecs = tmp;
        timer_task.function = gen_rtc_timer;
        timer_task.expires = jiffies + HZ - (HZ / 10);
        tt_exp = timer_task.expires;
        ttask_active = 1;
        stask_active = 0;
        add_timer(&timer_task);
        gen_rtc_interrupt(0);
    } else if (schedule_task(&genrtc_task) == 0) {
        stask_active = 0;
    }
}