void
jade_sched_event(struct BCState *bcs, int event)
{
        bcs->event |= 1 << event;
        queue_task(&bcs->tqueue, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
static void
amd7930_dxmit_callback(void *arg, int error)
{
        struct IsdnCardState *cs = (struct IsdnCardState *) arg;
        static struct tq_struct task;

        /* NOTE: This function is called directly from an interrupt handler */
        /* may wish to do retransmission here, if error indicates collision */

        if (cs->debug & L1_DEB_ISAC_FIFO) {
                char tmp[128];
                char *t = tmp;

                t += sprintf(t, "amd7930 Dxmit cnt %d", cs->tx_skb->len);
                if (error)
                        t += sprintf(t, " ERR %x", error);
                QuickHex(t, cs->tx_skb->data, cs->tx_skb->len);
                debugl1(cs, tmp);
        }

        cs->tx_skb = NULL;
        task.routine = (void *) DChannel_proc_xmt;
        task.data = (void *) cs;
        queue_task(&task, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
void
avmb1_card_ready(avmb1_card *card)
{
        __u16 appl;

        card->cversion.majorversion = 2;
        card->cversion.minorversion = 0;
        card->cversion.majormanuversion = (card->version[VER_DRIVER][0] - '0') << 4;
        card->cversion.majormanuversion |= (card->version[VER_DRIVER][2] - '0');
        card->cversion.minormanuversion = (card->version[VER_DRIVER][3] - '0') << 4;
        card->cversion.minormanuversion |= (card->version[VER_DRIVER][5] - '0') * 10;
        card->cversion.minormanuversion |= (card->version[VER_DRIVER][6] - '0');
        card->cardstate = CARD_RUNNING;

        for (appl = 1; appl <= CAPI_MAXAPPL; appl++) {
                if (VALID_APPLID(appl) && !APPL(appl)->releasing) {
                        B1_send_register(card->port, appl,
                                         1024 * (APPL(appl)->rparam.level3cnt + 1),
                                         APPL(appl)->rparam.level3cnt,
                                         APPL(appl)->rparam.datablkcnt,
                                         APPL(appl)->rparam.datablklen);
                }
        }
        set_bit(CARDNR(card), &notify_up_set);
        queue_task(&tq_state_notify, &tq_scheduler);
        printk(KERN_NOTICE "b1capi: card %d ready.\n", CARDNR(card));
}
static void
hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb)
{
        hycapictrl_info *cinfo = (hycapictrl_info *) (ctrl->driverdata);
        hysdn_card *card = cinfo->card;

        spin_lock_irq(&cinfo->lock);
#ifdef HYCAPI_PRINTFNAMES
        printk(KERN_NOTICE "hycapi_send_message\n");
#endif
        cinfo->skbs[cinfo->in_idx++] = skb;     /* add to buffer list */
        if (cinfo->in_idx >= HYSDN_MAX_CAPI_SKB)
                cinfo->in_idx = 0;              /* wrap around */
        cinfo->sk_count++;                      /* adjust counter */
        if (cinfo->sk_count >= HYSDN_MAX_CAPI_SKB) {
                /* inform upper layers we're full */
                printk(KERN_ERR "HYSDN Card%d: CAPI-buffer overrun!\n",
                       card->myid);
                ctrl->suspend_output(ctrl);
        }
        cinfo->tx_skb = skb;
        spin_unlock_irq(&cinfo->lock);
        queue_task(&card->irq_queue, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
void
rb_mtp_thread_delete_track (RBMtpThread *thread, LIBMTP_track_t *track)
{
        RBMtpThreadTask *task = create_task (DELETE_TRACK);
        task->track_id = track->item_id;
        task->storage_id = track->storage_id;
        queue_task (thread, task);
}
void
rb_mtp_thread_set_album_image (RBMtpThread *thread, const char *album, GdkPixbuf *image)
{
        RBMtpThreadTask *task = create_task (SET_ALBUM_IMAGE);
        task->album = g_strdup (album);
        task->image = g_object_ref (image);
        queue_task (thread, task);
}
/**
 * schedule_task - schedule a function for subsequent execution in process context.
 * @task: pointer to a &tq_struct which defines the function to be scheduled.
 *
 * May be called from interrupt context.  The scheduled function is run at some
 * time in the near future by the keventd kernel thread.  If it can sleep, it
 * should be designed to do so for the minimum possible time, as it will be
 * stalling all other scheduled tasks.
 *
 * schedule_task() returns non-zero if the task was successfully scheduled.
 * If @task is already residing on a task queue then schedule_task() fails
 * to schedule your task and returns zero.
 */
int schedule_task(struct tq_struct *task)
{
        int ret;

        need_keventd(__FUNCTION__);
        ret = queue_task(task, &tq_context);
        wake_up(&context_task_wq);
        return ret;
}
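/*
 * A minimal usage sketch of the interface documented above, for a 2.4-era
 * driver.  The my_device structure, my_deferred_work handler and my_interrupt
 * handler are hypothetical and do not appear elsewhere in these snippets:
 * the interrupt handler defers the slow work to keventd via schedule_task().
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/tqueue.h>

struct my_device { int irq_events; };          /* hypothetical driver state */

static struct my_device my_dev;
static struct tq_struct my_task;

/* Runs later in process context via keventd; may sleep, but should be brief. */
static void my_deferred_work(void *data)
{
        struct my_device *dev = data;

        printk(KERN_DEBUG "deferred work: %d interrupts so far\n",
               dev->irq_events);
}

/* Hypothetical interrupt handler: record the event, defer the rest. */
static void my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        my_dev.irq_events++;
        my_task.routine = my_deferred_work;
        my_task.data = &my_dev;
        if (!schedule_task(&my_task)) {
                /* already queued: the pending run will pick up the new count */
        }
}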
static void
ctc_tty_transmit_status(ctc_tty_info *info)
{
        if (ctc_tty_shuttingdown)
                return;
        info->flags |= CTC_ASYNC_TX_LINESTAT;
        queue_task(&info->tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
static void
plser_start_tx(struct uart_port *port, u_int nonempty, u_int from_tty)
{
        if (nonempty) {
                port->read_status_mask |= ST_TX_FULL | ST_CTS;
                queue_task(&plser_task, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
        }
}
void
rb_mtp_thread_remove_from_album (RBMtpThread *thread, LIBMTP_track_t *track, const char *album)
{
        RBMtpThreadTask *task = create_task (REMOVE_FROM_ALBUM);
        task->track_id = track->item_id;
        task->storage_id = track->storage_id;
        task->album = g_strdup (album);
        queue_task (thread, task);
}
void
rb_mtp_thread_add_to_album (RBMtpThread *thread, LIBMTP_track_t *track, const char *album)
{
        RBMtpThreadTask *task = create_task (ADD_TO_ALBUM);
        task->track_id = track->item_id;
        task->storage_id = track->storage_id;
        task->album = g_strdup (album);
        queue_task (thread, task);
}
/*
 * Schedule a task to call the callback; this makes it easier to avoid
 * synchronization problems.  The scheduled task may use the rwlock as
 * usual - using it from the timer function itself might cause problems.
 */
void timer_handler(unsigned long dummy)
{
        INIT_LIST_HEAD(&bcache->callback_task.list);
        bcache->callback_task.sync = 0;
        bcache->callback_task.routine = task_handler;
        bcache->callback_task.data = NULL;
        queue_task(&bcache->callback_task, &tq_timer);
        return;
}
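/*
 * A sketch (not from the original source) of what the complementary
 * task_handler might look like.  It assumes, hypothetically, that the rwlock
 * mentioned above is a kernel rwlock_t named 'lock' inside bcache and that
 * bcache carries a 'callback' function pointer; neither field is shown in the
 * snippet above.  Because this runs from the timer task queue rather than the
 * timer function itself, it can take the rwlock as usual.
 */
static void task_handler(void *unused)
{
        read_lock(&bcache->lock);
        if (bcache->callback)
                bcache->callback(bcache);
        read_unlock(&bcache->lock);
}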
/*
 * Queue a message to be sent to the card when possible.
 *
 * card: the board
 * skb: the sk_buff containing the message.
 */
void tpam_enqueue(tpam_card *card, struct sk_buff *skb)
{
        dprintk("TurboPAM(tpam_enqueue): card=%d\n", card->id);

        /* queue the sk_buff on the board's send queue */
        skb_queue_tail(&card->sendq, skb);

        /* queue the board's send task struct for immediate treatment */
        queue_task(&card->send_tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
void
rb_mtp_thread_queue_callback (RBMtpThread *thread,
                              RBMtpThreadCallback func,
                              gpointer data,
                              GDestroyNotify destroy_data)
{
        RBMtpThreadTask *task = create_task (THREAD_CALLBACK);
        task->callback = func;
        task->user_data = data;
        task->destroy_data = destroy_data;
        queue_task (thread, task);
}
/* ctc_tty_write() is the main send routine.  It is called from the upper
 * levels within the kernel to send data.  Depending on the online flag it
 * either directs output to the at-command interpreter or to the lower level.
 * Additional tasks done here:
 * - If online, check for escape sequence (+++)
 * - If sending audio data, call ctc_tty_DLEdown() to parse DLE codes.
 * - If receiving audio data, call ctc_tty_end_vrx() to abort if needed.
 * - If dialing, abort dial.
 */
static int
ctc_tty_write(struct tty_struct *tty, int from_user, const u_char *buf, int count)
{
        int c;
        int total = 0;
        ctc_tty_info *info;

        if (!tty)
                return 0;
        info = (ctc_tty_info *) tty->driver_data;
        if (ctc_tty_shuttingdown)
                return 0;
        if (ctc_tty_paranoia_check(info, tty->device, "ctc_tty_write"))
                return 0;
        if (!info->netdev)
                return -ENODEV;
        if (from_user)
                down(&info->write_sem);
        while (1) {
                struct sk_buff *skb;
                int skb_res;

                c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE;
                if (c <= 0)
                        break;
                skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
                          sizeof(__u32);
                skb = dev_alloc_skb(skb_res + c);
                if (!skb) {
                        printk(KERN_WARNING
                               "ctc_tty: Out of memory in %s%d write\n",
                               CTC_TTY_NAME, info->line);
                        break;
                }
                skb_reserve(skb, skb_res);
                if (from_user)
                        copy_from_user(skb_put(skb, c), buf, c);
                else
                        memcpy(skb_put(skb, c), buf, c);
                skb_queue_tail(&info->tx_queue, skb);
                buf += c;
                total += c;
                count -= c;
        }
        if (skb_queue_len(&info->tx_queue)) {
                info->lsr &= ~UART_LSR_TEMT;
                queue_task(&info->tq, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
        }
        if (from_user)
                up(&info->write_sem);
        return total;
}
/*
 * Call jiq_print from a task queue
 */
void jiq_print_tq(void *ptr)
{
        if (jiq_print (ptr)) {
                struct clientdata *data = (struct clientdata *) ptr;

                if (data->queue == SCHEDULER_QUEUE)
                        schedule_task(&jiq_task);
                else if (data->queue)
                        queue_task(&jiq_task, data->queue);
                if (data->queue == &tq_immediate)
                        mark_bh(IMMEDIATE_BH);  /* this one needs to be marked */
        }
}
/*
 * Function to safely add raw3215_softint to tq_immediate.
 * The s390irq spinlock must be held.
 */
static inline void
raw3215_sched_bh(raw3215_info *raw)
{
        if (raw->flags & RAW3215_BH_PENDING)
                return;         /* already pending */
        raw->flags |= RAW3215_BH_PENDING;
        INIT_LIST_HEAD(&raw->tqueue.list);
        raw->tqueue.sync = 0;
        raw->tqueue.routine = raw3215_softint;
        raw->tqueue.data = raw;
        queue_task(&raw->tqueue, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
void
rb_mtp_thread_get_track_list (RBMtpThread *thread,
                              RBMtpTrackListCallback callback,
                              gpointer data,
                              GDestroyNotify destroy_data)
{
        RBMtpThreadTask *task = create_task (GET_TRACK_LIST);
        task->callback = callback;
        task->user_data = data;
        task->destroy_data = destroy_data;
        queue_task (thread, task);
}
static void
acm_write_bulk(struct urb *urb)
{
        struct acm *acm = (struct acm *) urb->context;

        if (!ACM_READY(acm))
                return;
        if (urb->status)
                dbg("nonzero write bulk status received: %d", urb->status);
        queue_task(&acm->tqueue, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
static void on_timer_2(void *arg)
{
        struct r3964_info *pInfo = (struct r3964_info *) arg;

        if (pInfo->count_down) {
                if (!--pInfo->count_down)
                        on_timeout(pInfo);
        }
        queue_task(&pInfo->bh_1, &tq_timer);
}
/*
 * Queue a data message to be sent to the card when possible.
 *
 * channel: the channel of the board
 * skb: the sk_buff containing the message and the data.  This parameter
 *      can be NULL if we just want to trigger the send of queued
 *      messages.
 */
void tpam_enqueue_data(tpam_channel *channel, struct sk_buff *skb)
{
        dprintk("TurboPAM(tpam_enqueue_data): card=%d, channel=%d\n",
                channel->card->id, channel->num);

        /* if present, queue the sk_buff on the channel's send queue */
        if (skb)
                skb_queue_tail(&channel->sendq, skb);

        /* queue the channel's send task struct for immediate treatment */
        queue_task(&channel->card->send_tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
static void manufacturing_thread(task __xdata *t)
{
        switch (manufacturing_state) {
        case 0:
                check_manufacturing_crc_16();
                P2DIR |= 1 << 1;
                P2INP |= 1 << 1;
                P2_1 = 0;
                // fall thru
        case 1:
                manufacturing_state = 2;        // frame 00110101
                P2_1 = 0;
                break;
        case 2:
                manufacturing_state = 3;        // frame 00110101
                P2_1 = 0;
                break;
        case 3:
                manufacturing_state = 4;        // frame 00110101
                P2_1 = 1;
                break;
        case 4:
                manufacturing_state = 5;        // frame 00110101
                P2_1 = 1;
                break;
        case 5:
                manufacturing_state = 6;        // frame 00110101
                P2_1 = 0;
                break;
        case 6:
                manufacturing_state = 7;        // frame 00110101
                P2_1 = 1;
                break;
        case 7:
                manufacturing_state = 8;        // frame 00110101
                P2_1 = 0;
                break;
        case 8:
                manufacturing_state = 9;        // frame 00110101
                P2_1 = 1;
                break;
        case 9:
                manufacturing_state = 10;       // CRC - should be 1
                P2_1 = crc_ok;
                break;
        case 10:
                manufacturing_state = 11;       // no error - should be 1
                if (error_state == 0) {
                        P2_1 = 1;
                } else {
                        P2_1 = 0;
                }
                break;
        case 11:
                manufacturing_state = 1;        // pool busy flag
                P2_1 = pool_busy;
                break;
        default:
                manufacturing_state = 1;
                break;
        }
        queue_task(&manufacturing_task, HZ / 10);
}
void
rb_mtp_thread_open_device (RBMtpThread *thread,
                           LIBMTP_raw_device_t *raw_device,
                           RBMtpOpenCallback callback,
                           gpointer data,
                           GDestroyNotify destroy_data)
{
        RBMtpThreadTask *task = create_task (OPEN_DEVICE);
        task->raw_device = raw_device;
        task->callback = callback;
        task->user_data = data;
        task->destroy_data = destroy_data;
        queue_task (thread, task);
}
static void
ctc_tty_flush_chars(struct tty_struct *tty)
{
        ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;

        if (ctc_tty_shuttingdown)
                return;
        if (ctc_tty_paranoia_check(info, tty->device, "ctc_tty_flush_chars"))
                return;
        if (tty->stopped || tty->hw_stopped || !skb_queue_len(&info->tx_queue))
                return;
        queue_task(&info->tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
/*=======================================================================*/
static void
aau_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        iop310_aau_t *aau = (iop310_aau_t *) dev_id;
        u32 int_status = 0;
        u32 status = 0;
        u32 thresh;

        /* get FIQ1 status */
        int_status = *(IOP310_FIQ1ISR);

        DPRINTK("IRQ: irq=%d status=%#x\n", irq, status);

        /* this is not our interrupt */
        if (!(int_status & AAU_INT_MASK))
                return;

        /* get accelerator status */
        status = *(IOP310_AAUASR);

        /* get threshold */
        thresh = atomic_read(&aau->irq_thresh);

        /* process while we have INT */
        while ((int_status & AAU_INT_MASK) && thresh--) {
                /* clear ASR */
                *(IOP310_AAUASR) &= AAU_ASR_MASK;

                /* flush all with err condition */
                if (status & AAU_ASR_DONE_MASK)
                        aau_process(aau);

                /* read accelerator status */
                status = *(IOP310_AAUASR);

                /* get interrupt status */
                int_status = *(IOP310_FIQ1ISR);
        }

        /* schedule bottom half */
        aau->aau_task.data = (void *) aau;
        /* task goes to the immediate task queue */
        queue_task(&aau->aau_task, &tq_immediate);
        /* mark IMMEDIATE BH for execution */
        mark_bh(IMMEDIATE_BH);
}
int jiq_read_immed(char *buf, char **start, off_t offset,
                   int len, int *eof, void *data)
{
        jiq_data.len = 0;               /* nothing printed, yet */
        jiq_data.buf = buf;             /* print in this place */
        jiq_data.jiffies = jiffies;     /* initial time */
        jiq_data.queue = &tq_immediate; /* re-register yourself here */

        queue_task(&jiq_task, &tq_immediate);   /* ready to run */
        mark_bh(IMMEDIATE_BH);
        interruptible_sleep_on(&jiq_wait);      /* sleep till completion */

        *eof = 1;
        return jiq_data.len;
}
static void ipw_write_bulk_callback(struct urb *urb)
{
        struct usb_serial_port *port = urb->context;
        struct ipw_private *priv = usb_get_serial_port_data(port);

        dbg("%s", __FUNCTION__);

        priv->write_urb_busy = 0;
        if (urb->status)
                dbg("%s - nonzero write bulk status received: %d",
                    __FUNCTION__, urb->status);

        queue_task(&port->tqueue, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
}
void short_bh_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        /* cast to stop 'volatile' warning */
        do_gettimeofday((struct timeval *) tv_head);
        tv_head++;
        if (tv_head == (tv_data + NR_TIMEVAL))
                tv_head = tv_data;      /* wrap */

        /* Queue the bh. Don't care for multiple enqueueing */
        queue_task(&short_task, &tq_immediate);
        mark_bh(IMMEDIATE_BH);

        short_bh_count++;       /* record that an interrupt arrived */
}
void
rb_mtp_thread_download_track (RBMtpThread *thread,
                              uint32_t track_id,
                              const char *filename,
                              RBMtpDownloadCallback func,
                              gpointer data,
                              GDestroyNotify destroy_data)
{
        RBMtpThreadTask *task = create_task (DOWNLOAD_TRACK);
        task->track_id = track_id;
        task->filename = g_strdup (filename);
        task->callback = func;
        task->user_data = data;
        task->destroy_data = destroy_data;
        queue_task (thread, task);
}
void avmb1_handle_capimsg(avmb1_card *card, __u16 appl, struct sk_buff *skb)
{
        if (card->cardstate != CARD_RUNNING) {
                printk(KERN_INFO "b1capi: controller %d not active, got: %s",
                       card->cnr, capi_message2str(skb->data));
                goto error;
        }
        skb_queue_tail(&recv_queue, skb);
        queue_task(&tq_recv_notify, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
        return;

 error:
        kfree_skb(skb);
}