static dev_link_t *ide_attach(void) { ide_info_t *info; dev_link_t *link; client_reg_t client_reg; int i, ret; DEBUG(0, "ide_attach()\n"); /* Create new ide device */ info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return NULL; memset(info, 0, sizeof(*info)); link = &info->link; link->priv = info; INIT_TQUEUE(&info->rel_task, ide_release, link); link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; link->io.IOAddrLines = 3; link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID; if (irq_list[0] == -1) link->irq.IRQInfo2 = irq_mask; else for (i = 0; i < 4; i++) link->irq.IRQInfo2 |= 1 << irq_list[i]; link->conf.Attributes = CONF_ENABLE_IRQ; link->conf.Vcc = 50; link->conf.IntType = INT_MEMORY_AND_IO; /* Register with Card Services */ link->next = dev_list; dev_list = link; client_reg.dev_info = &dev_info; client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE; client_reg.EventMask = CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL | CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET | CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME; client_reg.event_handler = &ide_event; client_reg.Version = 0x0210; client_reg.event_callback_args.client_data = link; ret = CardServices(RegisterClient, &link->handle, &client_reg); if (ret != CS_SUCCESS) { cs_error(link->handle, RegisterClient, ret); ide_detach(link); return NULL; } return link; } /* ide_attach */
/*
 * acpi_os_queue_for_execution() - defer an ACPI callback to process context.
 *
 * Packages (function, context) into a heap-allocated DPC and schedules it
 * on the kernel task queue via acpi_os_execute_deferred(), which is
 * responsible for freeing the DPC.  The tq_struct is carved out of the
 * same allocation as the DPC, directly after it, so one kfree() releases
 * both; a per-call tq_struct is required because several tasks with
 * different parameters may be pending at once.
 *
 * Returns AE_OK on success, AE_BAD_PARAMETER for a NULL function,
 * AE_NO_MEMORY if allocation fails, AE_ERROR if scheduling fails
 * (in which case the DPC is freed here).
 */
acpi_status
acpi_os_queue_for_execution(
	u32			priority,
	OSD_EXECUTION_CALLBACK	function,
	void			*context)
{
	struct acpi_os_dpc	*dpc;
	struct tq_struct	*tq;
	acpi_status		result = AE_OK;

	ACPI_FUNCTION_TRACE ("os_queue_for_execution");

	ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n", function, context));

	if (!function)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	/* One allocation covers the DPC plus its trailing tq_struct. */
	dpc = kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct tq_struct), GFP_ATOMIC);
	if (!dpc)
		return_ACPI_STATUS (AE_NO_MEMORY);

	dpc->function = function;
	dpc->context = context;

	/* The tq_struct lives immediately after the DPC in the same block. */
	tq = (struct tq_struct *)(dpc + 1);
	INIT_TQUEUE(tq, acpi_os_execute_deferred, (void *)dpc);

	if (!schedule_task(tq)) {
		ACPI_DEBUG_PRINT ((ACPI_DB_ERROR, "Call to schedule_task() failed.\n"));
		/* Callee will never run, so reclaim the DPC ourselves. */
		kfree(dpc);
		result = AE_ERROR;
	}

	return_ACPI_STATUS (result);
}
static int cdata_open(struct inode *inode, struct file *filp) { struct cdata_t *cdata; cdata = (struct cdata_t *)kmalloc(sizeof(struct cdata_t), GFP_KERNEL); cdata->index = 0; init_waitqueue_head(&cdata->wait); cdata->iomem = ioremap(0x33f00000, LCD_SIZE); #if 0 init_timer(&cdata->timer); #endif INIT_TQUEUE(&cdata->tq, flush_lcd, (void *)cdata); //add for task queue cdata->offset = 0; filp->private_data = (void *)cdata; return 0; }
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) { struct jffs2_sb_info *c; struct inode *root_i; int ret; c = JFFS2_SB_INFO(sb); c->sector_size = c->mtd->erasesize; c->flash_size = c->mtd->size; #if 0 if (c->sector_size < 0x10000) { printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using 64KiB instead\n", c->sector_size / 1024); c->sector_size = 0x10000; } #endif if (c->flash_size < 5*c->sector_size) { printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); return -EINVAL; } c->cleanmarker_size = sizeof(struct jffs2_unknown_node); /* Jörn -- stick alignment for weird 8-byte-page flash here */ if (jffs2_cleanmarker_oob(c)) { /* Cleanmarker is out-of-band, so inline size zero */ c->cleanmarker_size = 0; } if (c->mtd->type == MTD_NANDFLASH) { /* Initialise write buffer */ c->wbuf_pagesize = c->mtd->oobblock; c->wbuf_ofs = 0xFFFFFFFF; c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; /* Initialize process for timed wbuf flush */ INIT_TQUEUE(&c->wbuf_task,(void*) jffs2_wbuf_process, (void *)c); /* Initialize timer for timed wbuf flush */ init_timer(&c->wbuf_timer); c->wbuf_timer.function = jffs2_wbuf_timeout; c->wbuf_timer.data = (unsigned long) c; } c->inocache_list = kmalloc(INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *), GFP_KERNEL); if (!c->inocache_list) { ret = -ENOMEM; goto out_wbuf; } memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *)); if ((ret = jffs2_do_mount_fs(c))) goto out_inohash; ret = -EINVAL; D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); root_i = iget(sb, 1); if (is_bad_inode(root_i)) { D1(printk(KERN_WARNING "get root inode failed\n")); goto out_nodes; } D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); sb->s_root = d_alloc_root(root_i); if (!sb->s_root) goto out_root_i; #if LINUX_VERSION_CODE >= 0x20403 sb->s_maxbytes = 0xFFFFFFFF; #endif sb->s_blocksize = PAGE_CACHE_SIZE; 
sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = JFFS2_SUPER_MAGIC; if (!(sb->s_flags & MS_RDONLY)) jffs2_start_garbage_collect_thread(c); return 0; out_root_i: iput(root_i); out_nodes: jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); kfree(c->blocks); out_inohash: kfree(c->inocache_list); out_wbuf: if (c->wbuf) kfree(c->wbuf); return ret; }