static int startup(struct tty_struct *tty, struct serial_state *info)
{
	struct tty_port *port = &info->tport;
	unsigned long flags;
	int retval = 0;
	unsigned long page;

	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	local_irq_save(flags);

	if (tty_port_initialized(port)) {
		free_page(page);
		goto errout;
	}

	if (info->xmit.buf)
		free_page(page);
	else
		info->xmit.buf = (unsigned char *) page;

#ifdef SERIAL_DEBUG_OPEN
	printk("starting up ttys%d ...", info->line);
#endif

	/* Clear anything in the input buffer */
	custom.intreq = IF_RBF;
	mb();

	retval = request_irq(IRQ_AMIGA_VERTB, ser_vbl_int, 0,
			     "serial status", info);
	if (retval) {
		if (serial_isroot()) {
			set_bit(TTY_IO_ERROR, &tty->flags);
			retval = 0;
		}
		goto errout;
	}

	/* enable both Rx and Tx interrupts */
	custom.intena = IF_SETCLR | IF_RBF | IF_TBE;
	mb();
	info->IER = UART_IER_MSI;

	/* remember current state of the DCD and CTS bits */
	current_ctl_bits = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);

	info->MCR = 0;
	if (C_BAUD(tty))
		info->MCR = SER_DTR | SER_RTS;
	rtsdtr_ctrl(info->MCR);

	clear_bit(TTY_IO_ERROR, &tty->flags);
	info->xmit.head = info->xmit.tail = 0;

	/*
	 * and set the speed of the serial port
	 */
	change_speed(tty, info, NULL);

	tty_port_set_initialized(port, 1);
	local_irq_restore(flags);
	return 0;

errout:
	local_irq_restore(flags);
	return retval;
}
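/*
 * Worth noting in startup() above: get_zeroed_page() may sleep, so the
 * allocation happens before local_irq_save(), and the page is simply
 * freed again if another opener already populated info->xmit.buf.  A
 * minimal, self-contained sketch of that allocate-unlocked/free-on-race
 * pattern (all names here are hypothetical, not part of the driver):
 */
struct example_state {			/* hypothetical */
	unsigned char *buf;
};

static int example_alloc_once(struct example_state *st)
{
	unsigned long flags;
	unsigned long page;

	page = get_zeroed_page(GFP_KERNEL);	/* may sleep: do it unlocked */
	if (!page)
		return -ENOMEM;

	local_irq_save(flags);
	if (st->buf)
		free_page(page);	/* lost the race; keep the existing buffer */
	else
		st->buf = (unsigned char *) page;
	local_irq_restore(flags);

	return 0;
}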
int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct power_supply *psy = dev_get_drvdata(dev);
	int ret = 0, j;
	char *prop_buf;
	char *attrname;

	// dev_dbg(dev, "uevent\n");

	if (!psy || !psy->dev) {
		dev_dbg(dev, "No power supply yet\n");
		return ret;
	}

	// dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->name);

	ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->name);
	if (ret)
		return ret;

	prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
	if (!prop_buf)
		return -ENOMEM;

	for (j = 0; j < psy->num_properties; j++) {
		struct device_attribute *attr;
		char *line;

		attr = &power_supply_attrs[psy->properties[j]];

		ret = power_supply_show_property(dev, attr, prop_buf);
		if (ret == -ENODEV) {
			/* When a battery is absent, we expect -ENODEV. Don't
			 * abort; send the uevent with at least the PRESENT=0
			 * property */
			ret = 0;
			continue;
		}

		if (ret < 0)
			goto out;

		line = strchr(prop_buf, '\n');
		if (line)
			*line = 0;

		attrname = kstruprdup(attr->attr.name, GFP_KERNEL);
		if (!attrname) {
			ret = -ENOMEM;
			goto out;
		}

		// dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);

		ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s",
				     attrname, prop_buf);
		kfree(attrname);
		if (ret)
			goto out;
	}

out:
	free_page((unsigned long)prop_buf);
	return ret;
}
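/*
 * kstruprdup() above is a local helper defined elsewhere in this file;
 * a minimal sketch of what it is expected to do (duplicate the string
 * and upper-case it, so "charge_now" becomes "CHARGE_NOW"), assuming
 * kstrdup() from <linux/string.h> and toupper() from <linux/ctype.h>:
 */
static char *kstruprdup(const char *str, gfp_t gfp)
{
	char *ret, *p;

	ret = kstrdup(str, gfp);
	if (!ret)
		return NULL;
	for (p = ret; *p; p++)
		*p = toupper(*p);
	return ret;
}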
static int sel_make_bools(void)
{
	int i, ret;
	ssize_t len;
	struct dentry *dentry = NULL;
	struct dentry *dir = bool_dir;
	struct inode *inode = NULL;
	struct inode_security_struct *isec;
	char **names = NULL, *page;
	int num;
	int *values = NULL;
	u32 sid;

	/* remove any existing files */
	for (i = 0; i < bool_num; i++)
		kfree(bool_pending_names[i]);
	kfree(bool_pending_names);
	kfree(bool_pending_values);
	bool_num = 0;
	bool_pending_names = NULL;
	bool_pending_values = NULL;

	sel_remove_entries(dir);

	ret = -ENOMEM;
	page = (char *)get_zeroed_page(GFP_KERNEL);
	if (!page)
		goto out;

	ret = security_get_bools(&num, &names, &values);
	if (ret)
		goto out;

	for (i = 0; i < num; i++) {
		ret = -ENOMEM;
		dentry = d_alloc_name(dir, names[i]);
		if (!dentry)
			goto out;

		ret = -ENOMEM;
		inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
		if (!inode)
			goto out;

		ret = -ENAMETOOLONG;
		len = snprintf(page, PAGE_SIZE, "/%s/%s",
			       BOOL_DIR_NAME, names[i]);
		if (len >= PAGE_SIZE)
			goto out;

		isec = (struct inode_security_struct *)inode->i_security;
		ret = security_genfs_sid("selinuxfs", page,
					 SECCLASS_FILE, &sid);
		if (ret)
			goto out;
		isec->sid = sid;
		isec->initialized = 1;
		inode->i_fop = &sel_bool_ops;
		inode->i_ino = i|SEL_BOOL_INO_OFFSET;
		d_add(dentry, inode);
	}
	bool_num = num;
	bool_pending_names = names;
	bool_pending_values = values;
	free_page((unsigned long)page);
	return 0;
out:
	free_page((unsigned long)page);
	if (names) {
		for (i = 0; i < num; i++)
			kfree(names[i]);
		kfree(names);
	}
	kfree(values);
	sel_remove_entries(dir);
	return ret;
}
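/*
 * The snprintf() check in sel_make_bools() relies on snprintf()
 * returning the length the output *would* have had, so len >= PAGE_SIZE
 * means the "/booleans/<name>" path was truncated.  A minimal sketch of
 * that idiom, with hypothetical names:
 */
static int example_build_path(char *buf, size_t size,
			      const char *dir, const char *name)
{
	int len = snprintf(buf, size, "/%s/%s", dir, name);

	if (len < 0 || (size_t)len >= size)
		return -ENAMETOOLONG;	/* output was truncated */
	return 0;
}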
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
	struct mmu_context_skas *from_mm = NULL;
	struct mmu_context_skas *to_mm = &mm->context.skas;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	if (skas_needs_stub) {
		stack = get_zeroed_page(GFP_KERNEL);
		if (stack == 0)
			goto out;

		/* This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if (ret)
			goto out_free;

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if (ret)
			goto out_free;

		mm->nr_ptes--;
	}

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context.skas;

	if (proc_mm) {
		ret = new_mm(stack);
		if (ret < 0) {
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	} else {
		if (from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else
			to_mm->id.u.pid = start_userspace(stack);
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk("init_new_context_skas - init_ldt failed, "
		       "errno = %d\n", ret);
		goto out_free;
	}

	return 0;

out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
out:
	return ret;
}
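/*
 * init_new_context_skas() uses the usual kernel goto-unwind style: each
 * failure jumps to a label that releases only what was allocated before
 * it.  A minimal sketch of the same shape (names here are hypothetical):
 */
static int example_two_step_alloc(unsigned long *a_out, unsigned long *b_out)
{
	unsigned long a, b;
	int ret = -ENOMEM;

	a = get_zeroed_page(GFP_KERNEL);
	if (a == 0)
		goto out;

	b = get_zeroed_page(GFP_KERNEL);
	if (b == 0)
		goto out_free;

	*a_out = a;
	*b_out = b;
	return 0;

out_free:
	free_page(a);	/* undo only the first step */
out:
	return ret;
}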
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
		kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
		dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			       netdev->mtu + IBMVETH_BUFF_OH,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}