static void htifbd_transfer(struct htifbd_dev *dev, unsigned long sector, unsigned long nsect, char *buf, int direction) { /* HTIF disk address packet */ volatile struct htifbd_dap { unsigned long address; unsigned long offset; /* offset in bytes */ unsigned long length; /* length in bytes */ unsigned long tag; } req; unsigned long offset, length; unsigned long htif_cmd; offset = (sector << SECTOR_SIZE_SHIFT); length = (nsect << SECTOR_SIZE_SHIFT); if ((offset + length) > dev->size) { pr_notice(DRIVER_NAME "out-of-bounds access to %s with" "offset=%lx length=%lx\n", dev->gd->disk_name, offset, length); return; } req.address = (unsigned long)__pa(buf); req.offset = offset; req.length = length; req.tag = 0; if (direction == READ) { htif_cmd = HTIF_CMD_READ; } else if (direction == WRITE) { htif_cmd = HTIF_CMD_WRITE; } else { return; } mb(); htif_tohost(dev->dev->minor, htif_cmd, __pa(&req)); htif_fromhost(); mb(); }
/*
 * Build the SMP configuration from the Jailhouse setup data instead of
 * legacy MP tables: register the local APIC and all configured CPUs, and,
 * if present, the standard I/O APIC with 1:1 mappings for the legacy
 * UART IRQs.
 */
static void __init jailhouse_get_smp_config(unsigned int early)
{
	struct ioapic_domain_cfg ioapic_cfg = {
		.type = IOAPIC_DOMAIN_STRICT,
		.ops = &mp_ioapic_irqdomain_ops,
	};
	struct mpc_intsrc mp_irq = {
		.type = MP_INTSRC,
		.irqtype = mp_INT,
		.irqflag = MP_IRQPOL_ACTIVE_HIGH | MP_IRQTRIG_EDGE,
	};
	unsigned int cpu;

	jailhouse_x2apic_init();

	/* LAPIC at the architectural default address. */
	register_lapic_address(0xfee00000);

	for (cpu = 0; cpu < setup_data.num_cpus; cpu++) {
		generic_processor_info(setup_data.cpu_ids[cpu],
				       boot_cpu_apic_version);
	}

	smp_found_config = 1;

	if (setup_data.standard_ioapic) {
		mp_register_ioapic(0, 0xfec00000, gsi_top, &ioapic_cfg);

		/* Register 1:1 mapping for legacy UART IRQs 3 and 4 */
		mp_irq.srcbusirq = mp_irq.dstirq = 3;
		mp_save_irq(&mp_irq);

		mp_irq.srcbusirq = mp_irq.dstirq = 4;
		mp_save_irq(&mp_irq);
	}
}

/* Jailhouse cells cannot restart themselves; report that and halt. */
static void jailhouse_no_restart(void)
{
	pr_notice("Jailhouse: Restart not supported, halting\n");
	machine_halt();
}
int disable_secondary_clusters_pwr(void) { int err = 0; if(g_l2c_share_info.share_cluster_num == 1) { pr_notice("L2$ share cluster num is only 1, no needs to disable other cluster's pwr.\n"); } else if(g_l2c_share_info.share_cluster_num == 2) { spm_mtcmos_ctrl_cpusys1(STA_POWER_DOWN, 1); } //else if(TBD...) else { pr_err("[ERROR] Inllegal L2$ share_cluster_num!\n"); err = -1; } return err; }
/*
 * sysfs store: parse an L2 borrow option index from buf and apply it via
 * switch_L2().  Always consumes the whole write (returns count).
 */
static ssize_t cur_l2c_store(struct device_driver *driver, const char *buf,
			     size_t count)
{
	char *cursor = (char *)buf;
	int opt;
	int err;

	opt = simple_strtoul(cursor, &cursor, 10);
	if (opt >= BORROW_NONE) {
		pr_err("wrong option %d\n", opt);
		return count;
	}

	pr_notice("config L2 option: %s\n", log[opt]);

	err = switch_L2(opt);
	if (err < 0)
		pr_err("Config L2 error ret:%d by %s\n", err, log[opt]);

	return count;
}
/*
 * Set up the vmalloc-based virtual area that holds the linear p2m
 * (pseudo-physical to machine) list and rebuild the list into it.
 * NOTE(review): assumes xen_max_p2m_pfn was set by earlier p2m init —
 * confirm against the caller.
 */
void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;

	vm.flags = VM_ALLOC;
	/* Round up so the area covers whole mid-level page-table spans. */
	vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	/* The aligned area may hold more entries than originally requested. */
	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();

	m2p_override_init();
}
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	/* Wait up to 5 seconds for the dying CPU to signal completion. */
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}
/*
 * Set up the vmalloc-based virtual area that holds the linear p2m list
 * and rebuild the list into it.  The area is sized to the larger of the
 * current xen_max_p2m_pfn and the configured P2M_LIMIT.
 */
void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;
	unsigned long p2m_limit;

	/* P2M_LIMIT is in GiB; convert to a page-frame count. */
	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
	vm.flags = VM_ALLOC;
	/* Round up so the area covers whole mid-level page-table spans. */
	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	/* The aligned area may hold more entries than originally requested. */
	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();
}
/* module_i2c_driver(wacom_i2c_driver); */
/*
 * Manual module init instead of module_i2c_driver() so registration can
 * be skipped entirely in LPM (power-off charging) mode.
 */
static int __init wacom_i2c_init(void)
{
	int ret = 0;

#ifdef CONFIG_SAMSUNG_LPM_MODE
	if (poweroff_charging) {
		pr_notice("%s : LPM Charging Mode!!\n", __func__);
		return 0;
	}
#endif

	ret = i2c_add_driver(&wacom_i2c_driver);
	if (ret)
		printk(KERN_ERR "[E-PEN] fail to i2c_add_driver\n");

	return ret;
}
/* Delegate llseek to the handler registered for the session's state. */
loff_t dispatch_llseek(struct file *filp, loff_t off, int whence)
{
	struct phys_mem_session *session = filp->private_data;
	loff_t (*handler)(struct file *, loff_t, int);

	if (session->status.state >= SESSION_NUM_STATES) {
		pr_err("Seeking with an invalid session state of %i!\n",
		       session->status.state);
		return -EIO;
	}

	handler = fops_by_session_state[session->status.state].llseek;
	if (!handler) {
		pr_notice("Session %llu: llseek not supported in state %i\n",
			  session->session_id, session->status.state);
		return -EIO;
	}

	return handler(filp, off, whence);
}
/* Handle a hotkey ACPI notification (event 0x80). */
static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
	struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
	u32 hci_result, value;
	int retries = 3;
	int scancode;

	if (event != 0x80)
		return;

	if (dev->info_supported) {
		/* Preferred path: the INFO method reports the scancode. */
		scancode = toshiba_acpi_query_hotkey(dev);
		if (scancode < 0)
			pr_err("Failed to query hotkey event\n");
		else if (scancode != 0)
			toshiba_acpi_report_hotkey(dev, scancode);
	} else if (dev->system_event_supported) {
		/* Legacy path: drain the HCI system-event queue. */
		do {
			hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
			switch (hci_result) {
			case HCI_SUCCESS:
				toshiba_acpi_report_hotkey(dev, (int)value);
				break;
			case HCI_NOT_SUPPORTED:
				/*
				 * Re-enable the hotkey mechanism, then fall
				 * through to consume a retry.
				 */
				hci_write1(dev, HCI_SYSTEM_EVENT, 1,
					   &hci_result);
				pr_notice("Re-enabled hotkeys\n");
				/* fall through */
			default:
				retries--;
				break;
			}
		} while (retries && hci_result != HCI_EMPTY);
	}
}
/*
 * Board init for the LaCie Ed Mini v2: bring up Orion SoC peripherals,
 * register NOR flash / LEDs / buttons, and hook up the RTC interrupt.
 * orion5x_init() must run first.
 */
static void __init edmini_v2_init(void)
{
	/*
	 * Setup basic Orion functions. Need to be called early.
	 */
	orion5x_init();
	orion5x_mpp_conf(edminiv2_mpp_modes);

	/*
	 * Configure peripherals.
	 */
	orion5x_ehci0_init();
	orion5x_eth_init(&edmini_v2_eth_data);
	orion5x_i2c_init();
	orion5x_sata_init(&edmini_v2_sata_data);
	orion5x_uart0_init();

	orion5x_setup_dev_boot_win(EDMINI_V2_NOR_BOOT_BASE,
				EDMINI_V2_NOR_BOOT_SIZE);
	platform_device_register(&edmini_v2_nor_flash);
	platform_device_register(&edmini_v2_gpio_leds);
	platform_device_register(&edmini_v2_gpio_buttons);

	pr_notice("edmini_v2: USB device port, flash write and power-off "
		"are not yet supported.\n");

	/* Get RTC IRQ and register the chip */
	if (gpio_request(EDMINIV2_RTC_GPIO, "rtc") == 0) {
		if (gpio_direction_input(EDMINIV2_RTC_GPIO) == 0)
			edmini_v2_i2c_rtc.irq = gpio_to_irq(EDMINIV2_RTC_GPIO);
		else
			gpio_free(EDMINIV2_RTC_GPIO);
	}

	if (edmini_v2_i2c_rtc.irq == 0)
		pr_warn("edmini_v2: failed to get RTC IRQ\n"); /* pr_warning is deprecated */

	i2c_register_board_info(0, &edmini_v2_i2c_rtc, 1);
}
/* Read the 4 byte, page aligned 8390 specific header. */
static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
			     int ring_page)
{
	int boguscount;
	void __iomem *base = ei_status.mem;
	unsigned short word;

	if (base) {	/* Use the shared memory. */
		void __iomem *hdr_start = base +
			((ring_page - EL2_MB1_START_PG)<<8);
		memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
		hdr->count = le16_to_cpu(hdr->count);
		return;
	}

	/*
	 * No shared memory, use programmed I/O.
	 */
	word = (unsigned short)ring_page;
	outb(word&0xFF, E33G_DMAAH);
	outb(word>>8, E33G_DMAAL);

	outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) |
	       ECNTRL_INPUT | ECNTRL_START, E33G_CNTRL);
	/* Poll for FIFO-ready; give up after a bounded number of tries. */
	boguscount = 0x1000;
	while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0) {
		if (!boguscount--) {
			pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n",
				  dev->name);
			/* Hand back a zeroed header and reset the chip. */
			memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
			el2_reset_8390(dev);
			goto blocked;
		}
	}
	insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
blocked:	;
	/* Restore the control register to its non-DMA state. */
	outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI,
	       E33G_CNTRL);
}
/*
 * CMCI storm handling: adjust the MCE poll interval based on the per-cpu
 * storm state, switching back to interrupt mode once all CPUs have left
 * the storm.
 */
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
	int r;

	if (interval < CMCI_POLL_INTERVAL)
		return interval;

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:

		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the
		 * timer interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		r = atomic_sub_return(1, &cmci_storm_on_cpus);
		if (r == 0)
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all cpus to go back to SUBSIDED
		 * state. When that happens we switch back to
		 * interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/*
		 * We have shiny weather. Let the poll do whatever it
		 * thinks.
		 */
		return interval;
	}
}
/* Dump the VSD device's hardware registers to the log (no-op unless LOCAL_DEBUG). */
static void print_vsd_dev_hw_regs(vsd_dev_t *vsd_dev)
{
	__typeof__(vsd_dev->hwregs) regs;

	if (!LOCAL_DEBUG)
		return;

	regs = vsd_dev->hwregs;
	pr_notice(LOG_TAG "VSD dev hwregs: \n"
		  "CMD: %x \n"
		  "RESULT: %x \n"
		  "TASKLET_VADDR: %llx \n"
		  "dma_paddr: %llx \n"
		  "dma_size: %llx \n"
		  "dev_offset: %llx \n"
		  "dev_size: %llx \n",
		  regs->cmd,
		  regs->result,
		  regs->tasklet_vaddr,
		  regs->dma_paddr,
		  regs->dma_size,
		  regs->dev_offset,
		  regs->dev_size);
}
static int evdi_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long vma_start = vma->vm_start; unsigned long vma_size = vma->vm_end - vma->vm_start; unsigned long vma_page_cnt = vma_size >> PAGE_SHIFT; unsigned long smem_page_cnt = info->fix.smem_len >> PAGE_SHIFT; unsigned long smem_offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long smem_pos; if (smem_page_cnt < vma->vm_pgoff) return -EINVAL; if (vma_page_cnt > smem_page_cnt - vma->vm_pgoff) return -EINVAL; smem_pos = (unsigned long)info->fix.smem_start + smem_offset; pr_notice("mmap() framebuffer addr:%lu size:%lu\n", smem_pos, vma_size); while (vma_size > 0) { unsigned long page = vmalloc_to_pfn((void *)smem_pos); if (remap_pfn_range(vma, vma_start, page, PAGE_SIZE, PAGE_SHARED)) return -EAGAIN; vma_start += PAGE_SIZE; smem_pos += PAGE_SIZE; if (vma_size > PAGE_SIZE) vma_size -= PAGE_SIZE; else vma_size = 0; } return 0; }
/* Voltage debugfs support */
/*
 * debugfs getter: read the current vsel from the voltage processor and
 * convert it to microvolts via the PMIC callback.  Returns 0 and stores
 * the voltage in *val, or -EINVAL on bad parameters / missing callback.
 */
static int vp_volt_debug_get(void *data, u64 *val)
{
	struct omap_vdd_info *vdd = (struct omap_vdd_info *) data;
	u8 vsel;

	if (!vdd) {
		pr_warn("Wrong parameter passed\n"); /* fixed "paramater" typo */
		return -EINVAL;
	}

	vsel = vdd->read_reg(vdd->vp_reg.prm_mod, vdd->vp_offs.voltage);
	pr_notice("curr_vsel = %x\n", vsel);

	if (!vdd->pmic_info->vsel_to_uv) {
		/*
		 * Fixed message: the concatenated literals lacked a space
		 * ("voltagein uV") and "registerd" was misspelled.
		 */
		pr_warn("PMIC function to convert vsel to voltage "
			"in uV not registered\n");
		return -EINVAL;
	}

	*val = vdd->pmic_info->vsel_to_uv(vsel);
	return 0;
}
/* Delegate a read to the handler registered for the session's state. */
ssize_t dispatch_read(struct file *filp, char __user *buf, size_t count,
		      loff_t *f_pos)
{
	struct phys_mem_session *session = filp->private_data;
	ssize_t (*handler)(struct file *, char __user *, size_t, loff_t *);

	if (session->status.state >= SESSION_NUM_STATES) {
		pr_err("Reading with an invalid session state of %i!\n",
		       session->status.state);
		return -EIO;
	}

	handler = fops_by_session_state[session->status.state].read;
	if (!handler) {
		pr_notice("Session %llu: read not supported in state %i\n",
			  session->session_id, session->status.state);
		return -EIO;
	}

	return handler(filp, buf, count, f_pos);
}
/*
 * One-time C-state sanity checks: apply DMI quirks, clamp max_cstate,
 * and notify the BIOS (via the FADT _CST SMI command) that the OS
 * supports _CST.
 */
static inline void acpi_processor_cstate_first_run_checks(void)
{
	acpi_status status;
	static int first_run;	/* guards against repeat invocations */

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("ACPI: processor limited to max C-state %d\n",
			  max_cstate);
	first_run++;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
					    acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status))
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
	}
}
static int e_compass_fetch_sysconfig_para(enum input_sensor_type *e_compass_type) { int ret = -1; script_item_u val; script_item_value_type_e type; struct sensor_config_info *data = container_of(e_compass_type, struct sensor_config_info, input_type); type = script_get_item("compass_para", "compass_used", &val); if (SCIRPT_ITEM_VALUE_TYPE_INT != type) { pr_err("%s: type err device_used = %d. \n", __func__, val.val); goto script_get_err; } data->sensor_used = val.val; if (1 == data->sensor_used) { type = script_get_item("compass_para", "compass_twi_id", &val); if(SCIRPT_ITEM_VALUE_TYPE_INT != type){ pr_err("%s: type err twi_id = %d. \n", __func__, val.val); goto script_get_err; } data->twi_id = val.val; ret = 0; } else { pr_err("%s: compass_unused. \n", __func__); ret = -1; } return ret; script_get_err: pr_notice("=========script_get_err============\n"); return ret; }
/**
 * ir_fetch_sysconfig_para - get config info from sysconfig.fex file.
 * (Fixed kernel-doc: it previously named the wrong function,
 * "gyr_fetch_sysconfig_para".)
 * return value:
 *	= 0; success;
 *	< 0; err
 */
static int ir_fetch_sysconfig_para(enum input_sensor_type *ir_type)
{
	int ret = -1;
	script_item_u val;
	script_item_value_type_e type;
	struct ir_config_info *data = container_of(ir_type,
					struct ir_config_info, input_type);

	type = script_get_item("s_ir0", "ir_used", &val);
	if (SCIRPT_ITEM_VALUE_TYPE_INT != type) {
		pr_err("%s: type err device_used = %d. \n", __func__, val.val);
		goto script_get_err;
	}
	data->ir_used = val.val;

	if (1 == data->ir_used) {
		/* Device is enabled: the RX pin must be a GPIO item. */
		type = script_get_item("s_ir0", "ir_rx", &val);
		if (SCIRPT_ITEM_VALUE_TYPE_PIO != type) {
			pr_err("%s: IR gpio type err! \n", __func__);
			goto script_get_err;
		}
		data->ir_gpio = val.gpio;
		ret = 0;
	} else {
		pr_err("%s: ir_unused. \n", __func__);
		ret = -1;
	}

	return ret;

script_get_err:
	pr_notice("=========script_get_err============\n");
	return ret;
}
static void htifbd_request(struct request_queue *q) { struct request *req; req = blk_fetch_request(q); while (req != NULL) { struct htifbd_dev *dev; dev = req->rq_disk->private_data; if (req->cmd_type != REQ_TYPE_FS) { pr_notice(DRIVER_NAME ": ignoring non-fs request for %s\n", req->rq_disk->disk_name); __blk_end_request_all(req, -EIO); continue; } htifbd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req)); if (!__blk_end_request_cur(req, 0)) { req = blk_fetch_request(q); } } }
/**
 * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree
 * @dn: a pointer to a struct device node corresponding to a cpu node
 * @cpu: the cpu identifier
 *
 * Get the method name defined in the 'enable-method' property, retrieve the
 * associated cpuidle_ops and do a struct copy. This copy is needed because all
 * cpuidle_ops are tagged __initdata and will be unloaded after the init
 * process.
 *
 * Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
 * no cpuidle_ops is registered for the 'enable-method'.
 */
static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
{
	const char *enable_method;
	struct cpuidle_ops *ops;

	enable_method = of_get_property(dn, "enable-method", NULL);
	if (!enable_method)
		return -ENOENT;

	ops = arm_cpuidle_get_ops(enable_method);
	if (!ops) {
		pr_warn("%s: unsupported enable-method property: %s\n",
			dn->full_name, enable_method);
		return -EOPNOTSUPP;
	}

	/* Copy out of __initdata storage so the ops outlive init. */
	cpuidle_ops[cpu] = *ops; /* structure copy */

	pr_notice("cpuidle: enable-method property '%s'"
		  " found operations\n", enable_method);

	return 0;
}
/* net_device open: refuse to come up while firmware download is pending. */
static INT bcm_open(struct net_device *dev)
{
	struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(dev);

	if (!Adapter->fw_download_done) {
		pr_notice(PFX "%s: link up failed (download in progress)\n",
			  dev->name);
		return -EBUSY;
	}

	if (netif_msg_ifup(Adapter))
		pr_info(PFX "%s: enabling interface\n", dev->name);

	if (Adapter->LinkUpStatus) {
		if (netif_msg_link(Adapter))
			pr_info(PFX "%s: link up\n", dev->name);

		netif_carrier_on(Adapter->dev);
		netif_start_queue(Adapter->dev);
	}

	return 0;
}
/*
 * Module init: create the PM workqueue and register the ACPI thermal
 * driver, unless thermal control was disabled via DMI quirk or parameter.
 */
static int __init acpi_thermal_init(void)
{
	dmi_check_system(thermal_dmi_table);

	if (off) {
		pr_notice(PREFIX "thermal control disabled\n");
		return -ENODEV;
	}

	acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm");
	if (!acpi_thermal_pm_queue)
		return -ENODEV;

	if (acpi_bus_register_driver(&acpi_thermal_driver) < 0) {
		/* Undo the workqueue allocation on registration failure. */
		destroy_workqueue(acpi_thermal_pm_queue);
		return -ENODEV;
	}

	return 0;
}
/* Set mode callback functions for thermal zone */
static int exynos4_set_mode(struct thermal_zone_device *thermal,
			    enum thermal_device_mode mode)
{
	if (!th_zone->therm_dev) {
		pr_notice("thermal zone not registered\n");
		return 0;
	}

	/* Polling runs only while the device is enabled. */
	mutex_lock(&th_zone->therm_dev->lock);
	th_zone->therm_dev->polling_delay =
		(mode == THERMAL_DEVICE_ENABLED) ? IDLE_INTERVAL : 0;
	mutex_unlock(&th_zone->therm_dev->lock);

	th_zone->mode = mode;
	thermal_zone_device_update(th_zone->therm_dev);
	pr_info("thermal polling set for duration=%d msec\n",
		th_zone->therm_dev->polling_delay);

	return 0;
}
/*
 * Board init for the LaCie d2 Network: bring up Orion SoC peripherals,
 * register NOR flash, buttons and LEDs, and declare the power-off GPIO.
 * Note the ordering: orion5x_init() runs first, and the SATA power rails
 * are brought up before orion5x_sata_init().
 */
static void __init d2net_init(void)
{
	/*
	 * Setup basic Orion functions. Need to be called early.
	 */
	orion5x_init();
	orion5x_mpp_conf(d2net_mpp_modes);

	/*
	 * Configure peripherals.
	 */
	orion5x_ehci0_init();
	orion5x_eth_init(&d2net_eth_data);
	orion5x_i2c_init();
	orion5x_uart0_init();

	d2net_sata_power_init();
	orion5x_sata_init(&d2net_sata_data);

	/* Map the boot NOR flash window and register the devices. */
	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
				    ORION_MBUS_DEVBUS_BOOT_ATTR,
				    D2NET_NOR_BOOT_BASE,
				    D2NET_NOR_BOOT_SIZE);
	platform_device_register(&d2net_nor_flash);

	platform_device_register(&d2net_gpio_buttons);

	d2net_gpio_leds_init();

	pr_notice("d2net: Flash write are not yet supported.\n");

	i2c_register_board_info(0, d2net_i2c_devices,
				ARRAY_SIZE(d2net_i2c_devices));

	orion_gpio_set_valid(D2NET_GPIO_INHIBIT_POWER_OFF, 1);
}
/*
 * "adj" command: step the clock by a floating-point number of seconds.
 * Consumes exactly one argument; returns 1 on success, -2 on usage error.
 */
static int do_adj(clockid_t clkid, int cmdc, char *cmdv[])
{
	double time_arg;
	int64_t nsecs;
	enum parser_result r;

	if (cmdc < 1 || name_is_a_command(cmdv[0])) {
		pr_err("adj: missing required time argument");
		return -2;
	}

	/*
	 * Parse the double time offset argument.  The lower bound is
	 * -DBL_MAX: the previous DBL_MIN is the smallest *positive*
	 * normalized double, which wrongly rejected negative (backward)
	 * adjustments as out of range.
	 */
	r = get_ranged_double(cmdv[0], &time_arg, -DBL_MAX, DBL_MAX);
	switch (r) {
	case PARSED_OK:
		break;
	case MALFORMED:
		pr_err("adj: '%s' is not a valid double", cmdv[0]);
		return -2;
	case OUT_OF_RANGE:
		pr_err("adj: '%s' is out of range.", cmdv[0]);
		return -2;
	default:
		pr_err("adj: couldn't process '%s'", cmdv[0]);
		return -2;
	}

	nsecs = (int64_t)(NSEC2SEC * time_arg);

	clockadj_init(clkid);
	clockadj_step(clkid, nsecs);

	pr_notice("adjusted clock by %lf seconds", time_arg);

	/* adjustment always consumes one argument */
	return 1;
}
/*
 * Bring up the Speedfax card: claim the (shared) IRQ, initialise the
 * ISAC, and verify that interrupts are actually delivered, retrying up
 * to three times.  Returns 0 on success or a negative errno.
 */
static int __devinit init_card(struct sfax_hw *sf)
{
	int ret, cnt = 3;
	u_long flags;

	ret = request_irq(sf->irq, speedfax_irq, IRQF_SHARED, sf->name, sf);
	if (ret) {
		pr_info("%s: couldn't get interrupt %d\n",
			sf->name, sf->irq);
		return ret;
	}
	while (cnt--) {
		spin_lock_irqsave(&sf->lock, flags);
		ret = sf->isac.init(&sf->isac);
		if (ret) {
			spin_unlock_irqrestore(&sf->lock, flags);
			pr_info("%s: ISAC init failed with %d\n",
				sf->name, ret);
			break;
		}
		enable_hwirq(sf);
		/*
		 * Issue ISAC command 0x41 — presumably provokes an interrupt
		 * for the irqcnt check below; verify against the ISAC docs.
		 */
		WriteISAC_IND(sf, ISAC_CMDR, 0x41);
		spin_unlock_irqrestore(&sf->lock, flags);
		/* Give the card a moment to raise the interrupt. */
		msleep_interruptible(10);
		if (debug & DEBUG_HW)
			pr_notice("%s: IRQ %d count %d\n",
				  sf->name, sf->irq, sf->irqcnt);
		if (!sf->irqcnt) {
			pr_info("%s: IRQ(%d) got no requests during init %d\n",
				sf->name, sf->irq, 3 - cnt);
		} else
			return 0;
	}
	/* No interrupts seen after all retries: release the IRQ and fail. */
	free_irq(sf->irq, sf);
	return -EIO;
}
/*
 * Load an asymmetric (X.509) key blob into keyring[id].
 * Returns 0 on success, -EINVAL for an unknown keyring, or the
 * key_create_or_update() error code.
 */
int __init integrity_add_key(const unsigned int id, const void *data,
			     off_t size, key_perm_t perm)
{
	key_ref_t key;

	if (!keyring[id])
		return -EINVAL;

	key = key_create_or_update(make_key_ref(keyring[id], 1), "asymmetric",
				   NULL, data, size, perm,
				   KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(key)) {
		int rc = PTR_ERR(key);

		pr_err("Problem loading X.509 certificate %d\n", rc);
		return rc;
	}

	pr_notice("Loaded X.509 cert '%s'\n",
		  key_ref_to_ptr(key)->description);
	key_ref_put(key);
	return 0;
}
long dispatch_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct phys_mem_session * session = filp->private_data; long (*fn) (struct file *, unsigned int, unsigned long); #if 0 pr_debug("IOCTL:session state of %i!\n", session->status.state); #endif if (session->status.state >= SESSION_NUM_STATES) { pr_err("IOCTL with an invalid session state of %i!\n", session->status.state); return -EIO; } fn = fops_by_session_state[session->status.state].unlocked_ioctl; if (fn) return fn(filp, cmd, arg); else { pr_notice("Session %llu: ioctl not supported in state %i\n", session->session_id, session->status.state); return -EIO; } }